test/grsecurity-3.1-3.19.1-201503092204.patch
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index a311db8..415b28c 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -642,7 +643,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -665,7 +688,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -693,7 +716,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -724,7 +747,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 176d4fe..17ceefa 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1191,6 +1191,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+
327 hashdist= [KNL,NUMA] Large hashes allocated during boot
328 are distributed across NUMA nodes. Defaults on
329 for 64-bit NUMA, off otherwise.
330@@ -2283,6 +2287,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
331 noexec=on: enable non-executable mappings (default)
332 noexec=off: disable non-executable mappings
333
334+ nopcid [X86-64]
335+ Disable PCID (Process-Context IDentifier) even if it
336+ is supported by the processor.
337+
338 nosmap [X86]
339 Disable SMAP (Supervisor Mode Access Prevention)
340 even if it is supported by processor.
341@@ -2584,6 +2592,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
342 the specified number of seconds. This is to be used if
343 your oopses keep scrolling off the screen.
344
345+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
346+ virtualization environments that don't cope well with the
347+ expand down segment used by UDEREF on X86-32 or the frequent
348+ page table updates on X86-64.
349+
350+ pax_sanitize_slab=
351+ Format: { 0 | 1 | off | fast | full }
352+ Options '0' and '1' are only provided for backward
353+ compatibility, 'off' or 'fast' should be used instead.
354+ 0|off : disable slab object sanitization
355+ 1|fast: enable slab object sanitization excluding
356+ whitelisted slabs (default)
357+ full : sanitize all slabs, even the whitelisted ones
358+
359+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
360+
361+ pax_extra_latent_entropy
362+ Enable a very simple form of latent entropy extraction
363+ from the first 4GB of memory as the bootmem allocator
364+ passes the memory pages to the buddy allocator.
365+
366+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
367+ when the processor supports PCID.
368+
369 pcbit= [HW,ISDN]
370
371 pcd. [PARIDE]
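
The three pax_sanitize_slab modes documented above reduce to one decision on the slab free path: erase the freed object's memory unless the mode (or a per-slab whitelist) says not to, so stale data cannot leak into later allocations or uninitialized reads. A minimal userspace sketch of that decision; the poison value and helper names are illustrative, not the kernel's actual symbols:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    enum sanitize_mode { SANITIZE_OFF, SANITIZE_FAST, SANITIZE_FULL };

    /* Called as an object goes back to the allocator.  'whitelisted'
     * marks slabs exempted for performance; only 'full' touches those. */
    static void sanitize_object(void *obj, size_t size, bool whitelisted,
                                enum sanitize_mode mode)
    {
        if (mode == SANITIZE_OFF)
            return;
        if (mode == SANITIZE_FAST && whitelisted)
            return;
        memset(obj, 0xfe, size);  /* poison pattern; exact value is PaX's choice */
    }
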
372diff --git a/Makefile b/Makefile
373index 688777b..2821d8c 100644
374--- a/Makefile
375+++ b/Makefile
376@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
377 HOSTCC = gcc
378 HOSTCXX = g++
379 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
380-HOSTCXXFLAGS = -O2
381+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
382+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
383+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
384
385 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
386 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
387@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
388 # Rules shared between *config targets and build targets
389
390 # Basic helpers built in scripts/
391-PHONY += scripts_basic
392-scripts_basic:
393+PHONY += scripts_basic gcc-plugins
394+scripts_basic: gcc-plugins
395 $(Q)$(MAKE) $(build)=scripts/basic
396 $(Q)rm -f .tmp_quiet_recordmcount
397
398@@ -622,6 +624,72 @@ endif
399 # Tell gcc to never replace conditional load with a non-conditional one
400 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
401
402+ifndef DISABLE_PAX_PLUGINS
403+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
404+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
405+else
406+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
407+endif
408+ifneq ($(PLUGINCC),)
409+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
410+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
411+endif
412+ifdef CONFIG_PAX_MEMORY_STACKLEAK
413+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
414+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
415+endif
416+ifdef CONFIG_KALLOCSTAT_PLUGIN
417+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
418+endif
419+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
420+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
421+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
422+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
423+endif
424+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
425+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
426+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
427+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
428+endif
429+endif
430+ifdef CONFIG_CHECKER_PLUGIN
431+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
432+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
433+endif
434+endif
435+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
436+ifdef CONFIG_PAX_SIZE_OVERFLOW
437+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
438+endif
439+ifdef CONFIG_PAX_LATENT_ENTROPY
440+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
441+endif
442+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
443+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
444+endif
445+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
446+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
447+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
448+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
449+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
450+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
451+ifeq ($(KBUILD_EXTMOD),)
452+gcc-plugins:
453+ $(Q)$(MAKE) $(build)=tools/gcc
454+else
455+gcc-plugins: ;
456+endif
457+else
458+gcc-plugins:
459+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
460+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
461+else
462+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
463+endif
464+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
465+endif
466+endif
467+
468 ifdef CONFIG_READABLE_ASM
469 # Disable optimizations that make assembler listings hard to read.
470 # reorder blocks reorders the control in the function
471@@ -714,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
472 else
473 KBUILD_CFLAGS += -g
474 endif
475-KBUILD_AFLAGS += -Wa,-gdwarf-2
476+KBUILD_AFLAGS += -Wa,--gdwarf-2
477 endif
478 ifdef CONFIG_DEBUG_INFO_DWARF4
479 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
480@@ -879,7 +947,7 @@ export mod_sign_cmd
481
482
483 ifeq ($(KBUILD_EXTMOD),)
484-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
485+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
486
487 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
488 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
489@@ -926,6 +994,8 @@ endif
490
491 # The actual objects are generated when descending,
492 # make sure no implicit rule kicks in
493+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
494+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
495 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
496
497 # Handle descending into subdirectories listed in $(vmlinux-dirs)
498@@ -935,7 +1005,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
499 # Error messages still appears in the original language
500
501 PHONY += $(vmlinux-dirs)
502-$(vmlinux-dirs): prepare scripts
503+$(vmlinux-dirs): gcc-plugins prepare scripts
504 $(Q)$(MAKE) $(build)=$@
505
506 define filechk_kernel.release
507@@ -978,10 +1048,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
508
509 archprepare: archheaders archscripts prepare1 scripts_basic
510
511+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
512+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
513 prepare0: archprepare FORCE
514 $(Q)$(MAKE) $(build)=.
515
516 # All the preparing..
517+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
518 prepare: prepare0
519
520 # Generate some files
521@@ -1095,6 +1168,8 @@ all: modules
522 # using awk while concatenating to the final file.
523
524 PHONY += modules
525+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
526+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
527 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
528 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
529 @$(kecho) ' Building modules, stage 2.';
530@@ -1110,7 +1185,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
531
532 # Target to prepare building external modules
533 PHONY += modules_prepare
534-modules_prepare: prepare scripts
535+modules_prepare: gcc-plugins prepare scripts
536
537 # Target to install modules
538 PHONY += modules_install
539@@ -1176,7 +1251,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
540 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
541 signing_key.priv signing_key.x509 x509.genkey \
542 extra_certificates signing_key.x509.keyid \
543- signing_key.x509.signer
544+ signing_key.x509.signer \
545+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
546+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
547+ tools/gcc/randomize_layout_seed.h
548
549 # clean - Delete most, but leave enough to build external modules
550 #
551@@ -1215,7 +1293,7 @@ distclean: mrproper
552 @find $(srctree) $(RCS_FIND_IGNORE) \
553 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
554 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
555- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
556+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
557 -type f -print | xargs rm -f
558
559
560@@ -1381,6 +1459,8 @@ PHONY += $(module-dirs) modules
561 $(module-dirs): crmodverdir $(objtree)/Module.symvers
562 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
563
564+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
565+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
566 modules: $(module-dirs)
567 @$(kecho) ' Building modules, stage 2.';
568 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
569@@ -1521,17 +1601,21 @@ else
570 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
571 endif
572
573-%.s: %.c prepare scripts FORCE
574+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
575+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
576+%.s: %.c gcc-plugins prepare scripts FORCE
577 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
578 %.i: %.c prepare scripts FORCE
579 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
580-%.o: %.c prepare scripts FORCE
581+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
582+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
583+%.o: %.c gcc-plugins prepare scripts FORCE
584 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
585 %.lst: %.c prepare scripts FORCE
586 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
587-%.s: %.S prepare scripts FORCE
588+%.s: %.S gcc-plugins prepare scripts FORCE
589 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
590-%.o: %.S prepare scripts FORCE
591+%.o: %.S gcc-plugins prepare scripts FORCE
592 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
593 %.symtypes: %.c prepare scripts FORCE
594 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
595@@ -1543,11 +1627,15 @@ endif
596 $(build)=$(build-dir)
597 # Make sure the latest headers are built for Documentation
598 Documentation/: headers_install
599-%/: prepare scripts FORCE
600+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
601+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
602+%/: gcc-plugins prepare scripts FORCE
603 $(cmd_crmodverdir)
604 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
605 $(build)=$(build-dir)
606-%.ko: prepare scripts FORCE
607+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
608+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
609+%.ko: gcc-plugins prepare scripts FORCE
610 $(cmd_crmodverdir)
611 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
612 $(build)=$(build-dir) $(@:.ko=.o)
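
The block above wires PaX's instrumentation in as ordinary GCC plugins: each CONFIG_* option contributes a -fplugin=.../foo_plugin.so flag to GCC_PLUGINS_CFLAGS, and PLUGINCC is probed up front because plugins must be built with a compiler matching the one compiling the kernel (C++ for gcc 4.8+, hence the $(HOSTCXX) branch of the cc-ifversion test). For orientation, this is the minimal shape of such a plugin, a sketch rather than any of the real ones shipped in tools/gcc/:

    /* build (roughly): g++ -shared -fPIC \
     *   -I`gcc -print-file-name=plugin`/include -o foo_plugin.so foo.c */
    #include "gcc-plugin.h"
    #include "plugin-version.h"

    int plugin_is_GPL_compatible;  /* required, or GCC refuses to load the plugin */

    int plugin_init(struct plugin_name_args *plugin_info,
                    struct plugin_gcc_version *version)
    {
        if (!plugin_default_version_check(version, &gcc_version))
            return 1;  /* built against a different GCC than is running */
        /* the real plugins register passes and attributes here
         * via register_callback() */
        return 0;
    }
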
613diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
614index 8f8eafb..3405f46 100644
615--- a/arch/alpha/include/asm/atomic.h
616+++ b/arch/alpha/include/asm/atomic.h
617@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
618 #define atomic_dec(v) atomic_sub(1,(v))
619 #define atomic64_dec(v) atomic64_sub(1,(v))
620
621+#define atomic64_read_unchecked(v) atomic64_read(v)
622+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
623+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
624+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
625+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
626+#define atomic64_inc_unchecked(v) atomic64_inc(v)
627+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
628+#define atomic64_dec_unchecked(v) atomic64_dec(v)
629+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
630+
631 #endif /* _ALPHA_ATOMIC_H */
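
On alpha the _unchecked names are plain aliases because PAX_REFCOUNT is not implemented for this architecture; where it is implemented, atomic_t arithmetic traps on signed overflow and atomic_unchecked_t is reserved for counters that may legitimately wrap. A runnable userspace analogue of that split, assuming nothing beyond GCC builtins:

    /* checked_inc traps on signed overflow, as PAX_REFCOUNT does via the
     * CPU's overflow flag; unchecked_inc wraps silently and is what the
     * *_unchecked variants exist for (statistics, sequence numbers). */
    static int checked_inc(int *v)
    {
        int out;
        if (__builtin_add_overflow(*v, 1, &out))
            __builtin_trap();      /* the kernel would report and kill instead */
        return *v = out;
    }

    static unsigned int unchecked_inc(unsigned int *v)
    {
        return ++*v;               /* wraparound is intentional and harmless */
    }
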
632diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
633index ad368a9..fbe0f25 100644
634--- a/arch/alpha/include/asm/cache.h
635+++ b/arch/alpha/include/asm/cache.h
636@@ -4,19 +4,19 @@
637 #ifndef __ARCH_ALPHA_CACHE_H
638 #define __ARCH_ALPHA_CACHE_H
639
640+#include <linux/const.h>
641
642 /* Bytes per L1 (data) cache line. */
643 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
644-# define L1_CACHE_BYTES 64
645 # define L1_CACHE_SHIFT 6
646 #else
647 /* Both EV4 and EV5 are write-through, read-allocate,
648 direct-mapped, physical.
649 */
650-# define L1_CACHE_BYTES 32
651 # define L1_CACHE_SHIFT 5
652 #endif
653
654+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
655 #define SMP_CACHE_BYTES L1_CACHE_BYTES
656
657 #endif
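
The _AC() wrapper matters because cache.h is pulled into both C and assembly sources, and a bare 1UL is a syntax error to the assembler. <uapi/linux/const.h> defines it roughly as follows (paraphrased):

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)   X            /* asm sees a bare constant, no UL suffix */
    #else
    #define __AC(X,Y)  (X##Y)
    #define _AC(X,Y)   __AC(X,Y)    /* C sees the pasted token: 1UL */
    #endif

So L1_CACHE_BYTES expands to (1UL << L1_CACHE_SHIFT) in C while staying usable from assembler, and the byte count can no longer drift out of sync with the shift the way two independently maintained constants could.
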
658diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
659index 968d999..d36b2df 100644
660--- a/arch/alpha/include/asm/elf.h
661+++ b/arch/alpha/include/asm/elf.h
662@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
663
664 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
665
666+#ifdef CONFIG_PAX_ASLR
667+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
668+
669+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
670+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
671+#endif
672+
673 /* $0 is set by ld.so to a pointer to a function which might be
674 registered using atexit. This provides a mean for the dynamic
675 linker to call DT_FINI functions for shared libraries that have
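
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are bit counts: how many random bits PaX folds into the mmap and stack bases (28 and 19 here for 64-bit alpha processes). A sketch of how such a delta is typically derived, illustrative rather than PaX's exact code:

    #include <stdint.h>

    /* Mask the RNG output down to len_bits, shift it up to page
     * granularity, and offset the default base by the result. */
    static uintptr_t randomize_base(uintptr_t base, unsigned int len_bits,
                                    unsigned int page_shift, uint64_t rnd)
    {
        uintptr_t delta = (uintptr_t)(rnd & ((1ULL << len_bits) - 1)) << page_shift;
        return base + delta;
    }
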
676diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
677index aab14a0..b4fa3e7 100644
678--- a/arch/alpha/include/asm/pgalloc.h
679+++ b/arch/alpha/include/asm/pgalloc.h
680@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
681 pgd_set(pgd, pmd);
682 }
683
684+static inline void
685+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
686+{
687+ pgd_populate(mm, pgd, pmd);
688+}
689+
690 extern pgd_t *pgd_alloc(struct mm_struct *mm);
691
692 static inline void
693diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
694index d8f9b7e..f6222fa 100644
695--- a/arch/alpha/include/asm/pgtable.h
696+++ b/arch/alpha/include/asm/pgtable.h
697@@ -102,6 +102,17 @@ struct vm_area_struct;
698 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
699 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
700 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
704+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
705+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
706+#else
707+# define PAGE_SHARED_NOEXEC PAGE_SHARED
708+# define PAGE_COPY_NOEXEC PAGE_COPY
709+# define PAGE_READONLY_NOEXEC PAGE_READONLY
710+#endif
711+
712 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
713
714 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
715diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
716index 2fd00b7..cfd5069 100644
717--- a/arch/alpha/kernel/module.c
718+++ b/arch/alpha/kernel/module.c
719@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
720
721 /* The small sections were sorted to the end of the segment.
722 The following should definitely cover them. */
723- gp = (u64)me->module_core + me->core_size - 0x8000;
724+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
725 got = sechdrs[me->arch.gotsecindex].sh_addr;
726
727 for (i = 0; i < n; i++) {
728diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
729index e51f578..16c64a3 100644
730--- a/arch/alpha/kernel/osf_sys.c
731+++ b/arch/alpha/kernel/osf_sys.c
732@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
733 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
734
735 static unsigned long
736-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
737- unsigned long limit)
738+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
739+ unsigned long limit, unsigned long flags)
740 {
741 struct vm_unmapped_area_info info;
742+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
743
744 info.flags = 0;
745 info.length = len;
746@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
747 info.high_limit = limit;
748 info.align_mask = 0;
749 info.align_offset = 0;
750+ info.threadstack_offset = offset;
751 return vm_unmapped_area(&info);
752 }
753
754@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
755 merely specific addresses, but regions of memory -- perhaps
756 this feature should be incorporated into all ports? */
757
758+#ifdef CONFIG_PAX_RANDMMAP
759+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
760+#endif
761+
762 if (addr) {
763- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
764+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
765 if (addr != (unsigned long) -ENOMEM)
766 return addr;
767 }
768
769 /* Next, try allocating at TASK_UNMAPPED_BASE. */
770- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
771- len, limit);
772+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
773+
774 if (addr != (unsigned long) -ENOMEM)
775 return addr;
776
777 /* Finally, try allocating in low memory. */
778- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
779+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
780
781 return addr;
782 }
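
The new threadstack_offset field threaded through arch_get_unmapped_area_1() lets vm_unmapped_area() leave a randomized gap before thread stacks instead of packing them back to back, where one overflowing thread stack could otherwise reach its neighbour. Purely illustrative logic for what such a helper could compute; the bound and names below are made up, not grsecurity's implementation:

    /* Return a page-aligned random gap for anonymous stack mappings only. */
    static unsigned long rand_threadstack_offset(int is_stack_mapping,
                                                 int randmmap_enabled,
                                                 unsigned long rnd)
    {
        if (!is_stack_mapping || !randmmap_enabled)
            return 0;
        return (rnd & 0xff) << 12;   /* up to 255 pages of slack */
    }
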
783diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
784index 9d0ac09..479a962 100644
785--- a/arch/alpha/mm/fault.c
786+++ b/arch/alpha/mm/fault.c
787@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
788 __reload_thread(pcb);
789 }
790
791+#ifdef CONFIG_PAX_PAGEEXEC
792+/*
793+ * PaX: decide what to do with offenders (regs->pc = fault address)
794+ *
795+ * returns 1 when task should be killed
796+ * 2 when patched PLT trampoline was detected
797+ * 3 when unpatched PLT trampoline was detected
798+ */
799+static int pax_handle_fetch_fault(struct pt_regs *regs)
800+{
801+
802+#ifdef CONFIG_PAX_EMUPLT
803+ int err;
804+
805+ do { /* PaX: patched PLT emulation #1 */
806+ unsigned int ldah, ldq, jmp;
807+
808+ err = get_user(ldah, (unsigned int *)regs->pc);
809+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
810+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
811+
812+ if (err)
813+ break;
814+
815+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
816+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
817+ jmp == 0x6BFB0000U)
818+ {
819+ unsigned long r27, addr;
820+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
821+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
822+
823+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
824+ err = get_user(r27, (unsigned long *)addr);
825+ if (err)
826+ break;
827+
828+ regs->r27 = r27;
829+ regs->pc = r27;
830+ return 2;
831+ }
832+ } while (0);
833+
834+ do { /* PaX: patched PLT emulation #2 */
835+ unsigned int ldah, lda, br;
836+
837+ err = get_user(ldah, (unsigned int *)regs->pc);
838+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
839+ err |= get_user(br, (unsigned int *)(regs->pc+8));
840+
841+ if (err)
842+ break;
843+
844+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
845+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
846+ (br & 0xFFE00000U) == 0xC3E00000U)
847+ {
848+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
849+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
850+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
851+
852+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
853+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
854+ return 2;
855+ }
856+ } while (0);
857+
858+ do { /* PaX: unpatched PLT emulation */
859+ unsigned int br;
860+
861+ err = get_user(br, (unsigned int *)regs->pc);
862+
863+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
864+ unsigned int br2, ldq, nop, jmp;
865+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
866+
867+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
868+ err = get_user(br2, (unsigned int *)addr);
869+ err |= get_user(ldq, (unsigned int *)(addr+4));
870+ err |= get_user(nop, (unsigned int *)(addr+8));
871+ err |= get_user(jmp, (unsigned int *)(addr+12));
872+ err |= get_user(resolver, (unsigned long *)(addr+16));
873+
874+ if (err)
875+ break;
876+
877+ if (br2 == 0xC3600000U &&
878+ ldq == 0xA77B000CU &&
879+ nop == 0x47FF041FU &&
880+ jmp == 0x6B7B0000U)
881+ {
882+ regs->r28 = regs->pc+4;
883+ regs->r27 = addr+16;
884+ regs->pc = resolver;
885+ return 3;
886+ }
887+ }
888+ } while (0);
889+#endif
890+
891+ return 1;
892+}
893+
894+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
895+{
896+ unsigned long i;
897+
898+ printk(KERN_ERR "PAX: bytes at PC: ");
899+ for (i = 0; i < 5; i++) {
900+ unsigned int c;
901+ if (get_user(c, (unsigned int *)pc+i))
902+ printk(KERN_CONT "???????? ");
903+ else
904+ printk(KERN_CONT "%08x ", c);
905+ }
906+ printk("\n");
907+}
908+#endif
909
910 /*
911 * This routine handles page faults. It determines the address,
912@@ -133,8 +251,29 @@ retry:
913 good_area:
914 si_code = SEGV_ACCERR;
915 if (cause < 0) {
916- if (!(vma->vm_flags & VM_EXEC))
917+ if (!(vma->vm_flags & VM_EXEC)) {
918+
919+#ifdef CONFIG_PAX_PAGEEXEC
920+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
921+ goto bad_area;
922+
923+ up_read(&mm->mmap_sem);
924+ switch (pax_handle_fetch_fault(regs)) {
925+
926+#ifdef CONFIG_PAX_EMUPLT
927+ case 2:
928+ case 3:
929+ return;
930+#endif
931+
932+ }
933+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
934+ do_group_exit(SIGKILL);
935+#else
936 goto bad_area;
937+#endif
938+
939+ }
940 } else if (!cause) {
941 /* Allow reads even for write-only mappings */
942 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
943diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
944index 97d07ed..2931f2b 100644
945--- a/arch/arm/Kconfig
946+++ b/arch/arm/Kconfig
947@@ -1727,7 +1727,7 @@ config ALIGNMENT_TRAP
948
949 config UACCESS_WITH_MEMCPY
950 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
951- depends on MMU
952+ depends on MMU && !PAX_MEMORY_UDEREF
953 default y if CPU_FEROCEON
954 help
955 Implement faster copy_to_user and clear_user methods for CPU
956@@ -1991,6 +1991,7 @@ config XIP_PHYS_ADDR
957 config KEXEC
958 bool "Kexec system call (EXPERIMENTAL)"
959 depends on (!SMP || PM_SLEEP_SMP)
960+ depends on !GRKERNSEC_KMEM
961 help
962 kexec is a system call that implements the ability to shutdown your
963 current kernel, and to start another kernel. It is like a reboot
964diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
965index e22c119..eaa807d 100644
966--- a/arch/arm/include/asm/atomic.h
967+++ b/arch/arm/include/asm/atomic.h
968@@ -18,17 +18,41 @@
969 #include <asm/barrier.h>
970 #include <asm/cmpxchg.h>
971
972+#ifdef CONFIG_GENERIC_ATOMIC64
973+#include <asm-generic/atomic64.h>
974+#endif
975+
976 #define ATOMIC_INIT(i) { (i) }
977
978 #ifdef __KERNEL__
979
980+#ifdef CONFIG_THUMB2_KERNEL
981+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
982+#else
983+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
984+#endif
985+
986+#define _ASM_EXTABLE(from, to) \
987+" .pushsection __ex_table,\"a\"\n"\
988+" .align 3\n" \
989+" .long " #from ", " #to"\n" \
990+" .popsection"
991+
992 /*
993 * On ARM, ordinary assignment (str instruction) doesn't clear the local
994 * strex/ldrex monitor on some implementations. The reason we can use it for
995 * atomic_set() is the clrex or dummy strex done on every exception return.
996 */
997 #define atomic_read(v) ACCESS_ONCE((v)->counter)
998+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
999+{
1000+ return ACCESS_ONCE(v->counter);
1001+}
1002 #define atomic_set(v,i) (((v)->counter) = (i))
1003+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1004+{
1005+ v->counter = i;
1006+}
1007
1008 #if __LINUX_ARM_ARCH__ >= 6
1009
1010@@ -38,26 +62,50 @@
1011 * to ensure that the update happens.
1012 */
1013
1014-#define ATOMIC_OP(op, c_op, asm_op) \
1015-static inline void atomic_##op(int i, atomic_t *v) \
1016+#ifdef CONFIG_PAX_REFCOUNT
1017+#define __OVERFLOW_POST \
1018+ " bvc 3f\n" \
1019+ "2: " REFCOUNT_TRAP_INSN "\n"\
1020+ "3:\n"
1021+#define __OVERFLOW_POST_RETURN \
1022+ " bvc 3f\n" \
1023+" mov %0, %1\n" \
1024+ "2: " REFCOUNT_TRAP_INSN "\n"\
1025+ "3:\n"
1026+#define __OVERFLOW_EXTABLE \
1027+ "4:\n" \
1028+ _ASM_EXTABLE(2b, 4b)
1029+#else
1030+#define __OVERFLOW_POST
1031+#define __OVERFLOW_POST_RETURN
1032+#define __OVERFLOW_EXTABLE
1033+#endif
1034+
1035+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1036+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1037 { \
1038 unsigned long tmp; \
1039 int result; \
1040 \
1041 prefetchw(&v->counter); \
1042- __asm__ __volatile__("@ atomic_" #op "\n" \
1043+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1044 "1: ldrex %0, [%3]\n" \
1045 " " #asm_op " %0, %0, %4\n" \
1046+ post_op \
1047 " strex %1, %0, [%3]\n" \
1048 " teq %1, #0\n" \
1049-" bne 1b" \
1050+" bne 1b\n" \
1051+ extable \
1052 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1053 : "r" (&v->counter), "Ir" (i) \
1054 : "cc"); \
1055 } \
1056
1057-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1058-static inline int atomic_##op##_return(int i, atomic_t *v) \
1059+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op, , )\
1060+ __ATOMIC_OP(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1061+
1062+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1063+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1064 { \
1065 unsigned long tmp; \
1066 int result; \
1067@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1068 smp_mb(); \
1069 prefetchw(&v->counter); \
1070 \
1071- __asm__ __volatile__("@ atomic_" #op "_return\n" \
1072+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1073 "1: ldrex %0, [%3]\n" \
1074 " " #asm_op " %0, %0, %4\n" \
1075+ post_op \
1076 " strex %1, %0, [%3]\n" \
1077 " teq %1, #0\n" \
1078-" bne 1b" \
1079+" bne 1b\n" \
1080+ extable \
1081 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1082 : "r" (&v->counter), "Ir" (i) \
1083 : "cc"); \
1084@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1085 return result; \
1086 }
1087
1088+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op, , )\
1089+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1090+
1091 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1092 {
1093 int oldval;
1094@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1095 __asm__ __volatile__ ("@ atomic_add_unless\n"
1096 "1: ldrex %0, [%4]\n"
1097 " teq %0, %5\n"
1098-" beq 2f\n"
1099-" add %1, %0, %6\n"
1100+" beq 4f\n"
1101+" adds %1, %0, %6\n"
1102+
1103+#ifdef CONFIG_PAX_REFCOUNT
1104+" bvc 3f\n"
1105+"2: " REFCOUNT_TRAP_INSN "\n"
1106+"3:\n"
1107+#endif
1108+
1109 " strex %2, %1, [%4]\n"
1110 " teq %2, #0\n"
1111 " bne 1b\n"
1112-"2:"
1113+"4:"
1114+
1115+#ifdef CONFIG_PAX_REFCOUNT
1116+ _ASM_EXTABLE(2b, 4b)
1117+#endif
1118+
1119 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1120 : "r" (&v->counter), "r" (u), "r" (a)
1121 : "cc");
1122@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1123 return oldval;
1124 }
1125
1126+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1127+{
1128+ unsigned long oldval, res;
1129+
1130+ smp_mb();
1131+
1132+ do {
1133+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1134+ "ldrex %1, [%3]\n"
1135+ "mov %0, #0\n"
1136+ "teq %1, %4\n"
1137+ "strexeq %0, %5, [%3]\n"
1138+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1139+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1140+ : "cc");
1141+ } while (res);
1142+
1143+ smp_mb();
1144+
1145+ return oldval;
1146+}
1147+
1148 #else /* ARM_ARCH_6 */
1149
1150 #ifdef CONFIG_SMP
1151 #error SMP not supported on pre-ARMv6 CPUs
1152 #endif
1153
1154-#define ATOMIC_OP(op, c_op, asm_op) \
1155-static inline void atomic_##op(int i, atomic_t *v) \
1156+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1157+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1158 { \
1159 unsigned long flags; \
1160 \
1161@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1162 raw_local_irq_restore(flags); \
1163 } \
1164
1165-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1166-static inline int atomic_##op##_return(int i, atomic_t *v) \
1167+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1168+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1169+
1170+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1171+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1172 { \
1173 unsigned long flags; \
1174 int val; \
1175@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1176 return val; \
1177 }
1178
1179+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1180+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1181+
1182 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1183 {
1184 int ret;
1185@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1186 return ret;
1187 }
1188
1189+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1190+{
1191+ return atomic_cmpxchg((atomic_t *)v, old, new);
1192+}
1193+
1194 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1195 {
1196 int c, old;
1197@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1198
1199 #undef ATOMIC_OPS
1200 #undef ATOMIC_OP_RETURN
1201+#undef __ATOMIC_OP_RETURN
1202 #undef ATOMIC_OP
1203+#undef __ATOMIC_OP
1204
1205 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1206+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1207+{
1208+ return xchg(&v->counter, new);
1209+}
1210
1211 #define atomic_inc(v) atomic_add(1, v)
1212+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1213+{
1214+ atomic_add_unchecked(1, v);
1215+}
1216 #define atomic_dec(v) atomic_sub(1, v)
1217+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1218+{
1219+ atomic_sub_unchecked(1, v);
1220+}
1221
1222 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1223+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1224+{
1225+ return atomic_add_return_unchecked(1, v) == 0;
1226+}
1227 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1228 #define atomic_inc_return(v) (atomic_add_return(1, v))
1229+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1230+{
1231+ return atomic_add_return_unchecked(1, v);
1232+}
1233 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1234 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1235
1236@@ -216,6 +336,14 @@ typedef struct {
1237 long long counter;
1238 } atomic64_t;
1239
1240+#ifdef CONFIG_PAX_REFCOUNT
1241+typedef struct {
1242+ long long counter;
1243+} atomic64_unchecked_t;
1244+#else
1245+typedef atomic64_t atomic64_unchecked_t;
1246+#endif
1247+
1248 #define ATOMIC64_INIT(i) { (i) }
1249
1250 #ifdef CONFIG_ARM_LPAE
1251@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1252 return result;
1253 }
1254
1255+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1256+{
1257+ long long result;
1258+
1259+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1260+" ldrd %0, %H0, [%1]"
1261+ : "=&r" (result)
1262+ : "r" (&v->counter), "Qo" (v->counter)
1263+ );
1264+
1265+ return result;
1266+}
1267+
1268 static inline void atomic64_set(atomic64_t *v, long long i)
1269 {
1270 __asm__ __volatile__("@ atomic64_set\n"
1271@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1272 : "r" (&v->counter), "r" (i)
1273 );
1274 }
1275+
1276+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1277+{
1278+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1279+" strd %2, %H2, [%1]"
1280+ : "=Qo" (v->counter)
1281+ : "r" (&v->counter), "r" (i)
1282+ );
1283+}
1284 #else
1285 static inline long long atomic64_read(const atomic64_t *v)
1286 {
1287@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1288 return result;
1289 }
1290
1291+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1292+{
1293+ long long result;
1294+
1295+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1296+" ldrexd %0, %H0, [%1]"
1297+ : "=&r" (result)
1298+ : "r" (&v->counter), "Qo" (v->counter)
1299+ );
1300+
1301+ return result;
1302+}
1303+
1304 static inline void atomic64_set(atomic64_t *v, long long i)
1305 {
1306 long long tmp;
1307@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1308 : "r" (&v->counter), "r" (i)
1309 : "cc");
1310 }
1311+
1312+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1313+{
1314+ long long tmp;
1315+
1316+ prefetchw(&v->counter);
1317+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1318+"1: ldrexd %0, %H0, [%2]\n"
1319+" strexd %0, %3, %H3, [%2]\n"
1320+" teq %0, #0\n"
1321+" bne 1b"
1322+ : "=&r" (tmp), "=Qo" (v->counter)
1323+ : "r" (&v->counter), "r" (i)
1324+ : "cc");
1325+}
1326 #endif
1327
1328-#define ATOMIC64_OP(op, op1, op2) \
1329-static inline void atomic64_##op(long long i, atomic64_t *v) \
1330+#undef __OVERFLOW_POST_RETURN
1331+#define __OVERFLOW_POST_RETURN \
1332+ " bvc 3f\n" \
1333+" mov %0, %1\n" \
1334+" mov %H0, %H1\n" \
1335+ "2: " REFCOUNT_TRAP_INSN "\n"\
1336+ "3:\n"
1337+
1338+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1339+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1340 { \
1341 long long result; \
1342 unsigned long tmp; \
1343 \
1344 prefetchw(&v->counter); \
1345- __asm__ __volatile__("@ atomic64_" #op "\n" \
1346+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1347 "1: ldrexd %0, %H0, [%3]\n" \
1348 " " #op1 " %Q0, %Q0, %Q4\n" \
1349 " " #op2 " %R0, %R0, %R4\n" \
1350+ post_op \
1351 " strexd %1, %0, %H0, [%3]\n" \
1352 " teq %1, #0\n" \
1353-" bne 1b" \
1354+" bne 1b\n" \
1355+ extable \
1356 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1357 : "r" (&v->counter), "r" (i) \
1358 : "cc"); \
1359 } \
1360
1361-#define ATOMIC64_OP_RETURN(op, op1, op2) \
1362-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1363+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, , op1, op2, , ) \
1364+ __ATOMIC64_OP(op, _unchecked, op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1365+
1366+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1367+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1368 { \
1369 long long result; \
1370 unsigned long tmp; \
1371@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1372 smp_mb(); \
1373 prefetchw(&v->counter); \
1374 \
1375- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1376+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1377 "1: ldrexd %0, %H0, [%3]\n" \
1378 " " #op1 " %Q0, %Q0, %Q4\n" \
1379 " " #op2 " %R0, %R0, %R4\n" \
1380+ post_op \
1381 " strexd %1, %0, %H0, [%3]\n" \
1382 " teq %1, #0\n" \
1383-" bne 1b" \
1384+" bne 1b\n" \
1385+ extable \
1386 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1387 : "r" (&v->counter), "r" (i) \
1388 : "cc"); \
1389@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1390 return result; \
1391 }
1392
1393+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, , op1, op2, , ) \
1394+ __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1395+
1396 #define ATOMIC64_OPS(op, op1, op2) \
1397 ATOMIC64_OP(op, op1, op2) \
1398 ATOMIC64_OP_RETURN(op, op1, op2)
1399@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1400
1401 #undef ATOMIC64_OPS
1402 #undef ATOMIC64_OP_RETURN
1403+#undef __ATOMIC64_OP_RETURN
1404 #undef ATOMIC64_OP
1405+#undef __ATOMIC64_OP
1406+#undef __OVERFLOW_EXTABLE
1407+#undef __OVERFLOW_POST_RETURN
1408+#undef __OVERFLOW_POST
1409
1410 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1411 long long new)
1412@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1413 return oldval;
1414 }
1415
1416+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1417+ long long new)
1418+{
1419+ long long oldval;
1420+ unsigned long res;
1421+
1422+ smp_mb();
1423+
1424+ do {
1425+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1426+ "ldrexd %1, %H1, [%3]\n"
1427+ "mov %0, #0\n"
1428+ "teq %1, %4\n"
1429+ "teqeq %H1, %H4\n"
1430+ "strexdeq %0, %5, %H5, [%3]"
1431+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1432+ : "r" (&ptr->counter), "r" (old), "r" (new)
1433+ : "cc");
1434+ } while (res);
1435+
1436+ smp_mb();
1437+
1438+ return oldval;
1439+}
1440+
1441 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1442 {
1443 long long result;
1444@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1445 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1446 {
1447 long long result;
1448- unsigned long tmp;
1449+ u64 tmp;
1450
1451 smp_mb();
1452 prefetchw(&v->counter);
1453
1454 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1455-"1: ldrexd %0, %H0, [%3]\n"
1456-" subs %Q0, %Q0, #1\n"
1457-" sbc %R0, %R0, #0\n"
1458+"1: ldrexd %1, %H1, [%3]\n"
1459+" subs %Q0, %Q1, #1\n"
1460+" sbcs %R0, %R1, #0\n"
1461+
1462+#ifdef CONFIG_PAX_REFCOUNT
1463+" bvc 3f\n"
1464+" mov %Q0, %Q1\n"
1465+" mov %R0, %R1\n"
1466+"2: " REFCOUNT_TRAP_INSN "\n"
1467+"3:\n"
1468+#endif
1469+
1470 " teq %R0, #0\n"
1471-" bmi 2f\n"
1472+" bmi 4f\n"
1473 " strexd %1, %0, %H0, [%3]\n"
1474 " teq %1, #0\n"
1475 " bne 1b\n"
1476-"2:"
1477+"4:\n"
1478+
1479+#ifdef CONFIG_PAX_REFCOUNT
1480+ _ASM_EXTABLE(2b, 4b)
1481+#endif
1482+
1483 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1484 : "r" (&v->counter)
1485 : "cc");
1486@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1487 " teq %0, %5\n"
1488 " teqeq %H0, %H5\n"
1489 " moveq %1, #0\n"
1490-" beq 2f\n"
1491+" beq 4f\n"
1492 " adds %Q0, %Q0, %Q6\n"
1493-" adc %R0, %R0, %R6\n"
1494+" adcs %R0, %R0, %R6\n"
1495+
1496+#ifdef CONFIG_PAX_REFCOUNT
1497+" bvc 3f\n"
1498+"2: " REFCOUNT_TRAP_INSN "\n"
1499+"3:\n"
1500+#endif
1501+
1502 " strexd %2, %0, %H0, [%4]\n"
1503 " teq %2, #0\n"
1504 " bne 1b\n"
1505-"2:"
1506+"4:\n"
1507+
1508+#ifdef CONFIG_PAX_REFCOUNT
1509+ _ASM_EXTABLE(2b, 4b)
1510+#endif
1511+
1512 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1513 : "r" (&v->counter), "r" (u), "r" (a)
1514 : "cc");
1515@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1516
1517 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1518 #define atomic64_inc(v) atomic64_add(1LL, (v))
1519+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1520 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1521+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1522 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1523 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1524 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1525+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1526 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1527 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1528 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
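
The pattern running through all of these hunks: the carry-less add/sub/adc instructions become their flag-setting forms (adds/adcs/subs/sbcs), "bvc 3f" skips the trap when no signed overflow occurred, and the bkpt at label 2 is registered in __ex_table so the trap handler can report the refcount overflow and resume at label 4. The _ASM_EXTABLE macro introduced near the top of the file emits, conceptually, one of these per trap site (this matches arm32's actual absolute-address extable layout):

    /* One (insn, fixup) pair per potential fault/trap site, collected in
     * the __ex_table section and searched by PC when the trap fires. */
    struct exception_table_entry {
        unsigned long insn;     /* address of the trapping instruction  */
        unsigned long fixup;    /* address at which to resume execution */
    };
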
1529diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1530index d2f81e6..3c4dba5 100644
1531--- a/arch/arm/include/asm/barrier.h
1532+++ b/arch/arm/include/asm/barrier.h
1533@@ -67,7 +67,7 @@
1534 do { \
1535 compiletime_assert_atomic_type(*p); \
1536 smp_mb(); \
1537- ACCESS_ONCE(*p) = (v); \
1538+ ACCESS_ONCE_RW(*p) = (v); \
1539 } while (0)
1540
1541 #define smp_load_acquire(p) \
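
ACCESS_ONCE_RW exists because the constify side of PaX redefines ACCESS_ONCE() with a const-qualified cast, turning accidental writes through it into compile errors; deliberate lock-free stores such as the one in smp_store_release() above must opt in via the _RW form. Paraphrasing the compiler.h definitions this patch introduces elsewhere:

    #define ACCESS_ONCE(x)     (*(volatile const typeof(x) *)&(x))  /* read-only */
    #define ACCESS_ONCE_RW(x)  (*(volatile typeof(x) *)&(x))        /* writable  */
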
1542diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1543index 75fe66b..ba3dee4 100644
1544--- a/arch/arm/include/asm/cache.h
1545+++ b/arch/arm/include/asm/cache.h
1546@@ -4,8 +4,10 @@
1547 #ifndef __ASMARM_CACHE_H
1548 #define __ASMARM_CACHE_H
1549
1550+#include <linux/const.h>
1551+
1552 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1553-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1554+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1555
1556 /*
1557 * Memory returned by kmalloc() may be used for DMA, so we must make
1558@@ -24,5 +26,6 @@
1559 #endif
1560
1561 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1562+#define __read_only __attribute__ ((__section__(".data..read_only")))
1563
1564 #endif
1565diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1566index 2d46862..a35415b 100644
1567--- a/arch/arm/include/asm/cacheflush.h
1568+++ b/arch/arm/include/asm/cacheflush.h
1569@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1570 void (*dma_unmap_area)(const void *, size_t, int);
1571
1572 void (*dma_flush_range)(const void *, const void *);
1573-};
1574+} __no_const;
1575
1576 /*
1577 * Select the calling method
1578diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1579index 5233151..87a71fa 100644
1580--- a/arch/arm/include/asm/checksum.h
1581+++ b/arch/arm/include/asm/checksum.h
1582@@ -37,7 +37,19 @@ __wsum
1583 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1584
1585 __wsum
1586-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1587+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1588+
1589+static inline __wsum
1590+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1591+{
1592+ __wsum ret;
1593+ pax_open_userland();
1594+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1595+ pax_close_userland();
1596+ return ret;
1597+}
1598+
1599+
1600
1601 /*
1602 * Fold a partial checksum without adding pseudo headers
1603diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1604index abb2c37..96db950 100644
1605--- a/arch/arm/include/asm/cmpxchg.h
1606+++ b/arch/arm/include/asm/cmpxchg.h
1607@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1608
1609 #define xchg(ptr,x) \
1610 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1611+#define xchg_unchecked(ptr,x) \
1612+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1613
1614 #include <asm-generic/cmpxchg-local.h>
1615
1616diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1617index 6ddbe44..b5e38b1a 100644
1618--- a/arch/arm/include/asm/domain.h
1619+++ b/arch/arm/include/asm/domain.h
1620@@ -48,18 +48,37 @@
1621 * Domain types
1622 */
1623 #define DOMAIN_NOACCESS 0
1624-#define DOMAIN_CLIENT 1
1625 #ifdef CONFIG_CPU_USE_DOMAINS
1626+#define DOMAIN_USERCLIENT 1
1627+#define DOMAIN_KERNELCLIENT 1
1628 #define DOMAIN_MANAGER 3
1629+#define DOMAIN_VECTORS DOMAIN_USER
1630 #else
1631+
1632+#ifdef CONFIG_PAX_KERNEXEC
1633 #define DOMAIN_MANAGER 1
1634+#define DOMAIN_KERNEXEC 3
1635+#else
1636+#define DOMAIN_MANAGER 1
1637+#endif
1638+
1639+#ifdef CONFIG_PAX_MEMORY_UDEREF
1640+#define DOMAIN_USERCLIENT 0
1641+#define DOMAIN_UDEREF 1
1642+#define DOMAIN_VECTORS DOMAIN_KERNEL
1643+#else
1644+#define DOMAIN_USERCLIENT 1
1645+#define DOMAIN_VECTORS DOMAIN_USER
1646+#endif
1647+#define DOMAIN_KERNELCLIENT 1
1648+
1649 #endif
1650
1651 #define domain_val(dom,type) ((type) << (2*(dom)))
1652
1653 #ifndef __ASSEMBLY__
1654
1655-#ifdef CONFIG_CPU_USE_DOMAINS
1656+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1657 static inline void set_domain(unsigned val)
1658 {
1659 asm volatile(
1660@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1661 isb();
1662 }
1663
1664-#define modify_domain(dom,type) \
1665- do { \
1666- struct thread_info *thread = current_thread_info(); \
1667- unsigned int domain = thread->cpu_domain; \
1668- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1669- thread->cpu_domain = domain | domain_val(dom, type); \
1670- set_domain(thread->cpu_domain); \
1671- } while (0)
1672-
1673+extern void modify_domain(unsigned int dom, unsigned int type);
1674 #else
1675 static inline void set_domain(unsigned val) { }
1676 static inline void modify_domain(unsigned dom, unsigned type) { }
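
Background for the reshuffled constants: ARM tags every mapping with one of 16 domains, and the DACR register holds a 2-bit access type per domain (0 = no access; 1 = client, page permissions enforced; 3 = manager, permissions ignored), which is exactly what domain_val() packs. UDEREF exploits this by parking userland in a no-access domain while the kernel runs and briefly switching it to client around sanctioned accesses. A self-contained sketch of those two transitions:

    #define domain_val(dom, type) ((type) << (2*(dom)))

    /* Kernel entry: userland memory becomes inaccessible (type 0). */
    static unsigned int dacr_close_userland(unsigned int dacr, int user_dom)
    {
        dacr &= ~domain_val(user_dom, 3);      /* clear the 2-bit field */
        return dacr | domain_val(user_dom, 0); /* DOMAIN_NOACCESS       */
    }

    /* copy_{to,from}_user and friends: grant client access (type 1,
     * DOMAIN_UDEREF in the definitions above). */
    static unsigned int dacr_open_userland(unsigned int dacr, int user_dom)
    {
        dacr &= ~domain_val(user_dom, 3);
        return dacr | domain_val(user_dom, 1);
    }
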
1677diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1678index afb9caf..9a0bac0 100644
1679--- a/arch/arm/include/asm/elf.h
1680+++ b/arch/arm/include/asm/elf.h
1681@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1682 the loader. We need to make sure that it is out of the way of the program
1683 that it will "exec", and that there is sufficient room for the brk. */
1684
1685-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1686+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1687+
1688+#ifdef CONFIG_PAX_ASLR
1689+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1690+
1691+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1692+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1693+#endif
1694
1695 /* When the program starts, a1 contains a pointer to a function to be
1696 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1697@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1698 extern void elf_set_personality(const struct elf32_hdr *);
1699 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1700
1701-struct mm_struct;
1702-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1703-#define arch_randomize_brk arch_randomize_brk
1704-
1705 #ifdef CONFIG_MMU
1706 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1707 struct linux_binprm;
1708diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1709index de53547..52b9a28 100644
1710--- a/arch/arm/include/asm/fncpy.h
1711+++ b/arch/arm/include/asm/fncpy.h
1712@@ -81,7 +81,9 @@
1713 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1714 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1715 \
1716+ pax_open_kernel(); \
1717 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1718+ pax_close_kernel(); \
1719 flush_icache_range((unsigned long)(dest_buf), \
1720 (unsigned long)(dest_buf) + (size)); \
1721 \
1722diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1723index 53e69da..3fdc896 100644
1724--- a/arch/arm/include/asm/futex.h
1725+++ b/arch/arm/include/asm/futex.h
1726@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1727 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1728 return -EFAULT;
1729
1730+ pax_open_userland();
1731+
1732 smp_mb();
1733 /* Prefetching cannot fault */
1734 prefetchw(uaddr);
1735@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1736 : "cc", "memory");
1737 smp_mb();
1738
1739+ pax_close_userland();
1740+
1741 *uval = val;
1742 return ret;
1743 }
1744@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1745 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1746 return -EFAULT;
1747
1748+ pax_open_userland();
1749+
1750 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1751 "1: " TUSER(ldr) " %1, [%4]\n"
1752 " teq %1, %2\n"
1753@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1754 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1755 : "cc", "memory");
1756
1757+ pax_close_userland();
1758+
1759 *uval = val;
1760 return ret;
1761 }
1762@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1763 return -EFAULT;
1764
1765 pagefault_disable(); /* implies preempt_disable() */
1766+ pax_open_userland();
1767
1768 switch (op) {
1769 case FUTEX_OP_SET:
1770@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1771 ret = -ENOSYS;
1772 }
1773
1774+ pax_close_userland();
1775 pagefault_enable(); /* subsumes preempt_enable() */
1776
1777 if (!ret) {
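
All four futex hunks follow the same UDEREF discipline: every direct userland dereference is bracketed by pax_open_userland()/pax_close_userland(), including the inline-assembly paths. Purely as an illustration of that shape (the patch itself open-codes the pair), here is a hypothetical WITH_USERLAND() wrapper with stubbed-out domain switches; it uses a GCC statement expression, which kernel code relies on anyway:

#include <stdio.h>

/* Stubs standing in for the real DACR switches -- illustration only. */
static void pax_open_userland(void)  { puts("open userland window");  }
static void pax_close_userland(void) { puts("close userland window"); }

/* Hypothetical helper: open the window just before the access, close it
 * on every exit path, so the result must be captured in a local. */
#define WITH_USERLAND(expr) ({		\
	typeof(expr) __ret;		\
	pax_open_userland();		\
	__ret = (expr);			\
	pax_close_userland();		\
	__ret;				\
})

int main(void)
{
	int user_word = 42;			/* stand-in for *uaddr */
	int v = WITH_USERLAND(user_word + 0);
	printf("read %d\n", v);
	return 0;
}

Capturing the value in a temporary is exactly why the get_user()/put_user() macros later in this patch grow a __e local.
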
1778diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1779index 83eb2f7..ed77159 100644
1780--- a/arch/arm/include/asm/kmap_types.h
1781+++ b/arch/arm/include/asm/kmap_types.h
1782@@ -4,6 +4,6 @@
1783 /*
1784 * This is the "bare minimum". AIO seems to require this.
1785 */
1786-#define KM_TYPE_NR 16
1787+#define KM_TYPE_NR 17
1788
1789 #endif
1790diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1791index 9e614a1..3302cca 100644
1792--- a/arch/arm/include/asm/mach/dma.h
1793+++ b/arch/arm/include/asm/mach/dma.h
1794@@ -22,7 +22,7 @@ struct dma_ops {
1795 int (*residue)(unsigned int, dma_t *); /* optional */
1796 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1797 const char *type;
1798-};
1799+} __do_const;
1800
1801 struct dma_struct {
1802 void *addr; /* single DMA address */
1803diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1804index f98c7f3..e5c626d 100644
1805--- a/arch/arm/include/asm/mach/map.h
1806+++ b/arch/arm/include/asm/mach/map.h
1807@@ -23,17 +23,19 @@ struct map_desc {
1808
1809 /* types 0-3 are defined in asm/io.h */
1810 enum {
1811- MT_UNCACHED = 4,
1812- MT_CACHECLEAN,
1813- MT_MINICLEAN,
1814+ MT_UNCACHED_RW = 4,
1815+ MT_CACHECLEAN_RO,
1816+ MT_MINICLEAN_RO,
1817 MT_LOW_VECTORS,
1818 MT_HIGH_VECTORS,
1819- MT_MEMORY_RWX,
1820+ __MT_MEMORY_RWX,
1821 MT_MEMORY_RW,
1822- MT_ROM,
1823- MT_MEMORY_RWX_NONCACHED,
1824+ MT_MEMORY_RX,
1825+ MT_ROM_RX,
1826+ MT_MEMORY_RW_NONCACHED,
1827+ MT_MEMORY_RX_NONCACHED,
1828 MT_MEMORY_RW_DTCM,
1829- MT_MEMORY_RWX_ITCM,
1830+ MT_MEMORY_RX_ITCM,
1831 MT_MEMORY_RW_SO,
1832 MT_MEMORY_DMA_READY,
1833 };
1834diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1835index 891a56b..48f337e 100644
1836--- a/arch/arm/include/asm/outercache.h
1837+++ b/arch/arm/include/asm/outercache.h
1838@@ -36,7 +36,7 @@ struct outer_cache_fns {
1839
1840 /* This is an ARM L2C thing */
1841 void (*write_sec)(unsigned long, unsigned);
1842-};
1843+} __no_const;
1844
1845 extern struct outer_cache_fns outer_cache;
1846
1847diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1848index 4355f0e..cd9168e 100644
1849--- a/arch/arm/include/asm/page.h
1850+++ b/arch/arm/include/asm/page.h
1851@@ -23,6 +23,7 @@
1852
1853 #else
1854
1855+#include <linux/compiler.h>
1856 #include <asm/glue.h>
1857
1858 /*
1859@@ -114,7 +115,7 @@ struct cpu_user_fns {
1860 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1861 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1862 unsigned long vaddr, struct vm_area_struct *vma);
1863-};
1864+} __no_const;
1865
1866 #ifdef MULTI_USER
1867 extern struct cpu_user_fns cpu_user;
1868diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1869index 19cfab5..3f5c7e9 100644
1870--- a/arch/arm/include/asm/pgalloc.h
1871+++ b/arch/arm/include/asm/pgalloc.h
1872@@ -17,6 +17,7 @@
1873 #include <asm/processor.h>
1874 #include <asm/cacheflush.h>
1875 #include <asm/tlbflush.h>
1876+#include <asm/system_info.h>
1877
1878 #define check_pgt_cache() do { } while (0)
1879
1880@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1881 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1882 }
1883
1884+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1885+{
1886+ pud_populate(mm, pud, pmd);
1887+}
1888+
1889 #else /* !CONFIG_ARM_LPAE */
1890
1891 /*
1892@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1893 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1894 #define pmd_free(mm, pmd) do { } while (0)
1895 #define pud_populate(mm,pmd,pte) BUG()
1896+#define pud_populate_kernel(mm,pmd,pte) BUG()
1897
1898 #endif /* CONFIG_ARM_LPAE */
1899
1900@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1901 __free_page(pte);
1902 }
1903
1904+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1905+{
1906+#ifdef CONFIG_ARM_LPAE
1907+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1908+#else
1909+ if (addr & SECTION_SIZE)
1910+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1911+ else
1912+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1913+#endif
1914+ flush_pmd_entry(pmdp);
1915+}
1916+
1917 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1918 pmdval_t prot)
1919 {
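
__section_update() above targets the classic two-level layout, where one pmd entry covers a 2 MiB pair of 1 MiB hardware sections; addr & SECTION_SIZE picks the odd or even half of the pair. A tiny model of that index selection, assuming the usual SECTION_SIZE of 1 MiB (SECTION_SHIFT = 20):

#include <stdio.h>

#define SECTION_SIZE 0x00100000UL	/* assumption: 1 MiB sections */

static int section_index(unsigned long addr)
{
	/* Odd megabyte -> second entry of the pair, even -> first. */
	return (addr & SECTION_SIZE) ? 1 : 0;
}

int main(void)
{
	unsigned long addrs[] = { 0xC0000000UL, 0xC0100000UL, 0xC0300000UL };
	for (int i = 0; i < 3; i++)
		printf("0x%08lx updates pmdp[%d]\n",
		       addrs[i], section_index(addrs[i]));
	return 0;
}
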
1920diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1921index 5e68278..1869bae 100644
1922--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1923+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1924@@ -27,7 +27,7 @@
1925 /*
1926 * - section
1927 */
1928-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1929+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1930 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1931 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1932 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1933@@ -39,6 +39,7 @@
1934 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1935 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1936 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1937+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1938
1939 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1940 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1941@@ -68,6 +69,7 @@
1942 * - extended small page/tiny page
1943 */
1944 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1945+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1946 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1947 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1948 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1949diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1950index f027941..f36ce30 100644
1951--- a/arch/arm/include/asm/pgtable-2level.h
1952+++ b/arch/arm/include/asm/pgtable-2level.h
1953@@ -126,6 +126,9 @@
1954 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1955 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1956
1957+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1958+#define L_PTE_PXN (_AT(pteval_t, 0))
1959+
1960 /*
1961 * These are the memory types, defined to be compatible with
1962 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1963diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1964index a31ecdad..95e98d4 100644
1965--- a/arch/arm/include/asm/pgtable-3level.h
1966+++ b/arch/arm/include/asm/pgtable-3level.h
1967@@ -81,6 +81,7 @@
1968 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1969 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1970 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1971+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1972 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1973 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1974 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1975@@ -92,10 +93,12 @@
1976 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1977 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1978 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1979+#define PMD_SECT_RDONLY PMD_SECT_AP2
1980
1981 /*
1982 * To be used in assembly code with the upper page attributes.
1983 */
1984+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1985 #define L_PTE_XN_HIGH (1 << (54 - 32))
1986 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1987
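
The new L_PTE_PXN_HIGH follows the existing *_HIGH convention: LPAE descriptors are 64-bit, but the assembly paths handle them as a low/high pair of 32-bit words, so a bit at position N >= 32 reappears as bit N - 32 of the high word. A quick check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pte = (1ULL << 53) | (1ULL << 54);	/* PXN | XN */
	uint32_t hi  = (uint32_t)(pte >> 32);		/* upper word, as seen by asm */

	printf("hi word = 0x%08x\n", hi);
	printf("PXN_HIGH matches: %d\n", (hi & (1u << (53 - 32))) != 0);
	printf("XN_HIGH  matches: %d\n", (hi & (1u << (54 - 32))) != 0);
	return 0;
}
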
1988diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1989index d5cac54..906ea3e 100644
1990--- a/arch/arm/include/asm/pgtable.h
1991+++ b/arch/arm/include/asm/pgtable.h
1992@@ -33,6 +33,9 @@
1993 #include <asm/pgtable-2level.h>
1994 #endif
1995
1996+#define ktla_ktva(addr) (addr)
1997+#define ktva_ktla(addr) (addr)
1998+
1999 /*
2000 * Just any arbitrary offset to the start of the vmalloc VM area: the
2001 * current 8MB value just means that there will be a 8MB "hole" after the
2002@@ -48,6 +51,9 @@
2003 #define LIBRARY_TEXT_START 0x0c000000
2004
2005 #ifndef __ASSEMBLY__
2006+extern pteval_t __supported_pte_mask;
2007+extern pmdval_t __supported_pmd_mask;
2008+
2009 extern void __pte_error(const char *file, int line, pte_t);
2010 extern void __pmd_error(const char *file, int line, pmd_t);
2011 extern void __pgd_error(const char *file, int line, pgd_t);
2012@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2013 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2014 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2015
2016+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2017+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2018+
2019+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2020+#include <asm/domain.h>
2021+#include <linux/thread_info.h>
2022+#include <linux/preempt.h>
2023+
2024+static inline int test_domain(int domain, int domaintype)
2025+{
2026+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2027+}
2028+#endif
2029+
2030+#ifdef CONFIG_PAX_KERNEXEC
2031+static inline unsigned long pax_open_kernel(void) {
2032+#ifdef CONFIG_ARM_LPAE
2033+ /* TODO */
2034+#else
2035+ preempt_disable();
2036+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2037+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2038+#endif
2039+ return 0;
2040+}
2041+
2042+static inline unsigned long pax_close_kernel(void) {
2043+#ifdef CONFIG_ARM_LPAE
2044+ /* TODO */
2045+#else
2046+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2047+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2048+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2049+ preempt_enable_no_resched();
2050+#endif
2051+ return 0;
2052+}
2053+#else
2054+static inline unsigned long pax_open_kernel(void) { return 0; }
2055+static inline unsigned long pax_close_kernel(void) { return 0; }
2056+#endif
2057+
2058 /*
2059 * This is the lowest virtual address we can permit any user space
2060 * mapping to be mapped at. This is particularly important for
2061@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2062 /*
2063 * The pgprot_* and protection_map entries will be fixed up in runtime
2064 * to include the cachable and bufferable bits based on memory policy,
2065- * as well as any architecture dependent bits like global/ASID and SMP
2066- * shared mapping bits.
2067+ * as well as any architecture dependent bits like global/ASID, PXN,
2068+ * and SMP shared mapping bits.
2069 */
2070 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2071
2072@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2073 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2074 {
2075 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2076- L_PTE_NONE | L_PTE_VALID;
2077+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2078 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2079 return pte;
2080 }
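
test_domain() compares a single 2-bit DACR field under a domain_val(dom, 3) mask, and pax_open_kernel()/pax_close_kernel() flip DOMAIN_KERNEL between the client encoding (permission checks enforced) and the manager encoding (checks bypassed, so read-only kernel pages become writable for the duration). A user-space model of the field test and the flip; the type values mirror the domain.h hunk earlier in this patch, and DOMAIN_KERNEL = 0 is the assumed stock number:

#include <stdio.h>

#define domain_val(dom, type) ((type) << (2 * (dom)))

enum { DOMAIN_KERNEL = 0 };				/* assumed stock number */
enum { DOMAIN_KERNELCLIENT = 1, DOMAIN_KERNEXEC = 3 };	/* from the hunk above */

static int test_domain(unsigned int cpu_domain, int dom, int type)
{
	return (cpu_domain & domain_val(dom, 3)) == domain_val(dom, type);
}

int main(void)
{
	unsigned int dacr = domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT);

	printf("kernel is client:   %d\n",
	       test_domain(dacr, DOMAIN_KERNEL, DOMAIN_KERNELCLIENT));	/* 1 */

	/* What modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC) amounts to: */
	dacr &= ~domain_val(DOMAIN_KERNEL, 3);
	dacr |= domain_val(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
	printf("kernel is KERNEXEC: %d\n",
	       test_domain(dacr, DOMAIN_KERNEL, DOMAIN_KERNEXEC));	/* 1 */
	return 0;
}
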
2081diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2082index c25ef3e..735f14b 100644
2083--- a/arch/arm/include/asm/psci.h
2084+++ b/arch/arm/include/asm/psci.h
2085@@ -32,7 +32,7 @@ struct psci_operations {
2086 int (*affinity_info)(unsigned long target_affinity,
2087 unsigned long lowest_affinity_level);
2088 int (*migrate_info_type)(void);
2089-};
2090+} __no_const;
2091
2092 extern struct psci_operations psci_ops;
2093 extern struct smp_operations psci_smp_ops;
2094diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2095index 18f5a55..5072a40 100644
2096--- a/arch/arm/include/asm/smp.h
2097+++ b/arch/arm/include/asm/smp.h
2098@@ -107,7 +107,7 @@ struct smp_operations {
2099 int (*cpu_disable)(unsigned int cpu);
2100 #endif
2101 #endif
2102-};
2103+} __no_const;
2104
2105 struct of_cpu_method {
2106 const char *method;
2107diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2108index d890e41..3921292 100644
2109--- a/arch/arm/include/asm/thread_info.h
2110+++ b/arch/arm/include/asm/thread_info.h
2111@@ -78,9 +78,9 @@ struct thread_info {
2112 .flags = 0, \
2113 .preempt_count = INIT_PREEMPT_COUNT, \
2114 .addr_limit = KERNEL_DS, \
2115- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2116- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2117- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2118+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2119+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2120+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2121 .restart_block = { \
2122 .fn = do_no_restart_syscall, \
2123 }, \
2124@@ -159,7 +159,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2125 #define TIF_SYSCALL_AUDIT 9
2126 #define TIF_SYSCALL_TRACEPOINT 10
2127 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2128-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2129+/* within 8 bits of TIF_SYSCALL_TRACE
2130+ * to meet flexible second operand requirements
2131+ */
2132+#define TIF_GRSEC_SETXID 12
2133+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2134 #define TIF_USING_IWMMXT 17
2135 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2136 #define TIF_RESTORE_SIGMASK 20
2137@@ -173,10 +177,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2138 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2139 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2140 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2141+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2142
2143 /* Checks for any syscall work in entry-common.S */
2144 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2145- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2146+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2147
2148 /*
2149 * Change these and you break ASM code in entry-common.S
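
The "flexible second operand" comment is about ARM's modified-immediate encoding: a data-processing instruction can only carry an 8-bit constant rotated right by an even amount, so _TIF_SYSCALL_WORK must keep all of its bits inside one such window for entry-common.S to test it with a single tst. A sketch of the encodability rule; TIF_SYSCALL_TRACE = 8 is an assumption (it is defined above the visible hunk), which puts the mask at 0x1f00:

#include <stdio.h>
#include <stdint.h>

/* An ARM "Operand2" immediate is imm8 rotated right by an even amount,
 * so v is encodable iff some even left-rotation of v fits in 8 bits. */
static int arm_encodable(uint32_t v)
{
	for (int rot = 0; rot < 32; rot += 2) {
		uint32_t r = rot ? (v << rot) | (v >> (32 - rot)) : v;
		if (r <= 0xffu)
			return 1;
	}
	return 0;
}

int main(void)
{
	uint32_t work = 0x1f00;			/* bits 8..12: TRACE..GRSEC_SETXID */
	uint32_t bad  = 0x0f00 | (1u << 17);	/* same flags, SETXID at bit 17 */

	printf("0x%08x encodable: %d\n", work, arm_encodable(work));	/* 1 */
	printf("0x%08x encodable: %d\n", bad,  arm_encodable(bad));	/* 0 */
	return 0;
}
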
2150diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2151index 5f833f7..76e6644 100644
2152--- a/arch/arm/include/asm/tls.h
2153+++ b/arch/arm/include/asm/tls.h
2154@@ -3,6 +3,7 @@
2155
2156 #include <linux/compiler.h>
2157 #include <asm/thread_info.h>
2158+#include <asm/pgtable.h>
2159
2160 #ifdef __ASSEMBLY__
2161 #include <asm/asm-offsets.h>
2162@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2163 * at 0xffff0fe0 must be used instead. (see
2164 * entry-armv.S for details)
2165 */
2166+ pax_open_kernel();
2167 *((unsigned int *)0xffff0ff0) = val;
2168+ pax_close_kernel();
2169 #endif
2170 }
2171
2172diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2173index 4767eb9..bf00668 100644
2174--- a/arch/arm/include/asm/uaccess.h
2175+++ b/arch/arm/include/asm/uaccess.h
2176@@ -18,6 +18,7 @@
2177 #include <asm/domain.h>
2178 #include <asm/unified.h>
2179 #include <asm/compiler.h>
2180+#include <asm/pgtable.h>
2181
2182 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2183 #include <asm-generic/uaccess-unaligned.h>
2184@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2185 static inline void set_fs(mm_segment_t fs)
2186 {
2187 current_thread_info()->addr_limit = fs;
2188- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2189+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2190 }
2191
2192 #define segment_eq(a,b) ((a) == (b))
2193
2194+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2195+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2196+
2197+static inline void pax_open_userland(void)
2198+{
2199+
2200+#ifdef CONFIG_PAX_MEMORY_UDEREF
2201+ if (segment_eq(get_fs(), USER_DS)) {
2202+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2203+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2204+ }
2205+#endif
2206+
2207+}
2208+
2209+static inline void pax_close_userland(void)
2210+{
2211+
2212+#ifdef CONFIG_PAX_MEMORY_UDEREF
2213+ if (segment_eq(get_fs(), USER_DS)) {
2214+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2215+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2216+ }
2217+#endif
2218+
2219+}
2220+
2221 #define __addr_ok(addr) ({ \
2222 unsigned long flag; \
2223 __asm__("cmp %2, %0; movlo %0, #0" \
2224@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2225
2226 #define get_user(x,p) \
2227 ({ \
2228+ int __e; \
2229 might_fault(); \
2230- __get_user_check(x,p); \
2231+ pax_open_userland(); \
2232+ __e = __get_user_check(x,p); \
2233+ pax_close_userland(); \
2234+ __e; \
2235 })
2236
2237 extern int __put_user_1(void *, unsigned int);
2238@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2239
2240 #define put_user(x,p) \
2241 ({ \
2242+ int __e; \
2243 might_fault(); \
2244- __put_user_check(x,p); \
2245+ pax_open_userland(); \
2246+ __e = __put_user_check(x,p); \
2247+ pax_close_userland(); \
2248+ __e; \
2249 })
2250
2251 #else /* CONFIG_MMU */
2252@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2253
2254 #endif /* CONFIG_MMU */
2255
2256+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2257 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2258
2259 #define user_addr_max() \
2260@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2261 #define __get_user(x,ptr) \
2262 ({ \
2263 long __gu_err = 0; \
2264+ pax_open_userland(); \
2265 __get_user_err((x),(ptr),__gu_err); \
2266+ pax_close_userland(); \
2267 __gu_err; \
2268 })
2269
2270 #define __get_user_error(x,ptr,err) \
2271 ({ \
2272+ pax_open_userland(); \
2273 __get_user_err((x),(ptr),err); \
2274+ pax_close_userland(); \
2275 (void) 0; \
2276 })
2277
2278@@ -368,13 +409,17 @@ do { \
2279 #define __put_user(x,ptr) \
2280 ({ \
2281 long __pu_err = 0; \
2282+ pax_open_userland(); \
2283 __put_user_err((x),(ptr),__pu_err); \
2284+ pax_close_userland(); \
2285 __pu_err; \
2286 })
2287
2288 #define __put_user_error(x,ptr,err) \
2289 ({ \
2290+ pax_open_userland(); \
2291 __put_user_err((x),(ptr),err); \
2292+ pax_close_userland(); \
2293 (void) 0; \
2294 })
2295
2296@@ -474,11 +519,44 @@ do { \
2297
2298
2299 #ifdef CONFIG_MMU
2300-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2301-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2302+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2303+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2304+
2305+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2306+{
2307+ unsigned long ret;
2308+
2309+ check_object_size(to, n, false);
2310+ pax_open_userland();
2311+ ret = ___copy_from_user(to, from, n);
2312+ pax_close_userland();
2313+ return ret;
2314+}
2315+
2316+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2317+{
2318+ unsigned long ret;
2319+
2320+ check_object_size(from, n, true);
2321+ pax_open_userland();
2322+ ret = ___copy_to_user(to, from, n);
2323+ pax_close_userland();
2324+ return ret;
2325+}
2326+
2327 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2328-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2329+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2330 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2331+
2332+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2333+{
2334+ unsigned long ret;
2335+ pax_open_userland();
2336+ ret = ___clear_user(addr, n);
2337+ pax_close_userland();
2338+ return ret;
2339+}
2340+
2341 #else
2342 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2343 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2344@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2345
2346 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2347 {
2348+ if ((long)n < 0)
2349+ return n;
2350+
2351 if (access_ok(VERIFY_READ, from, n))
2352 n = __copy_from_user(to, from, n);
2353 else /* security hole - plug it */
2354@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2355
2356 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2357 {
2358+ if ((long)n < 0)
2359+ return n;
2360+
2361 if (access_ok(VERIFY_WRITE, to, n))
2362 n = __copy_to_user(to, from, n);
2363 return n;
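
The added "(long)n < 0" test in copy_from_user()/copy_to_user() rejects sizes that went negative somewhere and wrapped to an enormous unsigned value, before access_ok() or the copy ever see them. A minimal user-space model of the guard (memcpy stands in for the real user copy):

#include <stdio.h>
#include <string.h>

static unsigned long copy_from_user_model(void *to, const void *from,
					  unsigned long n)
{
	if ((long)n < 0)	/* underflowed length: refuse, copy nothing */
		return n;
	memcpy(to, from, n);	/* stands in for the real user copy */
	return 0;
}

int main(void)
{
	char dst[16], src[16] = "hello";
	unsigned long bad = (unsigned long)(5 - 6);	/* wraps to ULONG_MAX */

	printf("good copy left %lu bytes\n",
	       copy_from_user_model(dst, src, 6));
	printf("bad length 0x%lx left 0x%lx bytes uncopied\n",
	       bad, copy_from_user_model(dst, src, bad));
	return 0;
}
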
2364diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2365index 5af0ed1..cea83883 100644
2366--- a/arch/arm/include/uapi/asm/ptrace.h
2367+++ b/arch/arm/include/uapi/asm/ptrace.h
2368@@ -92,7 +92,7 @@
2369 * ARMv7 groups of PSR bits
2370 */
2371 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2372-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2373+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2374 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2375 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2376
2377diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2378index a88671c..1cc895e 100644
2379--- a/arch/arm/kernel/armksyms.c
2380+++ b/arch/arm/kernel/armksyms.c
2381@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2382
2383 /* networking */
2384 EXPORT_SYMBOL(csum_partial);
2385-EXPORT_SYMBOL(csum_partial_copy_from_user);
2386+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2387 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2388 EXPORT_SYMBOL(__csum_ipv6_magic);
2389
2390@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2391 #ifdef CONFIG_MMU
2392 EXPORT_SYMBOL(copy_page);
2393
2394-EXPORT_SYMBOL(__copy_from_user);
2395-EXPORT_SYMBOL(__copy_to_user);
2396-EXPORT_SYMBOL(__clear_user);
2397+EXPORT_SYMBOL(___copy_from_user);
2398+EXPORT_SYMBOL(___copy_to_user);
2399+EXPORT_SYMBOL(___clear_user);
2400
2401 EXPORT_SYMBOL(__get_user_1);
2402 EXPORT_SYMBOL(__get_user_2);
2403diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2404index 2f5555d..d493c91 100644
2405--- a/arch/arm/kernel/entry-armv.S
2406+++ b/arch/arm/kernel/entry-armv.S
2407@@ -47,6 +47,87 @@
2408 9997:
2409 .endm
2410
2411+ .macro pax_enter_kernel
2412+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2413+ @ make aligned space for saved DACR
2414+ sub sp, sp, #8
2415+ @ save regs
2416+ stmdb sp!, {r1, r2}
2417+ @ read DACR from cpu_domain into r1
2418+ mov r2, sp
2419+ @ assume 8K pages, since we have to split the immediate in two
2420+ bic r2, r2, #(0x1fc0)
2421+ bic r2, r2, #(0x3f)
2422+ ldr r1, [r2, #TI_CPU_DOMAIN]
2423+ @ store old DACR on stack
2424+ str r1, [sp, #8]
2425+#ifdef CONFIG_PAX_KERNEXEC
2426+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2427+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2428+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2429+#endif
2430+#ifdef CONFIG_PAX_MEMORY_UDEREF
2431+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2432+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2433+#endif
2434+ @ write r1 to current_thread_info()->cpu_domain
2435+ str r1, [r2, #TI_CPU_DOMAIN]
2436+ @ write r1 to DACR
2437+ mcr p15, 0, r1, c3, c0, 0
2438+ @ instruction sync
2439+ instr_sync
2440+ @ restore regs
2441+ ldmia sp!, {r1, r2}
2442+#endif
2443+ .endm
2444+
2445+ .macro pax_open_userland
2446+#ifdef CONFIG_PAX_MEMORY_UDEREF
2447+ @ save regs
2448+ stmdb sp!, {r0, r1}
2449+ @ read DACR from cpu_domain into r1
2450+ mov r0, sp
2451+ @ assume 8K pages, since we have to split the immediate in two
2452+ bic r0, r0, #(0x1fc0)
2453+ bic r0, r0, #(0x3f)
2454+ ldr r1, [r0, #TI_CPU_DOMAIN]
2455+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2456+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2457+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2458+ @ write r1 to current_thread_info()->cpu_domain
2459+ str r1, [r0, #TI_CPU_DOMAIN]
2460+ @ write r1 to DACR
2461+ mcr p15, 0, r1, c3, c0, 0
2462+ @ instruction sync
2463+ instr_sync
2464+ @ restore regs
2465+ ldmia sp!, {r0, r1}
2466+#endif
2467+ .endm
2468+
2469+ .macro pax_close_userland
2470+#ifdef CONFIG_PAX_MEMORY_UDEREF
2471+ @ save regs
2472+ stmdb sp!, {r0, r1}
2473+ @ read DACR from cpu_domain into r1
2474+ mov r0, sp
2475+ @ assume 8K pages, since we have to split the immediate in two
2476+ bic r0, r0, #(0x1fc0)
2477+ bic r0, r0, #(0x3f)
2478+ ldr r1, [r0, #TI_CPU_DOMAIN]
2479+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2480+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2481+ @ write r1 to current_thread_info()->cpu_domain
2482+ str r1, [r0, #TI_CPU_DOMAIN]
2483+ @ write r1 to DACR
2484+ mcr p15, 0, r1, c3, c0, 0
2485+ @ instruction sync
2486+ instr_sync
2487+ @ restore regs
2488+ ldmia sp!, {r0, r1}
2489+#endif
2490+ .endm
2491+
2492 .macro pabt_helper
2493 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2494 #ifdef MULTI_PABORT
2495@@ -89,11 +170,15 @@
2496 * Invalid mode handlers
2497 */
2498 .macro inv_entry, reason
2499+
2500+ pax_enter_kernel
2501+
2502 sub sp, sp, #S_FRAME_SIZE
2503 ARM( stmib sp, {r1 - lr} )
2504 THUMB( stmia sp, {r0 - r12} )
2505 THUMB( str sp, [sp, #S_SP] )
2506 THUMB( str lr, [sp, #S_LR] )
2507+
2508 mov r1, #\reason
2509 .endm
2510
2511@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2512 .macro svc_entry, stack_hole=0, trace=1
2513 UNWIND(.fnstart )
2514 UNWIND(.save {r0 - pc} )
2515+
2516+ pax_enter_kernel
2517+
2518 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2519+
2520 #ifdef CONFIG_THUMB2_KERNEL
2521 SPFIX( str r0, [sp] ) @ temporarily saved
2522 SPFIX( mov r0, sp )
2523@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2524 ldmia r0, {r3 - r5}
2525 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2526 mov r6, #-1 @ "" "" "" ""
2527+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2528+ @ offset sp by 8 as done in pax_enter_kernel
2529+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2530+#else
2531 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2532+#endif
2533 SPFIX( addeq r2, r2, #4 )
2534 str r3, [sp, #-4]! @ save the "real" r0 copied
2535 @ from the exception stack
2536@@ -368,6 +462,9 @@ ENDPROC(__fiq_abt)
2537 .macro usr_entry, trace=1
2538 UNWIND(.fnstart )
2539 UNWIND(.cantunwind ) @ don't unwind the user space
2540+
2541+ pax_enter_kernel_user
2542+
2543 sub sp, sp, #S_FRAME_SIZE
2544 ARM( stmib sp, {r1 - r12} )
2545 THUMB( stmia sp, {r0 - r12} )
2546@@ -478,7 +575,9 @@ __und_usr:
2547 tst r3, #PSR_T_BIT @ Thumb mode?
2548 bne __und_usr_thumb
2549 sub r4, r2, #4 @ ARM instr at LR - 4
2550+ pax_open_userland
2551 1: ldrt r0, [r4]
2552+ pax_close_userland
2553 ARM_BE8(rev r0, r0) @ little endian instruction
2554
2555 @ r0 = 32-bit ARM instruction which caused the exception
2556@@ -512,11 +611,15 @@ __und_usr_thumb:
2557 */
2558 .arch armv6t2
2559 #endif
2560+ pax_open_userland
2561 2: ldrht r5, [r4]
2562+ pax_close_userland
2563 ARM_BE8(rev16 r5, r5) @ little endian instruction
2564 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2565 blo __und_usr_fault_16 @ 16bit undefined instruction
2566+ pax_open_userland
2567 3: ldrht r0, [r2]
2568+ pax_close_userland
2569 ARM_BE8(rev16 r0, r0) @ little endian instruction
2570 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2571 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2572@@ -546,7 +649,8 @@ ENDPROC(__und_usr)
2573 */
2574 .pushsection .fixup, "ax"
2575 .align 2
2576-4: str r4, [sp, #S_PC] @ retry current instruction
2577+4: pax_close_userland
2578+ str r4, [sp, #S_PC] @ retry current instruction
2579 ret r9
2580 .popsection
2581 .pushsection __ex_table,"a"
2582@@ -766,7 +870,7 @@ ENTRY(__switch_to)
2583 THUMB( str lr, [ip], #4 )
2584 ldr r4, [r2, #TI_TP_VALUE]
2585 ldr r5, [r2, #TI_TP_VALUE + 4]
2586-#ifdef CONFIG_CPU_USE_DOMAINS
2587+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2588 ldr r6, [r2, #TI_CPU_DOMAIN]
2589 #endif
2590 switch_tls r1, r4, r5, r3, r7
2591@@ -775,7 +879,7 @@ ENTRY(__switch_to)
2592 ldr r8, =__stack_chk_guard
2593 ldr r7, [r7, #TSK_STACK_CANARY]
2594 #endif
2595-#ifdef CONFIG_CPU_USE_DOMAINS
2596+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2597 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2598 #endif
2599 mov r5, r0
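
The repeated "assume 8K pages" comment in these macros is really about THREAD_SIZE: current_thread_info() is sp & ~(THREAD_SIZE - 1), and with 8 KiB kernel stacks the mask ~0x1fff spans 13 contiguous bits, too wide for one ARM modified immediate, hence the two bic instructions (0x1fc0 clears bits 6..12, 0x3f clears bits 0..5). A quick check that the pair is equivalent to the single mask:

#include <stdio.h>

int main(void)
{
	unsigned long sp = 0xC123ABC4UL;	/* arbitrary kernel sp */
	unsigned long ti = sp;

	ti &= ~0x1fc0UL;	/* first bic: clears bits 6..12 */
	ti &= ~0x3fUL;		/* second bic: clears bits 0..5 */

	printf("sp 0x%08lx -> thread_info 0x%08lx (sp & ~0x1fff = 0x%08lx)\n",
	       sp, ti, sp & ~0x1fffUL);
	return 0;
}
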
2600diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2601index f8ccc21..83d192f 100644
2602--- a/arch/arm/kernel/entry-common.S
2603+++ b/arch/arm/kernel/entry-common.S
2604@@ -11,18 +11,46 @@
2605 #include <asm/assembler.h>
2606 #include <asm/unistd.h>
2607 #include <asm/ftrace.h>
2608+#include <asm/domain.h>
2609 #include <asm/unwind.h>
2610
2611+#include "entry-header.S"
2612+
2613 #ifdef CONFIG_NEED_RET_TO_USER
2614 #include <mach/entry-macro.S>
2615 #else
2616 .macro arch_ret_to_user, tmp1, tmp2
2617+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2618+ @ save regs
2619+ stmdb sp!, {r1, r2}
2620+ @ read DACR from cpu_domain into r1
2621+ mov r2, sp
2622+ @ assume 8K pages, since we have to split the immediate in two
2623+ bic r2, r2, #(0x1fc0)
2624+ bic r2, r2, #(0x3f)
2625+ ldr r1, [r2, #TI_CPU_DOMAIN]
2626+#ifdef CONFIG_PAX_KERNEXEC
2627+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2628+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2629+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2630+#endif
2631+#ifdef CONFIG_PAX_MEMORY_UDEREF
2632+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2633+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2634+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2635+#endif
2636+ @ write r1 to current_thread_info()->cpu_domain
2637+ str r1, [r2, #TI_CPU_DOMAIN]
2638+ @ write r1 to DACR
2639+ mcr p15, 0, r1, c3, c0, 0
2640+ @ instruction sync
2641+ instr_sync
2642+ @ restore regs
2643+ ldmia sp!, {r1, r2}
2644+#endif
2645 .endm
2646 #endif
2647
2648-#include "entry-header.S"
2649-
2650-
2651 .align 5
2652 /*
2653 * This is the fast syscall return path. We do as little as
2654@@ -171,6 +199,12 @@ ENTRY(vector_swi)
2655 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2656 #endif
2657
2658+ /*
2659+ * do this here to avoid a performance hit of wrapping the code above
2660+ * that directly dereferences userland to parse the SWI instruction
2661+ */
2662+ pax_enter_kernel_user
2663+
2664 adr tbl, sys_call_table @ load syscall table pointer
2665
2666 #if defined(CONFIG_OABI_COMPAT)
2667diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2668index 1a0045a..9b4f34d 100644
2669--- a/arch/arm/kernel/entry-header.S
2670+++ b/arch/arm/kernel/entry-header.S
2671@@ -196,6 +196,60 @@
2672 msr cpsr_c, \rtemp @ switch back to the SVC mode
2673 .endm
2674
2675+ .macro pax_enter_kernel_user
2676+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2677+ @ save regs
2678+ stmdb sp!, {r0, r1}
2679+ @ read DACR from cpu_domain into r1
2680+ mov r0, sp
2681+ @ assume 8K pages, since we have to split the immediate in two
2682+ bic r0, r0, #(0x1fc0)
2683+ bic r0, r0, #(0x3f)
2684+ ldr r1, [r0, #TI_CPU_DOMAIN]
2685+#ifdef CONFIG_PAX_MEMORY_UDEREF
2686+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2687+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2688+#endif
2689+#ifdef CONFIG_PAX_KERNEXEC
2690+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2691+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2692+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2693+#endif
2694+ @ write r1 to current_thread_info()->cpu_domain
2695+ str r1, [r0, #TI_CPU_DOMAIN]
2696+ @ write r1 to DACR
2697+ mcr p15, 0, r1, c3, c0, 0
2698+ @ instruction sync
2699+ instr_sync
2700+ @ restore regs
2701+ ldmia sp!, {r0, r1}
2702+#endif
2703+ .endm
2704+
2705+ .macro pax_exit_kernel
2706+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2707+ @ save regs
2708+ stmdb sp!, {r0, r1}
2709+ @ read old DACR from stack into r1
2710+ ldr r1, [sp, #(8 + S_SP)]
2711+ sub r1, r1, #8
2712+ ldr r1, [r1]
2713+
2714+ @ write r1 to current_thread_info()->cpu_domain
2715+ mov r0, sp
2716+ @ assume 8K pages, since we have to split the immediate in two
2717+ bic r0, r0, #(0x1fc0)
2718+ bic r0, r0, #(0x3f)
2719+ str r1, [r0, #TI_CPU_DOMAIN]
2720+ @ write r1 to DACR
2721+ mcr p15, 0, r1, c3, c0, 0
2722+ @ instruction sync
2723+ instr_sync
2724+ @ restore regs
2725+ ldmia sp!, {r0, r1}
2726+#endif
2727+ .endm
2728+
2729 #ifndef CONFIG_THUMB2_KERNEL
2730 .macro svc_exit, rpsr, irq = 0
2731 .if \irq != 0
2732@@ -215,6 +269,9 @@
2733 blne trace_hardirqs_off
2734 #endif
2735 .endif
2736+
2737+ pax_exit_kernel
2738+
2739 msr spsr_cxsf, \rpsr
2740 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2741 @ We must avoid clrex due to Cortex-A15 erratum #830321
2742@@ -291,6 +348,9 @@
2743 blne trace_hardirqs_off
2744 #endif
2745 .endif
2746+
2747+ pax_exit_kernel
2748+
2749 ldr lr, [sp, #S_SP] @ top of the stack
2750 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2751
2752diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2753index 059c3da..8e45cfc 100644
2754--- a/arch/arm/kernel/fiq.c
2755+++ b/arch/arm/kernel/fiq.c
2756@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2757 void *base = vectors_page;
2758 unsigned offset = FIQ_OFFSET;
2759
2760+ pax_open_kernel();
2761 memcpy(base + offset, start, length);
2762+ pax_close_kernel();
2763+
2764 if (!cache_is_vipt_nonaliasing())
2765 flush_icache_range((unsigned long)base + offset, offset +
2766 length);
2767diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2768index 664eee8..f470938 100644
2769--- a/arch/arm/kernel/head.S
2770+++ b/arch/arm/kernel/head.S
2771@@ -437,7 +437,7 @@ __enable_mmu:
2772 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2773 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2774 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2775- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2776+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2777 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2778 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2779 #endif
2780diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2781index bea7db9..a210d10 100644
2782--- a/arch/arm/kernel/module.c
2783+++ b/arch/arm/kernel/module.c
2784@@ -38,12 +38,39 @@
2785 #endif
2786
2787 #ifdef CONFIG_MMU
2788-void *module_alloc(unsigned long size)
2789+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2790 {
2791+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2792+ return NULL;
2793 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2794- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2795+ GFP_KERNEL, prot, NUMA_NO_NODE,
2796 __builtin_return_address(0));
2797 }
2798+
2799+void *module_alloc(unsigned long size)
2800+{
2801+
2802+#ifdef CONFIG_PAX_KERNEXEC
2803+ return __module_alloc(size, PAGE_KERNEL);
2804+#else
2805+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2806+#endif
2807+
2808+}
2809+
2810+#ifdef CONFIG_PAX_KERNEXEC
2811+void module_memfree_exec(void *module_region)
2812+{
2813+ module_memfree(module_region);
2814+}
2815+EXPORT_SYMBOL(module_memfree_exec);
2816+
2817+void *module_alloc_exec(unsigned long size)
2818+{
2819+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2820+}
2821+EXPORT_SYMBOL(module_alloc_exec);
2822+#endif
2823 #endif
2824
2825 int
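
The new guard in __module_alloc() refuses empty requests and anything whose page-rounded size cannot fit between MODULES_VADDR and MODULES_END, instead of letting vmalloc discover that later. A sketch of the same check; the module-area bounds and the malloc() stand-in are invented for the illustration:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

#define MODULES_VADDR 0x7f000000UL	/* assumed bounds, sketch only */
#define MODULES_END   0x7fe00000UL

static void *module_alloc_model(unsigned long size)
{
	if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
		return NULL;
	return malloc(size);	/* stands in for __vmalloc_node_range() */
}

int main(void)
{
	void *ok   = module_alloc_model(8192);
	void *big  = module_alloc_model(0x10000000UL);	/* 256 MiB > area */
	void *zero = module_alloc_model(0);

	printf("8 KiB: %s, 256 MiB: %s, zero: %s\n",
	       ok ? "ok" : "NULL", big ? "ok" : "NULL", zero ? "ok" : "NULL");
	free(ok);
	return 0;
}
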
2826diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2827index 5038960..4aa71d8 100644
2828--- a/arch/arm/kernel/patch.c
2829+++ b/arch/arm/kernel/patch.c
2830@@ -67,6 +67,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2831 else
2832 __acquire(&patch_lock);
2833
2834+ pax_open_kernel();
2835 if (thumb2 && __opcode_is_thumb16(insn)) {
2836 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2837 size = sizeof(u16);
2838@@ -98,6 +99,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2839 *(u32 *)waddr = insn;
2840 size = sizeof(u32);
2841 }
2842+ pax_close_kernel();
2843
2844 if (waddr != addr) {
2845 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2846diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2847index fdfa3a7..5d208b8 100644
2848--- a/arch/arm/kernel/process.c
2849+++ b/arch/arm/kernel/process.c
2850@@ -207,6 +207,7 @@ void machine_power_off(void)
2851
2852 if (pm_power_off)
2853 pm_power_off();
2854+ BUG();
2855 }
2856
2857 /*
2858@@ -220,7 +221,7 @@ void machine_power_off(void)
2859 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2860 * to use. Implementing such co-ordination would be essentially impossible.
2861 */
2862-void machine_restart(char *cmd)
2863+__noreturn void machine_restart(char *cmd)
2864 {
2865 local_irq_disable();
2866 smp_send_stop();
2867@@ -246,8 +247,8 @@ void __show_regs(struct pt_regs *regs)
2868
2869 show_regs_print_info(KERN_DEFAULT);
2870
2871- print_symbol("PC is at %s\n", instruction_pointer(regs));
2872- print_symbol("LR is at %s\n", regs->ARM_lr);
2873+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2874+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2875 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2876 "sp : %08lx ip : %08lx fp : %08lx\n",
2877 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2878@@ -424,12 +425,6 @@ unsigned long get_wchan(struct task_struct *p)
2879 return 0;
2880 }
2881
2882-unsigned long arch_randomize_brk(struct mm_struct *mm)
2883-{
2884- unsigned long range_end = mm->brk + 0x02000000;
2885- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2886-}
2887-
2888 #ifdef CONFIG_MMU
2889 #ifdef CONFIG_KUSER_HELPERS
2890 /*
2891@@ -445,7 +440,7 @@ static struct vm_area_struct gate_vma = {
2892
2893 static int __init gate_vma_init(void)
2894 {
2895- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2896+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2897 return 0;
2898 }
2899 arch_initcall(gate_vma_init);
2900@@ -474,81 +469,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2901 return is_gate_vma(vma) ? "[vectors]" : NULL;
2902 }
2903
2904-/* If possible, provide a placement hint at a random offset from the
2905- * stack for the signal page.
2906- */
2907-static unsigned long sigpage_addr(const struct mm_struct *mm,
2908- unsigned int npages)
2909-{
2910- unsigned long offset;
2911- unsigned long first;
2912- unsigned long last;
2913- unsigned long addr;
2914- unsigned int slots;
2915-
2916- first = PAGE_ALIGN(mm->start_stack);
2917-
2918- last = TASK_SIZE - (npages << PAGE_SHIFT);
2919-
2920- /* No room after stack? */
2921- if (first > last)
2922- return 0;
2923-
2924- /* Just enough room? */
2925- if (first == last)
2926- return first;
2927-
2928- slots = ((last - first) >> PAGE_SHIFT) + 1;
2929-
2930- offset = get_random_int() % slots;
2931-
2932- addr = first + (offset << PAGE_SHIFT);
2933-
2934- return addr;
2935-}
2936-
2937-static struct page *signal_page;
2938-extern struct page *get_signal_page(void);
2939-
2940-static const struct vm_special_mapping sigpage_mapping = {
2941- .name = "[sigpage]",
2942- .pages = &signal_page,
2943-};
2944-
2945 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2946 {
2947 struct mm_struct *mm = current->mm;
2948- struct vm_area_struct *vma;
2949- unsigned long addr;
2950- unsigned long hint;
2951- int ret = 0;
2952-
2953- if (!signal_page)
2954- signal_page = get_signal_page();
2955- if (!signal_page)
2956- return -ENOMEM;
2957
2958 down_write(&mm->mmap_sem);
2959- hint = sigpage_addr(mm, 1);
2960- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2961- if (IS_ERR_VALUE(addr)) {
2962- ret = addr;
2963- goto up_fail;
2964- }
2965-
2966- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2967- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2968- &sigpage_mapping);
2969-
2970- if (IS_ERR(vma)) {
2971- ret = PTR_ERR(vma);
2972- goto up_fail;
2973- }
2974-
2975- mm->context.sigpage = addr;
2976-
2977- up_fail:
2978+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2979 up_write(&mm->mmap_sem);
2980- return ret;
2981+ return 0;
2982 }
2983 #endif
2984diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2985index f73891b..cf3004e 100644
2986--- a/arch/arm/kernel/psci.c
2987+++ b/arch/arm/kernel/psci.c
2988@@ -28,7 +28,7 @@
2989 #include <asm/psci.h>
2990 #include <asm/system_misc.h>
2991
2992-struct psci_operations psci_ops;
2993+struct psci_operations psci_ops __read_only;
2994
2995 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2996 typedef int (*psci_initcall_t)(const struct device_node *);
2997diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2998index ef9119f..31995a3 100644
2999--- a/arch/arm/kernel/ptrace.c
3000+++ b/arch/arm/kernel/ptrace.c
3001@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3002 regs->ARM_ip = ip;
3003 }
3004
3005+#ifdef CONFIG_GRKERNSEC_SETXID
3006+extern void gr_delayed_cred_worker(void);
3007+#endif
3008+
3009 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3010 {
3011 current_thread_info()->syscall = scno;
3012
3013+#ifdef CONFIG_GRKERNSEC_SETXID
3014+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3015+ gr_delayed_cred_worker();
3016+#endif
3017+
3018 /* Do the secure computing check first; failures should be fast. */
3019 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3020 if (secure_computing() == -1)
3021diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3022index e55408e..14d9998 100644
3023--- a/arch/arm/kernel/setup.c
3024+++ b/arch/arm/kernel/setup.c
3025@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3026 unsigned int elf_hwcap2 __read_mostly;
3027 EXPORT_SYMBOL(elf_hwcap2);
3028
3029+pteval_t __supported_pte_mask __read_only;
3030+pmdval_t __supported_pmd_mask __read_only;
3031
3032 #ifdef MULTI_CPU
3033-struct processor processor __read_mostly;
3034+struct processor processor __read_only;
3035 #endif
3036 #ifdef MULTI_TLB
3037-struct cpu_tlb_fns cpu_tlb __read_mostly;
3038+struct cpu_tlb_fns cpu_tlb __read_only;
3039 #endif
3040 #ifdef MULTI_USER
3041-struct cpu_user_fns cpu_user __read_mostly;
3042+struct cpu_user_fns cpu_user __read_only;
3043 #endif
3044 #ifdef MULTI_CACHE
3045-struct cpu_cache_fns cpu_cache __read_mostly;
3046+struct cpu_cache_fns cpu_cache __read_only;
3047 #endif
3048 #ifdef CONFIG_OUTER_CACHE
3049-struct outer_cache_fns outer_cache __read_mostly;
3050+struct outer_cache_fns outer_cache __read_only;
3051 EXPORT_SYMBOL(outer_cache);
3052 #endif
3053
3054@@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
3055 asm("mrc p15, 0, %0, c0, c1, 4"
3056 : "=r" (mmfr0));
3057 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3058- (mmfr0 & 0x000000f0) >= 0x00000030)
3059+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3060 cpu_arch = CPU_ARCH_ARMv7;
3061- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3062+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3063+ __supported_pte_mask |= L_PTE_PXN;
3064+ __supported_pmd_mask |= PMD_PXNTABLE;
3065+ }
3066+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3067 (mmfr0 & 0x000000f0) == 0x00000020)
3068 cpu_arch = CPU_ARCH_ARMv6;
3069 else
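
The extra MMFR0 test above reads the ID_MMFR0 VMSA field (the low nibble): 3 already means a VMSAv7 implementation, while 4 and 5 are the VMSAv7 variants that add the PXN bit, which is when __supported_pte_mask and __supported_pmd_mask gain L_PTE_PXN and PMD_PXNTABLE. A small decoder for the same bits, with the field meanings stated as this hunk interprets the ARM ARM:

#include <stdio.h>
#include <stdint.h>

static void decode_mmfr0(uint32_t mmfr0)
{
	uint32_t vmsa = mmfr0 & 0xf;			/* VMSA support field */
	int v7  = (vmsa >= 3) || ((mmfr0 & 0xf0) >= 0x30);
	int pxn = (vmsa == 4) || (vmsa == 5);

	printf("mmfr0=0x%08x: %s, PXN %ssupported\n",
	       mmfr0, v7 ? "ARMv7 VMSA" : "pre-v7", pxn ? "" : "not ");
}

int main(void)
{
	decode_mmfr0(0x00000003);	/* plain VMSAv7 */
	decode_mmfr0(0x00000005);	/* VMSAv7 variant with PXN */
	return 0;
}
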
3070diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3071index 8aa6f1b..0899e08 100644
3072--- a/arch/arm/kernel/signal.c
3073+++ b/arch/arm/kernel/signal.c
3074@@ -24,8 +24,6 @@
3075
3076 extern const unsigned long sigreturn_codes[7];
3077
3078-static unsigned long signal_return_offset;
3079-
3080 #ifdef CONFIG_CRUNCH
3081 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3082 {
3083@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3084 * except when the MPU has protected the vectors
3085 * page from PL0
3086 */
3087- retcode = mm->context.sigpage + signal_return_offset +
3088- (idx << 2) + thumb;
3089+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3090 } else
3091 #endif
3092 {
3093@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3094 } while (thread_flags & _TIF_WORK_MASK);
3095 return 0;
3096 }
3097-
3098-struct page *get_signal_page(void)
3099-{
3100- unsigned long ptr;
3101- unsigned offset;
3102- struct page *page;
3103- void *addr;
3104-
3105- page = alloc_pages(GFP_KERNEL, 0);
3106-
3107- if (!page)
3108- return NULL;
3109-
3110- addr = page_address(page);
3111-
3112- /* Give the signal return code some randomness */
3113- offset = 0x200 + (get_random_int() & 0x7fc);
3114- signal_return_offset = offset;
3115-
3116- /*
3117- * Copy signal return handlers into the vector page, and
3118- * set sigreturn to be a pointer to these.
3119- */
3120- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3121-
3122- ptr = (unsigned long)addr + offset;
3123- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3124-
3125- return page;
3126-}
3127diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3128index 86ef244..c518451 100644
3129--- a/arch/arm/kernel/smp.c
3130+++ b/arch/arm/kernel/smp.c
3131@@ -76,7 +76,7 @@ enum ipi_msg_type {
3132
3133 static DECLARE_COMPLETION(cpu_running);
3134
3135-static struct smp_operations smp_ops;
3136+static struct smp_operations smp_ops __read_only;
3137
3138 void __init smp_set_ops(struct smp_operations *ops)
3139 {
3140diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3141index 7a3be1d..b00c7de 100644
3142--- a/arch/arm/kernel/tcm.c
3143+++ b/arch/arm/kernel/tcm.c
3144@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3145 .virtual = ITCM_OFFSET,
3146 .pfn = __phys_to_pfn(ITCM_OFFSET),
3147 .length = 0,
3148- .type = MT_MEMORY_RWX_ITCM,
3149+ .type = MT_MEMORY_RX_ITCM,
3150 }
3151 };
3152
3153@@ -267,7 +267,9 @@ no_dtcm:
3154 start = &__sitcm_text;
3155 end = &__eitcm_text;
3156 ram = &__itcm_start;
3157+ pax_open_kernel();
3158 memcpy(start, ram, itcm_code_sz);
3159+ pax_close_kernel();
3160 pr_debug("CPU ITCM: copied code from %p - %p\n",
3161 start, end);
3162 itcm_present = true;
3163diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3164index 788e23f..6fa06a1 100644
3165--- a/arch/arm/kernel/traps.c
3166+++ b/arch/arm/kernel/traps.c
3167@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3168 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3169 {
3170 #ifdef CONFIG_KALLSYMS
3171- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3172+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3173 #else
3174 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3175 #endif
3176@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3177 static int die_owner = -1;
3178 static unsigned int die_nest_count;
3179
3180+extern void gr_handle_kernel_exploit(void);
3181+
3182 static unsigned long oops_begin(void)
3183 {
3184 int cpu;
3185@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3186 panic("Fatal exception in interrupt");
3187 if (panic_on_oops)
3188 panic("Fatal exception");
3189+
3190+ gr_handle_kernel_exploit();
3191+
3192 if (signr)
3193 do_exit(signr);
3194 }
3195@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3196 kuser_init(vectors_base);
3197
3198 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3199- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3200+
3201+#ifndef CONFIG_PAX_MEMORY_UDEREF
3202+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3203+#endif
3204+
3205 #else /* ifndef CONFIG_CPU_V7M */
3206 /*
3207 * on V7-M there is no need to copy the vector table to a dedicated
3208diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3209index b31aa73..cc4b7a1 100644
3210--- a/arch/arm/kernel/vmlinux.lds.S
3211+++ b/arch/arm/kernel/vmlinux.lds.S
3212@@ -37,7 +37,7 @@
3213 #endif
3214
3215 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3216- defined(CONFIG_GENERIC_BUG)
3217+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3218 #define ARM_EXIT_KEEP(x) x
3219 #define ARM_EXIT_DISCARD(x)
3220 #else
3221@@ -123,6 +123,8 @@ SECTIONS
3222 #ifdef CONFIG_DEBUG_RODATA
3223 . = ALIGN(1<<SECTION_SHIFT);
3224 #endif
3225+ _etext = .; /* End of text section */
3226+
3227 RO_DATA(PAGE_SIZE)
3228
3229 . = ALIGN(4);
3230@@ -153,8 +155,6 @@ SECTIONS
3231
3232 NOTES
3233
3234- _etext = .; /* End of text and rodata section */
3235-
3236 #ifndef CONFIG_XIP_KERNEL
3237 # ifdef CONFIG_ARM_KERNMEM_PERMS
3238 . = ALIGN(1<<SECTION_SHIFT);
3239diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3240index 0b0d58a..988cb45 100644
3241--- a/arch/arm/kvm/arm.c
3242+++ b/arch/arm/kvm/arm.c
3243@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3244 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3245
3246 /* The VMID used in the VTTBR */
3247-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3248+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3249 static u8 kvm_next_vmid;
3250 static DEFINE_SPINLOCK(kvm_vmid_lock);
3251
3252@@ -351,7 +351,7 @@ void force_vm_exit(const cpumask_t *mask)
3253 */
3254 static bool need_new_vmid_gen(struct kvm *kvm)
3255 {
3256- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3257+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3258 }
3259
3260 /**
3261@@ -384,7 +384,7 @@ static void update_vttbr(struct kvm *kvm)
3262
3263 /* First user of a new VMID generation? */
3264 if (unlikely(kvm_next_vmid == 0)) {
3265- atomic64_inc(&kvm_vmid_gen);
3266+ atomic64_inc_unchecked(&kvm_vmid_gen);
3267 kvm_next_vmid = 1;
3268
3269 /*
3270@@ -401,7 +401,7 @@ static void update_vttbr(struct kvm *kvm)
3271 kvm_call_hyp(__kvm_flush_vm_context);
3272 }
3273
3274- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3275+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3276 kvm->arch.vmid = kvm_next_vmid;
3277 kvm_next_vmid++;
3278
3279@@ -1038,7 +1038,7 @@ static void check_kvm_target_cpu(void *ret)
3280 /**
3281 * Initialize Hyp-mode and memory mappings on all CPUs.
3282 */
3283-int kvm_arch_init(void *opaque)
3284+int kvm_arch_init(const void *opaque)
3285 {
3286 int err;
3287 int ret, cpu;
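
atomic64_unchecked_t matters only under PAX_REFCOUNT, where the regular atomics trap on signed overflow; the VMID generation counter is expected to wrap, so it is switched to the unchecked variant. A single-threaded model of the distinction, using the GCC/Clang overflow builtins rather than the real instrumentation:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Model of an instrumented increment: trap instead of wrapping. */
static int64_t inc_checked(int64_t v)
{
	int64_t r;
	if (__builtin_add_overflow(v, 1, &r)) {
		fprintf(stderr, "refcount overflow -- would trap\n");
		abort();
	}
	return r;
}

/* Model of the _unchecked variant: plain wrapping arithmetic. */
static int64_t inc_unchecked(int64_t v)
{
	return (int64_t)((uint64_t)v + 1);
}

int main(void)
{
	printf("checked:   1 -> %lld\n", (long long)inc_checked(1));
	printf("unchecked: %lld -> %lld\n",
	       (long long)INT64_MAX, (long long)inc_unchecked(INT64_MAX));
	/* inc_checked(INT64_MAX) would abort here, like the instrumented kernel. */
	return 0;
}
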
3288diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3289index 14a0d98..7771a7d 100644
3290--- a/arch/arm/lib/clear_user.S
3291+++ b/arch/arm/lib/clear_user.S
3292@@ -12,14 +12,14 @@
3293
3294 .text
3295
3296-/* Prototype: int __clear_user(void *addr, size_t sz)
3297+/* Prototype: int ___clear_user(void *addr, size_t sz)
3298 * Purpose : clear some user memory
3299 * Params : addr - user memory address to clear
3300 * : sz - number of bytes to clear
3301 * Returns : number of bytes NOT cleared
3302 */
3303 ENTRY(__clear_user_std)
3304-WEAK(__clear_user)
3305+WEAK(___clear_user)
3306 stmfd sp!, {r1, lr}
3307 mov r2, #0
3308 cmp r1, #4
3309@@ -44,7 +44,7 @@ WEAK(__clear_user)
3310 USER( strnebt r2, [r0])
3311 mov r0, #0
3312 ldmfd sp!, {r1, pc}
3313-ENDPROC(__clear_user)
3314+ENDPROC(___clear_user)
3315 ENDPROC(__clear_user_std)
3316
3317 .pushsection .fixup,"ax"
3318diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3319index 7a235b9..73a0556 100644
3320--- a/arch/arm/lib/copy_from_user.S
3321+++ b/arch/arm/lib/copy_from_user.S
3322@@ -17,7 +17,7 @@
3323 /*
3324 * Prototype:
3325 *
3326- * size_t __copy_from_user(void *to, const void *from, size_t n)
3327+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3328 *
3329 * Purpose:
3330 *
3331@@ -89,11 +89,11 @@
3332
3333 .text
3334
3335-ENTRY(__copy_from_user)
3336+ENTRY(___copy_from_user)
3337
3338 #include "copy_template.S"
3339
3340-ENDPROC(__copy_from_user)
3341+ENDPROC(___copy_from_user)
3342
3343 .pushsection .fixup,"ax"
3344 .align 0
3345diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3346index 6ee2f67..d1cce76 100644
3347--- a/arch/arm/lib/copy_page.S
3348+++ b/arch/arm/lib/copy_page.S
3349@@ -10,6 +10,7 @@
3350 * ASM optimised string functions
3351 */
3352 #include <linux/linkage.h>
3353+#include <linux/const.h>
3354 #include <asm/assembler.h>
3355 #include <asm/asm-offsets.h>
3356 #include <asm/cache.h>
3357diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3358index a9d3db1..164b089 100644
3359--- a/arch/arm/lib/copy_to_user.S
3360+++ b/arch/arm/lib/copy_to_user.S
3361@@ -17,7 +17,7 @@
3362 /*
3363 * Prototype:
3364 *
3365- * size_t __copy_to_user(void *to, const void *from, size_t n)
3366+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3367 *
3368 * Purpose:
3369 *
3370@@ -93,11 +93,11 @@
3371 .text
3372
3373 ENTRY(__copy_to_user_std)
3374-WEAK(__copy_to_user)
3375+WEAK(___copy_to_user)
3376
3377 #include "copy_template.S"
3378
3379-ENDPROC(__copy_to_user)
3380+ENDPROC(___copy_to_user)
3381 ENDPROC(__copy_to_user_std)
3382
3383 .pushsection .fixup,"ax"
3384diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3385index 7d08b43..f7ca7ea 100644
3386--- a/arch/arm/lib/csumpartialcopyuser.S
3387+++ b/arch/arm/lib/csumpartialcopyuser.S
3388@@ -57,8 +57,8 @@
3389 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3390 */
3391
3392-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3393-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3394+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3395+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3396
3397 #include "csumpartialcopygeneric.S"
3398
3399diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3400index 312d43e..21d2322 100644
3401--- a/arch/arm/lib/delay.c
3402+++ b/arch/arm/lib/delay.c
3403@@ -29,7 +29,7 @@
3404 /*
3405 * Default to the loop-based delay implementation.
3406 */
3407-struct arm_delay_ops arm_delay_ops = {
3408+struct arm_delay_ops arm_delay_ops __read_only = {
3409 .delay = __loop_delay,
3410 .const_udelay = __loop_const_udelay,
3411 .udelay = __loop_udelay,
3412diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3413index 3e58d71..029817c 100644
3414--- a/arch/arm/lib/uaccess_with_memcpy.c
3415+++ b/arch/arm/lib/uaccess_with_memcpy.c
3416@@ -136,7 +136,7 @@ out:
3417 }
3418
3419 unsigned long
3420-__copy_to_user(void __user *to, const void *from, unsigned long n)
3421+___copy_to_user(void __user *to, const void *from, unsigned long n)
3422 {
3423 /*
3424 * This test is stubbed out of the main function above to keep
3425@@ -190,7 +190,7 @@ out:
3426 return n;
3427 }
3428
3429-unsigned long __clear_user(void __user *addr, unsigned long n)
3430+unsigned long ___clear_user(void __user *addr, unsigned long n)
3431 {
3432 /* See rational for this in __copy_to_user() above. */
3433 if (n < 64)
3434diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3435index ce25e85..3dd7850 100644
3436--- a/arch/arm/mach-at91/setup.c
3437+++ b/arch/arm/mach-at91/setup.c
3438@@ -57,7 +57,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3439
3440 desc->pfn = __phys_to_pfn(base);
3441 desc->length = length;
3442- desc->type = MT_MEMORY_RWX_NONCACHED;
3443+ desc->type = MT_MEMORY_RW_NONCACHED;
3444
3445 pr_info("sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3446 base, length, desc->virtual);
3447diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3448index 7f352de..6dc0929 100644
3449--- a/arch/arm/mach-keystone/keystone.c
3450+++ b/arch/arm/mach-keystone/keystone.c
3451@@ -27,7 +27,7 @@
3452
3453 #include "keystone.h"
3454
3455-static struct notifier_block platform_nb;
3456+static notifier_block_no_const platform_nb;
3457 static unsigned long keystone_dma_pfn_offset __read_mostly;
3458
3459 static int keystone_platform_notifier(struct notifier_block *nb,
3460diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3461index ccef880..5dfad80 100644
3462--- a/arch/arm/mach-mvebu/coherency.c
3463+++ b/arch/arm/mach-mvebu/coherency.c
3464@@ -164,7 +164,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3465
3466 /*
3467 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3468- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3469+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3470 * is needed as a workaround for a deadlock issue between the PCIe
3471 * interface and the cache controller.
3472 */
3473@@ -177,7 +177,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3474 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3475
3476 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3477- mtype = MT_UNCACHED;
3478+ mtype = MT_UNCACHED_RW;
3479
3480 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3481 }
3482diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3483index b6443a4..20a0b74 100644
3484--- a/arch/arm/mach-omap2/board-n8x0.c
3485+++ b/arch/arm/mach-omap2/board-n8x0.c
3486@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3487 }
3488 #endif
3489
3490-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3491+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3492 .late_init = n8x0_menelaus_late_init,
3493 };
3494
3495diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3496index 79f49d9..70bf184 100644
3497--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3498+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3499@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3500 void (*resume)(void);
3501 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3502 void (*hotplug_restart)(void);
3503-};
3504+} __no_const;
3505
3506 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3507 static struct powerdomain *mpuss_pd;
3508@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3509 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3510 {}
3511
3512-struct cpu_pm_ops omap_pm_ops = {
3513+static struct cpu_pm_ops omap_pm_ops __read_only = {
3514 .finish_suspend = default_finish_suspend,
3515 .resume = dummy_cpu_resume,
3516 .scu_prepare = dummy_scu_prepare,
3517diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3518index f961c46..4a453dc 100644
3519--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3520+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3521@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3522 return NOTIFY_OK;
3523 }
3524
3525-static struct notifier_block __refdata irq_hotplug_notifier = {
3526+static struct notifier_block irq_hotplug_notifier = {
3527 .notifier_call = irq_cpu_hotplug_notify,
3528 };
3529
3530diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3531index be9541e..821805f 100644
3532--- a/arch/arm/mach-omap2/omap_device.c
3533+++ b/arch/arm/mach-omap2/omap_device.c
3534@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3535 struct platform_device __init *omap_device_build(const char *pdev_name,
3536 int pdev_id,
3537 struct omap_hwmod *oh,
3538- void *pdata, int pdata_len)
3539+ const void *pdata, int pdata_len)
3540 {
3541 struct omap_hwmod *ohs[] = { oh };
3542
3543@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3544 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3545 int pdev_id,
3546 struct omap_hwmod **ohs,
3547- int oh_cnt, void *pdata,
3548+ int oh_cnt, const void *pdata,
3549 int pdata_len)
3550 {
3551 int ret = -ENOMEM;
3552diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3553index 78c02b3..c94109a 100644
3554--- a/arch/arm/mach-omap2/omap_device.h
3555+++ b/arch/arm/mach-omap2/omap_device.h
3556@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3557 /* Core code interface */
3558
3559 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3560- struct omap_hwmod *oh, void *pdata,
3561+ struct omap_hwmod *oh, const void *pdata,
3562 int pdata_len);
3563
3564 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3565 struct omap_hwmod **oh, int oh_cnt,
3566- void *pdata, int pdata_len);
3567+ const void *pdata, int pdata_len);
3568
3569 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3570 struct omap_hwmod **ohs, int oh_cnt);
3571diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3572index 9025fff..3555702 100644
3573--- a/arch/arm/mach-omap2/omap_hwmod.c
3574+++ b/arch/arm/mach-omap2/omap_hwmod.c
3575@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3576 int (*init_clkdm)(struct omap_hwmod *oh);
3577 void (*update_context_lost)(struct omap_hwmod *oh);
3578 int (*get_context_lost)(struct omap_hwmod *oh);
3579-};
3580+} __no_const;
3581
3582 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3583-static struct omap_hwmod_soc_ops soc_ops;
3584+static struct omap_hwmod_soc_ops soc_ops __read_only;
3585
3586 /* omap_hwmod_list contains all registered struct omap_hwmods */
3587 static LIST_HEAD(omap_hwmod_list);
3588diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3589index 95fee54..cfa9cf1 100644
3590--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3591+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3592@@ -10,6 +10,7 @@
3593
3594 #include <linux/kernel.h>
3595 #include <linux/init.h>
3596+#include <asm/pgtable.h>
3597
3598 #include "powerdomain.h"
3599
3600@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3601
3602 void __init am43xx_powerdomains_init(void)
3603 {
3604- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3605+ pax_open_kernel();
3606+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3607+ pax_close_kernel();
3608 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3609 pwrdm_register_pwrdms(powerdomains_am43xx);
3610 pwrdm_complete_init();
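This hunk shows the canonical write pattern for constified data: omap4_pwrdm_operations is an ops structure whose function-pointer members the constify plugin treats as const, so a plain member assignment no longer builds, and the backing memory is read-only at runtime. The write therefore goes through a non-const lvalue while the mapping is briefly writable:

    /* rejected at build time (and would fault) once the struct is constified: */
    omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;

    /* working form: strip the plugin-added const, open a short write window */
    pax_open_kernel();
    *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
    pax_close_kernel();
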
3611diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3612index ff0a68c..b312aa0 100644
3613--- a/arch/arm/mach-omap2/wd_timer.c
3614+++ b/arch/arm/mach-omap2/wd_timer.c
3615@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3616 struct omap_hwmod *oh;
3617 char *oh_name = "wd_timer2";
3618 char *dev_name = "omap_wdt";
3619- struct omap_wd_timer_platform_data pdata;
3620+ static struct omap_wd_timer_platform_data pdata = {
3621+ .read_reset_sources = prm_read_reset_sources
3622+ };
3623
3624 if (!cpu_class_is_omap2() || of_have_populated_dt())
3625 return 0;
3626@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3627 return -EINVAL;
3628 }
3629
3630- pdata.read_reset_sources = prm_read_reset_sources;
3631-
3632 pdev = omap_device_build(dev_name, id, oh, &pdata,
3633 sizeof(struct omap_wd_timer_platform_data));
3634 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
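The pdata change above is a knock-on effect of constify: read_reset_sources is a function-pointer member, so it can no longer be assigned after the fact, not even in an on-stack copy. A static object with a designated initializer sets it at compile time instead, which also matches the const void *pdata signature that omap_device_build() gains earlier in this patch. Side by side (illustrative):

    /* no longer valid once the member is const: */
    struct omap_wd_timer_platform_data pdata;
    pdata.read_reset_sources = prm_read_reset_sources;   /* runtime write to const */

    /* replacement: compile-time initialization, no runtime write at all */
    static struct omap_wd_timer_platform_data pdata = {
            .read_reset_sources = prm_read_reset_sources,
    };
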
3635diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3636index 4f25a7c..a81be85 100644
3637--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3638+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3639@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3640 bool entered_lp2 = false;
3641
3642 if (tegra_pending_sgi())
3643- ACCESS_ONCE(abort_flag) = true;
3644+ ACCESS_ONCE_RW(abort_flag) = true;
3645
3646 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3647
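ACCESS_ONCE_RW exists because this patch re-types ACCESS_ONCE as a const-qualified access, so stray writes through it become build errors; sites that genuinely intend a write, like the abort_flag store above, are converted explicitly. A sketch of the split — the exact definitions live in the patch's include/linux/compiler.h changes, not shown in this excerpt:

    #define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))  /* reads only */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))        /* intended writes */
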
3648diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3649index 2dea8b5..6499da2 100644
3650--- a/arch/arm/mach-ux500/setup.h
3651+++ b/arch/arm/mach-ux500/setup.h
3652@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3653 .type = MT_DEVICE, \
3654 }
3655
3656-#define __MEM_DEV_DESC(x, sz) { \
3657- .virtual = IO_ADDRESS(x), \
3658- .pfn = __phys_to_pfn(x), \
3659- .length = sz, \
3660- .type = MT_MEMORY_RWX, \
3661-}
3662-
3663 extern struct smp_operations ux500_smp_ops;
3664 extern void ux500_cpu_die(unsigned int cpu);
3665
3666diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3667index c43c714..4f8f7b9 100644
3668--- a/arch/arm/mm/Kconfig
3669+++ b/arch/arm/mm/Kconfig
3670@@ -446,6 +446,7 @@ config CPU_32v5
3671
3672 config CPU_32v6
3673 bool
3674+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3675 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3676
3677 config CPU_32v6K
3678@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3679
3680 config CPU_USE_DOMAINS
3681 bool
3682+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3683 help
3684 This option enables or disables the use of domain switching
3685 via the set_fs() function.
3686@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3687
3688 config KUSER_HELPERS
3689 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3690- depends on MMU
3691+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3692 default y
3693 help
3694 Warning: disabling this option may break user programs.
3695@@ -812,7 +814,7 @@ config KUSER_HELPERS
3696 See Documentation/arm/kernel_user_helpers.txt for details.
3697
3698 However, the fixed address nature of these helpers can be used
3699- by ROP (return orientated programming) authors when creating
3700+ by ROP (Return Oriented Programming) authors when creating
3701 exploits.
3702
3703 If all of the binaries and libraries which run on your platform
3704diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3705index 2c0c541..4585df9 100644
3706--- a/arch/arm/mm/alignment.c
3707+++ b/arch/arm/mm/alignment.c
3708@@ -216,10 +216,12 @@ union offset_union {
3709 #define __get16_unaligned_check(ins,val,addr) \
3710 do { \
3711 unsigned int err = 0, v, a = addr; \
3712+ pax_open_userland(); \
3713 __get8_unaligned_check(ins,v,a,err); \
3714 val = v << ((BE) ? 8 : 0); \
3715 __get8_unaligned_check(ins,v,a,err); \
3716 val |= v << ((BE) ? 0 : 8); \
3717+ pax_close_userland(); \
3718 if (err) \
3719 goto fault; \
3720 } while (0)
3721@@ -233,6 +235,7 @@ union offset_union {
3722 #define __get32_unaligned_check(ins,val,addr) \
3723 do { \
3724 unsigned int err = 0, v, a = addr; \
3725+ pax_open_userland(); \
3726 __get8_unaligned_check(ins,v,a,err); \
3727 val = v << ((BE) ? 24 : 0); \
3728 __get8_unaligned_check(ins,v,a,err); \
3729@@ -241,6 +244,7 @@ union offset_union {
3730 val |= v << ((BE) ? 8 : 16); \
3731 __get8_unaligned_check(ins,v,a,err); \
3732 val |= v << ((BE) ? 0 : 24); \
3733+ pax_close_userland(); \
3734 if (err) \
3735 goto fault; \
3736 } while (0)
3737@@ -254,6 +258,7 @@ union offset_union {
3738 #define __put16_unaligned_check(ins,val,addr) \
3739 do { \
3740 unsigned int err = 0, v = val, a = addr; \
3741+ pax_open_userland(); \
3742 __asm__( FIRST_BYTE_16 \
3743 ARM( "1: "ins" %1, [%2], #1\n" ) \
3744 THUMB( "1: "ins" %1, [%2]\n" ) \
3745@@ -273,6 +278,7 @@ union offset_union {
3746 " .popsection\n" \
3747 : "=r" (err), "=&r" (v), "=&r" (a) \
3748 : "0" (err), "1" (v), "2" (a)); \
3749+ pax_close_userland(); \
3750 if (err) \
3751 goto fault; \
3752 } while (0)
3753@@ -286,6 +292,7 @@ union offset_union {
3754 #define __put32_unaligned_check(ins,val,addr) \
3755 do { \
3756 unsigned int err = 0, v = val, a = addr; \
3757+ pax_open_userland(); \
3758 __asm__( FIRST_BYTE_32 \
3759 ARM( "1: "ins" %1, [%2], #1\n" ) \
3760 THUMB( "1: "ins" %1, [%2]\n" ) \
3761@@ -315,6 +322,7 @@ union offset_union {
3762 " .popsection\n" \
3763 : "=r" (err), "=&r" (v), "=&r" (a) \
3764 : "0" (err), "1" (v), "2" (a)); \
3765+ pax_close_userland(); \
3766 if (err) \
3767 goto fault; \
3768 } while (0)
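The alignment fixup handler dereferences the faulting userland address byte by byte, which under PAX_MEMORY_UDEREF would itself fault: kernel code has no implicit access to the user domain. Each multi-byte accessor is therefore bracketed so the domain is open only for the copy and closed again before the error check, e.g. for the 16-bit case:

    pax_open_userland();                     /* user domain now accessible */
    __get8_unaligned_check(ins, v, a, err);  /* byte 0 */
    __get8_unaligned_check(ins, v, a, err);  /* byte 1 */
    pax_close_userland();                    /* window closed before the fixup path */
    if (err)
            goto fault;
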
3769diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3770index 5e65ca8..879e7b3 100644
3771--- a/arch/arm/mm/cache-l2x0.c
3772+++ b/arch/arm/mm/cache-l2x0.c
3773@@ -42,7 +42,7 @@ struct l2c_init_data {
3774 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
3775 void (*save)(void __iomem *);
3776 struct outer_cache_fns outer_cache;
3777-};
3778+} __do_const;
3779
3780 #define CACHE_LINE_SIZE 32
3781
3782diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3783index 845769e..4278fd7 100644
3784--- a/arch/arm/mm/context.c
3785+++ b/arch/arm/mm/context.c
3786@@ -43,7 +43,7 @@
3787 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3788
3789 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3790-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3791+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3792 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3793
3794 static DEFINE_PER_CPU(atomic64_t, active_asids);
3795@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3796 {
3797 static u32 cur_idx = 1;
3798 u64 asid = atomic64_read(&mm->context.id);
3799- u64 generation = atomic64_read(&asid_generation);
3800+ u64 generation = atomic64_read_unchecked(&asid_generation);
3801
3802 if (asid != 0) {
3803 /*
3804@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3805 */
3806 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3807 if (asid == NUM_USER_ASIDS) {
3808- generation = atomic64_add_return(ASID_FIRST_VERSION,
3809+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3810 &asid_generation);
3811 flush_context(cpu);
3812 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3813@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3814 cpu_set_reserved_ttbr0();
3815
3816 asid = atomic64_read(&mm->context.id);
3817- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3818+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3819 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3820 goto switch_mm_fastpath;
3821
3822 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3823 /* Check that our ASID belongs to the current generation. */
3824 asid = atomic64_read(&mm->context.id);
3825- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3826+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3827 asid = new_context(mm, cpu);
3828 atomic64_set(&mm->context.id, asid);
3829 }
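atomic64_unchecked_t is the PAX_REFCOUNT opt-out: instrumented atomics trap on signed overflow to stop reference-count wraps, but the ASID generation counter is designed to wrap, so it moves to the unchecked variants. A contrast sketch, assuming REFCOUNT is enabled:

    static atomic64_t refs = ATOMIC64_INIT(0);           /* overflow -> trap + report */
    static atomic64_unchecked_t gen = ATOMIC64_INIT(0);  /* overflow -> silent wrap */

    atomic64_inc(&refs);            /* instrumented add, checked for overflow */
    atomic64_inc_unchecked(&gen);   /* plain add, intentional wraparound is fine */
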
3830diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3831index a982dc3..2d9f5f7 100644
3832--- a/arch/arm/mm/fault.c
3833+++ b/arch/arm/mm/fault.c
3834@@ -25,6 +25,7 @@
3835 #include <asm/system_misc.h>
3836 #include <asm/system_info.h>
3837 #include <asm/tlbflush.h>
3838+#include <asm/sections.h>
3839
3840 #include "fault.h"
3841
3842@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3843 if (fixup_exception(regs))
3844 return;
3845
3846+#ifdef CONFIG_PAX_MEMORY_UDEREF
3847+ if (addr < TASK_SIZE) {
3848+ if (current->signal->curr_ip)
3849+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3850+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3851+ else
3852+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3853+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3854+ }
3855+#endif
3856+
3857+#ifdef CONFIG_PAX_KERNEXEC
3858+ if ((fsr & FSR_WRITE) &&
3859+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3860+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3861+ {
3862+ if (current->signal->curr_ip)
3863+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3864+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3865+ else
3866+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3867+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3868+ }
3869+#endif
3870+
3871 /*
3872 * No handler, we'll have to terminate things with extreme prejudice.
3873 */
3874@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3875 }
3876 #endif
3877
3878+#ifdef CONFIG_PAX_PAGEEXEC
3879+ if (fsr & FSR_LNX_PF) {
3880+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3881+ do_group_exit(SIGKILL);
3882+ }
3883+#endif
3884+
3885 tsk->thread.address = addr;
3886 tsk->thread.error_code = fsr;
3887 tsk->thread.trap_no = 14;
3888@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3889 }
3890 #endif /* CONFIG_MMU */
3891
3892+#ifdef CONFIG_PAX_PAGEEXEC
3893+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3894+{
3895+ long i;
3896+
3897+ printk(KERN_ERR "PAX: bytes at PC: ");
3898+ for (i = 0; i < 20; i++) {
3899+ unsigned char c;
3900+ if (get_user(c, (__force unsigned char __user *)pc+i))
3901+ printk(KERN_CONT "?? ");
3902+ else
3903+ printk(KERN_CONT "%02x ", c);
3904+ }
3905+ printk("\n");
3906+
3907+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3908+ for (i = -1; i < 20; i++) {
3909+ unsigned long c;
3910+ if (get_user(c, (__force unsigned long __user *)sp+i))
3911+ printk(KERN_CONT "???????? ");
3912+ else
3913+ printk(KERN_CONT "%08lx ", c);
3914+ }
3915+ printk("\n");
3916+}
3917+#endif
3918+
3919 /*
3920 * First Level Translation Fault Handler
3921 *
3922@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3923 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3924 struct siginfo info;
3925
3926+#ifdef CONFIG_PAX_MEMORY_UDEREF
3927+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3928+ if (current->signal->curr_ip)
3929+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3930+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3931+ else
3932+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3933+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3934+ goto die;
3935+ }
3936+#endif
3937+
3938 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3939 return;
3940
3941+die:
3942 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3943 inf->name, fsr, addr);
3944
3945@@ -573,15 +646,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
3946 ifsr_info[nr].name = name;
3947 }
3948
3949+asmlinkage int sys_sigreturn(struct pt_regs *regs);
3950+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
3951+
3952 asmlinkage void __exception
3953 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3954 {
3955 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3956 struct siginfo info;
3957+ unsigned long pc = instruction_pointer(regs);
3958+
3959+ if (user_mode(regs)) {
3960+ unsigned long sigpage = current->mm->context.sigpage;
3961+
3962+ if (sigpage <= pc && pc < sigpage + 7*4) {
3963+ if (pc < sigpage + 3*4)
3964+ sys_sigreturn(regs);
3965+ else
3966+ sys_rt_sigreturn(regs);
3967+ return;
3968+ }
3969+ if (pc == 0xffff0f60UL) {
3970+ /*
3971+ * PaX: __kuser_cmpxchg64 emulation
3972+ */
3973+ // TODO
3974+ //regs->ARM_pc = regs->ARM_lr;
3975+ //return;
3976+ }
3977+ if (pc == 0xffff0fa0UL) {
3978+ /*
3979+ * PaX: __kuser_memory_barrier emulation
3980+ */
3981+ // dmb(); implied by the exception
3982+ regs->ARM_pc = regs->ARM_lr;
3983+ return;
3984+ }
3985+ if (pc == 0xffff0fc0UL) {
3986+ /*
3987+ * PaX: __kuser_cmpxchg emulation
3988+ */
3989+ // TODO
3990+ //long new;
3991+ //int op;
3992+
3993+ //op = FUTEX_OP_SET << 28;
3994+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
3995+ //regs->ARM_r0 = old != new;
3996+ //regs->ARM_pc = regs->ARM_lr;
3997+ //return;
3998+ }
3999+ if (pc == 0xffff0fe0UL) {
4000+ /*
4001+ * PaX: __kuser_get_tls emulation
4002+ */
4003+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4004+ regs->ARM_pc = regs->ARM_lr;
4005+ return;
4006+ }
4007+ }
4008+
4009+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4010+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4011+ if (current->signal->curr_ip)
4012+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4013+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4014+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4015+ else
4016+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4017+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4018+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4019+ goto die;
4020+ }
4021+#endif
4022+
4023+#ifdef CONFIG_PAX_REFCOUNT
4024+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4025+#ifdef CONFIG_THUMB2_KERNEL
4026+ unsigned short bkpt;
4027+
4028+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4029+#else
4030+ unsigned int bkpt;
4031+
4032+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4033+#endif
4034+ current->thread.error_code = ifsr;
4035+ current->thread.trap_no = 0;
4036+ pax_report_refcount_overflow(regs);
4037+ fixup_exception(regs);
4038+ return;
4039+ }
4040+ }
4041+#endif
4042
4043 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4044 return;
4045
4046+die:
4047 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4048 inf->name, ifsr, addr);
4049
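The user_mode() block added to do_PrefetchAbort() emulates the signal-return trampolines and the fixed-address kuser helpers once their pages are no longer executable (or mapped) for userland under KERNEXEC/UDEREF. Old binaries call these helpers by absolute address, so the prefetch abort itself becomes the dispatch mechanism and pc identifies which helper was intended. For reference, this is how userland traditionally reaches one of them (standard ARM kuser ABI, shown for illustration):

    typedef unsigned int (*kuser_get_tls_t)(void);
    #define __kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)

    unsigned int tls = __kuser_get_tls();   /* now aborts and is emulated above */
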
4050diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4051index cf08bdf..772656c 100644
4052--- a/arch/arm/mm/fault.h
4053+++ b/arch/arm/mm/fault.h
4054@@ -3,6 +3,7 @@
4055
4056 /*
4057 * Fault status register encodings. We steal bit 31 for our own purposes.
4058+ * Set when the FSR value is from an instruction fault.
4059 */
4060 #define FSR_LNX_PF (1 << 31)
4061 #define FSR_WRITE (1 << 11)
4062@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4063 }
4064 #endif
4065
4066+/* valid for LPAE and !LPAE */
4067+static inline int is_xn_fault(unsigned int fsr)
4068+{
4069+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4070+}
4071+
4072+static inline int is_domain_fault(unsigned int fsr)
4073+{
4074+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4075+}
4076+
4077 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4078 unsigned long search_exception_table(unsigned long addr);
4079
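The two helpers decode the FSR status field with masks chosen so a single test covers both the section and page variants of each fault, on LPAE and classic page tables alike. A worked decode against the short-descriptor status codes:

    /* 0x0d permission fault, section : (0x0d & 0x3c) == 0x0c -> XN fault
     * 0x0f permission fault, page    : (0x0f & 0x3c) == 0x0c -> XN fault
     * 0x09 domain fault, section     : (0x09 & 0x0d) == 0x09 -> domain fault
     * 0x0b domain fault, page        : (0x0b & 0x0d) == 0x09 -> domain fault */
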
4080diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4081index 2495c8c..415b7fc 100644
4082--- a/arch/arm/mm/init.c
4083+++ b/arch/arm/mm/init.c
4084@@ -758,7 +758,46 @@ void free_tcmmem(void)
4085 {
4086 #ifdef CONFIG_HAVE_TCM
4087 extern char __tcm_start, __tcm_end;
4088+#endif
4089
4090+#ifdef CONFIG_PAX_KERNEXEC
4091+ unsigned long addr;
4092+ pgd_t *pgd;
4093+ pud_t *pud;
4094+ pmd_t *pmd;
4095+ int cpu_arch = cpu_architecture();
4096+ unsigned int cr = get_cr();
4097+
4098+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4099+		/* make page tables, etc. before .text NX */
4100+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4101+ pgd = pgd_offset_k(addr);
4102+ pud = pud_offset(pgd, addr);
4103+ pmd = pmd_offset(pud, addr);
4104+ __section_update(pmd, addr, PMD_SECT_XN);
4105+ }
4106+ /* make init NX */
4107+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4108+ pgd = pgd_offset_k(addr);
4109+ pud = pud_offset(pgd, addr);
4110+ pmd = pmd_offset(pud, addr);
4111+ __section_update(pmd, addr, PMD_SECT_XN);
4112+ }
4113+ /* make kernel code/rodata RX */
4114+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4115+ pgd = pgd_offset_k(addr);
4116+ pud = pud_offset(pgd, addr);
4117+ pmd = pmd_offset(pud, addr);
4118+#ifdef CONFIG_ARM_LPAE
4119+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4120+#else
4121+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4122+#endif
4123+ }
4124+ }
4125+#endif
4126+
4127+#ifdef CONFIG_HAVE_TCM
4128 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4129 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4130 #endif
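The three __section_update() loops above enforce the KERNEXEC split at section granularity (1 MiB on classic page tables) once MMU setup is final. The resulting lowmem layout, summarized:

    PAGE_OFFSET .. _stext        page tables, early data         -> NX
    _stext .. __init_begin       kernel text + rodata            -> RX (read-only)
    __init_begin .. _sdata       init text/data (freed later)    -> NX
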
4131diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4132index d1e5ad7..84dcbf2 100644
4133--- a/arch/arm/mm/ioremap.c
4134+++ b/arch/arm/mm/ioremap.c
4135@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4136 unsigned int mtype;
4137
4138 if (cached)
4139- mtype = MT_MEMORY_RWX;
4140+ mtype = MT_MEMORY_RX;
4141 else
4142- mtype = MT_MEMORY_RWX_NONCACHED;
4143+ mtype = MT_MEMORY_RX_NONCACHED;
4144
4145 return __arm_ioremap_caller(phys_addr, size, mtype,
4146 __builtin_return_address(0));
4147diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4148index 5e85ed3..b10a7ed 100644
4149--- a/arch/arm/mm/mmap.c
4150+++ b/arch/arm/mm/mmap.c
4151@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4152 struct vm_area_struct *vma;
4153 int do_align = 0;
4154 int aliasing = cache_is_vipt_aliasing();
4155+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4156 struct vm_unmapped_area_info info;
4157
4158 /*
4159@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4160 if (len > TASK_SIZE)
4161 return -ENOMEM;
4162
4163+#ifdef CONFIG_PAX_RANDMMAP
4164+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4165+#endif
4166+
4167 if (addr) {
4168 if (do_align)
4169 addr = COLOUR_ALIGN(addr, pgoff);
4170@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4171 addr = PAGE_ALIGN(addr);
4172
4173 vma = find_vma(mm, addr);
4174- if (TASK_SIZE - len >= addr &&
4175- (!vma || addr + len <= vma->vm_start))
4176+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4177 return addr;
4178 }
4179
4180@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4181 info.high_limit = TASK_SIZE;
4182 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4183 info.align_offset = pgoff << PAGE_SHIFT;
4184+ info.threadstack_offset = offset;
4185 return vm_unmapped_area(&info);
4186 }
4187
4188@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4189 unsigned long addr = addr0;
4190 int do_align = 0;
4191 int aliasing = cache_is_vipt_aliasing();
4192+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4193 struct vm_unmapped_area_info info;
4194
4195 /*
4196@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4197 return addr;
4198 }
4199
4200+#ifdef CONFIG_PAX_RANDMMAP
4201+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4202+#endif
4203+
4204 /* requesting a specific address */
4205 if (addr) {
4206 if (do_align)
4207@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4208 else
4209 addr = PAGE_ALIGN(addr);
4210 vma = find_vma(mm, addr);
4211- if (TASK_SIZE - len >= addr &&
4212- (!vma || addr + len <= vma->vm_start))
4213+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4214 return addr;
4215 }
4216
4217@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4218 info.high_limit = mm->mmap_base;
4219 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4220 info.align_offset = pgoff << PAGE_SHIFT;
4221+ info.threadstack_offset = offset;
4222 addr = vm_unmapped_area(&info);
4223
4224 /*
4225@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4226 {
4227 unsigned long random_factor = 0UL;
4228
4229+#ifdef CONFIG_PAX_RANDMMAP
4230+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4231+#endif
4232+
4233 /* 8 bits of randomness in 20 address space bits */
4234 if ((current->flags & PF_RANDOMIZE) &&
4235 !(current->personality & ADDR_NO_RANDOMIZE))
4236@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4237
4238 if (mmap_is_legacy()) {
4239 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4240+
4241+#ifdef CONFIG_PAX_RANDMMAP
4242+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4243+ mm->mmap_base += mm->delta_mmap;
4244+#endif
4245+
4246 mm->get_unmapped_area = arch_get_unmapped_area;
4247 } else {
4248 mm->mmap_base = mmap_base(random_factor);
4249+
4250+#ifdef CONFIG_PAX_RANDMMAP
4251+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4252+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4253+#endif
4254+
4255 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4256 }
4257 }
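Both mmap placement paths now funnel their decision through check_heap_stack_gap() with a per-thread random offset from gr_rand_threadstack_offset(), instead of the bare next-vma comparison. A simplified sketch of what the helper checks — the real version also honors the configurable heap/stack gap and guard pages, so treat this as an approximation:

    static inline bool check_heap_stack_gap(const struct vm_area_struct *vma,
                                            unsigned long addr, unsigned long len,
                                            unsigned long offset)
    {
            if (!vma)
                    return true;
            /* must fit below the next mapping, plus the randomized slack */
            return addr + len + offset <= vma->vm_start;
    }
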
4258diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4259index 4e6ef89..21c27f2 100644
4260--- a/arch/arm/mm/mmu.c
4261+++ b/arch/arm/mm/mmu.c
4262@@ -41,6 +41,22 @@
4263 #include "mm.h"
4264 #include "tcm.h"
4265
4266+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4267+void modify_domain(unsigned int dom, unsigned int type)
4268+{
4269+ struct thread_info *thread = current_thread_info();
4270+ unsigned int domain = thread->cpu_domain;
4271+ /*
4272+ * DOMAIN_MANAGER might be defined to some other value,
4273+ * use the arch-defined constant
4274+ */
4275+ domain &= ~domain_val(dom, 3);
4276+ thread->cpu_domain = domain | domain_val(dom, type);
4277+ set_domain(thread->cpu_domain);
4278+}
4279+EXPORT_SYMBOL(modify_domain);
4280+#endif
4281+
4282 /*
4283 * empty_zero_page is a special page that is used for
4284 * zero-initialized data and COW.
4285@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4286 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4287 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4288
4289-static struct mem_type mem_types[] = {
4290+#ifdef CONFIG_PAX_KERNEXEC
4291+#define L_PTE_KERNEXEC L_PTE_RDONLY
4292+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4293+#else
4294+#define L_PTE_KERNEXEC L_PTE_DIRTY
4295+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4296+#endif
4297+
4298+static struct mem_type mem_types[] __read_only = {
4299 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4300 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4301 L_PTE_SHARED,
4302@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4303 .prot_sect = PROT_SECT_DEVICE,
4304 .domain = DOMAIN_IO,
4305 },
4306- [MT_UNCACHED] = {
4307+ [MT_UNCACHED_RW] = {
4308 .prot_pte = PROT_PTE_DEVICE,
4309 .prot_l1 = PMD_TYPE_TABLE,
4310 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4311 .domain = DOMAIN_IO,
4312 },
4313- [MT_CACHECLEAN] = {
4314- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4315+ [MT_CACHECLEAN_RO] = {
4316+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4317 .domain = DOMAIN_KERNEL,
4318 },
4319 #ifndef CONFIG_ARM_LPAE
4320- [MT_MINICLEAN] = {
4321- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4322+ [MT_MINICLEAN_RO] = {
4323+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4324 .domain = DOMAIN_KERNEL,
4325 },
4326 #endif
4327@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4328 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4329 L_PTE_RDONLY,
4330 .prot_l1 = PMD_TYPE_TABLE,
4331- .domain = DOMAIN_USER,
4332+ .domain = DOMAIN_VECTORS,
4333 },
4334 [MT_HIGH_VECTORS] = {
4335 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4336 L_PTE_USER | L_PTE_RDONLY,
4337 .prot_l1 = PMD_TYPE_TABLE,
4338- .domain = DOMAIN_USER,
4339+ .domain = DOMAIN_VECTORS,
4340 },
4341- [MT_MEMORY_RWX] = {
4342+ [__MT_MEMORY_RWX] = {
4343 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4344 .prot_l1 = PMD_TYPE_TABLE,
4345 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4346@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4347 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4348 .domain = DOMAIN_KERNEL,
4349 },
4350- [MT_ROM] = {
4351- .prot_sect = PMD_TYPE_SECT,
4352+ [MT_MEMORY_RX] = {
4353+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4354+ .prot_l1 = PMD_TYPE_TABLE,
4355+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4356+ .domain = DOMAIN_KERNEL,
4357+ },
4358+ [MT_ROM_RX] = {
4359+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4360 .domain = DOMAIN_KERNEL,
4361 },
4362- [MT_MEMORY_RWX_NONCACHED] = {
4363+ [MT_MEMORY_RW_NONCACHED] = {
4364 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4365 L_PTE_MT_BUFFERABLE,
4366 .prot_l1 = PMD_TYPE_TABLE,
4367 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4368 .domain = DOMAIN_KERNEL,
4369 },
4370+ [MT_MEMORY_RX_NONCACHED] = {
4371+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4372+ L_PTE_MT_BUFFERABLE,
4373+ .prot_l1 = PMD_TYPE_TABLE,
4374+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4375+ .domain = DOMAIN_KERNEL,
4376+ },
4377 [MT_MEMORY_RW_DTCM] = {
4378 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4379 L_PTE_XN,
4380@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4381 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4382 .domain = DOMAIN_KERNEL,
4383 },
4384- [MT_MEMORY_RWX_ITCM] = {
4385- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4386+ [MT_MEMORY_RX_ITCM] = {
4387+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4388 .prot_l1 = PMD_TYPE_TABLE,
4389+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4390 .domain = DOMAIN_KERNEL,
4391 },
4392 [MT_MEMORY_RW_SO] = {
4393@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4394 * Mark cache clean areas and XIP ROM read only
4395 * from SVC mode and no access from userspace.
4396 */
4397- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4398- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4399- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4400+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4401+#ifdef CONFIG_PAX_KERNEXEC
4402+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4403+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4404+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4405+#endif
4406+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4407+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4408 #endif
4409
4410 /*
4411@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4412 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4413 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4414 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4415- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4416- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4417+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4418+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4419 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4420 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4421+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4422+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4423 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4424- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4425- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4426+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4427+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4428+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4429+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4430 }
4431 }
4432
4433@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4434 if (cpu_arch >= CPU_ARCH_ARMv6) {
4435 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4436 /* Non-cacheable Normal is XCB = 001 */
4437- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4438+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4439+ PMD_SECT_BUFFERED;
4440+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4441 PMD_SECT_BUFFERED;
4442 } else {
4443 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4444- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4445+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4446+ PMD_SECT_TEX(1);
4447+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4448 PMD_SECT_TEX(1);
4449 }
4450 } else {
4451- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4452+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4453+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4454 }
4455
4456 #ifdef CONFIG_ARM_LPAE
4457@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4458 user_pgprot |= PTE_EXT_PXN;
4459 #endif
4460
4461+ user_pgprot |= __supported_pte_mask;
4462+
4463 for (i = 0; i < 16; i++) {
4464 pteval_t v = pgprot_val(protection_map[i]);
4465 protection_map[i] = __pgprot(v | user_pgprot);
4466@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4467
4468 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4469 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4470- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4471- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4472+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4473+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4474 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4475 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4476+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4477+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4478 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4479- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4480- mem_types[MT_ROM].prot_sect |= cp->pmd;
4481+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4482+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4483+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4484
4485 switch (cp->pmd) {
4486 case PMD_SECT_WT:
4487- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4488+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4489 break;
4490 case PMD_SECT_WB:
4491 case PMD_SECT_WBWA:
4492- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4493+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4494 break;
4495 }
4496 pr_info("Memory policy: %sData cache %s\n",
4497@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4498 return;
4499 }
4500
4501- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4502+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4503 md->virtual >= PAGE_OFFSET &&
4504 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4505 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4506@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4507 * called function. This means you can't use any function or debugging
4508 * method which may touch any device, otherwise the kernel _will_ crash.
4509 */
4510+
4511+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4512+
4513 static void __init devicemaps_init(const struct machine_desc *mdesc)
4514 {
4515 struct map_desc map;
4516 unsigned long addr;
4517- void *vectors;
4518
4519- /*
4520- * Allocate the vector page early.
4521- */
4522- vectors = early_alloc(PAGE_SIZE * 2);
4523-
4524- early_trap_init(vectors);
4525+ early_trap_init(&vectors);
4526
4527 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4528 pmd_clear(pmd_off_k(addr));
4529@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4530 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4531 map.virtual = MODULES_VADDR;
4532 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4533- map.type = MT_ROM;
4534+ map.type = MT_ROM_RX;
4535 create_mapping(&map);
4536 #endif
4537
4538@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4539 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4540 map.virtual = FLUSH_BASE;
4541 map.length = SZ_1M;
4542- map.type = MT_CACHECLEAN;
4543+ map.type = MT_CACHECLEAN_RO;
4544 create_mapping(&map);
4545 #endif
4546 #ifdef FLUSH_BASE_MINICACHE
4547 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4548 map.virtual = FLUSH_BASE_MINICACHE;
4549 map.length = SZ_1M;
4550- map.type = MT_MINICLEAN;
4551+ map.type = MT_MINICLEAN_RO;
4552 create_mapping(&map);
4553 #endif
4554
4555@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4556 * location (0xffff0000). If we aren't using high-vectors, also
4557 * create a mapping at the low-vectors virtual address.
4558 */
4559- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4560+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4561 map.virtual = 0xffff0000;
4562 map.length = PAGE_SIZE;
4563 #ifdef CONFIG_KUSER_HELPERS
4564@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4565 static void __init map_lowmem(void)
4566 {
4567 struct memblock_region *reg;
4568+#ifndef CONFIG_PAX_KERNEXEC
4569 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4570 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4571+#endif
4572
4573 /* Map all the lowmem memory banks. */
4574 for_each_memblock(memory, reg) {
4575@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4576 if (start >= end)
4577 break;
4578
4579+#ifdef CONFIG_PAX_KERNEXEC
4580+ map.pfn = __phys_to_pfn(start);
4581+ map.virtual = __phys_to_virt(start);
4582+ map.length = end - start;
4583+
4584+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4585+ struct map_desc kernel;
4586+ struct map_desc initmap;
4587+
4588+ /* when freeing initmem we will make this RW */
4589+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4590+ initmap.virtual = (unsigned long)__init_begin;
4591+ initmap.length = _sdata - __init_begin;
4592+ initmap.type = __MT_MEMORY_RWX;
4593+ create_mapping(&initmap);
4594+
4595+ /* when freeing initmem we will make this RX */
4596+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4597+ kernel.virtual = (unsigned long)_stext;
4598+ kernel.length = __init_begin - _stext;
4599+ kernel.type = __MT_MEMORY_RWX;
4600+ create_mapping(&kernel);
4601+
4602+ if (map.virtual < (unsigned long)_stext) {
4603+ map.length = (unsigned long)_stext - map.virtual;
4604+ map.type = __MT_MEMORY_RWX;
4605+ create_mapping(&map);
4606+ }
4607+
4608+ map.pfn = __phys_to_pfn(__pa(_sdata));
4609+ map.virtual = (unsigned long)_sdata;
4610+ map.length = end - __pa(_sdata);
4611+ }
4612+
4613+ map.type = MT_MEMORY_RW;
4614+ create_mapping(&map);
4615+#else
4616 if (end < kernel_x_start) {
4617 map.pfn = __phys_to_pfn(start);
4618 map.virtual = __phys_to_virt(start);
4619 map.length = end - start;
4620- map.type = MT_MEMORY_RWX;
4621+ map.type = __MT_MEMORY_RWX;
4622
4623 create_mapping(&map);
4624 } else if (start >= kernel_x_end) {
4625@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4626 map.pfn = __phys_to_pfn(kernel_x_start);
4627 map.virtual = __phys_to_virt(kernel_x_start);
4628 map.length = kernel_x_end - kernel_x_start;
4629- map.type = MT_MEMORY_RWX;
4630+ map.type = __MT_MEMORY_RWX;
4631
4632 create_mapping(&map);
4633
4634@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4635 create_mapping(&map);
4636 }
4637 }
4638+#endif
4639 }
4640 }
4641
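The mem_types rework above is the heart of the ARM KERNEXEC changes: every memory type that was both writable and executable is either renamed to make its permissions explicit or split into RW and RX variants. The net mapping:

    MT_MEMORY_RWX            -> __MT_MEMORY_RWX (transitional, boot-time only)
    MT_ROM                   -> MT_ROM_RX
    MT_UNCACHED              -> MT_UNCACHED_RW
    MT_CACHECLEAN            -> MT_CACHECLEAN_RO
    MT_MINICLEAN             -> MT_MINICLEAN_RO
    MT_MEMORY_RWX_NONCACHED  -> MT_MEMORY_RW_NONCACHED + MT_MEMORY_RX_NONCACHED
    MT_MEMORY_RWX_ITCM       -> MT_MEMORY_RX_ITCM
    (new)                       MT_MEMORY_RX for kernel text/rodata

The vectors page also moves from DOMAIN_USER to a dedicated DOMAIN_VECTORS and becomes a static, read-only, page-aligned allocation rather than an early_alloc() page.
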
4642diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4643index e1268f9..a9755a7 100644
4644--- a/arch/arm/net/bpf_jit_32.c
4645+++ b/arch/arm/net/bpf_jit_32.c
4646@@ -20,6 +20,7 @@
4647 #include <asm/cacheflush.h>
4648 #include <asm/hwcap.h>
4649 #include <asm/opcodes.h>
4650+#include <asm/pgtable.h>
4651
4652 #include "bpf_jit_32.h"
4653
4654@@ -71,7 +72,11 @@ struct jit_ctx {
4655 #endif
4656 };
4657
4658+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4659+int bpf_jit_enable __read_only;
4660+#else
4661 int bpf_jit_enable __read_mostly;
4662+#endif
4663
4664 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4665 {
4666@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4667 {
4668 u32 *ptr;
4669 /* We are guaranteed to have aligned memory. */
4670+ pax_open_kernel();
4671 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4672 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4673+ pax_close_kernel();
4674 }
4675
4676 static void build_prologue(struct jit_ctx *ctx)
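Two independent hardenings in this file: with GRKERNSEC_BPF_HARDEN the bpf_jit_enable sysctl backing variable lives in read-only memory, so neither a stray kernel write nor a plain store from a sysctl handler can flip the JIT on; and jit_fill_hole() writes UDF trap opcodes into JIT memory that KERNEXEC keeps non-writable, hence the open/close bracketing. Any legitimate runtime write to the __read_only flag would have to take the same shape (illustrative only, not code from this patch):

    pax_open_kernel();
    *(int *)&bpf_jit_enable = new_value;   /* hypothetical privileged update */
    pax_close_kernel();
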
4677diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4678index 5b217f4..c23f40e 100644
4679--- a/arch/arm/plat-iop/setup.c
4680+++ b/arch/arm/plat-iop/setup.c
4681@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4682 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4683 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4684 .length = IOP3XX_PERIPHERAL_SIZE,
4685- .type = MT_UNCACHED,
4686+ .type = MT_UNCACHED_RW,
4687 },
4688 };
4689
4690diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4691index a5bc92d..0bb4730 100644
4692--- a/arch/arm/plat-omap/sram.c
4693+++ b/arch/arm/plat-omap/sram.c
4694@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4695 * Looks like we need to preserve some bootloader code at the
4696 * beginning of SRAM for jumping to flash for reboot to work...
4697 */
4698+ pax_open_kernel();
4699 memset_io(omap_sram_base + omap_sram_skip, 0,
4700 omap_sram_size - omap_sram_skip);
4701+ pax_close_kernel();
4702 }
4703diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4704index ce6d763..cfea917 100644
4705--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4706+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4707@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4708 int (*started)(unsigned ch);
4709 int (*flush)(unsigned ch);
4710 int (*stop)(unsigned ch);
4711-};
4712+} __no_const;
4713
4714 extern void *samsung_dmadev_get_ops(void);
4715 extern void *s3c_dma_get_ops(void);
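__no_const is the escape hatch for the constify plugin: samsung_dma_ops is filled in at runtime depending on which DMA backend is selected, so constifying it would break the driver. The annotation reduces to a plugin-recognized attribute, roughly:

    /* only meaningful when the constify plugin is active; otherwise empty */
    #define __no_const __attribute__((no_const))
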
4716diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4717index a5abb00..9cbca9a 100644
4718--- a/arch/arm64/include/asm/barrier.h
4719+++ b/arch/arm64/include/asm/barrier.h
4720@@ -44,7 +44,7 @@
4721 do { \
4722 compiletime_assert_atomic_type(*p); \
4723 barrier(); \
4724- ACCESS_ONCE(*p) = (v); \
4725+ ACCESS_ONCE_RW(*p) = (v); \
4726 } while (0)
4727
4728 #define smp_load_acquire(p) \
4729diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4730index 09da25b..3ea0d64 100644
4731--- a/arch/arm64/include/asm/percpu.h
4732+++ b/arch/arm64/include/asm/percpu.h
4733@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4734 {
4735 switch (size) {
4736 case 1:
4737- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4738+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4739 break;
4740 case 2:
4741- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4742+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4743 break;
4744 case 4:
4745- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4746+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4747 break;
4748 case 8:
4749- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4750+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4751 break;
4752 default:
4753 BUILD_BUG();
4754diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4755index 3bf8f4e..5dd5491 100644
4756--- a/arch/arm64/include/asm/uaccess.h
4757+++ b/arch/arm64/include/asm/uaccess.h
4758@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4759 flag; \
4760 })
4761
4762+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4763 #define access_ok(type, addr, size) __range_ok(addr, size)
4764 #define user_addr_max get_fs
4765
4766diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4767index c3a58a1..78fbf54 100644
4768--- a/arch/avr32/include/asm/cache.h
4769+++ b/arch/avr32/include/asm/cache.h
4770@@ -1,8 +1,10 @@
4771 #ifndef __ASM_AVR32_CACHE_H
4772 #define __ASM_AVR32_CACHE_H
4773
4774+#include <linux/const.h>
4775+
4776 #define L1_CACHE_SHIFT 5
4777-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4778+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4779
4780 /*
4781 * Memory returned by kmalloc() may be used for DMA, so we must make
4782diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4783index d232888..87c8df1 100644
4784--- a/arch/avr32/include/asm/elf.h
4785+++ b/arch/avr32/include/asm/elf.h
4786@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4787 the loader. We need to make sure that it is out of the way of the program
4788 that it will "exec", and that there is sufficient room for the brk. */
4789
4790-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4791+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4792
4793+#ifdef CONFIG_PAX_ASLR
4794+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4795+
4796+#define PAX_DELTA_MMAP_LEN 15
4797+#define PAX_DELTA_STACK_LEN 15
4798+#endif
4799
4800 /* This yields a mask that user programs can use to figure out what
4801 instruction set this CPU supports. This could be done in user space,
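PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN set the per-architecture entropy budget for PaX ASLR: 15 bits of page-granular randomness each on avr32. They feed the mmap and stack deltas roughly as follows — the expression is illustrative; the real computation sits in the patch's binfmt_elf changes, not shown in this excerpt:

    mm->delta_mmap  = (pax_get_random_long() &
                       ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
    mm->delta_stack = (pax_get_random_long() &
                       ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
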
4802diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4803index 479330b..53717a8 100644
4804--- a/arch/avr32/include/asm/kmap_types.h
4805+++ b/arch/avr32/include/asm/kmap_types.h
4806@@ -2,9 +2,9 @@
4807 #define __ASM_AVR32_KMAP_TYPES_H
4808
4809 #ifdef CONFIG_DEBUG_HIGHMEM
4810-# define KM_TYPE_NR 29
4811+# define KM_TYPE_NR 30
4812 #else
4813-# define KM_TYPE_NR 14
4814+# define KM_TYPE_NR 15
4815 #endif
4816
4817 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4818diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4819index d223a8b..69c5210 100644
4820--- a/arch/avr32/mm/fault.c
4821+++ b/arch/avr32/mm/fault.c
4822@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4823
4824 int exception_trace = 1;
4825
4826+#ifdef CONFIG_PAX_PAGEEXEC
4827+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4828+{
4829+ unsigned long i;
4830+
4831+ printk(KERN_ERR "PAX: bytes at PC: ");
4832+ for (i = 0; i < 20; i++) {
4833+ unsigned char c;
4834+ if (get_user(c, (unsigned char *)pc+i))
4835+ printk(KERN_CONT "???????? ");
4836+ else
4837+ printk(KERN_CONT "%02x ", c);
4838+ }
4839+ printk("\n");
4840+}
4841+#endif
4842+
4843 /*
4844 * This routine handles page faults. It determines the address and the
4845 * problem, and then passes it off to one of the appropriate routines.
4846@@ -178,6 +195,16 @@ bad_area:
4847 up_read(&mm->mmap_sem);
4848
4849 if (user_mode(regs)) {
4850+
4851+#ifdef CONFIG_PAX_PAGEEXEC
4852+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4853+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4854+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4855+ do_group_exit(SIGKILL);
4856+ }
4857+ }
4858+#endif
4859+
4860 if (exception_trace && printk_ratelimit())
4861 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4862 "sp %08lx ecr %lu\n",
4863diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4864index 568885a..f8008df 100644
4865--- a/arch/blackfin/include/asm/cache.h
4866+++ b/arch/blackfin/include/asm/cache.h
4867@@ -7,6 +7,7 @@
4868 #ifndef __ARCH_BLACKFIN_CACHE_H
4869 #define __ARCH_BLACKFIN_CACHE_H
4870
4871+#include <linux/const.h>
4872 #include <linux/linkage.h> /* for asmlinkage */
4873
4874 /*
4875@@ -14,7 +15,7 @@
4876 * Blackfin loads 32 bytes for cache
4877 */
4878 #define L1_CACHE_SHIFT 5
4879-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4880+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4881 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4882
4883 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4884diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4885index aea2718..3639a60 100644
4886--- a/arch/cris/include/arch-v10/arch/cache.h
4887+++ b/arch/cris/include/arch-v10/arch/cache.h
4888@@ -1,8 +1,9 @@
4889 #ifndef _ASM_ARCH_CACHE_H
4890 #define _ASM_ARCH_CACHE_H
4891
4892+#include <linux/const.h>
4893 /* Etrax 100LX have 32-byte cache-lines. */
4894-#define L1_CACHE_BYTES 32
4895 #define L1_CACHE_SHIFT 5
4896+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4897
4898 #endif /* _ASM_ARCH_CACHE_H */
4899diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4900index 7caf25d..ee65ac5 100644
4901--- a/arch/cris/include/arch-v32/arch/cache.h
4902+++ b/arch/cris/include/arch-v32/arch/cache.h
4903@@ -1,11 +1,12 @@
4904 #ifndef _ASM_CRIS_ARCH_CACHE_H
4905 #define _ASM_CRIS_ARCH_CACHE_H
4906
4907+#include <linux/const.h>
4908 #include <arch/hwregs/dma.h>
4909
4910 /* A cache-line is 32 bytes. */
4911-#define L1_CACHE_BYTES 32
4912 #define L1_CACHE_SHIFT 5
4913+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4914
4915 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4916
4917diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4918index 102190a..5334cea 100644
4919--- a/arch/frv/include/asm/atomic.h
4920+++ b/arch/frv/include/asm/atomic.h
4921@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
4922 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4923 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4924
4925+#define atomic64_read_unchecked(v) atomic64_read(v)
4926+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4927+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4928+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4929+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4930+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4931+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4932+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4933+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4934+
4935 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4936 {
4937 int c, old;
4938diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4939index 2797163..c2a401df9 100644
4940--- a/arch/frv/include/asm/cache.h
4941+++ b/arch/frv/include/asm/cache.h
4942@@ -12,10 +12,11 @@
4943 #ifndef __ASM_CACHE_H
4944 #define __ASM_CACHE_H
4945
4946+#include <linux/const.h>
4947
4948 /* bytes per L1 cache line */
4949 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4950-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4951+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4952
4953 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4954 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4955diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4956index 43901f2..0d8b865 100644
4957--- a/arch/frv/include/asm/kmap_types.h
4958+++ b/arch/frv/include/asm/kmap_types.h
4959@@ -2,6 +2,6 @@
4960 #ifndef _ASM_KMAP_TYPES_H
4961 #define _ASM_KMAP_TYPES_H
4962
4963-#define KM_TYPE_NR 17
4964+#define KM_TYPE_NR 18
4965
4966 #endif
4967diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4968index 836f147..4cf23f5 100644
4969--- a/arch/frv/mm/elf-fdpic.c
4970+++ b/arch/frv/mm/elf-fdpic.c
4971@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4972 {
4973 struct vm_area_struct *vma;
4974 struct vm_unmapped_area_info info;
4975+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4976
4977 if (len > TASK_SIZE)
4978 return -ENOMEM;
4979@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4980 if (addr) {
4981 addr = PAGE_ALIGN(addr);
4982 vma = find_vma(current->mm, addr);
4983- if (TASK_SIZE - len >= addr &&
4984- (!vma || addr + len <= vma->vm_start))
4985+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4986 goto success;
4987 }
4988
4989@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4990 info.high_limit = (current->mm->start_stack - 0x00200000);
4991 info.align_mask = 0;
4992 info.align_offset = 0;
4993+ info.threadstack_offset = offset;
4994 addr = vm_unmapped_area(&info);
4995 if (!(addr & ~PAGE_MASK))
4996 goto success;
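
Here the open-coded overlap test is replaced by check_heap_stack_gap(), which also honors a guard gap below stack VMAs and the gr_rand_threadstack_offset() jitter threaded through info.threadstack_offset. A sketch of the kind of decision such a helper has to make; the field and function names are illustrative, not the grsecurity implementation:

    /* Hedged sketch: the vanilla test only rejects overlap with the next VMA;
     * a gap-aware check additionally keeps a randomized cushion below a
     * downward-growing stack so heap mappings cannot sit flush against it. */
    #include <stdbool.h>

    struct vma { unsigned long start, end; bool grows_down; };

    static bool gap_ok(const struct vma *next, unsigned long addr,
                       unsigned long len, unsigned long rand_offset)
    {
        if (!next)                      /* nothing above: always fits */
            return true;
        if (addr + len > next->start)   /* plain overlap */
            return false;
        if (next->grows_down)           /* keep a gap below a stack VMA */
            return next->start - (addr + len) >= rand_offset;
        return true;
    }
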
4997diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4998index 69952c1..4fa2908 100644
4999--- a/arch/hexagon/include/asm/cache.h
5000+++ b/arch/hexagon/include/asm/cache.h
5001@@ -21,9 +21,11 @@
5002 #ifndef __ASM_CACHE_H
5003 #define __ASM_CACHE_H
5004
5005+#include <linux/const.h>
5006+
5007 /* Bytes per L1 cache line */
5008-#define L1_CACHE_SHIFT (5)
5009-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5010+#define L1_CACHE_SHIFT 5
5011+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5012
5013 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5014
5015diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5016index 074e52b..76afdac 100644
5017--- a/arch/ia64/Kconfig
5018+++ b/arch/ia64/Kconfig
5019@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5020 config KEXEC
5021 bool "kexec system call"
5022 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5023+ depends on !GRKERNSEC_KMEM
5024 help
5025 kexec is a system call that implements the ability to shutdown your
5026 current kernel, and to start another kernel. It is like a reboot
5027diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5028index 970d0bd..e750b9b 100644
5029--- a/arch/ia64/Makefile
5030+++ b/arch/ia64/Makefile
5031@@ -98,5 +98,6 @@ endef
5032 archprepare: make_nr_irqs_h FORCE
5033 PHONY += make_nr_irqs_h FORCE
5034
5035+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5036 make_nr_irqs_h: FORCE
5037 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5038diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5039index 0bf0350..2ad1957 100644
5040--- a/arch/ia64/include/asm/atomic.h
5041+++ b/arch/ia64/include/asm/atomic.h
5042@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5043 #define atomic64_inc(v) atomic64_add(1, (v))
5044 #define atomic64_dec(v) atomic64_sub(1, (v))
5045
5046+#define atomic64_read_unchecked(v) atomic64_read(v)
5047+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5048+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5049+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5050+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5051+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5052+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5053+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5054+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5055+
5056 #endif /* _ASM_IA64_ATOMIC_H */
5057diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5058index f6769eb..1cdb590 100644
5059--- a/arch/ia64/include/asm/barrier.h
5060+++ b/arch/ia64/include/asm/barrier.h
5061@@ -66,7 +66,7 @@
5062 do { \
5063 compiletime_assert_atomic_type(*p); \
5064 barrier(); \
5065- ACCESS_ONCE(*p) = (v); \
5066+ ACCESS_ONCE_RW(*p) = (v); \
5067 } while (0)
5068
5069 #define smp_load_acquire(p) \
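
PaX constifies many structures, and plain ACCESS_ONCE() is made to read through a const-qualified volatile view, so release stores such as the one in smp_store_release() must use ACCESS_ONCE_RW() instead. A sketch of the read/write pair as I understand the PaX definitions; the surrounding functions are demo scaffolding:

    /* The read form goes through const volatile, so assigning through it is a
     * compile error; stores must name ACCESS_ONCE_RW explicitly. */
    #define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    static int flag;

    void publish(int v)
    {
        /* ACCESS_ONCE(flag) = v;  would not compile: assignment to const */
        ACCESS_ONCE_RW(flag) = v;
    }

    int consume(void)
    {
        return ACCESS_ONCE(flag);
    }
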
5070diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5071index 988254a..e1ee885 100644
5072--- a/arch/ia64/include/asm/cache.h
5073+++ b/arch/ia64/include/asm/cache.h
5074@@ -1,6 +1,7 @@
5075 #ifndef _ASM_IA64_CACHE_H
5076 #define _ASM_IA64_CACHE_H
5077
5078+#include <linux/const.h>
5079
5080 /*
5081 * Copyright (C) 1998-2000 Hewlett-Packard Co
5082@@ -9,7 +10,7 @@
5083
5084 /* Bytes per L1 (data) cache line. */
5085 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5086-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5087+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5088
5089 #ifdef CONFIG_SMP
5090 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5091diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5092index 5a83c5c..4d7f553 100644
5093--- a/arch/ia64/include/asm/elf.h
5094+++ b/arch/ia64/include/asm/elf.h
5095@@ -42,6 +42,13 @@
5096 */
5097 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5098
5099+#ifdef CONFIG_PAX_ASLR
5100+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5101+
5102+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5103+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5104+#endif
5105+
5106 #define PT_IA_64_UNWIND 0x70000001
5107
5108 /* IA-64 relocations: */
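
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are bit counts: how many random bits PaX ASLR mixes into the mmap and stack bases for the given personality. An illustrative computation of how a delta length becomes a page-aligned base shift; PAGE_SHIFT and the RNG here are demo assumptions, not the kernel's:

    /* Illustrative only: the real kernel applies this during exec with its
     * own entropy source. */
    #include <stdint.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 14  /* assumed 16KB pages for the demo */

    static uintptr_t randomize_base(uintptr_t base, unsigned delta_len)
    {
        uintptr_t delta = (uintptr_t)rand() & ((1UL << delta_len) - 1);
        return base + (delta << PAGE_SHIFT);   /* page-aligned jitter */
    }
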
5109diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5110index 5767cdf..7462574 100644
5111--- a/arch/ia64/include/asm/pgalloc.h
5112+++ b/arch/ia64/include/asm/pgalloc.h
5113@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5114 pgd_val(*pgd_entry) = __pa(pud);
5115 }
5116
5117+static inline void
5118+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5119+{
5120+ pgd_populate(mm, pgd_entry, pud);
5121+}
5122+
5123 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5124 {
5125 return quicklist_alloc(0, GFP_KERNEL, NULL);
5126@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5127 pud_val(*pud_entry) = __pa(pmd);
5128 }
5129
5130+static inline void
5131+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5132+{
5133+ pud_populate(mm, pud_entry, pmd);
5134+}
5135+
5136 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5137 {
5138 return quicklist_alloc(0, GFP_KERNEL, NULL);
5139diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5140index 7935115..c0eca6a 100644
5141--- a/arch/ia64/include/asm/pgtable.h
5142+++ b/arch/ia64/include/asm/pgtable.h
5143@@ -12,7 +12,7 @@
5144 * David Mosberger-Tang <davidm@hpl.hp.com>
5145 */
5146
5147-
5148+#include <linux/const.h>
5149 #include <asm/mman.h>
5150 #include <asm/page.h>
5151 #include <asm/processor.h>
5152@@ -142,6 +142,17 @@
5153 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5154 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5155 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5156+
5157+#ifdef CONFIG_PAX_PAGEEXEC
5158+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5159+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5160+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5161+#else
5162+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5163+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5164+# define PAGE_COPY_NOEXEC PAGE_COPY
5165+#endif
5166+
5167 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5168 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5169 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5170diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5171index 45698cd..e8e2dbc 100644
5172--- a/arch/ia64/include/asm/spinlock.h
5173+++ b/arch/ia64/include/asm/spinlock.h
5174@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5175 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5176
5177 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5178- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5179+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5180 }
5181
5182 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5183diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5184index 103bedc..0210597 100644
5185--- a/arch/ia64/include/asm/uaccess.h
5186+++ b/arch/ia64/include/asm/uaccess.h
5187@@ -70,6 +70,7 @@
5188 && ((segment).seg == KERNEL_DS.seg \
5189 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5190 })
5191+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5192 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5193
5194 /*
5195@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5196 static inline unsigned long
5197 __copy_to_user (void __user *to, const void *from, unsigned long count)
5198 {
5199+ if (count > INT_MAX)
5200+ return count;
5201+
5202+ if (!__builtin_constant_p(count))
5203+ check_object_size(from, count, true);
5204+
5205 return __copy_user(to, (__force void __user *) from, count);
5206 }
5207
5208 static inline unsigned long
5209 __copy_from_user (void *to, const void __user *from, unsigned long count)
5210 {
5211+ if (count > INT_MAX)
5212+ return count;
5213+
5214+ if (!__builtin_constant_p(count))
5215+ check_object_size(to, count, false);
5216+
5217 return __copy_user((__force void __user *) to, from, count);
5218 }
5219
5220@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5221 ({ \
5222 void __user *__cu_to = (to); \
5223 const void *__cu_from = (from); \
5224- long __cu_len = (n); \
5225+ unsigned long __cu_len = (n); \
5226 \
5227- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5228+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5229+ if (!__builtin_constant_p(n)) \
5230+ check_object_size(__cu_from, __cu_len, true); \
5231 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5232+ } \
5233 __cu_len; \
5234 })
5235
5236@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5237 ({ \
5238 void *__cu_to = (to); \
5239 const void __user *__cu_from = (from); \
5240- long __cu_len = (n); \
5241+ unsigned long __cu_len = (n); \
5242 \
5243 __chk_user_ptr(__cu_from); \
5244- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5245+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5246+ if (!__builtin_constant_p(n)) \
5247+ check_object_size(__cu_to, __cu_len, false); \
5248 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5249+ } \
5250 __cu_len; \
5251 })
5252
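
The copy helpers, and the copy_to_user/copy_from_user macros after them, gain two hardenings: lengths above INT_MAX (typically underflowed size calculations) are rejected outright, and non-constant sizes are passed to check_object_size() so PAX_USERCOPY can bounds-check the kernel buffer before data crosses the user/kernel boundary. A runnable sketch of the wrapper shape; the helper stubs are stand-ins, not the kernel functions:

    #include <limits.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-ins for the kernel helpers; assumptions for this sketch. */
    static void check_object_size(const void *ptr, size_t n, int to_user)
    {
        (void)ptr; (void)n; (void)to_user;  /* real code verifies bounds */
    }

    static size_t raw_copy(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;                           /* 0 bytes left uncopied */
    }

    static size_t copy_to_user_checked(void *to, const void *from, size_t n)
    {
        if (n > INT_MAX)                    /* underflowed length: reject */
            return n;                       /* report everything uncopied */
        if (!__builtin_constant_p(n))
            check_object_size(from, n, 1);  /* bounds-check the source */
        return raw_copy(to, from, n);
    }

    int main(void)
    {
        char src[8] = "panic", dst[8];
        printf("left: %zu\n", copy_to_user_checked(dst, src, sizeof(src)));
        printf("left: %zu\n", copy_to_user_checked(dst, src, (size_t)-4));
        return 0;
    }
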
5253diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5254index 29754aa..06d2838 100644
5255--- a/arch/ia64/kernel/module.c
5256+++ b/arch/ia64/kernel/module.c
5257@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5258 }
5259
5260 static inline int
5261+in_init_rx (const struct module *mod, uint64_t addr)
5262+{
5263+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5264+}
5265+
5266+static inline int
5267+in_init_rw (const struct module *mod, uint64_t addr)
5268+{
5269+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5270+}
5271+
5272+static inline int
5273 in_init (const struct module *mod, uint64_t addr)
5274 {
5275- return addr - (uint64_t) mod->module_init < mod->init_size;
5276+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5277+}
5278+
5279+static inline int
5280+in_core_rx (const struct module *mod, uint64_t addr)
5281+{
5282+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5283+}
5284+
5285+static inline int
5286+in_core_rw (const struct module *mod, uint64_t addr)
5287+{
5288+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5289 }
5290
5291 static inline int
5292 in_core (const struct module *mod, uint64_t addr)
5293 {
5294- return addr - (uint64_t) mod->module_core < mod->core_size;
5295+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5296 }
5297
5298 static inline int
5299@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5300 break;
5301
5302 case RV_BDREL:
5303- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5304+ if (in_init_rx(mod, val))
5305+ val -= (uint64_t) mod->module_init_rx;
5306+ else if (in_init_rw(mod, val))
5307+ val -= (uint64_t) mod->module_init_rw;
5308+ else if (in_core_rx(mod, val))
5309+ val -= (uint64_t) mod->module_core_rx;
5310+ else if (in_core_rw(mod, val))
5311+ val -= (uint64_t) mod->module_core_rw;
5312 break;
5313
5314 case RV_LTV:
5315@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5316 * addresses have been selected...
5317 */
5318 uint64_t gp;
5319- if (mod->core_size > MAX_LTOFF)
5320+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5321 /*
5322 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5323 * at the end of the module.
5324 */
5325- gp = mod->core_size - MAX_LTOFF / 2;
5326+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5327 else
5328- gp = mod->core_size / 2;
5329- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5330+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5331+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5332 mod->arch.gp = gp;
5333 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5334 }
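
With the module image split into rx (code) and rw (data) regions under KERNEXEC, every address classification above splits in two, and the gp placement must account for the sum of both sizes. Note that each helper uses the one-comparison range test addr - base < size, which exploits unsigned wraparound to fold the lower and upper bound into a single check:

    /* If addr < base, the unsigned subtraction wraps to a huge value and the
     * comparison fails, so both bounds are tested at once. */
    #include <stdbool.h>
    #include <stdint.h>

    static bool in_region(uint64_t addr, uint64_t base, uint64_t size)
    {
        return addr - base < size;
    }
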
5335diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5336index c39c3cd..3c77738 100644
5337--- a/arch/ia64/kernel/palinfo.c
5338+++ b/arch/ia64/kernel/palinfo.c
5339@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5340 return NOTIFY_OK;
5341 }
5342
5343-static struct notifier_block __refdata palinfo_cpu_notifier =
5344+static struct notifier_block palinfo_cpu_notifier =
5345 {
5346 .notifier_call = palinfo_cpu_callback,
5347 .priority = 0,
5348diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5349index 41e33f8..65180b2a 100644
5350--- a/arch/ia64/kernel/sys_ia64.c
5351+++ b/arch/ia64/kernel/sys_ia64.c
5352@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5353 unsigned long align_mask = 0;
5354 struct mm_struct *mm = current->mm;
5355 struct vm_unmapped_area_info info;
5356+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5357
5358 if (len > RGN_MAP_LIMIT)
5359 return -ENOMEM;
5360@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5361 if (REGION_NUMBER(addr) == RGN_HPAGE)
5362 addr = 0;
5363 #endif
5364+
5365+#ifdef CONFIG_PAX_RANDMMAP
5366+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5367+ addr = mm->free_area_cache;
5368+ else
5369+#endif
5370+
5371 if (!addr)
5372 addr = TASK_UNMAPPED_BASE;
5373
5374@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5375 info.high_limit = TASK_SIZE;
5376 info.align_mask = align_mask;
5377 info.align_offset = 0;
5378+ info.threadstack_offset = offset;
5379 return vm_unmapped_area(&info);
5380 }
5381
5382diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5383index 84f8a52..7c76178 100644
5384--- a/arch/ia64/kernel/vmlinux.lds.S
5385+++ b/arch/ia64/kernel/vmlinux.lds.S
5386@@ -192,7 +192,7 @@ SECTIONS {
5387 /* Per-cpu data: */
5388 . = ALIGN(PERCPU_PAGE_SIZE);
5389 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5390- __phys_per_cpu_start = __per_cpu_load;
5391+ __phys_per_cpu_start = per_cpu_load;
5392 /*
5393 * ensure percpu data fits
5394 * into percpu page size
5395diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5396index ba5ba7a..36e9d3a 100644
5397--- a/arch/ia64/mm/fault.c
5398+++ b/arch/ia64/mm/fault.c
5399@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5400 return pte_present(pte);
5401 }
5402
5403+#ifdef CONFIG_PAX_PAGEEXEC
5404+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5405+{
5406+ unsigned long i;
5407+
5408+ printk(KERN_ERR "PAX: bytes at PC: ");
5409+ for (i = 0; i < 8; i++) {
5410+ unsigned int c;
5411+ if (get_user(c, (unsigned int *)pc+i))
5412+ printk(KERN_CONT "???????? ");
5413+ else
5414+ printk(KERN_CONT "%08x ", c);
5415+ }
5416+ printk("\n");
5417+}
5418+#endif
5419+
5420 # define VM_READ_BIT 0
5421 # define VM_WRITE_BIT 1
5422 # define VM_EXEC_BIT 2
5423@@ -151,8 +168,21 @@ retry:
5424 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5425 goto bad_area;
5426
5427- if ((vma->vm_flags & mask) != mask)
5428+ if ((vma->vm_flags & mask) != mask) {
5429+
5430+#ifdef CONFIG_PAX_PAGEEXEC
5431+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5432+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5433+ goto bad_area;
5434+
5435+ up_read(&mm->mmap_sem);
5436+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5437+ do_group_exit(SIGKILL);
5438+ }
5439+#endif
5440+
5441 goto bad_area;
5442+ }
5443
5444 /*
5445 * If for any reason at all we couldn't handle the fault, make
5446diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5447index 76069c1..c2aa816 100644
5448--- a/arch/ia64/mm/hugetlbpage.c
5449+++ b/arch/ia64/mm/hugetlbpage.c
5450@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5451 unsigned long pgoff, unsigned long flags)
5452 {
5453 struct vm_unmapped_area_info info;
5454+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5455
5456 if (len > RGN_MAP_LIMIT)
5457 return -ENOMEM;
5458@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5459 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5460 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5461 info.align_offset = 0;
5462+ info.threadstack_offset = offset;
5463 return vm_unmapped_area(&info);
5464 }
5465
5466diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5467index 6b33457..88b5124 100644
5468--- a/arch/ia64/mm/init.c
5469+++ b/arch/ia64/mm/init.c
5470@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5471 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5472 vma->vm_end = vma->vm_start + PAGE_SIZE;
5473 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5474+
5475+#ifdef CONFIG_PAX_PAGEEXEC
5476+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5477+ vma->vm_flags &= ~VM_EXEC;
5478+
5479+#ifdef CONFIG_PAX_MPROTECT
5480+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5481+ vma->vm_flags &= ~VM_MAYEXEC;
5482+#endif
5483+
5484+ }
5485+#endif
5486+
5487 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5488 down_write(&current->mm->mmap_sem);
5489 if (insert_vm_struct(current->mm, vma)) {
5490@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5491 gate_vma.vm_start = FIXADDR_USER_START;
5492 gate_vma.vm_end = FIXADDR_USER_END;
5493 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5494- gate_vma.vm_page_prot = __P101;
5495+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5496
5497 return 0;
5498 }
5499diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5500index 40b3ee98..8c2c112 100644
5501--- a/arch/m32r/include/asm/cache.h
5502+++ b/arch/m32r/include/asm/cache.h
5503@@ -1,8 +1,10 @@
5504 #ifndef _ASM_M32R_CACHE_H
5505 #define _ASM_M32R_CACHE_H
5506
5507+#include <linux/const.h>
5508+
5509 /* L1 cache line size */
5510 #define L1_CACHE_SHIFT 4
5511-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5512+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5513
5514 #endif /* _ASM_M32R_CACHE_H */
5515diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5516index 82abd15..d95ae5d 100644
5517--- a/arch/m32r/lib/usercopy.c
5518+++ b/arch/m32r/lib/usercopy.c
5519@@ -14,6 +14,9 @@
5520 unsigned long
5521 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5522 {
5523+ if ((long)n < 0)
5524+ return n;
5525+
5526 prefetch(from);
5527 if (access_ok(VERIFY_WRITE, to, n))
5528 __copy_user(to,from,n);
5529@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5530 unsigned long
5531 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5532 {
5533+ if ((long)n < 0)
5534+ return n;
5535+
5536 prefetchw(to);
5537 if (access_ok(VERIFY_READ, from, n))
5538 __copy_user_zeroing(to,from,n);
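
n is unsigned, so a length that underflowed during computation arrives as a huge value with the top bit set; casting to long and testing for negative rejects it before access_ok() is even consulted. A small demonstration (the cast of SIZE_MAX to long is implementation-defined in ISO C but yields -1 on the two's-complement targets the kernel supports):

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        size_t n = (size_t)0 - 1;            /* underflowed length */
        printf("reject: %d\n", (long)n < 0); /* prints 1 */
        return 0;
    }
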
5539diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5540index 0395c51..5f26031 100644
5541--- a/arch/m68k/include/asm/cache.h
5542+++ b/arch/m68k/include/asm/cache.h
5543@@ -4,9 +4,11 @@
5544 #ifndef __ARCH_M68K_CACHE_H
5545 #define __ARCH_M68K_CACHE_H
5546
5547+#include <linux/const.h>
5548+
5549 /* bytes per L1 cache line */
5550 #define L1_CACHE_SHIFT 4
5551-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5552+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5553
5554 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5555
5556diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5557index d703d8e..a8e2d70 100644
5558--- a/arch/metag/include/asm/barrier.h
5559+++ b/arch/metag/include/asm/barrier.h
5560@@ -90,7 +90,7 @@ static inline void fence(void)
5561 do { \
5562 compiletime_assert_atomic_type(*p); \
5563 smp_mb(); \
5564- ACCESS_ONCE(*p) = (v); \
5565+ ACCESS_ONCE_RW(*p) = (v); \
5566 } while (0)
5567
5568 #define smp_load_acquire(p) \
5569diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5570index 3c32075..ae0ae75 100644
5571--- a/arch/metag/mm/hugetlbpage.c
5572+++ b/arch/metag/mm/hugetlbpage.c
5573@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5574 info.high_limit = TASK_SIZE;
5575 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5576 info.align_offset = 0;
5577+ info.threadstack_offset = 0;
5578 return vm_unmapped_area(&info);
5579 }
5580
5581diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5582index 4efe96a..60e8699 100644
5583--- a/arch/microblaze/include/asm/cache.h
5584+++ b/arch/microblaze/include/asm/cache.h
5585@@ -13,11 +13,12 @@
5586 #ifndef _ASM_MICROBLAZE_CACHE_H
5587 #define _ASM_MICROBLAZE_CACHE_H
5588
5589+#include <linux/const.h>
5590 #include <asm/registers.h>
5591
5592 #define L1_CACHE_SHIFT 5
5593 /* word-granular cache in microblaze */
5594-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5595+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5596
5597 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5598
5599diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5600index 843713c..b6a87b9 100644
5601--- a/arch/mips/Kconfig
5602+++ b/arch/mips/Kconfig
5603@@ -2439,6 +2439,7 @@ source "kernel/Kconfig.preempt"
5604
5605 config KEXEC
5606 bool "Kexec system call"
5607+ depends on !GRKERNSEC_KMEM
5608 help
5609 kexec is a system call that implements the ability to shutdown your
5610 current kernel, and to start another kernel. It is like a reboot
5611diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5612index 3778655..1dff0a9 100644
5613--- a/arch/mips/cavium-octeon/dma-octeon.c
5614+++ b/arch/mips/cavium-octeon/dma-octeon.c
5615@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5616 if (dma_release_from_coherent(dev, order, vaddr))
5617 return;
5618
5619- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5620+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5621 }
5622
5623 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5624diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5625index 857da84..3f4458b 100644
5626--- a/arch/mips/include/asm/atomic.h
5627+++ b/arch/mips/include/asm/atomic.h
5628@@ -22,15 +22,39 @@
5629 #include <asm/cmpxchg.h>
5630 #include <asm/war.h>
5631
5632+#ifdef CONFIG_GENERIC_ATOMIC64
5633+#include <asm-generic/atomic64.h>
5634+#endif
5635+
5636 #define ATOMIC_INIT(i) { (i) }
5637
5638+#ifdef CONFIG_64BIT
5639+#define _ASM_EXTABLE(from, to) \
5640+" .section __ex_table,\"a\"\n" \
5641+" .dword " #from ", " #to"\n" \
5642+" .previous\n"
5643+#else
5644+#define _ASM_EXTABLE(from, to) \
5645+" .section __ex_table,\"a\"\n" \
5646+" .word " #from ", " #to"\n" \
5647+" .previous\n"
5648+#endif
5649+
5650 /*
5651 * atomic_read - read atomic variable
5652 * @v: pointer of type atomic_t
5653 *
5654 * Atomically reads the value of @v.
5655 */
5656-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5657+static inline int atomic_read(const atomic_t *v)
5658+{
5659+ return ACCESS_ONCE(v->counter);
5660+}
5661+
5662+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5663+{
5664+ return ACCESS_ONCE(v->counter);
5665+}
5666
5667 /*
5668 * atomic_set - set atomic variable
5669@@ -39,47 +63,77 @@
5670 *
5671 * Atomically sets the value of @v to @i.
5672 */
5673-#define atomic_set(v, i) ((v)->counter = (i))
5674+static inline void atomic_set(atomic_t *v, int i)
5675+{
5676+ v->counter = i;
5677+}
5678
5679-#define ATOMIC_OP(op, c_op, asm_op) \
5680-static __inline__ void atomic_##op(int i, atomic_t * v) \
5681+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5682+{
5683+ v->counter = i;
5684+}
5685+
5686+#ifdef CONFIG_PAX_REFCOUNT
5687+#define __OVERFLOW_POST \
5688+ " b 4f \n" \
5689+ " .set noreorder \n" \
5690+ "3: b 5f \n" \
5691+ " move %0, %1 \n" \
5692+ " .set reorder \n"
5693+#define __OVERFLOW_EXTABLE \
5694+ "3:\n" \
5695+ _ASM_EXTABLE(2b, 3b)
5696+#else
5697+#define __OVERFLOW_POST
5698+#define __OVERFLOW_EXTABLE
5699+#endif
5700+
5701+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5702+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5703 { \
5704 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5705 int temp; \
5706 \
5707 __asm__ __volatile__( \
5708- " .set arch=r4000 \n" \
5709- "1: ll %0, %1 # atomic_" #op " \n" \
5710- " " #asm_op " %0, %2 \n" \
5711+ " .set mips3 \n" \
5712+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5713+ "2: " #asm_op " %0, %2 \n" \
5714 " sc %0, %1 \n" \
5715 " beqzl %0, 1b \n" \
5716+ extable \
5717 " .set mips0 \n" \
5718 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5719 : "Ir" (i)); \
5720 } else if (kernel_uses_llsc) { \
5721 int temp; \
5722 \
5723- do { \
5724- __asm__ __volatile__( \
5725- " .set arch=r4000 \n" \
5726- " ll %0, %1 # atomic_" #op "\n" \
5727- " " #asm_op " %0, %2 \n" \
5728- " sc %0, %1 \n" \
5729- " .set mips0 \n" \
5730- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5731- : "Ir" (i)); \
5732- } while (unlikely(!temp)); \
5733+ __asm__ __volatile__( \
5734+ " .set mips3 \n" \
5735+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5736+ "2: " #asm_op " %0, %2 \n" \
5737+ " sc %0, %1 \n" \
5738+ " beqz %0, 1b \n" \
5739+ extable \
5740+ " .set mips0 \n" \
5741+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5742+ : "Ir" (i)); \
5743 } else { \
5744 unsigned long flags; \
5745 \
5746 raw_local_irq_save(flags); \
5747- v->counter c_op i; \
5748+ __asm__ __volatile__( \
5749+ "2: " #asm_op " %0, %1 \n" \
5750+ extable \
5751+ : "+r" (v->counter) : "Ir" (i)); \
5752 raw_local_irq_restore(flags); \
5753 } \
5754 }
5755
5756-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5757-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5758+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE) \
5759+ __ATOMIC_OP(op, _unchecked, asm_op##u, )
5760+
5761+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5762+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5763 { \
5764 int result; \
5765 \
5766@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5767 int temp; \
5768 \
5769 __asm__ __volatile__( \
5770- " .set arch=r4000 \n" \
5771- "1: ll %1, %2 # atomic_" #op "_return \n" \
5772- " " #asm_op " %0, %1, %3 \n" \
5773+ " .set mips3 \n" \
5774+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5775+ "2: " #asm_op " %0, %1, %3 \n" \
5776 " sc %0, %2 \n" \
5777 " beqzl %0, 1b \n" \
5778- " " #asm_op " %0, %1, %3 \n" \
5779+ post_op \
5780+ extable \
5781+ "4: " #asm_op " %0, %1, %3 \n" \
5782+ "5: \n" \
5783 " .set mips0 \n" \
5784 : "=&r" (result), "=&r" (temp), \
5785 "+" GCC_OFF12_ASM() (v->counter) \
5786@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5787 } else if (kernel_uses_llsc) { \
5788 int temp; \
5789 \
5790- do { \
5791- __asm__ __volatile__( \
5792- " .set arch=r4000 \n" \
5793- " ll %1, %2 # atomic_" #op "_return \n" \
5794- " " #asm_op " %0, %1, %3 \n" \
5795- " sc %0, %2 \n" \
5796- " .set mips0 \n" \
5797- : "=&r" (result), "=&r" (temp), \
5798- "+" GCC_OFF12_ASM() (v->counter) \
5799- : "Ir" (i)); \
5800- } while (unlikely(!result)); \
5801+ __asm__ __volatile__( \
5802+ " .set mips3 \n" \
5803+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5804+ "2: " #asm_op " %0, %1, %3 \n" \
5805+ " sc %0, %2 \n" \
5806+ post_op \
5807+ extable \
5808+ "4: " #asm_op " %0, %1, %3 \n" \
5809+ "5: \n" \
5810+ " .set mips0 \n" \
5811+ : "=&r" (result), "=&r" (temp), \
5812+ "+" GCC_OFF12_ASM() (v->counter) \
5813+ : "Ir" (i)); \
5814 \
5815 result = temp; result c_op i; \
5816 } else { \
5817 unsigned long flags; \
5818 \
5819 raw_local_irq_save(flags); \
5820- result = v->counter; \
5821- result c_op i; \
5822- v->counter = result; \
5823+ __asm__ __volatile__( \
5824+ " lw %0, %1 \n" \
5825+ "2: " #asm_op " %0, %1, %2 \n" \
5826+ " sw %0, %1 \n" \
5827+ "3: \n" \
5828+ extable \
5829+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
5830+ : "Ir" (i)); \
5831 raw_local_irq_restore(flags); \
5832 } \
5833 \
5834@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5835 return result; \
5836 }
5837
5838-#define ATOMIC_OPS(op, c_op, asm_op) \
5839- ATOMIC_OP(op, c_op, asm_op) \
5840- ATOMIC_OP_RETURN(op, c_op, asm_op)
5841+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE) \
5842+ __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , )
5843
5844-ATOMIC_OPS(add, +=, addu)
5845-ATOMIC_OPS(sub, -=, subu)
5846+#define ATOMIC_OPS(op, asm_op) \
5847+ ATOMIC_OP(op, asm_op) \
5848+ ATOMIC_OP_RETURN(op, asm_op)
5849+
5850+ATOMIC_OPS(add, add)
5851+ATOMIC_OPS(sub, sub)
5852
5853 #undef ATOMIC_OPS
5854 #undef ATOMIC_OP_RETURN
5855+#undef __ATOMIC_OP_RETURN
5856 #undef ATOMIC_OP
5857+#undef __ATOMIC_OP
5858
5859 /*
5860 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5861@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5862 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5863 * The function returns the old value of @v minus @i.
5864 */
5865-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5866+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5867 {
5868 int result;
5869
5870@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5871 return result;
5872 }
5873
5874-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5875-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5876+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5877+{
5878+ return cmpxchg(&v->counter, old, new);
5879+}
5880+
5881+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5882+ int new)
5883+{
5884+ return cmpxchg(&(v->counter), old, new);
5885+}
5886+
5887+static inline int atomic_xchg(atomic_t *v, int new)
5888+{
5889+ return xchg(&v->counter, new);
5890+}
5891+
5892+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5893+{
5894+ return xchg(&(v->counter), new);
5895+}
5896
5897 /**
5898 * __atomic_add_unless - add unless the number is a given value
5899@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5900
5901 #define atomic_dec_return(v) atomic_sub_return(1, (v))
5902 #define atomic_inc_return(v) atomic_add_return(1, (v))
5903+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5904+{
5905+ return atomic_add_return_unchecked(1, v);
5906+}
5907
5908 /*
5909 * atomic_sub_and_test - subtract value from variable and test result
5910@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5911 * other cases.
5912 */
5913 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5914+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5915+{
5916+ return atomic_add_return_unchecked(1, v) == 0;
5917+}
5918
5919 /*
5920 * atomic_dec_and_test - decrement by 1 and test
5921@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5922 * Atomically increments @v by 1.
5923 */
5924 #define atomic_inc(v) atomic_add(1, (v))
5925+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
5926+{
5927+ atomic_add_unchecked(1, v);
5928+}
5929
5930 /*
5931 * atomic_dec - decrement and test
5932@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5933 * Atomically decrements @v by 1.
5934 */
5935 #define atomic_dec(v) atomic_sub(1, (v))
5936+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
5937+{
5938+ atomic_sub_unchecked(1, v);
5939+}
5940
5941 /*
5942 * atomic_add_negative - add and test if negative
5943@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5944 * @v: pointer of type atomic64_t
5945 *
5946 */
5947-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
5948+static inline long atomic64_read(const atomic64_t *v)
5949+{
5950+ return ACCESS_ONCE(v->counter);
5951+}
5952+
5953+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5954+{
5955+ return ACCESS_ONCE(v->counter);
5956+}
5957
5958 /*
5959 * atomic64_set - set atomic variable
5960 * @v: pointer of type atomic64_t
5961 * @i: required value
5962 */
5963-#define atomic64_set(v, i) ((v)->counter = (i))
5964+static inline void atomic64_set(atomic64_t *v, long i)
5965+{
5966+ v->counter = i;
5967+}
5968
5969-#define ATOMIC64_OP(op, c_op, asm_op) \
5970-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
5971+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5972+{
5973+ v->counter = i;
5974+}
5975+
5976+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
5977+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
5978 { \
5979 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5980 long temp; \
5981 \
5982 __asm__ __volatile__( \
5983- " .set arch=r4000 \n" \
5984- "1: lld %0, %1 # atomic64_" #op " \n" \
5985- " " #asm_op " %0, %2 \n" \
5986+ " .set mips3 \n" \
5987+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
5988+ "2: " #asm_op " %0, %2 \n" \
5989 " scd %0, %1 \n" \
5990 " beqzl %0, 1b \n" \
5991+ extable \
5992 " .set mips0 \n" \
5993 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5994 : "Ir" (i)); \
5995 } else if (kernel_uses_llsc) { \
5996 long temp; \
5997 \
5998- do { \
5999- __asm__ __volatile__( \
6000- " .set arch=r4000 \n" \
6001- " lld %0, %1 # atomic64_" #op "\n" \
6002- " " #asm_op " %0, %2 \n" \
6003- " scd %0, %1 \n" \
6004- " .set mips0 \n" \
6005- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6006- : "Ir" (i)); \
6007- } while (unlikely(!temp)); \
6008+ __asm__ __volatile__( \
6009+ " .set mips3 \n" \
6010+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6011+ "2: " #asm_op " %0, %2 \n" \
6012+ " scd %0, %1 \n" \
6013+ " beqz %0, 1b \n" \
6014+ extable \
6015+ " .set mips0 \n" \
6016+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6017+ : "Ir" (i)); \
6018 } else { \
6019 unsigned long flags; \
6020 \
6021 raw_local_irq_save(flags); \
6022- v->counter c_op i; \
6023+ __asm__ __volatile__( \
6024+ "2: " #asm_op " %0, %1 \n" \
6025+ extable \
6026+ : "+" GCC_OFF12_ASM() (v->counter) : "Ir" (i)); \
6027 raw_local_irq_restore(flags); \
6028 } \
6029 }
6030
6031-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6032-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6033+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE) \
6034+ __ATOMIC64_OP(op, _unchecked, asm_op##u, )
6035+
6036+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6037+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6038 { \
6039 long result; \
6040 \
6041@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6042 long temp; \
6043 \
6044 __asm__ __volatile__( \
6045- " .set arch=r4000 \n" \
6046+ " .set mips3 \n" \
6047 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6048- " " #asm_op " %0, %1, %3 \n" \
6049+ "2: " #asm_op " %0, %1, %3 \n" \
6050 " scd %0, %2 \n" \
6051 " beqzl %0, 1b \n" \
6052- " " #asm_op " %0, %1, %3 \n" \
6053+ post_op \
6054+ extable \
6055+ "4: " #asm_op " %0, %1, %3 \n" \
6056+ "5: \n" \
6057 " .set mips0 \n" \
6058 : "=&r" (result), "=&r" (temp), \
6059 "+" GCC_OFF12_ASM() (v->counter) \
6060@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6061 } else if (kernel_uses_llsc) { \
6062 long temp; \
6063 \
6064- do { \
6065- __asm__ __volatile__( \
6066- " .set arch=r4000 \n" \
6067- " lld %1, %2 # atomic64_" #op "_return\n" \
6068- " " #asm_op " %0, %1, %3 \n" \
6069- " scd %0, %2 \n" \
6070- " .set mips0 \n" \
6071- : "=&r" (result), "=&r" (temp), \
6072- "=" GCC_OFF12_ASM() (v->counter) \
6073- : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6074- : "memory"); \
6075- } while (unlikely(!result)); \
6076+ __asm__ __volatile__( \
6077+ " .set mips3 \n" \
6078+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6079+ "2: " #asm_op " %0, %1, %3 \n" \
6080+ " scd %0, %2 \n" \
6081+ " beqz %0, 1b \n" \
6082+ post_op \
6083+ extable \
6084+ "4: " #asm_op " %0, %1, %3 \n" \
6085+ "5: \n" \
6086+ " .set mips0 \n" \
6087+ : "=&r" (result), "=&r" (temp), \
6088+ "=" GCC_OFF12_ASM() (v->counter) \
6089+ : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6090+ : "memory"); \
6091 \
6092- result = temp; result c_op i; \
6093 } else { \
6094 unsigned long flags; \
6095 \
6096 raw_local_irq_save(flags); \
6097- result = v->counter; \
6098- result c_op i; \
6099- v->counter = result; \
6100+ __asm__ __volatile__( \
6101+ " ld %0, %1 \n" \
6102+ "2: " #asm_op " %0, %1, %2 \n" \
6103+ " sd %0, %1 \n" \
6104+ "3: \n" \
6105+ extable \
6106+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
6107+ : "Ir" (i)); \
6108 raw_local_irq_restore(flags); \
6109 } \
6110 \
6111@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6112 return result; \
6113 }
6114
6115-#define ATOMIC64_OPS(op, c_op, asm_op) \
6116- ATOMIC64_OP(op, c_op, asm_op) \
6117- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6118+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE) \
6119+ __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , )
6120
6121-ATOMIC64_OPS(add, +=, daddu)
6122-ATOMIC64_OPS(sub, -=, dsubu)
6123+#define ATOMIC64_OPS(op, asm_op) \
6124+ ATOMIC64_OP(op, asm_op) \
6125+ ATOMIC64_OP_RETURN(op, asm_op)
6126+
6127+ATOMIC64_OPS(add, dadd)
6128+ATOMIC64_OPS(sub, dsub)
6129
6130 #undef ATOMIC64_OPS
6131 #undef ATOMIC64_OP_RETURN
6132+#undef __ATOMIC64_OP_RETURN
6133 #undef ATOMIC64_OP
6134+#undef __ATOMIC64_OP
6135+#undef __OVERFLOW_EXTABLE
6136+#undef __OVERFLOW_POST
6137
6138 /*
6139 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6140@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6141 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6142 * The function returns the old value of @v minus @i.
6143 */
6144-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6145+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6146 {
6147 long result;
6148
6149@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6150 return result;
6151 }
6152
6153-#define atomic64_cmpxchg(v, o, n) \
6154- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6155-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6156+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6157+{
6158+ return cmpxchg(&v->counter, old, new);
6159+}
6160+
6161+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6162+ long new)
6163+{
6164+ return cmpxchg(&(v->counter), old, new);
6165+}
6166+
6167+static inline long atomic64_xchg(atomic64_t *v, long new)
6168+{
6169+ return xchg(&v->counter, new);
6170+}
6171+
6172+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6173+{
6174+ return xchg(&(v->counter), new);
6175+}
6176
6177 /**
6178 * atomic64_add_unless - add unless the number is a given value
6179@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6180
6181 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6182 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6183+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6184
6185 /*
6186 * atomic64_sub_and_test - subtract value from variable and test result
6187@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6188 * other cases.
6189 */
6190 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6191+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6192
6193 /*
6194 * atomic64_dec_and_test - decrement by 1 and test
6195@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6196 * Atomically increments @v by 1.
6197 */
6198 #define atomic64_inc(v) atomic64_add(1, (v))
6199+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6200
6201 /*
6202 * atomic64_dec - decrement and test
6203@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6204 * Atomically decrements @v by 1.
6205 */
6206 #define atomic64_dec(v) atomic64_sub(1, (v))
6207+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6208
6209 /*
6210 * atomic64_add_negative - add and test if negative
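
The net effect of the atomic rework above: the plain atomic/atomic64 operations become overflow-checked under PAX_REFCOUNT (trapping add/dadd plus exception-table fixups), while the _unchecked family keeps the wrapping addu/daddu semantics for counters where wraparound is intentional. A portable sketch of the same policy using a compiler builtin; this shows the concept, not the MIPS trap mechanism:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int counter;

    static bool counter_inc_checked(void)
    {
        unsigned int next;
        if (__builtin_add_overflow(counter, 1u, &next))
            return false;          /* would wrap: refuse, don't apply */
        counter = next;
        return true;
    }

    static void counter_inc_unchecked(void)
    {
        counter++;                 /* wrapping is fine here by design */
    }

    int main(void)
    {
        counter = UINT_MAX;
        printf("checked inc ok? %d\n", counter_inc_checked()); /* 0 */
        counter_inc_unchecked();                               /* wraps to 0 */
        printf("counter now %u\n", counter);
        return 0;
    }
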
6211diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6212index 2b8bbbc..4556df6 100644
6213--- a/arch/mips/include/asm/barrier.h
6214+++ b/arch/mips/include/asm/barrier.h
6215@@ -133,7 +133,7 @@
6216 do { \
6217 compiletime_assert_atomic_type(*p); \
6218 smp_mb(); \
6219- ACCESS_ONCE(*p) = (v); \
6220+ ACCESS_ONCE_RW(*p) = (v); \
6221 } while (0)
6222
6223 #define smp_load_acquire(p) \
6224diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6225index b4db69f..8f3b093 100644
6226--- a/arch/mips/include/asm/cache.h
6227+++ b/arch/mips/include/asm/cache.h
6228@@ -9,10 +9,11 @@
6229 #ifndef _ASM_CACHE_H
6230 #define _ASM_CACHE_H
6231
6232+#include <linux/const.h>
6233 #include <kmalloc.h>
6234
6235 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6236-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6237+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6238
6239 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6240 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6241diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6242index eb4d95d..f2f7f93 100644
6243--- a/arch/mips/include/asm/elf.h
6244+++ b/arch/mips/include/asm/elf.h
6245@@ -405,15 +405,18 @@ extern const char *__elf_platform;
6246 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6247 #endif
6248
6249+#ifdef CONFIG_PAX_ASLR
6250+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6251+
6252+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6253+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6254+#endif
6255+
6256 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6257 struct linux_binprm;
6258 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6259 int uses_interp);
6260
6261-struct mm_struct;
6262-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6263-#define arch_randomize_brk arch_randomize_brk
6264-
6265 struct arch_elf_state {
6266 int fp_abi;
6267 int interp_fp_abi;
6268diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6269index c1f6afa..38cc6e9 100644
6270--- a/arch/mips/include/asm/exec.h
6271+++ b/arch/mips/include/asm/exec.h
6272@@ -12,6 +12,6 @@
6273 #ifndef _ASM_EXEC_H
6274 #define _ASM_EXEC_H
6275
6276-extern unsigned long arch_align_stack(unsigned long sp);
6277+#define arch_align_stack(x) ((x) & ~0xfUL)
6278
6279 #endif /* _ASM_EXEC_H */
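
arch_align_stack() loses its per-process randomization here (PaX handles stack randomization centrally through RANDUSTACK instead, matching the removal from process.c further below) and becomes a plain round-down to a 16-byte boundary. The macro's effect, checked in isolation:

    #include <assert.h>

    #define arch_align_stack(x) ((x) & ~0xfUL)

    int main(void)
    {
        assert(arch_align_stack(0x7fff1237UL) == 0x7fff1230UL);
        return 0;
    }
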
6280diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6281index 9e8ef59..1139d6b 100644
6282--- a/arch/mips/include/asm/hw_irq.h
6283+++ b/arch/mips/include/asm/hw_irq.h
6284@@ -10,7 +10,7 @@
6285
6286 #include <linux/atomic.h>
6287
6288-extern atomic_t irq_err_count;
6289+extern atomic_unchecked_t irq_err_count;
6290
6291 /*
6292 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6293diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6294index 46dfc3c..a16b13a 100644
6295--- a/arch/mips/include/asm/local.h
6296+++ b/arch/mips/include/asm/local.h
6297@@ -12,15 +12,25 @@ typedef struct
6298 atomic_long_t a;
6299 } local_t;
6300
6301+typedef struct {
6302+ atomic_long_unchecked_t a;
6303+} local_unchecked_t;
6304+
6305 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6306
6307 #define local_read(l) atomic_long_read(&(l)->a)
6308+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6309 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6310+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6311
6312 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6313+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6314 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6315+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6316 #define local_inc(l) atomic_long_inc(&(l)->a)
6317+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6318 #define local_dec(l) atomic_long_dec(&(l)->a)
6319+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6320
6321 /*
6322 * Same as above, but return the result value
6323@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6324 return result;
6325 }
6326
6327+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6328+{
6329+ unsigned long result;
6330+
6331+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6332+ unsigned long temp;
6333+
6334+ __asm__ __volatile__(
6335+ " .set mips3 \n"
6336+ "1:" __LL "%1, %2 # local_add_return \n"
6337+ " addu %0, %1, %3 \n"
6338+ __SC "%0, %2 \n"
6339+ " beqzl %0, 1b \n"
6340+ " addu %0, %1, %3 \n"
6341+ " .set mips0 \n"
6342+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6343+ : "Ir" (i), "m" (l->a.counter)
6344+ : "memory");
6345+ } else if (kernel_uses_llsc) {
6346+ unsigned long temp;
6347+
6348+ __asm__ __volatile__(
6349+ " .set mips3 \n"
6350+ "1:" __LL "%1, %2 # local_add_return \n"
6351+ " addu %0, %1, %3 \n"
6352+ __SC "%0, %2 \n"
6353+ " beqz %0, 1b \n"
6354+ " addu %0, %1, %3 \n"
6355+ " .set mips0 \n"
6356+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6357+ : "Ir" (i), "m" (l->a.counter)
6358+ : "memory");
6359+ } else {
6360+ unsigned long flags;
6361+
6362+ local_irq_save(flags);
6363+ result = l->a.counter;
6364+ result += i;
6365+ l->a.counter = result;
6366+ local_irq_restore(flags);
6367+ }
6368+
6369+ return result;
6370+}
6371+
6372 static __inline__ long local_sub_return(long i, local_t * l)
6373 {
6374 unsigned long result;
6375@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6376
6377 #define local_cmpxchg(l, o, n) \
6378 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6379+#define local_cmpxchg_unchecked(l, o, n) \
6380+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6381 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6382
6383 /**
6384diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6385index 154b70a..426ae3d 100644
6386--- a/arch/mips/include/asm/page.h
6387+++ b/arch/mips/include/asm/page.h
6388@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6389 #ifdef CONFIG_CPU_MIPS32
6390 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6391 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6392- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6393+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6394 #else
6395 typedef struct { unsigned long long pte; } pte_t;
6396 #define pte_val(x) ((x).pte)
6397diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6398index b336037..5b874cc 100644
6399--- a/arch/mips/include/asm/pgalloc.h
6400+++ b/arch/mips/include/asm/pgalloc.h
6401@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6402 {
6403 set_pud(pud, __pud((unsigned long)pmd));
6404 }
6405+
6406+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6407+{
6408+ pud_populate(mm, pud, pmd);
6409+}
6410 #endif
6411
6412 /*
6413diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6414index 845016d..3303268 100644
6415--- a/arch/mips/include/asm/pgtable.h
6416+++ b/arch/mips/include/asm/pgtable.h
6417@@ -20,6 +20,9 @@
6418 #include <asm/io.h>
6419 #include <asm/pgtable-bits.h>
6420
6421+#define ktla_ktva(addr) (addr)
6422+#define ktva_ktla(addr) (addr)
6423+
6424 struct mm_struct;
6425 struct vm_area_struct;
6426
6427diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6428index e4440f9..8fb0005 100644
6429--- a/arch/mips/include/asm/thread_info.h
6430+++ b/arch/mips/include/asm/thread_info.h
6431@@ -106,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
6432 #define TIF_SECCOMP 4 /* secure computing */
6433 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6434 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6435+/* li takes a 32bit immediate */
6436+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6437+
6438 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6439 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6440 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6441@@ -141,14 +144,16 @@ static inline struct thread_info *current_thread_info(void)
6442 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6443 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6444 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6445+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6446
6447 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6448 _TIF_SYSCALL_AUDIT | \
6449- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6450+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6451+ _TIF_GRSEC_SETXID)
6452
6453 /* work to do in syscall_trace_leave() */
6454 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6455- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6456+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6457
6458 /* work to do on interrupt/exception return */
6459 #define _TIF_WORK_MASK \
6460@@ -156,7 +161,7 @@ static inline struct thread_info *current_thread_info(void)
6461 /* work to do on any return to u-space */
6462 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6463 _TIF_WORK_SYSCALL_EXIT | \
6464- _TIF_SYSCALL_TRACEPOINT)
6465+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6466
6467 /*
6468 * We stash processor id into a COP0 register to retrieve it fast
6469diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6470index bf8b324..cec5705 100644
6471--- a/arch/mips/include/asm/uaccess.h
6472+++ b/arch/mips/include/asm/uaccess.h
6473@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6474 __ok == 0; \
6475 })
6476
6477+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6478 #define access_ok(type, addr, size) \
6479 likely(__access_ok((addr), (size), __access_mask))
6480
6481diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6482index 1188e00..41cf144 100644
6483--- a/arch/mips/kernel/binfmt_elfn32.c
6484+++ b/arch/mips/kernel/binfmt_elfn32.c
6485@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6486 #undef ELF_ET_DYN_BASE
6487 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6488
6489+#ifdef CONFIG_PAX_ASLR
6490+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6491+
6492+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6493+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6494+#endif
6495+
6496 #include <asm/processor.h>
6497 #include <linux/module.h>
6498 #include <linux/elfcore.h>
6499diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6500index 9287678..f870e47 100644
6501--- a/arch/mips/kernel/binfmt_elfo32.c
6502+++ b/arch/mips/kernel/binfmt_elfo32.c
6503@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6504 #undef ELF_ET_DYN_BASE
6505 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6506
6507+#ifdef CONFIG_PAX_ASLR
6508+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6509+
6510+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6511+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6512+#endif
6513+
6514 #include <asm/processor.h>
6515
6516 #include <linux/module.h>
6517diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6518index a74ec3a..4f06f18 100644
6519--- a/arch/mips/kernel/i8259.c
6520+++ b/arch/mips/kernel/i8259.c
6521@@ -202,7 +202,7 @@ spurious_8259A_irq:
6522 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6523 spurious_irq_mask |= irqmask;
6524 }
6525- atomic_inc(&irq_err_count);
6526+ atomic_inc_unchecked(&irq_err_count);
6527 /*
6528 * Theoretically we do not have to handle this IRQ,
6529 * but in Linux this does not cause problems and is
6530diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6531index 44a1f79..2bd6aa3 100644
6532--- a/arch/mips/kernel/irq-gt641xx.c
6533+++ b/arch/mips/kernel/irq-gt641xx.c
6534@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6535 }
6536 }
6537
6538- atomic_inc(&irq_err_count);
6539+ atomic_inc_unchecked(&irq_err_count);
6540 }
6541
6542 void __init gt641xx_irq_init(void)
6543diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6544index d2bfbc2..a8eacd2 100644
6545--- a/arch/mips/kernel/irq.c
6546+++ b/arch/mips/kernel/irq.c
6547@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6548 printk("unexpected IRQ # %d\n", irq);
6549 }
6550
6551-atomic_t irq_err_count;
6552+atomic_unchecked_t irq_err_count;
6553
6554 int arch_show_interrupts(struct seq_file *p, int prec)
6555 {
6556- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6557+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6558 return 0;
6559 }
6560
6561 asmlinkage void spurious_interrupt(void)
6562 {
6563- atomic_inc(&irq_err_count);
6564+ atomic_inc_unchecked(&irq_err_count);
6565 }
6566
6567 void __init init_IRQ(void)
6568@@ -109,7 +109,10 @@ void __init init_IRQ(void)
6569 #endif
6570 }
6571
6572+
6573 #ifdef DEBUG_STACKOVERFLOW
6574+extern void gr_handle_kernel_exploit(void);
6575+
6576 static inline void check_stack_overflow(void)
6577 {
6578 unsigned long sp;
6579@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
6580 printk("do_IRQ: stack overflow: %ld\n",
6581 sp - sizeof(struct thread_info));
6582 dump_stack();
6583+ gr_handle_kernel_exploit();
6584 }
6585 }
6586 #else
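
check_stack_overflow() already warned when the stack pointer came within sizeof(struct thread_info) of the stack base; the added gr_handle_kernel_exploit() call escalates that condition as an attack indicator. The proximity test itself reduces to masking sp down to the stack base, sketched here with illustrative sizes:

    /* Sketch of the headroom test; THREAD_SIZE and the overhead value are
     * stand-ins, and kernel stacks are assumed THREAD_SIZE-aligned. */
    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE (16UL * 1024)
    #define OVERHEAD    64UL   /* stand-in for sizeof(struct thread_info) */

    static int stack_nearly_exhausted(uintptr_t sp)
    {
        uintptr_t base = sp & ~(THREAD_SIZE - 1);
        return sp - base < OVERHEAD;
    }

    int main(void)
    {
        printf("%d\n", stack_nearly_exhausted(0x8000400020UL));
        return 0;
    }
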
6587diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6588index 0614717..002fa43 100644
6589--- a/arch/mips/kernel/pm-cps.c
6590+++ b/arch/mips/kernel/pm-cps.c
6591@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6592 nc_core_ready_count = nc_addr;
6593
6594 /* Ensure ready_count is zero-initialised before the assembly runs */
6595- ACCESS_ONCE(*nc_core_ready_count) = 0;
6596+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6597 coupled_barrier(&per_cpu(pm_barrier, core), online);
6598
6599 /* Run the generated entry code */
6600diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6601index 85bff5d..39bc202 100644
6602--- a/arch/mips/kernel/process.c
6603+++ b/arch/mips/kernel/process.c
6604@@ -534,18 +534,6 @@ out:
6605 return pc;
6606 }
6607
6608-/*
6609- * Don't forget that the stack pointer must be aligned on a 8 bytes
6610- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6611- */
6612-unsigned long arch_align_stack(unsigned long sp)
6613-{
6614- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6615- sp -= get_random_int() & ~PAGE_MASK;
6616-
6617- return sp & ALMASK;
6618-}
6619-
6620 static void arch_dump_stack(void *info)
6621 {
6622 struct pt_regs *regs;
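
Deleting arch_align_stack() here removes the per-exec get_random_int() jitter, which PAX_ASLR supersedes with its own delta_stack; the alignment half of the job survives as a constant-mask macro (the powerpc hunk to asm/exec.h later in this patch makes the same trade, and the MIPS side presumably gains an equivalent define outside this excerpt). Shape of the replacement, sketched with the ALMASK constant the deleted code used:

    /* hypothetical MIPS equivalent of the powerpc asm/exec.h change */
    #define arch_align_stack(sp) ((sp) & ALMASK)
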
6623diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6624index 5104528..950bbdc 100644
6625--- a/arch/mips/kernel/ptrace.c
6626+++ b/arch/mips/kernel/ptrace.c
6627@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6628 return ret;
6629 }
6630
6631+#ifdef CONFIG_GRKERNSEC_SETXID
6632+extern void gr_delayed_cred_worker(void);
6633+#endif
6634+
6635 /*
6636 * Notification of system call entry/exit
6637 * - triggered by current->work.syscall_trace
6638@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6639 tracehook_report_syscall_entry(regs))
6640 ret = -1;
6641
6642+#ifdef CONFIG_GRKERNSEC_SETXID
6643+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6644+ gr_delayed_cred_worker();
6645+#endif
6646+
6647 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6648 trace_sys_enter(regs, regs->regs[2]);
6649
6650diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6651index 07fc524..b9d7f28 100644
6652--- a/arch/mips/kernel/reset.c
6653+++ b/arch/mips/kernel/reset.c
6654@@ -13,6 +13,7 @@
6655 #include <linux/reboot.h>
6656
6657 #include <asm/reboot.h>
6658+#include <asm/bug.h>
6659
6660 /*
6661 * Urgs ... Too many MIPS machines to handle this in a generic way.
6662@@ -29,16 +30,19 @@ void machine_restart(char *command)
6663 {
6664 if (_machine_restart)
6665 _machine_restart(command);
6666+ BUG();
6667 }
6668
6669 void machine_halt(void)
6670 {
6671 if (_machine_halt)
6672 _machine_halt();
6673+ BUG();
6674 }
6675
6676 void machine_power_off(void)
6677 {
6678 if (pm_power_off)
6679 pm_power_off();
6680+ BUG();
6681 }
6682diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6683index 2242bdd..b284048 100644
6684--- a/arch/mips/kernel/sync-r4k.c
6685+++ b/arch/mips/kernel/sync-r4k.c
6686@@ -18,8 +18,8 @@
6687 #include <asm/mipsregs.h>
6688
6689 static atomic_t count_start_flag = ATOMIC_INIT(0);
6690-static atomic_t count_count_start = ATOMIC_INIT(0);
6691-static atomic_t count_count_stop = ATOMIC_INIT(0);
6692+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6693+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6694 static atomic_t count_reference = ATOMIC_INIT(0);
6695
6696 #define COUNTON 100
6697@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6698
6699 for (i = 0; i < NR_LOOPS; i++) {
6700 /* slaves loop on '!= 2' */
6701- while (atomic_read(&count_count_start) != 1)
6702+ while (atomic_read_unchecked(&count_count_start) != 1)
6703 mb();
6704- atomic_set(&count_count_stop, 0);
6705+ atomic_set_unchecked(&count_count_stop, 0);
6706 smp_wmb();
6707
6708 /* this lets the slaves write their count register */
6709- atomic_inc(&count_count_start);
6710+ atomic_inc_unchecked(&count_count_start);
6711
6712 /*
6713 * Everyone initialises count in the last loop:
6714@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6715 /*
6716 * Wait for all slaves to leave the synchronization point:
6717 */
6718- while (atomic_read(&count_count_stop) != 1)
6719+ while (atomic_read_unchecked(&count_count_stop) != 1)
6720 mb();
6721- atomic_set(&count_count_start, 0);
6722+ atomic_set_unchecked(&count_count_start, 0);
6723 smp_wmb();
6724- atomic_inc(&count_count_stop);
6725+ atomic_inc_unchecked(&count_count_stop);
6726 }
6727 /* Arrange for an interrupt in a short while */
6728 write_c0_compare(read_c0_count() + COUNTON);
6729@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6730 initcount = atomic_read(&count_reference);
6731
6732 for (i = 0; i < NR_LOOPS; i++) {
6733- atomic_inc(&count_count_start);
6734- while (atomic_read(&count_count_start) != 2)
6735+ atomic_inc_unchecked(&count_count_start);
6736+ while (atomic_read_unchecked(&count_count_start) != 2)
6737 mb();
6738
6739 /*
6740@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6741 if (i == NR_LOOPS-1)
6742 write_c0_count(initcount);
6743
6744- atomic_inc(&count_count_stop);
6745- while (atomic_read(&count_count_stop) != 2)
6746+ atomic_inc_unchecked(&count_count_stop);
6747+ while (atomic_read_unchecked(&count_count_stop) != 2)
6748 mb();
6749 }
6750 /* Arrange for an interrupt in a short while */
6751diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6752index c3b41e2..46c32e9 100644
6753--- a/arch/mips/kernel/traps.c
6754+++ b/arch/mips/kernel/traps.c
6755@@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6756 siginfo_t info;
6757
6758 prev_state = exception_enter();
6759- die_if_kernel("Integer overflow", regs);
6760+ if (unlikely(!user_mode(regs))) {
6761+
6762+#ifdef CONFIG_PAX_REFCOUNT
6763+ if (fixup_exception(regs)) {
6764+ pax_report_refcount_overflow(regs);
6765+ exception_exit(prev_state);
6766+ return;
6767+ }
6768+#endif
6769+
6770+ die("Integer overflow", regs);
6771+ }
6772
6773 info.si_code = FPE_INTOVF;
6774 info.si_signo = SIGFPE;
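
The do_ov() rework is the MIPS delivery path for PAX_REFCOUNT: checked atomics trap on signed overflow, the trap lands here, and a kernel-mode trap survives only if the faulting instruction has an exception-table fixup, in which case the overflow is reported and execution resumes past it; user-mode overflows still take the SIGFPE/FPE_INTOVF path below. In outline (types and names invented for the sketch):

    struct extable_entry { unsigned long insn, fixup; };

    /* sketch: report the overflow, then skip the trapping instruction */
    static int fixup_overflow(unsigned long *pc, const struct extable_entry *e)
    {
        if (!e || e->insn != *pc)
            return 0;           /* no fixup registered: fall through to die() */
        /* pax_report_refcount_overflow() logs the task and PC here */
        *pc = e->fixup;
        return 1;
    }
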
6775diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6776index 270bbd4..c01932a 100644
6777--- a/arch/mips/kvm/mips.c
6778+++ b/arch/mips/kvm/mips.c
6779@@ -815,7 +815,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6780 return r;
6781 }
6782
6783-int kvm_arch_init(void *opaque)
6784+int kvm_arch_init(const void *opaque)
6785 {
6786 if (kvm_mips_callbacks) {
6787 kvm_err("kvm: module already exists\n");
6788diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6789index 70ab5d6..62940fe 100644
6790--- a/arch/mips/mm/fault.c
6791+++ b/arch/mips/mm/fault.c
6792@@ -28,6 +28,23 @@
6793 #include <asm/highmem.h> /* For VMALLOC_END */
6794 #include <linux/kdebug.h>
6795
6796+#ifdef CONFIG_PAX_PAGEEXEC
6797+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6798+{
6799+ unsigned long i;
6800+
6801+ printk(KERN_ERR "PAX: bytes at PC: ");
6802+ for (i = 0; i < 5; i++) {
6803+ unsigned int c;
6804+ if (get_user(c, (unsigned int *)pc+i))
6805+ printk(KERN_CONT "???????? ");
6806+ else
6807+ printk(KERN_CONT "%08x ", c);
6808+ }
6809+ printk("\n");
6810+}
6811+#endif
6812+
6813 /*
6814 * This routine handles page faults. It determines the address,
6815 * and the problem, and then passes it off to one of the appropriate
6816@@ -201,6 +218,14 @@ bad_area:
6817 bad_area_nosemaphore:
6818 /* User mode accesses just cause a SIGSEGV */
6819 if (user_mode(regs)) {
6820+
6821+#ifdef CONFIG_PAX_PAGEEXEC
6822+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6823+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6824+ do_group_exit(SIGKILL);
6825+ }
6826+#endif
6827+
6828 tsk->thread.cp0_badvaddr = address;
6829 tsk->thread.error_code = write;
6830 #if 0
6831diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6832index f1baadd..5472dca 100644
6833--- a/arch/mips/mm/mmap.c
6834+++ b/arch/mips/mm/mmap.c
6835@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6836 struct vm_area_struct *vma;
6837 unsigned long addr = addr0;
6838 int do_color_align;
6839+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6840 struct vm_unmapped_area_info info;
6841
6842 if (unlikely(len > TASK_SIZE))
6843@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6844 do_color_align = 1;
6845
6846 /* requesting a specific address */
6847+
6848+#ifdef CONFIG_PAX_RANDMMAP
6849+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6850+#endif
6851+
6852 if (addr) {
6853 if (do_color_align)
6854 addr = COLOUR_ALIGN(addr, pgoff);
6855@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6856 addr = PAGE_ALIGN(addr);
6857
6858 vma = find_vma(mm, addr);
6859- if (TASK_SIZE - len >= addr &&
6860- (!vma || addr + len <= vma->vm_start))
6861+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6862 return addr;
6863 }
6864
6865 info.length = len;
6866 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6867 info.align_offset = pgoff << PAGE_SHIFT;
6868+ info.threadstack_offset = offset;
6869
6870 if (dir == DOWN) {
6871 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6872@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6873 {
6874 unsigned long random_factor = 0UL;
6875
6876+#ifdef CONFIG_PAX_RANDMMAP
6877+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6878+#endif
6879+
6880 if (current->flags & PF_RANDOMIZE) {
6881 random_factor = get_random_int();
6882 random_factor = random_factor << PAGE_SHIFT;
6883@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6884
6885 if (mmap_is_legacy()) {
6886 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6887+
6888+#ifdef CONFIG_PAX_RANDMMAP
6889+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6890+ mm->mmap_base += mm->delta_mmap;
6891+#endif
6892+
6893 mm->get_unmapped_area = arch_get_unmapped_area;
6894 } else {
6895 mm->mmap_base = mmap_base(random_factor);
6896+
6897+#ifdef CONFIG_PAX_RANDMMAP
6898+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6899+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6900+#endif
6901+
6902 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6903 }
6904 }
6905
6906-static inline unsigned long brk_rnd(void)
6907-{
6908- unsigned long rnd = get_random_int();
6909-
6910- rnd = rnd << PAGE_SHIFT;
6911- /* 8MB for 32bit, 256MB for 64bit */
6912- if (TASK_IS_32BIT_ADDR)
6913- rnd = rnd & 0x7ffffful;
6914- else
6915- rnd = rnd & 0xffffffful;
6916-
6917- return rnd;
6918-}
6919-
6920-unsigned long arch_randomize_brk(struct mm_struct *mm)
6921-{
6922- unsigned long base = mm->brk;
6923- unsigned long ret;
6924-
6925- ret = PAGE_ALIGN(base + brk_rnd());
6926-
6927- if (ret < mm->brk)
6928- return mm->brk;
6929-
6930- return ret;
6931-}
6932-
6933 int __virt_addr_valid(const volatile void *kaddr)
6934 {
6935 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
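
The sign asymmetry in the RANDMMAP adjustments is deliberate: the legacy (bottom-up) layout grows upward from TASK_UNMAPPED_BASE, so its base can simply slide up by delta_mmap, while the top-down base sits just under the stack and must retreat by delta_mmap plus delta_stack to preserve the randomized stack gap. Condensed (helper name invented; a sketch, not kernel code):

    unsigned long randomized_base(int legacy, unsigned long base,
                                  unsigned long delta_mmap,
                                  unsigned long delta_stack)
    {
        /* bottom-up: slide up; top-down: pull down away from the stack */
        return legacy ? base + delta_mmap
                      : base - (delta_mmap + delta_stack);
    }

check_heap_stack_gap() plus the new threadstack_offset field thread a per-mapping randomized guard gap (from gr_rand_threadstack_offset()) through the unmapped-area search, replacing the open-coded vma->vm_start comparison.
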
6936diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
6937index d07e041..bedb72b 100644
6938--- a/arch/mips/pci/pci-octeon.c
6939+++ b/arch/mips/pci/pci-octeon.c
6940@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
6941
6942
6943 static struct pci_ops octeon_pci_ops = {
6944- octeon_read_config,
6945- octeon_write_config,
6946+ .read = octeon_read_config,
6947+ .write = octeon_write_config,
6948 };
6949
6950 static struct resource octeon_pci_mem_resource = {
6951diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
6952index 5e36c33..eb4a17b 100644
6953--- a/arch/mips/pci/pcie-octeon.c
6954+++ b/arch/mips/pci/pcie-octeon.c
6955@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
6956 }
6957
6958 static struct pci_ops octeon_pcie0_ops = {
6959- octeon_pcie0_read_config,
6960- octeon_pcie0_write_config,
6961+ .read = octeon_pcie0_read_config,
6962+ .write = octeon_pcie0_write_config,
6963 };
6964
6965 static struct resource octeon_pcie0_mem_resource = {
6966@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
6967 };
6968
6969 static struct pci_ops octeon_pcie1_ops = {
6970- octeon_pcie1_read_config,
6971- octeon_pcie1_write_config,
6972+ .read = octeon_pcie1_read_config,
6973+ .write = octeon_pcie1_write_config,
6974 };
6975
6976 static struct resource octeon_pcie1_mem_resource = {
6977@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
6978 };
6979
6980 static struct pci_ops octeon_dummy_ops = {
6981- octeon_dummy_read_config,
6982- octeon_dummy_write_config,
6983+ .read = octeon_dummy_read_config,
6984+ .write = octeon_dummy_write_config,
6985 };
6986
6987 static struct resource octeon_dummy_mem_resource = {
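
The pci_ops rewrites here are pure form: positional initializers bind by field order and break silently if struct pci_ops is reordered or grows a new leading member (which grsecurity's constification work makes likelier), whereas designated initializers bind by name. Self-contained illustration (struct and function names invented):

    struct ops {
        int (*read)(void);
        int (*write)(void);
    };

    static int rd(void) { return 0; }
    static int wr(void) { return 1; }

    /* positional: silently misbinds if struct ops is ever reordered */
    static struct ops positional = { rd, wr };

    /* designated: binds by member name, immune to reordering */
    static struct ops designated = { .read = rd, .write = wr };
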
6988diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
6989index a2358b4..7cead4f 100644
6990--- a/arch/mips/sgi-ip27/ip27-nmi.c
6991+++ b/arch/mips/sgi-ip27/ip27-nmi.c
6992@@ -187,9 +187,9 @@ void
6993 cont_nmi_dump(void)
6994 {
6995 #ifndef REAL_NMI_SIGNAL
6996- static atomic_t nmied_cpus = ATOMIC_INIT(0);
6997+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
6998
6999- atomic_inc(&nmied_cpus);
7000+ atomic_inc_unchecked(&nmied_cpus);
7001 #endif
7002 /*
7003 * Only allow 1 cpu to proceed
7004@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7005 udelay(10000);
7006 }
7007 #else
7008- while (atomic_read(&nmied_cpus) != num_online_cpus());
7009+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7010 #endif
7011
7012 /*
7013diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7014index a046b30..6799527 100644
7015--- a/arch/mips/sni/rm200.c
7016+++ b/arch/mips/sni/rm200.c
7017@@ -270,7 +270,7 @@ spurious_8259A_irq:
7018 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7019 spurious_irq_mask |= irqmask;
7020 }
7021- atomic_inc(&irq_err_count);
7022+ atomic_inc_unchecked(&irq_err_count);
7023 /*
7024 * Theoretically we do not have to handle this IRQ,
7025 * but in Linux this does not cause problems and is
7026diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7027index 41e873b..34d33a7 100644
7028--- a/arch/mips/vr41xx/common/icu.c
7029+++ b/arch/mips/vr41xx/common/icu.c
7030@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7031
7032 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7033
7034- atomic_inc(&irq_err_count);
7035+ atomic_inc_unchecked(&irq_err_count);
7036
7037 return -1;
7038 }
7039diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7040index ae0e4ee..e8f0692 100644
7041--- a/arch/mips/vr41xx/common/irq.c
7042+++ b/arch/mips/vr41xx/common/irq.c
7043@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7044 irq_cascade_t *cascade;
7045
7046 if (irq >= NR_IRQS) {
7047- atomic_inc(&irq_err_count);
7048+ atomic_inc_unchecked(&irq_err_count);
7049 return;
7050 }
7051
7052@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7053 ret = cascade->get_irq(irq);
7054 irq = ret;
7055 if (ret < 0)
7056- atomic_inc(&irq_err_count);
7057+ atomic_inc_unchecked(&irq_err_count);
7058 else
7059 irq_dispatch(irq);
7060 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7061diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7062index 967d144..db12197 100644
7063--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7064+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7065@@ -11,12 +11,14 @@
7066 #ifndef _ASM_PROC_CACHE_H
7067 #define _ASM_PROC_CACHE_H
7068
7069+#include <linux/const.h>
7070+
7071 /* L1 cache */
7072
7073 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7074 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7075-#define L1_CACHE_BYTES 16 /* bytes per entry */
7076 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7077+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7078 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7079
7080 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7081diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7082index bcb5df2..84fabd2 100644
7083--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7084+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7085@@ -16,13 +16,15 @@
7086 #ifndef _ASM_PROC_CACHE_H
7087 #define _ASM_PROC_CACHE_H
7088
7089+#include <linux/const.h>
7090+
7091 /*
7092 * L1 cache
7093 */
7094 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7095 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7096-#define L1_CACHE_BYTES 32 /* bytes per entry */
7097 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7098+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7099 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7100
7101 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7102diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7103index 4ce7a01..449202a 100644
7104--- a/arch/openrisc/include/asm/cache.h
7105+++ b/arch/openrisc/include/asm/cache.h
7106@@ -19,11 +19,13 @@
7107 #ifndef __ASM_OPENRISC_CACHE_H
7108 #define __ASM_OPENRISC_CACHE_H
7109
7110+#include <linux/const.h>
7111+
7112 /* FIXME: How can we replace these with values from the CPU...
7113 * they shouldn't be hard-coded!
7114 */
7115
7116-#define L1_CACHE_BYTES 16
7117 #define L1_CACHE_SHIFT 4
7118+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7119
7120 #endif /* __ASM_OPENRISC_CACHE_H */
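
All of these cache.h hunks normalize the same way: L1_CACHE_BYTES stops being a literal that can drift out of sync with L1_CACHE_SHIFT and is derived from it instead, and _AC(1,UL) keeps the constant usable from both C (where it becomes 1UL) and assembler (where a UL suffix would be a syntax error). _AC comes from the header pulled in by the added #include <linux/const.h>; the relevant part of include/uapi/linux/const.h is:

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)   X
    #else
    #define __AC(X,Y)  (X##Y)
    #define _AC(X,Y)   __AC(X,Y)
    #endif

    /* so, with L1_CACHE_SHIFT == 4:                                  */
    /*   C:   L1_CACHE_BYTES == (1UL << 4) == 16UL                    */
    /*   asm: L1_CACHE_BYTES == (1 << 4), no type suffix emitted      */
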
7121diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7122index 226f8ca..9d9b87d 100644
7123--- a/arch/parisc/include/asm/atomic.h
7124+++ b/arch/parisc/include/asm/atomic.h
7125@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7126 return dec;
7127 }
7128
7129+#define atomic64_read_unchecked(v) atomic64_read(v)
7130+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7131+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7132+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7133+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7134+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7135+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7136+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7137+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7138+
7139 #endif /* !CONFIG_64BIT */
7140
7141
7142diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7143index 47f11c7..3420df2 100644
7144--- a/arch/parisc/include/asm/cache.h
7145+++ b/arch/parisc/include/asm/cache.h
7146@@ -5,6 +5,7 @@
7147 #ifndef __ARCH_PARISC_CACHE_H
7148 #define __ARCH_PARISC_CACHE_H
7149
7150+#include <linux/const.h>
7151
7152 /*
7153 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7154@@ -15,13 +16,13 @@
7155 * just ruin performance.
7156 */
7157 #ifdef CONFIG_PA20
7158-#define L1_CACHE_BYTES 64
7159 #define L1_CACHE_SHIFT 6
7160 #else
7161-#define L1_CACHE_BYTES 32
7162 #define L1_CACHE_SHIFT 5
7163 #endif
7164
7165+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7166+
7167 #ifndef __ASSEMBLY__
7168
7169 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7170diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7171index 3391d06..c23a2cc 100644
7172--- a/arch/parisc/include/asm/elf.h
7173+++ b/arch/parisc/include/asm/elf.h
7174@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7175
7176 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7177
7178+#ifdef CONFIG_PAX_ASLR
7179+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7180+
7181+#define PAX_DELTA_MMAP_LEN 16
7182+#define PAX_DELTA_STACK_LEN 16
7183+#endif
7184+
7185 /* This yields a mask that user programs can use to figure out what
7186 instruction set this CPU supports. This could be done in user space,
7187 but it's not easy, and we've already done it here. */
7188diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7189index f213f5b..0af3e8e 100644
7190--- a/arch/parisc/include/asm/pgalloc.h
7191+++ b/arch/parisc/include/asm/pgalloc.h
7192@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7193 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7194 }
7195
7196+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7197+{
7198+ pgd_populate(mm, pgd, pmd);
7199+}
7200+
7201 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7202 {
7203 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7204@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7205 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7206 #define pmd_free(mm, x) do { } while (0)
7207 #define pgd_populate(mm, pmd, pte) BUG()
7208+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7209
7210 #endif
7211
7212diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7213index 22b89d1..ce34230 100644
7214--- a/arch/parisc/include/asm/pgtable.h
7215+++ b/arch/parisc/include/asm/pgtable.h
7216@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7217 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7218 #define PAGE_COPY PAGE_EXECREAD
7219 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7220+
7221+#ifdef CONFIG_PAX_PAGEEXEC
7222+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7223+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7224+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7225+#else
7226+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7227+# define PAGE_COPY_NOEXEC PAGE_COPY
7228+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7229+#endif
7230+
7231 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7232 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7233 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7234diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7235index a5cb070..8604ddc 100644
7236--- a/arch/parisc/include/asm/uaccess.h
7237+++ b/arch/parisc/include/asm/uaccess.h
7238@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7239 const void __user *from,
7240 unsigned long n)
7241 {
7242- int sz = __compiletime_object_size(to);
7243+ size_t sz = __compiletime_object_size(to);
7244 int ret = -EFAULT;
7245
7246- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7247+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7248 ret = __copy_from_user(to, from, n);
7249 else
7250 copy_from_user_overflow();
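
The int-to-size_t change matters because __compiletime_object_size() (a wrapper around GCC's __builtin_object_size(obj, 0)) yields (size_t)-1 when the size is unknown; holding that in an int and comparing against plain -1 only worked through truncation and sign conversion. A runnable user-space analogue (build with -O1 or higher so GCC folds the known size):

    #include <stdio.h>

    static void probe(char *p)
    {
        /* size lost across the call boundary: reports (size_t)-1 */
        size_t sz = __builtin_object_size(p, 0);
        if (sz == (size_t)-1)
            puts("object size unknown at compile time");
    }

    int main(void)
    {
        char buf[16];
        probe(buf);
        /* size visible here: prints 16 */
        printf("%zu\n", __builtin_object_size(buf, 0));
        return 0;
    }
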
7251diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7252index 5822e8e..bc5e638 100644
7253--- a/arch/parisc/kernel/module.c
7254+++ b/arch/parisc/kernel/module.c
7255@@ -98,16 +98,38 @@
7256
7257 /* three functions to determine where in the module core
7258 * or init pieces the location is */
7259+static inline int in_init_rx(struct module *me, void *loc)
7260+{
7261+ return (loc >= me->module_init_rx &&
7262+ loc < (me->module_init_rx + me->init_size_rx));
7263+}
7264+
7265+static inline int in_init_rw(struct module *me, void *loc)
7266+{
7267+ return (loc >= me->module_init_rw &&
7268+ loc < (me->module_init_rw + me->init_size_rw));
7269+}
7270+
7271 static inline int in_init(struct module *me, void *loc)
7272 {
7273- return (loc >= me->module_init &&
7274- loc <= (me->module_init + me->init_size));
7275+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7276+}
7277+
7278+static inline int in_core_rx(struct module *me, void *loc)
7279+{
7280+ return (loc >= me->module_core_rx &&
7281+ loc < (me->module_core_rx + me->core_size_rx));
7282+}
7283+
7284+static inline int in_core_rw(struct module *me, void *loc)
7285+{
7286+ return (loc >= me->module_core_rw &&
7287+ loc < (me->module_core_rw + me->core_size_rw));
7288 }
7289
7290 static inline int in_core(struct module *me, void *loc)
7291 {
7292- return (loc >= me->module_core &&
7293- loc <= (me->module_core + me->core_size));
7294+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7295 }
7296
7297 static inline int in_local(struct module *me, void *loc)
7298@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7299 }
7300
7301 /* align things a bit */
7302- me->core_size = ALIGN(me->core_size, 16);
7303- me->arch.got_offset = me->core_size;
7304- me->core_size += gots * sizeof(struct got_entry);
7305+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7306+ me->arch.got_offset = me->core_size_rw;
7307+ me->core_size_rw += gots * sizeof(struct got_entry);
7308
7309- me->core_size = ALIGN(me->core_size, 16);
7310- me->arch.fdesc_offset = me->core_size;
7311- me->core_size += fdescs * sizeof(Elf_Fdesc);
7312+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7313+ me->arch.fdesc_offset = me->core_size_rw;
7314+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7315
7316 me->arch.got_max = gots;
7317 me->arch.fdesc_max = fdescs;
7318@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7319
7320 BUG_ON(value == 0);
7321
7322- got = me->module_core + me->arch.got_offset;
7323+ got = me->module_core_rw + me->arch.got_offset;
7324 for (i = 0; got[i].addr; i++)
7325 if (got[i].addr == value)
7326 goto out;
7327@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7328 #ifdef CONFIG_64BIT
7329 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7330 {
7331- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7332+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7333
7334 if (!value) {
7335 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7336@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7337
7338 /* Create new one */
7339 fdesc->addr = value;
7340- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7341+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7342 return (Elf_Addr)fdesc;
7343 }
7344 #endif /* CONFIG_64BIT */
7345@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7346
7347 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7348 end = table + sechdrs[me->arch.unwind_section].sh_size;
7349- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7350+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7351
7352 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7353 me->arch.unwind_section, table, end, gp);
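
These module.c changes track PaX's split of every module into an RX image (code, read-execute) and an RW image (data, GOT and OPD function descriptors, which must stay writable for relocation); each former module_core/core_size reference now has to name its half, and the GOT/fdesc tables land in the RW half. Note the bounds also tighten from <= to <, dropping the bogus acceptance of the one-past-the-end address. The composed containment check reduces to (struct and helper names invented):

    /* sketch of the split-range containment test */
    struct range { char *base; unsigned long size; };

    static int in_range(const struct range *r, const char *loc)
    {
        return loc >= r->base && loc < r->base + r->size;
    }

    static int in_core(const struct range *rx, const struct range *rw,
                       const char *loc)
    {
        return in_range(rx, loc) || in_range(rw, loc);
    }
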
7354diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7355index e1ffea2..46ed66e 100644
7356--- a/arch/parisc/kernel/sys_parisc.c
7357+++ b/arch/parisc/kernel/sys_parisc.c
7358@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7359 unsigned long task_size = TASK_SIZE;
7360 int do_color_align, last_mmap;
7361 struct vm_unmapped_area_info info;
7362+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7363
7364 if (len > task_size)
7365 return -ENOMEM;
7366@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7367 goto found_addr;
7368 }
7369
7370+#ifdef CONFIG_PAX_RANDMMAP
7371+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7372+#endif
7373+
7374 if (addr) {
7375 if (do_color_align && last_mmap)
7376 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7377@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7378 info.high_limit = mmap_upper_limit();
7379 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7380 info.align_offset = shared_align_offset(last_mmap, pgoff);
7381+ info.threadstack_offset = offset;
7382 addr = vm_unmapped_area(&info);
7383
7384 found_addr:
7385@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7386 unsigned long addr = addr0;
7387 int do_color_align, last_mmap;
7388 struct vm_unmapped_area_info info;
7389+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7390
7391 #ifdef CONFIG_64BIT
7392 /* This should only ever run for 32-bit processes. */
7393@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7394 }
7395
7396 /* requesting a specific address */
7397+#ifdef CONFIG_PAX_RANDMMAP
7398+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7399+#endif
7400+
7401 if (addr) {
7402 if (do_color_align && last_mmap)
7403 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7404@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7405 info.high_limit = mm->mmap_base;
7406 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7407 info.align_offset = shared_align_offset(last_mmap, pgoff);
7408+ info.threadstack_offset = offset;
7409 addr = vm_unmapped_area(&info);
7410 if (!(addr & ~PAGE_MASK))
7411 goto found_addr;
7412@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7413 mm->mmap_legacy_base = mmap_legacy_base();
7414 mm->mmap_base = mmap_upper_limit();
7415
7416+#ifdef CONFIG_PAX_RANDMMAP
7417+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7418+ mm->mmap_legacy_base += mm->delta_mmap;
7419+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7420+ }
7421+#endif
7422+
7423 if (mmap_is_legacy()) {
7424 mm->mmap_base = mm->mmap_legacy_base;
7425 mm->get_unmapped_area = arch_get_unmapped_area;
7426diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7427index 47ee620..1107387 100644
7428--- a/arch/parisc/kernel/traps.c
7429+++ b/arch/parisc/kernel/traps.c
7430@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7431
7432 down_read(&current->mm->mmap_sem);
7433 vma = find_vma(current->mm,regs->iaoq[0]);
7434- if (vma && (regs->iaoq[0] >= vma->vm_start)
7435- && (vma->vm_flags & VM_EXEC)) {
7436-
7437+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7438 fault_address = regs->iaoq[0];
7439 fault_space = regs->iasq[0];
7440
7441diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7442index e5120e6..8ddb5cc 100644
7443--- a/arch/parisc/mm/fault.c
7444+++ b/arch/parisc/mm/fault.c
7445@@ -15,6 +15,7 @@
7446 #include <linux/sched.h>
7447 #include <linux/interrupt.h>
7448 #include <linux/module.h>
7449+#include <linux/unistd.h>
7450
7451 #include <asm/uaccess.h>
7452 #include <asm/traps.h>
7453@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7454 static unsigned long
7455 parisc_acctyp(unsigned long code, unsigned int inst)
7456 {
7457- if (code == 6 || code == 16)
7458+ if (code == 6 || code == 7 || code == 16)
7459 return VM_EXEC;
7460
7461 switch (inst & 0xf0000000) {
7462@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7463 }
7464 #endif
7465
7466+#ifdef CONFIG_PAX_PAGEEXEC
7467+/*
7468+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7469+ *
7470+ * returns 1 when task should be killed
7471+ * 2 when rt_sigreturn trampoline was detected
7472+ * 3 when unpatched PLT trampoline was detected
7473+ */
7474+static int pax_handle_fetch_fault(struct pt_regs *regs)
7475+{
7476+
7477+#ifdef CONFIG_PAX_EMUPLT
7478+ int err;
7479+
7480+ do { /* PaX: unpatched PLT emulation */
7481+ unsigned int bl, depwi;
7482+
7483+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7484+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7485+
7486+ if (err)
7487+ break;
7488+
7489+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7490+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7491+
7492+ err = get_user(ldw, (unsigned int *)addr);
7493+ err |= get_user(bv, (unsigned int *)(addr+4));
7494+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7495+
7496+ if (err)
7497+ break;
7498+
7499+ if (ldw == 0x0E801096U &&
7500+ bv == 0xEAC0C000U &&
7501+ ldw2 == 0x0E881095U)
7502+ {
7503+ unsigned int resolver, map;
7504+
7505+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7506+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7507+ if (err)
7508+ break;
7509+
7510+ regs->gr[20] = instruction_pointer(regs)+8;
7511+ regs->gr[21] = map;
7512+ regs->gr[22] = resolver;
7513+ regs->iaoq[0] = resolver | 3UL;
7514+ regs->iaoq[1] = regs->iaoq[0] + 4;
7515+ return 3;
7516+ }
7517+ }
7518+ } while (0);
7519+#endif
7520+
7521+#ifdef CONFIG_PAX_EMUTRAMP
7522+
7523+#ifndef CONFIG_PAX_EMUSIGRT
7524+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7525+ return 1;
7526+#endif
7527+
7528+ do { /* PaX: rt_sigreturn emulation */
7529+ unsigned int ldi1, ldi2, bel, nop;
7530+
7531+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7532+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7533+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7534+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7535+
7536+ if (err)
7537+ break;
7538+
7539+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7540+ ldi2 == 0x3414015AU &&
7541+ bel == 0xE4008200U &&
7542+ nop == 0x08000240U)
7543+ {
7544+ regs->gr[25] = (ldi1 & 2) >> 1;
7545+ regs->gr[20] = __NR_rt_sigreturn;
7546+ regs->gr[31] = regs->iaoq[1] + 16;
7547+ regs->sr[0] = regs->iasq[1];
7548+ regs->iaoq[0] = 0x100UL;
7549+ regs->iaoq[1] = regs->iaoq[0] + 4;
7550+ regs->iasq[0] = regs->sr[2];
7551+ regs->iasq[1] = regs->sr[2];
7552+ return 2;
7553+ }
7554+ } while (0);
7555+#endif
7556+
7557+ return 1;
7558+}
7559+
7560+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7561+{
7562+ unsigned long i;
7563+
7564+ printk(KERN_ERR "PAX: bytes at PC: ");
7565+ for (i = 0; i < 5; i++) {
7566+ unsigned int c;
7567+ if (get_user(c, (unsigned int *)pc+i))
7568+ printk(KERN_CONT "???????? ");
7569+ else
7570+ printk(KERN_CONT "%08x ", c);
7571+ }
7572+ printk("\n");
7573+}
7574+#endif
7575+
7576 int fixup_exception(struct pt_regs *regs)
7577 {
7578 const struct exception_table_entry *fix;
7579@@ -234,8 +345,33 @@ retry:
7580
7581 good_area:
7582
7583- if ((vma->vm_flags & acc_type) != acc_type)
7584+ if ((vma->vm_flags & acc_type) != acc_type) {
7585+
7586+#ifdef CONFIG_PAX_PAGEEXEC
7587+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7588+ (address & ~3UL) == instruction_pointer(regs))
7589+ {
7590+ up_read(&mm->mmap_sem);
7591+ switch (pax_handle_fetch_fault(regs)) {
7592+
7593+#ifdef CONFIG_PAX_EMUPLT
7594+ case 3:
7595+ return;
7596+#endif
7597+
7598+#ifdef CONFIG_PAX_EMUTRAMP
7599+ case 2:
7600+ return;
7601+#endif
7602+
7603+ }
7604+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7605+ do_group_exit(SIGKILL);
7606+ }
7607+#endif
7608+
7609 goto bad_area;
7610+ }
7611
7612 /*
7613 * If for any reason at all we couldn't handle the fault, make
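
pax_handle_fetch_fault() is a whitelist: on an instruction-fetch fault from a non-executable mapping it reads a few words at the PC with get_user() and emulates only two byte-exact sequences, the unpatched PLT stub (redirecting iaoq to the runtime resolver) and the rt_sigreturn trampoline (loading the syscall number and entering the gateway page at 0x100); anything else returns 1 and the task is killed. The matching style, stripped to its core (helper name invented; the PLT case omits the secondary ldw/bv/ldw2 checks shown above):

    /* sketch: emulate only byte-exact known trampolines, kill otherwise */
    static int classify(const unsigned int insn[4])
    {
        if (insn[0] == 0xEA9F1FDDU && insn[1] == 0xD6801C1EU)
            return 3;    /* unpatched PLT stub: hand off to the resolver */

        if ((insn[0] == 0x34190000U || insn[0] == 0x34190002U) &&
            insn[1] == 0x3414015AU &&
            insn[2] == 0xE4008200U &&
            insn[3] == 0x08000240U)
            return 2;    /* rt_sigreturn trampoline: emulate the syscall */

        return 1;        /* unknown code on a no-exec page: kill the task */
    }
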
7614diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7615index a2a168e..e484682 100644
7616--- a/arch/powerpc/Kconfig
7617+++ b/arch/powerpc/Kconfig
7618@@ -408,6 +408,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7619 config KEXEC
7620 bool "kexec system call"
7621 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7622+ depends on !GRKERNSEC_KMEM
7623 help
7624 kexec is a system call that implements the ability to shutdown your
7625 current kernel, and to start another kernel. It is like a reboot
7626diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7627index 512d278..d31fadd 100644
7628--- a/arch/powerpc/include/asm/atomic.h
7629+++ b/arch/powerpc/include/asm/atomic.h
7630@@ -12,6 +12,11 @@
7631
7632 #define ATOMIC_INIT(i) { (i) }
7633
7634+#define _ASM_EXTABLE(from, to) \
7635+" .section __ex_table,\"a\"\n" \
7636+ PPC_LONG" " #from ", " #to"\n" \
7637+" .previous\n"
7638+
7639 static __inline__ int atomic_read(const atomic_t *v)
7640 {
7641 int t;
7642@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7643 return t;
7644 }
7645
7646+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7647+{
7648+ int t;
7649+
7650+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7651+
7652+ return t;
7653+}
7654+
7655 static __inline__ void atomic_set(atomic_t *v, int i)
7656 {
7657 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7658 }
7659
7660-#define ATOMIC_OP(op, asm_op) \
7661-static __inline__ void atomic_##op(int a, atomic_t *v) \
7662+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7663+{
7664+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7665+}
7666+
7667+#ifdef CONFIG_PAX_REFCOUNT
7668+#define __REFCOUNT_OP(op) op##o.
7669+#define __OVERFLOW_PRE \
7670+ " mcrxr cr0\n"
7671+#define __OVERFLOW_POST \
7672+ " bf 4*cr0+so, 3f\n" \
7673+ "2: .long 0x00c00b00\n" \
7674+ "3:\n"
7675+#define __OVERFLOW_EXTABLE \
7676+ "\n4:\n" \
7677+ _ASM_EXTABLE(2b, 4b)
7678+#else
7679+#define __REFCOUNT_OP(op) op
7680+#define __OVERFLOW_PRE
7681+#define __OVERFLOW_POST
7682+#define __OVERFLOW_EXTABLE
7683+#endif
7684+
7685+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7686+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7687 { \
7688 int t; \
7689 \
7690 __asm__ __volatile__( \
7691-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7692+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7693+ pre_op \
7694 #asm_op " %0,%2,%0\n" \
7695+ post_op \
7696 PPC405_ERR77(0,%3) \
7697 " stwcx. %0,0,%3 \n" \
7698 " bne- 1b\n" \
7699+ extable \
7700 : "=&r" (t), "+m" (v->counter) \
7701 : "r" (a), "r" (&v->counter) \
7702 : "cc"); \
7703 } \
7704
7705-#define ATOMIC_OP_RETURN(op, asm_op) \
7706-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7707+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7708+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7709+
7710+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7711+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7712 { \
7713 int t; \
7714 \
7715 __asm__ __volatile__( \
7716 PPC_ATOMIC_ENTRY_BARRIER \
7717-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7718+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7719+ pre_op \
7720 #asm_op " %0,%1,%0\n" \
7721+ post_op \
7722 PPC405_ERR77(0,%2) \
7723 " stwcx. %0,0,%2 \n" \
7724 " bne- 1b\n" \
7725+ extable \
7726 PPC_ATOMIC_EXIT_BARRIER \
7727 : "=&r" (t) \
7728 : "r" (a), "r" (&v->counter) \
7729@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7730 return t; \
7731 }
7732
7733+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7734+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7735+
7736 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7737
7738 ATOMIC_OPS(add, add)
7739@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7740
7741 #undef ATOMIC_OPS
7742 #undef ATOMIC_OP_RETURN
7743+#undef __ATOMIC_OP_RETURN
7744 #undef ATOMIC_OP
7745+#undef __ATOMIC_OP
7746
7747 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7748
7749-static __inline__ void atomic_inc(atomic_t *v)
7750-{
7751- int t;
7752+/*
7753+ * atomic_inc - increment atomic variable
7754+ * @v: pointer of type atomic_t
7755+ *
7756+ * Atomically increments @v by 1
7757+ */
7758+#define atomic_inc(v) atomic_add(1, (v))
7759+#define atomic_inc_return(v) atomic_add_return(1, (v))
7760
7761- __asm__ __volatile__(
7762-"1: lwarx %0,0,%2 # atomic_inc\n\
7763- addic %0,%0,1\n"
7764- PPC405_ERR77(0,%2)
7765-" stwcx. %0,0,%2 \n\
7766- bne- 1b"
7767- : "=&r" (t), "+m" (v->counter)
7768- : "r" (&v->counter)
7769- : "cc", "xer");
7770+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7771+{
7772+ atomic_add_unchecked(1, v);
7773 }
7774
7775-static __inline__ int atomic_inc_return(atomic_t *v)
7776+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7777 {
7778- int t;
7779-
7780- __asm__ __volatile__(
7781- PPC_ATOMIC_ENTRY_BARRIER
7782-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7783- addic %0,%0,1\n"
7784- PPC405_ERR77(0,%1)
7785-" stwcx. %0,0,%1 \n\
7786- bne- 1b"
7787- PPC_ATOMIC_EXIT_BARRIER
7788- : "=&r" (t)
7789- : "r" (&v->counter)
7790- : "cc", "xer", "memory");
7791-
7792- return t;
7793+ return atomic_add_return_unchecked(1, v);
7794 }
7795
7796 /*
7797@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7798 */
7799 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7800
7801-static __inline__ void atomic_dec(atomic_t *v)
7802+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7803 {
7804- int t;
7805-
7806- __asm__ __volatile__(
7807-"1: lwarx %0,0,%2 # atomic_dec\n\
7808- addic %0,%0,-1\n"
7809- PPC405_ERR77(0,%2)\
7810-" stwcx. %0,0,%2\n\
7811- bne- 1b"
7812- : "=&r" (t), "+m" (v->counter)
7813- : "r" (&v->counter)
7814- : "cc", "xer");
7815+ return atomic_add_return_unchecked(1, v) == 0;
7816 }
7817
7818-static __inline__ int atomic_dec_return(atomic_t *v)
7819+/*
7820+ * atomic_dec - decrement atomic variable
7821+ * @v: pointer of type atomic_t
7822+ *
7823+ * Atomically decrements @v by 1
7824+ */
7825+#define atomic_dec(v) atomic_sub(1, (v))
7826+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7827+
7828+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7829 {
7830- int t;
7831-
7832- __asm__ __volatile__(
7833- PPC_ATOMIC_ENTRY_BARRIER
7834-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7835- addic %0,%0,-1\n"
7836- PPC405_ERR77(0,%1)
7837-" stwcx. %0,0,%1\n\
7838- bne- 1b"
7839- PPC_ATOMIC_EXIT_BARRIER
7840- : "=&r" (t)
7841- : "r" (&v->counter)
7842- : "cc", "xer", "memory");
7843-
7844- return t;
7845+ atomic_sub_unchecked(1, v);
7846 }
7847
7848 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7849 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7850
7851+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7852+{
7853+ return cmpxchg(&(v->counter), old, new);
7854+}
7855+
7856+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7857+{
7858+ return xchg(&(v->counter), new);
7859+}
7860+
7861 /**
7862 * __atomic_add_unless - add unless the number is a given value
7863 * @v: pointer of type atomic_t
7864@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7865 PPC_ATOMIC_ENTRY_BARRIER
7866 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7867 cmpw 0,%0,%3 \n\
7868- beq- 2f \n\
7869- add %0,%2,%0 \n"
7870+ beq- 2f \n"
7871+
7872+#ifdef CONFIG_PAX_REFCOUNT
7873+" mcrxr cr0\n"
7874+" addo. %0,%2,%0\n"
7875+" bf 4*cr0+so, 4f\n"
7876+"3:.long " "0x00c00b00""\n"
7877+"4:\n"
7878+#else
7879+ "add %0,%2,%0 \n"
7880+#endif
7881+
7882 PPC405_ERR77(0,%2)
7883 " stwcx. %0,0,%1 \n\
7884 bne- 1b \n"
7885+"5:"
7886+
7887+#ifdef CONFIG_PAX_REFCOUNT
7888+ _ASM_EXTABLE(3b, 5b)
7889+#endif
7890+
7891 PPC_ATOMIC_EXIT_BARRIER
7892 " subf %0,%2,%0 \n\
7893 2:"
7894@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
7895 }
7896 #define atomic_dec_if_positive atomic_dec_if_positive
7897
7898+#define smp_mb__before_atomic_dec() smp_mb()
7899+#define smp_mb__after_atomic_dec() smp_mb()
7900+#define smp_mb__before_atomic_inc() smp_mb()
7901+#define smp_mb__after_atomic_inc() smp_mb()
7902+
7903 #ifdef __powerpc64__
7904
7905 #define ATOMIC64_INIT(i) { (i) }
7906@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
7907 return t;
7908 }
7909
7910+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7911+{
7912+ long t;
7913+
7914+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7915+
7916+ return t;
7917+}
7918+
7919 static __inline__ void atomic64_set(atomic64_t *v, long i)
7920 {
7921 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7922 }
7923
7924-#define ATOMIC64_OP(op, asm_op) \
7925-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
7926+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7927+{
7928+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7929+}
7930+
7931+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7932+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
7933 { \
7934 long t; \
7935 \
7936 __asm__ __volatile__( \
7937 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
7938+ pre_op \
7939 #asm_op " %0,%2,%0\n" \
7940+ post_op \
7941 " stdcx. %0,0,%3 \n" \
7942 " bne- 1b\n" \
7943+ extable \
7944 : "=&r" (t), "+m" (v->counter) \
7945 : "r" (a), "r" (&v->counter) \
7946 : "cc"); \
7947 }
7948
7949-#define ATOMIC64_OP_RETURN(op, asm_op) \
7950-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
7951+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
7952+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7953+
7954+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7955+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
7956 { \
7957 long t; \
7958 \
7959 __asm__ __volatile__( \
7960 PPC_ATOMIC_ENTRY_BARRIER \
7961 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
7962+ pre_op \
7963 #asm_op " %0,%1,%0\n" \
7964+ post_op \
7965 " stdcx. %0,0,%2 \n" \
7966 " bne- 1b\n" \
7967+ extable \
7968 PPC_ATOMIC_EXIT_BARRIER \
7969 : "=&r" (t) \
7970 : "r" (a), "r" (&v->counter) \
7971@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
7972 return t; \
7973 }
7974
7975+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
7976+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7977+
7978 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
7979
7980 ATOMIC64_OPS(add, add)
7981@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
7982
7983 #undef ATOMIC64_OPS
7984 #undef ATOMIC64_OP_RETURN
7985+#undef __ATOMIC64_OP_RETURN
7986 #undef ATOMIC64_OP
7987+#undef __ATOMIC64_OP
7988+#undef __OVERFLOW_EXTABLE
7989+#undef __OVERFLOW_POST
7990+#undef __OVERFLOW_PRE
7991+#undef __REFCOUNT_OP
7992
7993 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
7994
7995-static __inline__ void atomic64_inc(atomic64_t *v)
7996-{
7997- long t;
7998+/*
7999+ * atomic64_inc - increment atomic variable
8000+ * @v: pointer of type atomic64_t
8001+ *
8002+ * Atomically increments @v by 1
8003+ */
8004+#define atomic64_inc(v) atomic64_add(1, (v))
8005+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8006
8007- __asm__ __volatile__(
8008-"1: ldarx %0,0,%2 # atomic64_inc\n\
8009- addic %0,%0,1\n\
8010- stdcx. %0,0,%2 \n\
8011- bne- 1b"
8012- : "=&r" (t), "+m" (v->counter)
8013- : "r" (&v->counter)
8014- : "cc", "xer");
8015+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8016+{
8017+ atomic64_add_unchecked(1, v);
8018 }
8019
8020-static __inline__ long atomic64_inc_return(atomic64_t *v)
8021+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8022 {
8023- long t;
8024-
8025- __asm__ __volatile__(
8026- PPC_ATOMIC_ENTRY_BARRIER
8027-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8028- addic %0,%0,1\n\
8029- stdcx. %0,0,%1 \n\
8030- bne- 1b"
8031- PPC_ATOMIC_EXIT_BARRIER
8032- : "=&r" (t)
8033- : "r" (&v->counter)
8034- : "cc", "xer", "memory");
8035-
8036- return t;
8037+ return atomic64_add_return_unchecked(1, v);
8038 }
8039
8040 /*
8041@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8042 */
8043 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8044
8045-static __inline__ void atomic64_dec(atomic64_t *v)
8046+/*
8047+ * atomic64_dec - decrement atomic variable
8048+ * @v: pointer of type atomic64_t
8049+ *
8050+ * Atomically decrements @v by 1
8051+ */
8052+#define atomic64_dec(v) atomic64_sub(1, (v))
8053+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8054+
8055+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8056 {
8057- long t;
8058-
8059- __asm__ __volatile__(
8060-"1: ldarx %0,0,%2 # atomic64_dec\n\
8061- addic %0,%0,-1\n\
8062- stdcx. %0,0,%2\n\
8063- bne- 1b"
8064- : "=&r" (t), "+m" (v->counter)
8065- : "r" (&v->counter)
8066- : "cc", "xer");
8067-}
8068-
8069-static __inline__ long atomic64_dec_return(atomic64_t *v)
8070-{
8071- long t;
8072-
8073- __asm__ __volatile__(
8074- PPC_ATOMIC_ENTRY_BARRIER
8075-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8076- addic %0,%0,-1\n\
8077- stdcx. %0,0,%1\n\
8078- bne- 1b"
8079- PPC_ATOMIC_EXIT_BARRIER
8080- : "=&r" (t)
8081- : "r" (&v->counter)
8082- : "cc", "xer", "memory");
8083-
8084- return t;
8085+ atomic64_sub_unchecked(1, v);
8086 }
8087
8088 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8089@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8090 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8091 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8092
8093+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8094+{
8095+ return cmpxchg(&(v->counter), old, new);
8096+}
8097+
8098+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8099+{
8100+ return xchg(&(v->counter), new);
8101+}
8102+
8103 /**
8104 * atomic64_add_unless - add unless the number is a given value
8105 * @v: pointer of type atomic64_t
8106@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8107
8108 __asm__ __volatile__ (
8109 PPC_ATOMIC_ENTRY_BARRIER
8110-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8111+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8112 cmpd 0,%0,%3 \n\
8113- beq- 2f \n\
8114- add %0,%2,%0 \n"
8115+ beq- 2f \n"
8116+
8117+#ifdef CONFIG_PAX_REFCOUNT
8118+" mcrxr cr0\n"
8119+" addo. %0,%2,%0\n"
8120+" bf 4*cr0+so, 4f\n"
8121+"3:.long " "0x00c00b00""\n"
8122+"4:\n"
8123+#else
8124+ "add %0,%2,%0 \n"
8125+#endif
8126+
8127 " stdcx. %0,0,%1 \n\
8128 bne- 1b \n"
8129 PPC_ATOMIC_EXIT_BARRIER
8130+"5:"
8131+
8132+#ifdef CONFIG_PAX_REFCOUNT
8133+ _ASM_EXTABLE(3b, 5b)
8134+#endif
8135+
8136 " subf %0,%2,%0 \n\
8137 2:"
8138 : "=&r" (t)
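
The powerpc side generates both flavors from one template: __REFCOUNT_OP rewrites add into addo. (the overflow-recording, CR0-updating form), __OVERFLOW_PRE's mcrxr cr0 clears XER's sticky summary-overflow bit so only this operation's overflow is observed, __OVERFLOW_POST branches over a trap word (.long 0x00c00b00) when CR0[SO] is clear, and __OVERFLOW_EXTABLE registers the trap site so the handler can report and resume; the _unchecked expansion passes all four hooks empty. The semantics of each checked op, rendered as C (helper name invented; the real code is the lwarx/stwcx. asm above):

    /* C rendering of the checked ll/sc loop */
    static int atomic_add_checked(int a, volatile int *ctr)
    {
        int old, new;
        do {
            old = *ctr;                          /* lwarx            */
            if (__builtin_add_overflow(old, a, &new))
                return -1;                       /* addo. overflowed */
        } while (!__sync_bool_compare_and_swap(ctr, old, new)); /* stwcx. */
        return new;
    }
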
8139diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8140index a3bf5be..e03ba81 100644
8141--- a/arch/powerpc/include/asm/barrier.h
8142+++ b/arch/powerpc/include/asm/barrier.h
8143@@ -76,7 +76,7 @@
8144 do { \
8145 compiletime_assert_atomic_type(*p); \
8146 smp_lwsync(); \
8147- ACCESS_ONCE(*p) = (v); \
8148+ ACCESS_ONCE_RW(*p) = (v); \
8149 } while (0)
8150
8151 #define smp_load_acquire(p) \
8152diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8153index 34a05a1..a1f2c67 100644
8154--- a/arch/powerpc/include/asm/cache.h
8155+++ b/arch/powerpc/include/asm/cache.h
8156@@ -4,6 +4,7 @@
8157 #ifdef __KERNEL__
8158
8159 #include <asm/reg.h>
8160+#include <linux/const.h>
8161
8162 /* bytes per L1 cache line */
8163 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8164@@ -23,7 +24,7 @@
8165 #define L1_CACHE_SHIFT 7
8166 #endif
8167
8168-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8169+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8170
8171 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8172
8173diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8174index 57d289a..b36c98c 100644
8175--- a/arch/powerpc/include/asm/elf.h
8176+++ b/arch/powerpc/include/asm/elf.h
8177@@ -30,6 +30,18 @@
8178
8179 #define ELF_ET_DYN_BASE 0x20000000
8180
8181+#ifdef CONFIG_PAX_ASLR
8182+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8183+
8184+#ifdef __powerpc64__
8185+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8186+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8187+#else
8188+#define PAX_DELTA_MMAP_LEN 15
8189+#define PAX_DELTA_STACK_LEN 15
8190+#endif
8191+#endif
8192+
8193 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8194
8195 /*
8196@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8197 (0x7ff >> (PAGE_SHIFT - 12)) : \
8198 (0x3ffff >> (PAGE_SHIFT - 12)))
8199
8200-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8201-#define arch_randomize_brk arch_randomize_brk
8202-
8203-
8204 #ifdef CONFIG_SPU_BASE
8205 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8206 #define NT_SPU 1
8207diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8208index 8196e9c..d83a9f3 100644
8209--- a/arch/powerpc/include/asm/exec.h
8210+++ b/arch/powerpc/include/asm/exec.h
8211@@ -4,6 +4,6 @@
8212 #ifndef _ASM_POWERPC_EXEC_H
8213 #define _ASM_POWERPC_EXEC_H
8214
8215-extern unsigned long arch_align_stack(unsigned long sp);
8216+#define arch_align_stack(x) ((x) & ~0xfUL)
8217
8218 #endif /* _ASM_POWERPC_EXEC_H */
8219diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8220index 5acabbd..7ea14fa 100644
8221--- a/arch/powerpc/include/asm/kmap_types.h
8222+++ b/arch/powerpc/include/asm/kmap_types.h
8223@@ -10,7 +10,7 @@
8224 * 2 of the License, or (at your option) any later version.
8225 */
8226
8227-#define KM_TYPE_NR 16
8228+#define KM_TYPE_NR 17
8229
8230 #endif /* __KERNEL__ */
8231 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8232diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8233index b8da913..c02b593 100644
8234--- a/arch/powerpc/include/asm/local.h
8235+++ b/arch/powerpc/include/asm/local.h
8236@@ -9,21 +9,65 @@ typedef struct
8237 atomic_long_t a;
8238 } local_t;
8239
8240+typedef struct
8241+{
8242+ atomic_long_unchecked_t a;
8243+} local_unchecked_t;
8244+
8245 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8246
8247 #define local_read(l) atomic_long_read(&(l)->a)
8248+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8249 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8250+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8251
8252 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8253+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8254 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8255+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8256 #define local_inc(l) atomic_long_inc(&(l)->a)
8257+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8258 #define local_dec(l) atomic_long_dec(&(l)->a)
8259+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8260
8261 static __inline__ long local_add_return(long a, local_t *l)
8262 {
8263 long t;
8264
8265 __asm__ __volatile__(
8266+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8267+
8268+#ifdef CONFIG_PAX_REFCOUNT
8269+" mcrxr cr0\n"
8270+" addo. %0,%1,%0\n"
8271+" bf 4*cr0+so, 3f\n"
8272+"2:.long " "0x00c00b00""\n"
8273+#else
8274+" add %0,%1,%0\n"
8275+#endif
8276+
8277+"3:\n"
8278+ PPC405_ERR77(0,%2)
8279+ PPC_STLCX "%0,0,%2 \n\
8280+ bne- 1b"
8281+
8282+#ifdef CONFIG_PAX_REFCOUNT
8283+"\n4:\n"
8284+ _ASM_EXTABLE(2b, 4b)
8285+#endif
8286+
8287+ : "=&r" (t)
8288+ : "r" (a), "r" (&(l->a.counter))
8289+ : "cc", "memory");
8290+
8291+ return t;
8292+}
8293+
8294+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8295+{
8296+ long t;
8297+
8298+ __asm__ __volatile__(
8299 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8300 add %0,%1,%0\n"
8301 PPC405_ERR77(0,%2)
8302@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8303
8304 #define local_cmpxchg(l, o, n) \
8305 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8306+#define local_cmpxchg_unchecked(l, o, n) \
8307+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8308 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8309
8310 /**
8311diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8312index 8565c25..2865190 100644
8313--- a/arch/powerpc/include/asm/mman.h
8314+++ b/arch/powerpc/include/asm/mman.h
8315@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8316 }
8317 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8318
8319-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8320+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8321 {
8322 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8323 }
8324diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8325index 69c0598..2c56964 100644
8326--- a/arch/powerpc/include/asm/page.h
8327+++ b/arch/powerpc/include/asm/page.h
8328@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8329 * and needs to be executable. This means the whole heap ends
8330 * up being executable.
8331 */
8332-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8333- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8334+#define VM_DATA_DEFAULT_FLAGS32 \
8335+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8336+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8337
8338 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8339 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8340@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8341 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8342 #endif
8343
8344+#define ktla_ktva(addr) (addr)
8345+#define ktva_ktla(addr) (addr)
8346+
8347 #ifndef CONFIG_PPC_BOOK3S_64
8348 /*
8349 * Use the top bit of the higher-level page table entries to indicate whether
8350diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8351index d908a46..3753f71 100644
8352--- a/arch/powerpc/include/asm/page_64.h
8353+++ b/arch/powerpc/include/asm/page_64.h
8354@@ -172,15 +172,18 @@ do { \
8355 * stack by default, so in the absence of a PT_GNU_STACK program header
8356 * we turn execute permission off.
8357 */
8358-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8359- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8360+#define VM_STACK_DEFAULT_FLAGS32 \
8361+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8362+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8363
8364 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8365 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8366
8367+#ifndef CONFIG_PAX_PAGEEXEC
8368 #define VM_STACK_DEFAULT_FLAGS \
8369 (is_32bit_task() ? \
8370 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8371+#endif
8372
8373 #include <asm-generic/getorder.h>
8374
8375diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8376index 4b0be20..c15a27d 100644
8377--- a/arch/powerpc/include/asm/pgalloc-64.h
8378+++ b/arch/powerpc/include/asm/pgalloc-64.h
8379@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8380 #ifndef CONFIG_PPC_64K_PAGES
8381
8382 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8383+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8384
8385 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8386 {
8387@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8388 pud_set(pud, (unsigned long)pmd);
8389 }
8390
8391+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8392+{
8393+ pud_populate(mm, pud, pmd);
8394+}
8395+
8396 #define pmd_populate(mm, pmd, pte_page) \
8397 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8398 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8399@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8400 #endif
8401
8402 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8403+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8404
8405 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8406 pte_t *pte)
8407diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8408index a8805fe..6d69617 100644
8409--- a/arch/powerpc/include/asm/pgtable.h
8410+++ b/arch/powerpc/include/asm/pgtable.h
8411@@ -2,6 +2,7 @@
8412 #define _ASM_POWERPC_PGTABLE_H
8413 #ifdef __KERNEL__
8414
8415+#include <linux/const.h>
8416 #ifndef __ASSEMBLY__
8417 #include <linux/mmdebug.h>
8418 #include <linux/mmzone.h>
8419diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8420index 4aad413..85d86bf 100644
8421--- a/arch/powerpc/include/asm/pte-hash32.h
8422+++ b/arch/powerpc/include/asm/pte-hash32.h
8423@@ -21,6 +21,7 @@
8424 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8425 #define _PAGE_USER 0x004 /* usermode access allowed */
8426 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8427+#define _PAGE_EXEC _PAGE_GUARDED
8428 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8429 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8430 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8431diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8432index 1c874fb..e8480a4 100644
8433--- a/arch/powerpc/include/asm/reg.h
8434+++ b/arch/powerpc/include/asm/reg.h
8435@@ -253,6 +253,7 @@
8436 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8437 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8438 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8439+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8440 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8441 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8442 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8443diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8444index 5a6614a..d89995d1 100644
8445--- a/arch/powerpc/include/asm/smp.h
8446+++ b/arch/powerpc/include/asm/smp.h
8447@@ -51,7 +51,7 @@ struct smp_ops_t {
8448 int (*cpu_disable)(void);
8449 void (*cpu_die)(unsigned int nr);
8450 int (*cpu_bootable)(unsigned int nr);
8451-};
8452+} __no_const;
8453
8454 extern void smp_send_debugger_break(void);
8455 extern void start_secondary_resume(void);
8456diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8457index 4dbe072..b803275 100644
8458--- a/arch/powerpc/include/asm/spinlock.h
8459+++ b/arch/powerpc/include/asm/spinlock.h
8460@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8461 __asm__ __volatile__(
8462 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8463 __DO_SIGN_EXTEND
8464-" addic. %0,%0,1\n\
8465- ble- 2f\n"
8466+
8467+#ifdef CONFIG_PAX_REFCOUNT
8468+" mcrxr cr0\n"
8469+" addico. %0,%0,1\n"
8470+" bf 4*cr0+so, 3f\n"
8471+"2:.long " "0x00c00b00""\n"
8472+#else
8473+" addic. %0,%0,1\n"
8474+#endif
8475+
8476+"3:\n"
8477+ "ble- 4f\n"
8478 PPC405_ERR77(0,%1)
8479 " stwcx. %0,0,%1\n\
8480 bne- 1b\n"
8481 PPC_ACQUIRE_BARRIER
8482-"2:" : "=&r" (tmp)
8483+"4:"
8484+
8485+#ifdef CONFIG_PAX_REFCOUNT
8486+ _ASM_EXTABLE(2b,4b)
8487+#endif
8488+
8489+ : "=&r" (tmp)
8490 : "r" (&rw->lock)
8491 : "cr0", "xer", "memory");
8492
8493@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8494 __asm__ __volatile__(
8495 "# read_unlock\n\t"
8496 PPC_RELEASE_BARRIER
8497-"1: lwarx %0,0,%1\n\
8498- addic %0,%0,-1\n"
8499+"1: lwarx %0,0,%1\n"
8500+
8501+#ifdef CONFIG_PAX_REFCOUNT
8502+" mcrxr cr0\n"
8503+" addico. %0,%0,-1\n"
8504+" bf 4*cr0+so, 3f\n"
8505+"2:.long " "0x00c00b00""\n"
8506+#else
8507+" addic. %0,%0,-1\n"
8508+#endif
8509+
8510+"3:\n"
8511 PPC405_ERR77(0,%1)
8512 " stwcx. %0,0,%1\n\
8513 bne- 1b"
8514+
8515+#ifdef CONFIG_PAX_REFCOUNT
8516+"\n4:\n"
8517+ _ASM_EXTABLE(2b, 4b)
8518+#endif
8519+
8520 : "=&r"(tmp)
8521 : "r"(&rw->lock)
8522 : "cr0", "xer", "memory");
8523diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8524index 0be6c68..9c3c6ee 100644
8525--- a/arch/powerpc/include/asm/thread_info.h
8526+++ b/arch/powerpc/include/asm/thread_info.h
8527@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
8528 #if defined(CONFIG_PPC64)
8529 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8530 #endif
8531+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8532+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8533
8534 /* as above, but as bit values */
8535 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8536@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
8537 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8538 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8539 #define _TIF_NOHZ (1<<TIF_NOHZ)
8540+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8541 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8542 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8543- _TIF_NOHZ)
8544+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8545
8546 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8547 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
8548diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8549index a0c071d..49cdc7f 100644
8550--- a/arch/powerpc/include/asm/uaccess.h
8551+++ b/arch/powerpc/include/asm/uaccess.h
8552@@ -58,6 +58,7 @@
8553
8554 #endif
8555
8556+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8557 #define access_ok(type, addr, size) \
8558 (__chk_user_ptr(addr), \
8559 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8560@@ -318,52 +319,6 @@ do { \
8561 extern unsigned long __copy_tofrom_user(void __user *to,
8562 const void __user *from, unsigned long size);
8563
8564-#ifndef __powerpc64__
8565-
8566-static inline unsigned long copy_from_user(void *to,
8567- const void __user *from, unsigned long n)
8568-{
8569- unsigned long over;
8570-
8571- if (access_ok(VERIFY_READ, from, n))
8572- return __copy_tofrom_user((__force void __user *)to, from, n);
8573- if ((unsigned long)from < TASK_SIZE) {
8574- over = (unsigned long)from + n - TASK_SIZE;
8575- return __copy_tofrom_user((__force void __user *)to, from,
8576- n - over) + over;
8577- }
8578- return n;
8579-}
8580-
8581-static inline unsigned long copy_to_user(void __user *to,
8582- const void *from, unsigned long n)
8583-{
8584- unsigned long over;
8585-
8586- if (access_ok(VERIFY_WRITE, to, n))
8587- return __copy_tofrom_user(to, (__force void __user *)from, n);
8588- if ((unsigned long)to < TASK_SIZE) {
8589- over = (unsigned long)to + n - TASK_SIZE;
8590- return __copy_tofrom_user(to, (__force void __user *)from,
8591- n - over) + over;
8592- }
8593- return n;
8594-}
8595-
8596-#else /* __powerpc64__ */
8597-
8598-#define __copy_in_user(to, from, size) \
8599- __copy_tofrom_user((to), (from), (size))
8600-
8601-extern unsigned long copy_from_user(void *to, const void __user *from,
8602- unsigned long n);
8603-extern unsigned long copy_to_user(void __user *to, const void *from,
8604- unsigned long n);
8605-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8606- unsigned long n);
8607-
8608-#endif /* __powerpc64__ */
8609-
8610 static inline unsigned long __copy_from_user_inatomic(void *to,
8611 const void __user *from, unsigned long n)
8612 {
8613@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8614 if (ret == 0)
8615 return 0;
8616 }
8617+
8618+ if (!__builtin_constant_p(n))
8619+ check_object_size(to, n, false);
8620+
8621 return __copy_tofrom_user((__force void __user *)to, from, n);
8622 }
8623
8624@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8625 if (ret == 0)
8626 return 0;
8627 }
8628+
8629+ if (!__builtin_constant_p(n))
8630+ check_object_size(from, n, true);
8631+
8632 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8633 }
8634
8635@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8636 return __copy_to_user_inatomic(to, from, size);
8637 }
8638
8639+#ifndef __powerpc64__
8640+
8641+static inline unsigned long __must_check copy_from_user(void *to,
8642+ const void __user *from, unsigned long n)
8643+{
8644+ unsigned long over;
8645+
8646+ if ((long)n < 0)
8647+ return n;
8648+
8649+ if (access_ok(VERIFY_READ, from, n)) {
8650+ if (!__builtin_constant_p(n))
8651+ check_object_size(to, n, false);
8652+ return __copy_tofrom_user((__force void __user *)to, from, n);
8653+ }
8654+ if ((unsigned long)from < TASK_SIZE) {
8655+ over = (unsigned long)from + n - TASK_SIZE;
8656+ if (!__builtin_constant_p(n - over))
8657+ check_object_size(to, n - over, false);
8658+ return __copy_tofrom_user((__force void __user *)to, from,
8659+ n - over) + over;
8660+ }
8661+ return n;
8662+}
8663+
8664+static inline unsigned long __must_check copy_to_user(void __user *to,
8665+ const void *from, unsigned long n)
8666+{
8667+ unsigned long over;
8668+
8669+ if ((long)n < 0)
8670+ return n;
8671+
8672+ if (access_ok(VERIFY_WRITE, to, n)) {
8673+ if (!__builtin_constant_p(n))
8674+ check_object_size(from, n, true);
8675+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8676+ }
8677+ if ((unsigned long)to < TASK_SIZE) {
8678+ over = (unsigned long)to + n - TASK_SIZE;
8679+ if (!__builtin_constant_p(n - over))
8680+ check_object_size(from, n - over, true);
8681+ return __copy_tofrom_user(to, (__force void __user *)from,
8682+ n - over) + over;
8683+ }
8684+ return n;
8685+}
8686+
8687+#else /* __powerpc64__ */
8688+
8689+#define __copy_in_user(to, from, size) \
8690+ __copy_tofrom_user((to), (from), (size))
8691+
8692+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8693+{
8694+ if ((long)n < 0 || n > INT_MAX)
8695+ return n;
8696+
8697+ if (!__builtin_constant_p(n))
8698+ check_object_size(to, n, false);
8699+
8700+ if (likely(access_ok(VERIFY_READ, from, n)))
8701+ n = __copy_from_user(to, from, n);
8702+ else
8703+ memset(to, 0, n);
8704+ return n;
8705+}
8706+
8707+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8708+{
8709+ if ((long)n < 0 || n > INT_MAX)
8710+ return n;
8711+
8712+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8713+ if (!__builtin_constant_p(n))
8714+ check_object_size(from, n, true);
8715+ n = __copy_to_user(to, from, n);
8716+ }
8717+ return n;
8718+}
8719+
8720+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8721+ unsigned long n);
8722+
8723+#endif /* __powerpc64__ */
8724+
8725 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8726
8727 static inline unsigned long clear_user(void __user *addr, unsigned long size)
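Two hardenings recur in the rewritten copy helpers above: lengths with the sign bit set (almost always the product of a size_t underflow) are bounced back before any bytes move, and non-constant lengths go through check_object_size() so a copy cannot overrun the bounds of the destination slab or stack object. A hedged caller-side sketch of what that buys; example_read() and its 64-byte buffer are invented:

#include <linux/uaccess.h>

static long example_read(const void __user *ubuf, size_t len)
{
	char kbuf[64];

	if (len > sizeof(kbuf))		/* normal caller-side bound */
		return -EINVAL;

	/* len is not a compile-time constant, so the copy_from_user()
	 * above runs check_object_size(kbuf, len, false) first; a len
	 * that underflowed would also fail the (long)n < 0 test. */
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;

	return 0;
}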
8728diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8729index 502cf69..53936a1 100644
8730--- a/arch/powerpc/kernel/Makefile
8731+++ b/arch/powerpc/kernel/Makefile
8732@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8733 CFLAGS_btext.o += -fPIC
8734 endif
8735
8736+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8737+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8738+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8739+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8740+
8741 ifdef CONFIG_FUNCTION_TRACER
8742 # Do not trace early boot code
8743 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8744@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8745 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8746 endif
8747
8748+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8749+
8750 obj-y := cputable.o ptrace.o syscalls.o \
8751 irq.o align.o signal_32.o pmc.o vdso.o \
8752 process.o systbl.o idle.o \
8753diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8754index 3e68d1c..72a5ee6 100644
8755--- a/arch/powerpc/kernel/exceptions-64e.S
8756+++ b/arch/powerpc/kernel/exceptions-64e.S
8757@@ -1010,6 +1010,7 @@ storage_fault_common:
8758 std r14,_DAR(r1)
8759 std r15,_DSISR(r1)
8760 addi r3,r1,STACK_FRAME_OVERHEAD
8761+ bl save_nvgprs
8762 mr r4,r14
8763 mr r5,r15
8764 ld r14,PACA_EXGEN+EX_R14(r13)
8765@@ -1018,8 +1019,7 @@ storage_fault_common:
8766 cmpdi r3,0
8767 bne- 1f
8768 b ret_from_except_lite
8769-1: bl save_nvgprs
8770- mr r5,r3
8771+1: mr r5,r3
8772 addi r3,r1,STACK_FRAME_OVERHEAD
8773 ld r4,_DAR(r1)
8774 bl bad_page_fault
8775diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8776index c2df815..bae3d12 100644
8777--- a/arch/powerpc/kernel/exceptions-64s.S
8778+++ b/arch/powerpc/kernel/exceptions-64s.S
8779@@ -1599,10 +1599,10 @@ handle_page_fault:
8780 11: ld r4,_DAR(r1)
8781 ld r5,_DSISR(r1)
8782 addi r3,r1,STACK_FRAME_OVERHEAD
8783+ bl save_nvgprs
8784 bl do_page_fault
8785 cmpdi r3,0
8786 beq+ 12f
8787- bl save_nvgprs
8788 mr r5,r3
8789 addi r3,r1,STACK_FRAME_OVERHEAD
8790 lwz r4,_DAR(r1)
8791diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8792index 4509603..cdb491f 100644
8793--- a/arch/powerpc/kernel/irq.c
8794+++ b/arch/powerpc/kernel/irq.c
8795@@ -460,6 +460,8 @@ void migrate_irqs(void)
8796 }
8797 #endif
8798
8799+extern void gr_handle_kernel_exploit(void);
8800+
8801 static inline void check_stack_overflow(void)
8802 {
8803 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8804@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8805 pr_err("do_IRQ: stack overflow: %ld\n",
8806 sp - sizeof(struct thread_info));
8807 dump_stack();
8808+ gr_handle_kernel_exploit();
8809 }
8810 #endif
8811 }
8812diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8813index c94d2e0..992a9ce 100644
8814--- a/arch/powerpc/kernel/module_32.c
8815+++ b/arch/powerpc/kernel/module_32.c
8816@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8817 me->arch.core_plt_section = i;
8818 }
8819 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8820- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8821+ pr_err("Module $s doesn't contain .plt or .init.plt sections.\n", me->name);
8822 return -ENOEXEC;
8823 }
8824
8825@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8826
8827 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8828 /* Init, or core PLT? */
8829- if (location >= mod->module_core
8830- && location < mod->module_core + mod->core_size)
8831+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8832+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8833 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8834- else
8835+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8836+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8837 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8838+ else {
8839+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8840+ return ~0UL;
8841+ }
8842
8843 /* Find this entry, or if that fails, the next avail. entry */
8844 while (entry->jump[0]) {
8845@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8846 }
8847 #ifdef CONFIG_DYNAMIC_FTRACE
8848 module->arch.tramp =
8849- do_plt_call(module->module_core,
8850+ do_plt_call(module->module_core_rx,
8851 (unsigned long)ftrace_caller,
8852 sechdrs, module);
8853 #endif
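The PLT lookup above grows from one containment test to four because the patch splits every module into rx (code) and rw (data) mappings, each with its own core and init copy. A hedged helper equivalent to the inlined range tests; within_range() is invented for illustration:

static int within_range(const void *p, const void *base, unsigned long size)
{
	/* GNU C permits void * arithmetic, as the kernel relies on */
	return p >= base && p < base + size;
}

A location then belongs to the core PLT if it falls in either the rx or rw core range, to the init PLT if it falls in either init range, and is rejected as a bogus R_PPC_REL24 target otherwise.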
8854diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8855index b4cc7be..1fe8bb3 100644
8856--- a/arch/powerpc/kernel/process.c
8857+++ b/arch/powerpc/kernel/process.c
8858@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8859 * Lookup NIP late so we have the best chance of getting the
8860 * above info out without failing
8861 */
8862- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8863- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8864+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8865+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8866 #endif
8867 show_stack(current, (unsigned long *) regs->gpr[1]);
8868 if (!user_mode(regs))
8869@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8870 newsp = stack[0];
8871 ip = stack[STACK_FRAME_LR_SAVE];
8872 if (!firstframe || ip != lr) {
8873- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8874+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8875 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8876 if ((ip == rth) && curr_frame >= 0) {
8877- printk(" (%pS)",
8878+ printk(" (%pA)",
8879 (void *)current->ret_stack[curr_frame].ret);
8880 curr_frame--;
8881 }
8882@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8883 struct pt_regs *regs = (struct pt_regs *)
8884 (sp + STACK_FRAME_OVERHEAD);
8885 lr = regs->link;
8886- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
8887+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
8888 regs->trap, (void *)regs->nip, (void *)lr);
8889 firstframe = 1;
8890 }
8891@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
8892 mtspr(SPRN_CTRLT, ctrl);
8893 }
8894 #endif /* CONFIG_PPC64 */
8895-
8896-unsigned long arch_align_stack(unsigned long sp)
8897-{
8898- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8899- sp -= get_random_int() & ~PAGE_MASK;
8900- return sp & ~0xf;
8901-}
8902-
8903-static inline unsigned long brk_rnd(void)
8904-{
8905- unsigned long rnd = 0;
8906-
8907- /* 8MB for 32bit, 1GB for 64bit */
8908- if (is_32bit_task())
8909- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8910- else
8911- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8912-
8913- return rnd << PAGE_SHIFT;
8914-}
8915-
8916-unsigned long arch_randomize_brk(struct mm_struct *mm)
8917-{
8918- unsigned long base = mm->brk;
8919- unsigned long ret;
8920-
8921-#ifdef CONFIG_PPC_STD_MMU_64
8922- /*
8923- * If we are using 1TB segments and we are allowed to randomise
8924- * the heap, we can put it above 1TB so it is backed by a 1TB
8925- * segment. Otherwise the heap will be in the bottom 1TB
8926- * which always uses 256MB segments and this may result in a
8927- * performance penalty.
8928- */
8929- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8930- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8931-#endif
8932-
8933- ret = PAGE_ALIGN(base + brk_rnd());
8934-
8935- if (ret < mm->brk)
8936- return mm->brk;
8937-
8938- return ret;
8939-}
8940-
8941diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
8942index f21897b..28c0428 100644
8943--- a/arch/powerpc/kernel/ptrace.c
8944+++ b/arch/powerpc/kernel/ptrace.c
8945@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
8946 return ret;
8947 }
8948
8949+#ifdef CONFIG_GRKERNSEC_SETXID
8950+extern void gr_delayed_cred_worker(void);
8951+#endif
8952+
8953 /*
8954 * We must return the syscall number to actually look up in the table.
8955 * This can be -1L to skip running any syscall at all.
8956@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
8957
8958 secure_computing_strict(regs->gpr[0]);
8959
8960+#ifdef CONFIG_GRKERNSEC_SETXID
8961+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8962+ gr_delayed_cred_worker();
8963+#endif
8964+
8965 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
8966 tracehook_report_syscall_entry(regs))
8967 /*
8968@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
8969 {
8970 int step;
8971
8972+#ifdef CONFIG_GRKERNSEC_SETXID
8973+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8974+ gr_delayed_cred_worker();
8975+#endif
8976+
8977 audit_syscall_exit(regs);
8978
8979 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8980diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
8981index b171001..4ac7ac5 100644
8982--- a/arch/powerpc/kernel/signal_32.c
8983+++ b/arch/powerpc/kernel/signal_32.c
8984@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
8985 /* Save user registers on the stack */
8986 frame = &rt_sf->uc.uc_mcontext;
8987 addr = frame;
8988- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
8989+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8990 sigret = 0;
8991 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
8992 } else {
8993diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
8994index 2cb0c94..c0c0bc9 100644
8995--- a/arch/powerpc/kernel/signal_64.c
8996+++ b/arch/powerpc/kernel/signal_64.c
8997@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
8998 current->thread.fp_state.fpscr = 0;
8999
9000 /* Set up to return from userspace. */
9001- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9002+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9003 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9004 } else {
9005 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9006diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9007index e6595b7..24bde6e 100644
9008--- a/arch/powerpc/kernel/traps.c
9009+++ b/arch/powerpc/kernel/traps.c
9010@@ -36,6 +36,7 @@
9011 #include <linux/debugfs.h>
9012 #include <linux/ratelimit.h>
9013 #include <linux/context_tracking.h>
9014+#include <linux/uaccess.h>
9015
9016 #include <asm/emulated_ops.h>
9017 #include <asm/pgtable.h>
9018@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9019 return flags;
9020 }
9021
9022+extern void gr_handle_kernel_exploit(void);
9023+
9024 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9025 int signr)
9026 {
9027@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9028 panic("Fatal exception in interrupt");
9029 if (panic_on_oops)
9030 panic("Fatal exception");
9031+
9032+ gr_handle_kernel_exploit();
9033+
9034 do_exit(signr);
9035 }
9036
9037@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9038 enum ctx_state prev_state = exception_enter();
9039 unsigned int reason = get_reason(regs);
9040
9041+#ifdef CONFIG_PAX_REFCOUNT
9042+ unsigned int bkpt;
9043+ const struct exception_table_entry *entry;
9044+
9045+ if (reason & REASON_ILLEGAL) {
9046+ /* Check if PaX bad instruction */
9047+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9048+ current->thread.trap_nr = 0;
9049+ pax_report_refcount_overflow(regs);
9050+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9051+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9052+ regs->nip = entry->fixup;
9053+ return;
9054+ }
9055+ /* fixup_exception() could not handle */
9056+ goto bail;
9057+ }
9058+ }
9059+#endif
9060+
9061 /* We can now get here via a FP Unavailable exception if the core
9062 * has no FPU, in that case the reason flags will be 0 */
9063
9064diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9065index 305eb0d..accc5b40 100644
9066--- a/arch/powerpc/kernel/vdso.c
9067+++ b/arch/powerpc/kernel/vdso.c
9068@@ -34,6 +34,7 @@
9069 #include <asm/vdso.h>
9070 #include <asm/vdso_datapage.h>
9071 #include <asm/setup.h>
9072+#include <asm/mman.h>
9073
9074 #undef DEBUG
9075
9076@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9077 vdso_base = VDSO32_MBASE;
9078 #endif
9079
9080- current->mm->context.vdso_base = 0;
9081+ current->mm->context.vdso_base = ~0UL;
9082
9083 /* vDSO has a problem and was disabled, just don't "enable" it for the
9084 * process
9085@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9086 vdso_base = get_unmapped_area(NULL, vdso_base,
9087 (vdso_pages << PAGE_SHIFT) +
9088 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9089- 0, 0);
9090+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9091 if (IS_ERR_VALUE(vdso_base)) {
9092 rc = vdso_base;
9093 goto fail_mmapsem;
9094diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9095index c45eaab..5f41b57 100644
9096--- a/arch/powerpc/kvm/powerpc.c
9097+++ b/arch/powerpc/kvm/powerpc.c
9098@@ -1403,7 +1403,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9099 }
9100 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9101
9102-int kvm_arch_init(void *opaque)
9103+int kvm_arch_init(const void *opaque)
9104 {
9105 return 0;
9106 }
9107diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9108index 5eea6f3..5d10396 100644
9109--- a/arch/powerpc/lib/usercopy_64.c
9110+++ b/arch/powerpc/lib/usercopy_64.c
9111@@ -9,22 +9,6 @@
9112 #include <linux/module.h>
9113 #include <asm/uaccess.h>
9114
9115-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9116-{
9117- if (likely(access_ok(VERIFY_READ, from, n)))
9118- n = __copy_from_user(to, from, n);
9119- else
9120- memset(to, 0, n);
9121- return n;
9122-}
9123-
9124-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9125-{
9126- if (likely(access_ok(VERIFY_WRITE, to, n)))
9127- n = __copy_to_user(to, from, n);
9128- return n;
9129-}
9130-
9131 unsigned long copy_in_user(void __user *to, const void __user *from,
9132 unsigned long n)
9133 {
9134@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9135 return n;
9136 }
9137
9138-EXPORT_SYMBOL(copy_from_user);
9139-EXPORT_SYMBOL(copy_to_user);
9140 EXPORT_SYMBOL(copy_in_user);
9141
9142diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9143index 6154b0a..4de2b19 100644
9144--- a/arch/powerpc/mm/fault.c
9145+++ b/arch/powerpc/mm/fault.c
9146@@ -33,6 +33,10 @@
9147 #include <linux/ratelimit.h>
9148 #include <linux/context_tracking.h>
9149 #include <linux/hugetlb.h>
9150+#include <linux/slab.h>
9151+#include <linux/pagemap.h>
9152+#include <linux/compiler.h>
9153+#include <linux/unistd.h>
9154
9155 #include <asm/firmware.h>
9156 #include <asm/page.h>
9157@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9158 }
9159 #endif
9160
9161+#ifdef CONFIG_PAX_PAGEEXEC
9162+/*
9163+ * PaX: decide what to do with offenders (regs->nip = fault address)
9164+ *
9165+ * returns 1 when task should be killed
9166+ */
9167+static int pax_handle_fetch_fault(struct pt_regs *regs)
9168+{
9169+ return 1;
9170+}
9171+
9172+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9173+{
9174+ unsigned long i;
9175+
9176+ printk(KERN_ERR "PAX: bytes at PC: ");
9177+ for (i = 0; i < 5; i++) {
9178+ unsigned int c;
9179+ if (get_user(c, (unsigned int __user *)pc+i))
9180+ printk(KERN_CONT "???????? ");
9181+ else
9182+ printk(KERN_CONT "%08x ", c);
9183+ }
9184+ printk("\n");
9185+}
9186+#endif
9187+
9188 /*
9189 * Check whether the instruction at regs->nip is a store using
9190 * an update addressing form which will update r1.
9191@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9192 * indicate errors in DSISR but can validly be set in SRR1.
9193 */
9194 if (trap == 0x400)
9195- error_code &= 0x48200000;
9196+ error_code &= 0x58200000;
9197 else
9198 is_write = error_code & DSISR_ISSTORE;
9199 #else
9200@@ -383,7 +414,7 @@ good_area:
9201 * "undefined". Of those that can be set, this is the only
9202 * one which seems bad.
9203 */
9204- if (error_code & 0x10000000)
9205+ if (error_code & DSISR_GUARDED)
9206 /* Guarded storage error. */
9207 goto bad_area;
9208 #endif /* CONFIG_8xx */
9209@@ -398,7 +429,7 @@ good_area:
9210 * processors use the same I/D cache coherency mechanism
9211 * as embedded.
9212 */
9213- if (error_code & DSISR_PROTFAULT)
9214+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9215 goto bad_area;
9216 #endif /* CONFIG_PPC_STD_MMU */
9217
9218@@ -490,6 +521,23 @@ bad_area:
9219 bad_area_nosemaphore:
9220 /* User mode accesses cause a SIGSEGV */
9221 if (user_mode(regs)) {
9222+
9223+#ifdef CONFIG_PAX_PAGEEXEC
9224+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9225+#ifdef CONFIG_PPC_STD_MMU
9226+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9227+#else
9228+ if (is_exec && regs->nip == address) {
9229+#endif
9230+ switch (pax_handle_fetch_fault(regs)) {
9231+ }
9232+
9233+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9234+ do_group_exit(SIGKILL);
9235+ }
9236+ }
9237+#endif
9238+
9239 _exception(SIGSEGV, regs, code, address);
9240 goto bail;
9241 }
9242diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9243index cb8bdbe..cde4bc7 100644
9244--- a/arch/powerpc/mm/mmap.c
9245+++ b/arch/powerpc/mm/mmap.c
9246@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9247 return sysctl_legacy_va_layout;
9248 }
9249
9250-static unsigned long mmap_rnd(void)
9251+static unsigned long mmap_rnd(struct mm_struct *mm)
9252 {
9253 unsigned long rnd = 0;
9254
9255+#ifdef CONFIG_PAX_RANDMMAP
9256+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9257+#endif
9258+
9259 if (current->flags & PF_RANDOMIZE) {
9260 /* 8MB for 32bit, 1GB for 64bit */
9261 if (is_32bit_task())
9262@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9263 return rnd << PAGE_SHIFT;
9264 }
9265
9266-static inline unsigned long mmap_base(void)
9267+static inline unsigned long mmap_base(struct mm_struct *mm)
9268 {
9269 unsigned long gap = rlimit(RLIMIT_STACK);
9270
9271@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9272 else if (gap > MAX_GAP)
9273 gap = MAX_GAP;
9274
9275- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9276+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9277 }
9278
9279 /*
9280@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9281 */
9282 if (mmap_is_legacy()) {
9283 mm->mmap_base = TASK_UNMAPPED_BASE;
9284+
9285+#ifdef CONFIG_PAX_RANDMMAP
9286+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9287+ mm->mmap_base += mm->delta_mmap;
9288+#endif
9289+
9290 mm->get_unmapped_area = arch_get_unmapped_area;
9291 } else {
9292- mm->mmap_base = mmap_base();
9293+ mm->mmap_base = mmap_base(mm);
9294+
9295+#ifdef CONFIG_PAX_RANDMMAP
9296+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9297+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9298+#endif
9299+
9300 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9301 }
9302 }
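The layout change above is plain arithmetic: with PAX_RANDMMAP set, the bottom-up base moves up by delta_mmap, the top-down base moves down by delta_mmap + delta_stack, and the stock mmap_rnd() contribution is skipped so the PaX deltas are the sole source of randomness. A worked sketch with invented values (PAGE_ALIGN and the MIN_GAP/MAX_GAP clamp are omitted):

static void pax_base_example(void)
{
	unsigned long task_size   = 0x0000400000000000UL; /* example TASK_SIZE */
	unsigned long gap         = 8UL << 20;            /* stack rlimit */
	unsigned long delta_mmap  = 0x12340000UL;         /* per-mm random */
	unsigned long delta_stack = 0x00560000UL;         /* per-mm random */

	unsigned long legacy  = 0x10000000UL + delta_mmap;  /* base += delta_mmap */
	unsigned long topdown = task_size - gap - delta_mmap - delta_stack;

	(void)legacy;
	(void)topdown;
}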
9303diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9304index ded0ea1..f213a9b 100644
9305--- a/arch/powerpc/mm/slice.c
9306+++ b/arch/powerpc/mm/slice.c
9307@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9308 if ((mm->task_size - len) < addr)
9309 return 0;
9310 vma = find_vma(mm, addr);
9311- return (!vma || (addr + len) <= vma->vm_start);
9312+ return check_heap_stack_gap(vma, addr, len, 0);
9313 }
9314
9315 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9316@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9317 info.align_offset = 0;
9318
9319 addr = TASK_UNMAPPED_BASE;
9320+
9321+#ifdef CONFIG_PAX_RANDMMAP
9322+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9323+ addr += mm->delta_mmap;
9324+#endif
9325+
9326 while (addr < TASK_SIZE) {
9327 info.low_limit = addr;
9328 if (!slice_scan_available(addr, available, 1, &addr))
9329@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9330 if (fixed && addr > (mm->task_size - len))
9331 return -ENOMEM;
9332
9333+#ifdef CONFIG_PAX_RANDMMAP
9334+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9335+ addr = 0;
9336+#endif
9337+
9338 /* If hint, make sure it matches our alignment restrictions */
9339 if (!fixed && addr) {
9340 addr = _ALIGN_UP(addr, 1ul << pshift);
9341diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9342index f223875..94170e4 100644
9343--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9344+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9345@@ -399,8 +399,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
9346 }
9347
9348 static struct pci_ops scc_pciex_pci_ops = {
9349- scc_pciex_read_config,
9350- scc_pciex_write_config,
9351+ .read = scc_pciex_read_config,
9352+ .write = scc_pciex_write_config,
9353 };
9354
9355 static void pciex_clear_intr_all(unsigned int __iomem *base)
9356diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9357index d966bbe..372124a 100644
9358--- a/arch/powerpc/platforms/cell/spufs/file.c
9359+++ b/arch/powerpc/platforms/cell/spufs/file.c
9360@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9361 return VM_FAULT_NOPAGE;
9362 }
9363
9364-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9365+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9366 unsigned long address,
9367- void *buf, int len, int write)
9368+ void *buf, size_t len, int write)
9369 {
9370 struct spu_context *ctx = vma->vm_file->private_data;
9371 unsigned long offset = address - vma->vm_start;
9372diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9373index fa934fe..c296056 100644
9374--- a/arch/s390/include/asm/atomic.h
9375+++ b/arch/s390/include/asm/atomic.h
9376@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9377 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9378 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9379
9380+#define atomic64_read_unchecked(v) atomic64_read(v)
9381+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9382+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9383+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9384+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9385+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9386+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9387+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9388+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9389+
9390 #endif /* __ARCH_S390_ATOMIC__ */
9391diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9392index 8d72471..5322500 100644
9393--- a/arch/s390/include/asm/barrier.h
9394+++ b/arch/s390/include/asm/barrier.h
9395@@ -42,7 +42,7 @@
9396 do { \
9397 compiletime_assert_atomic_type(*p); \
9398 barrier(); \
9399- ACCESS_ONCE(*p) = (v); \
9400+ ACCESS_ONCE_RW(*p) = (v); \
9401 } while (0)
9402
9403 #define smp_load_acquire(p) \
9404diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9405index 4d7ccac..d03d0ad 100644
9406--- a/arch/s390/include/asm/cache.h
9407+++ b/arch/s390/include/asm/cache.h
9408@@ -9,8 +9,10 @@
9409 #ifndef __ARCH_S390_CACHE_H
9410 #define __ARCH_S390_CACHE_H
9411
9412-#define L1_CACHE_BYTES 256
9413+#include <linux/const.h>
9414+
9415 #define L1_CACHE_SHIFT 8
9416+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9417 #define NET_SKB_PAD 32
9418
9419 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9420diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9421index f6e43d3..5f57681 100644
9422--- a/arch/s390/include/asm/elf.h
9423+++ b/arch/s390/include/asm/elf.h
9424@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9425 the loader. We need to make sure that it is out of the way of the program
9426 that it will "exec", and that there is sufficient room for the brk. */
9427
9428-extern unsigned long randomize_et_dyn(unsigned long base);
9429-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9430+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9431+
9432+#ifdef CONFIG_PAX_ASLR
9433+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9434+
9435+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9436+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9437+#endif
9438
9439 /* This yields a mask that user programs can use to figure out what
9440 instruction set this CPU supports. */
9441@@ -223,9 +229,6 @@ struct linux_binprm;
9442 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9443 int arch_setup_additional_pages(struct linux_binprm *, int);
9444
9445-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9446-#define arch_randomize_brk arch_randomize_brk
9447-
9448 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9449
9450 #endif
9451diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9452index c4a93d6..4d2a9b4 100644
9453--- a/arch/s390/include/asm/exec.h
9454+++ b/arch/s390/include/asm/exec.h
9455@@ -7,6 +7,6 @@
9456 #ifndef __ASM_EXEC_H
9457 #define __ASM_EXEC_H
9458
9459-extern unsigned long arch_align_stack(unsigned long sp);
9460+#define arch_align_stack(x) ((x) & ~0xfUL)
9461
9462 #endif /* __ASM_EXEC_H */
9463diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9464index cd4c68e..6764641 100644
9465--- a/arch/s390/include/asm/uaccess.h
9466+++ b/arch/s390/include/asm/uaccess.h
9467@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9468 __range_ok((unsigned long)(addr), (size)); \
9469 })
9470
9471+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9472 #define access_ok(type, addr, size) __access_ok(addr, size)
9473
9474 /*
9475@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9476 copy_to_user(void __user *to, const void *from, unsigned long n)
9477 {
9478 might_fault();
9479+
9480+ if ((long)n < 0)
9481+ return n;
9482+
9483 return __copy_to_user(to, from, n);
9484 }
9485
9486@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9487 static inline unsigned long __must_check
9488 copy_from_user(void *to, const void __user *from, unsigned long n)
9489 {
9490- unsigned int sz = __compiletime_object_size(to);
9491+ size_t sz = __compiletime_object_size(to);
9492
9493 might_fault();
9494- if (unlikely(sz != -1 && sz < n)) {
9495+
9496+ if ((long)n < 0)
9497+ return n;
9498+
9499+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9500 copy_from_user_overflow();
9501 return n;
9502 }
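The (long)n < 0 guard added to both s390 helpers above is a cheap saturation check: any length with the top bit set is rejected unchanged instead of reaching the copy loop. A sketch of the bug class it absorbs, with invented values:

static void underflow_example(void)
{
	size_t hdr = 16, msg = 8;	/* attacker supplies msg < hdr */
	size_t n = msg - hdr;		/* wraps to 0xffff...fff8 */

	/* (long)n < 0 holds, so the hardened copy_to_user() above
	 * returns n untouched instead of attempting a ~16 EB copy. */
	(void)n;
}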
9503diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9504index 409d152..d90d368 100644
9505--- a/arch/s390/kernel/module.c
9506+++ b/arch/s390/kernel/module.c
9507@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9508
9509 /* Increase core size by size of got & plt and set start
9510 offsets for got and plt. */
9511- me->core_size = ALIGN(me->core_size, 4);
9512- me->arch.got_offset = me->core_size;
9513- me->core_size += me->arch.got_size;
9514- me->arch.plt_offset = me->core_size;
9515- me->core_size += me->arch.plt_size;
9516+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9517+ me->arch.got_offset = me->core_size_rw;
9518+ me->core_size_rw += me->arch.got_size;
9519+ me->arch.plt_offset = me->core_size_rx;
9520+ me->core_size_rx += me->arch.plt_size;
9521 return 0;
9522 }
9523
9524@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9525 if (info->got_initialized == 0) {
9526 Elf_Addr *gotent;
9527
9528- gotent = me->module_core + me->arch.got_offset +
9529+ gotent = me->module_core_rw + me->arch.got_offset +
9530 info->got_offset;
9531 *gotent = val;
9532 info->got_initialized = 1;
9533@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9534 rc = apply_rela_bits(loc, val, 0, 64, 0);
9535 else if (r_type == R_390_GOTENT ||
9536 r_type == R_390_GOTPLTENT) {
9537- val += (Elf_Addr) me->module_core - loc;
9538+ val += (Elf_Addr) me->module_core_rw - loc;
9539 rc = apply_rela_bits(loc, val, 1, 32, 1);
9540 }
9541 break;
9542@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9543 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9544 if (info->plt_initialized == 0) {
9545 unsigned int *ip;
9546- ip = me->module_core + me->arch.plt_offset +
9547+ ip = me->module_core_rx + me->arch.plt_offset +
9548 info->plt_offset;
9549 #ifndef CONFIG_64BIT
9550 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9551@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9552 val - loc + 0xffffUL < 0x1ffffeUL) ||
9553 (r_type == R_390_PLT32DBL &&
9554 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9555- val = (Elf_Addr) me->module_core +
9556+ val = (Elf_Addr) me->module_core_rx +
9557 me->arch.plt_offset +
9558 info->plt_offset;
9559 val += rela->r_addend - loc;
9560@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9561 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9562 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9563 val = val + rela->r_addend -
9564- ((Elf_Addr) me->module_core + me->arch.got_offset);
9565+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9566 if (r_type == R_390_GOTOFF16)
9567 rc = apply_rela_bits(loc, val, 0, 16, 0);
9568 else if (r_type == R_390_GOTOFF32)
9569@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9570 break;
9571 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9572 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9573- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9574+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9575 rela->r_addend - loc;
9576 if (r_type == R_390_GOTPC)
9577 rc = apply_rela_bits(loc, val, 1, 32, 0);
9578diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9579index aa7a839..6c2a916 100644
9580--- a/arch/s390/kernel/process.c
9581+++ b/arch/s390/kernel/process.c
9582@@ -219,37 +219,3 @@ unsigned long get_wchan(struct task_struct *p)
9583 }
9584 return 0;
9585 }
9586-
9587-unsigned long arch_align_stack(unsigned long sp)
9588-{
9589- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9590- sp -= get_random_int() & ~PAGE_MASK;
9591- return sp & ~0xf;
9592-}
9593-
9594-static inline unsigned long brk_rnd(void)
9595-{
9596- /* 8MB for 32bit, 1GB for 64bit */
9597- if (is_32bit_task())
9598- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9599- else
9600- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9601-}
9602-
9603-unsigned long arch_randomize_brk(struct mm_struct *mm)
9604-{
9605- unsigned long ret;
9606-
9607- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9608- return (ret > mm->brk) ? ret : mm->brk;
9609-}
9610-
9611-unsigned long randomize_et_dyn(unsigned long base)
9612-{
9613- unsigned long ret;
9614-
9615- if (!(current->flags & PF_RANDOMIZE))
9616- return base;
9617- ret = PAGE_ALIGN(base + brk_rnd());
9618- return (ret > base) ? ret : base;
9619-}
9620diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9621index 9b436c2..54fbf0a 100644
9622--- a/arch/s390/mm/mmap.c
9623+++ b/arch/s390/mm/mmap.c
9624@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9625 */
9626 if (mmap_is_legacy()) {
9627 mm->mmap_base = mmap_base_legacy();
9628+
9629+#ifdef CONFIG_PAX_RANDMMAP
9630+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9631+ mm->mmap_base += mm->delta_mmap;
9632+#endif
9633+
9634 mm->get_unmapped_area = arch_get_unmapped_area;
9635 } else {
9636 mm->mmap_base = mmap_base();
9637+
9638+#ifdef CONFIG_PAX_RANDMMAP
9639+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9640+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9641+#endif
9642+
9643 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9644 }
9645 }
9646@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9647 */
9648 if (mmap_is_legacy()) {
9649 mm->mmap_base = mmap_base_legacy();
9650+
9651+#ifdef CONFIG_PAX_RANDMMAP
9652+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9653+ mm->mmap_base += mm->delta_mmap;
9654+#endif
9655+
9656 mm->get_unmapped_area = s390_get_unmapped_area;
9657 } else {
9658 mm->mmap_base = mmap_base();
9659+
9660+#ifdef CONFIG_PAX_RANDMMAP
9661+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9662+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9663+#endif
9664+
9665 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9666 }
9667 }
9668diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9669index ae3d59f..f65f075 100644
9670--- a/arch/score/include/asm/cache.h
9671+++ b/arch/score/include/asm/cache.h
9672@@ -1,7 +1,9 @@
9673 #ifndef _ASM_SCORE_CACHE_H
9674 #define _ASM_SCORE_CACHE_H
9675
9676+#include <linux/const.h>
9677+
9678 #define L1_CACHE_SHIFT 4
9679-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9680+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9681
9682 #endif /* _ASM_SCORE_CACHE_H */
9683diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9684index f9f3cd5..58ff438 100644
9685--- a/arch/score/include/asm/exec.h
9686+++ b/arch/score/include/asm/exec.h
9687@@ -1,6 +1,6 @@
9688 #ifndef _ASM_SCORE_EXEC_H
9689 #define _ASM_SCORE_EXEC_H
9690
9691-extern unsigned long arch_align_stack(unsigned long sp);
9692+#define arch_align_stack(x) (x)
9693
9694 #endif /* _ASM_SCORE_EXEC_H */
9695diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9696index a1519ad3..e8ac1ff 100644
9697--- a/arch/score/kernel/process.c
9698+++ b/arch/score/kernel/process.c
9699@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9700
9701 return task_pt_regs(task)->cp0_epc;
9702 }
9703-
9704-unsigned long arch_align_stack(unsigned long sp)
9705-{
9706- return sp;
9707-}
9708diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9709index ef9e555..331bd29 100644
9710--- a/arch/sh/include/asm/cache.h
9711+++ b/arch/sh/include/asm/cache.h
9712@@ -9,10 +9,11 @@
9713 #define __ASM_SH_CACHE_H
9714 #ifdef __KERNEL__
9715
9716+#include <linux/const.h>
9717 #include <linux/init.h>
9718 #include <cpu/cache.h>
9719
9720-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9721+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9722
9723 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9724
9725diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9726index 6777177..cb5e44f 100644
9727--- a/arch/sh/mm/mmap.c
9728+++ b/arch/sh/mm/mmap.c
9729@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9730 struct mm_struct *mm = current->mm;
9731 struct vm_area_struct *vma;
9732 int do_colour_align;
9733+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9734 struct vm_unmapped_area_info info;
9735
9736 if (flags & MAP_FIXED) {
9737@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9738 if (filp || (flags & MAP_SHARED))
9739 do_colour_align = 1;
9740
9741+#ifdef CONFIG_PAX_RANDMMAP
9742+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9743+#endif
9744+
9745 if (addr) {
9746 if (do_colour_align)
9747 addr = COLOUR_ALIGN(addr, pgoff);
9748@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9749 addr = PAGE_ALIGN(addr);
9750
9751 vma = find_vma(mm, addr);
9752- if (TASK_SIZE - len >= addr &&
9753- (!vma || addr + len <= vma->vm_start))
9754+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9755 return addr;
9756 }
9757
9758 info.flags = 0;
9759 info.length = len;
9760- info.low_limit = TASK_UNMAPPED_BASE;
9761+ info.low_limit = mm->mmap_base;
9762 info.high_limit = TASK_SIZE;
9763 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9764 info.align_offset = pgoff << PAGE_SHIFT;
9765@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9766 struct mm_struct *mm = current->mm;
9767 unsigned long addr = addr0;
9768 int do_colour_align;
9769+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9770 struct vm_unmapped_area_info info;
9771
9772 if (flags & MAP_FIXED) {
9773@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9774 if (filp || (flags & MAP_SHARED))
9775 do_colour_align = 1;
9776
9777+#ifdef CONFIG_PAX_RANDMMAP
9778+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9779+#endif
9780+
9781 /* requesting a specific address */
9782 if (addr) {
9783 if (do_colour_align)
9784@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9785 addr = PAGE_ALIGN(addr);
9786
9787 vma = find_vma(mm, addr);
9788- if (TASK_SIZE - len >= addr &&
9789- (!vma || addr + len <= vma->vm_start))
9790+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9791 return addr;
9792 }
9793
9794@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9795 VM_BUG_ON(addr != -ENOMEM);
9796 info.flags = 0;
9797 info.low_limit = TASK_UNMAPPED_BASE;
9798+
9799+#ifdef CONFIG_PAX_RANDMMAP
9800+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9801+ info.low_limit += mm->delta_mmap;
9802+#endif
9803+
9804 info.high_limit = TASK_SIZE;
9805 addr = vm_unmapped_area(&info);
9806 }
9807diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9808index 4082749..fd97781 100644
9809--- a/arch/sparc/include/asm/atomic_64.h
9810+++ b/arch/sparc/include/asm/atomic_64.h
9811@@ -15,18 +15,38 @@
9812 #define ATOMIC64_INIT(i) { (i) }
9813
9814 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9815+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9816+{
9817+ return ACCESS_ONCE(v->counter);
9818+}
9819 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9820+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9821+{
9822+ return ACCESS_ONCE(v->counter);
9823+}
9824
9825 #define atomic_set(v, i) (((v)->counter) = i)
9826+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9827+{
9828+ v->counter = i;
9829+}
9830 #define atomic64_set(v, i) (((v)->counter) = i)
9831+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9832+{
9833+ v->counter = i;
9834+}
9835
9836-#define ATOMIC_OP(op) \
9837-void atomic_##op(int, atomic_t *); \
9838-void atomic64_##op(long, atomic64_t *);
9839+#define __ATOMIC_OP(op, suffix) \
9840+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9841+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9842
9843-#define ATOMIC_OP_RETURN(op) \
9844-int atomic_##op##_return(int, atomic_t *); \
9845-long atomic64_##op##_return(long, atomic64_t *);
9846+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9847+
9848+#define __ATOMIC_OP_RETURN(op, suffix) \
9849+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9850+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9851+
9852+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9853
9854 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9855
9856@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9857
9858 #undef ATOMIC_OPS
9859 #undef ATOMIC_OP_RETURN
9860+#undef __ATOMIC_OP_RETURN
9861 #undef ATOMIC_OP
9862+#undef __ATOMIC_OP
9863
9864 #define atomic_dec_return(v) atomic_sub_return(1, v)
9865 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9866
9867 #define atomic_inc_return(v) atomic_add_return(1, v)
9868+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9869+{
9870+ return atomic_add_return_unchecked(1, v);
9871+}
9872 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9873+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9874+{
9875+ return atomic64_add_return_unchecked(1, v);
9876+}
9877
9878 /*
9879 * atomic_inc_and_test - increment and test
9880@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
9881 * other cases.
9882 */
9883 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9884+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9885+{
9886+ return atomic_inc_return_unchecked(v) == 0;
9887+}
9888 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9889
9890 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
9891@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
9892 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
9893
9894 #define atomic_inc(v) atomic_add(1, v)
9895+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9896+{
9897+ atomic_add_unchecked(1, v);
9898+}
9899 #define atomic64_inc(v) atomic64_add(1, v)
9900+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9901+{
9902+ atomic64_add_unchecked(1, v);
9903+}
9904
9905 #define atomic_dec(v) atomic_sub(1, v)
9906+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9907+{
9908+ atomic_sub_unchecked(1, v);
9909+}
9910 #define atomic64_dec(v) atomic64_sub(1, v)
9911+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9912+{
9913+ atomic64_sub_unchecked(1, v);
9914+}
9915
9916 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
9917 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
9918
9919 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9920+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9921+{
9922+ return cmpxchg(&v->counter, old, new);
9923+}
9924 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9925+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9926+{
9927+ return xchg(&v->counter, new);
9928+}
9929
9930 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9931 {
9932- int c, old;
9933+ int c, old, new;
9934 c = atomic_read(v);
9935 for (;;) {
9936- if (unlikely(c == (u)))
9937+ if (unlikely(c == u))
9938 break;
9939- old = atomic_cmpxchg((v), c, c + (a));
9940+
9941+ asm volatile("addcc %2, %0, %0\n"
9942+
9943+#ifdef CONFIG_PAX_REFCOUNT
9944+ "tvs %%icc, 6\n"
9945+#endif
9946+
9947+ : "=r" (new)
9948+ : "0" (c), "ir" (a)
9949+ : "cc");
9950+
9951+ old = atomic_cmpxchg(v, c, new);
9952 if (likely(old == c))
9953 break;
9954 c = old;
9955@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9956 #define atomic64_cmpxchg(v, o, n) \
9957 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
9958 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
9959+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9960+{
9961+ return xchg(&v->counter, new);
9962+}
9963
9964 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
9965 {
9966- long c, old;
9967+ long c, old, new;
9968 c = atomic64_read(v);
9969 for (;;) {
9970- if (unlikely(c == (u)))
9971+ if (unlikely(c == u))
9972 break;
9973- old = atomic64_cmpxchg((v), c, c + (a));
9974+
9975+ asm volatile("addcc %2, %0, %0\n"
9976+
9977+#ifdef CONFIG_PAX_REFCOUNT
9978+ "tvs %%xcc, 6\n"
9979+#endif
9980+
9981+ : "=r" (new)
9982+ : "0" (c), "ir" (a)
9983+ : "cc");
9984+
9985+ old = atomic64_cmpxchg(v, c, new);
9986 if (likely(old == c))
9987 break;
9988 c = old;
9989 }
9990- return c != (u);
9991+ return c != u;
9992 }
9993
9994 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
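
The addcc/tvs pair introduced above is the core of the PAX_REFCOUNT scheme on sparc: addcc performs the add and sets the integer condition codes, and "tvs %icc, 6" (trap on overflow set) raises software trap 6 when the signed result overflowed; the traps_*.c hunks later in this patch turn that trap into a refcount-overflow report. A minimal userspace sketch of the same semantics, assuming only the GCC/Clang overflow builtins (nothing below is code from the patch itself):

#include <stdio.h>
#include <stdlib.h>

/* Portable stand-in for "addcc %2, %0, %0; tvs %icc, 6": compute the
 * sum and trap out if the signed addition overflowed. */
static int checked_add(int counter, int delta)
{
        int sum;

        if (__builtin_add_overflow(counter, delta, &sum)) {
                fprintf(stderr, "refcount overflow\n");
                abort();        /* stands in for the trap-6 handler */
        }
        return sum;
}

int main(void)
{
        printf("%d\n", checked_add(1, 1));      /* prints 2 */
        checked_add(0x7fffffff, 1);             /* INT_MAX + 1: aborts */
        return 0;
}
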
9995diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
9996index 7664894..45a974b 100644
9997--- a/arch/sparc/include/asm/barrier_64.h
9998+++ b/arch/sparc/include/asm/barrier_64.h
9999@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10000 do { \
10001 compiletime_assert_atomic_type(*p); \
10002 barrier(); \
10003- ACCESS_ONCE(*p) = (v); \
10004+ ACCESS_ONCE_RW(*p) = (v); \
10005 } while (0)
10006
10007 #define smp_load_acquire(p) \
10008diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10009index 5bb6991..5c2132e 100644
10010--- a/arch/sparc/include/asm/cache.h
10011+++ b/arch/sparc/include/asm/cache.h
10012@@ -7,10 +7,12 @@
10013 #ifndef _SPARC_CACHE_H
10014 #define _SPARC_CACHE_H
10015
10016+#include <linux/const.h>
10017+
10018 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10019
10020 #define L1_CACHE_SHIFT 5
10021-#define L1_CACHE_BYTES 32
10022+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10023
10024 #ifdef CONFIG_SPARC32
10025 #define SMP_CACHE_BYTES_SHIFT 5
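
The <linux/const.h> include is needed because the literal 32 becomes a shifted constant that must carry a UL suffix in C yet stay a bare number when the header is pulled into assembly. A simplified sketch of the _AC() helper that linux/const.h provides (reproduced from memory; check the header for the exact spelling):

#ifdef __ASSEMBLY__
#define _AC(X, Y)       X               /* assembler: plain constant */
#else
#define __AC(X, Y)      (X##Y)          /* C: paste the type suffix on */
#define _AC(X, Y)       __AC(X, Y)
#endif

#define L1_CACHE_SHIFT  5
#define L1_CACHE_BYTES  (_AC(1, UL) << L1_CACHE_SHIFT) /* 32UL in C, 32 in asm */
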
10026diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10027index a24e41f..47677ff 100644
10028--- a/arch/sparc/include/asm/elf_32.h
10029+++ b/arch/sparc/include/asm/elf_32.h
10030@@ -114,6 +114,13 @@ typedef struct {
10031
10032 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10033
10034+#ifdef CONFIG_PAX_ASLR
10035+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10036+
10037+#define PAX_DELTA_MMAP_LEN 16
10038+#define PAX_DELTA_STACK_LEN 16
10039+#endif
10040+
10041 /* This yields a mask that user programs can use to figure out what
10042 instruction set this cpu supports. This can NOT be done in userspace
10043 on Sparc. */
10044diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10045index 370ca1e..d4f4a98 100644
10046--- a/arch/sparc/include/asm/elf_64.h
10047+++ b/arch/sparc/include/asm/elf_64.h
10048@@ -189,6 +189,13 @@ typedef struct {
10049 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10050 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10051
10052+#ifdef CONFIG_PAX_ASLR
10053+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10054+
10055+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10056+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10057+#endif
10058+
10059 extern unsigned long sparc64_elf_hwcap;
10060 #define ELF_HWCAP sparc64_elf_hwcap
10061
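
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above give the number of random bits PaX folds into the mmap and stack bases (14/15 bits for compat tasks, 28/29 for 64-bit ones). A sketch of how such a bit count typically becomes a byte offset: mask a random value to that many bits, then scale by the page size. The constants and the rand() source here are illustrative, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT              13UL    /* sparc64 uses 8K pages */
#define PAX_DELTA_MMAP_LEN      28UL    /* 64-bit task, per the hunk above */

int main(void)
{
        unsigned long rnd = ((unsigned long)rand() << 16) ^ (unsigned long)rand();
        unsigned long delta_mmap =
                (rnd & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;

        printf("mmap base biased by %#lx\n", delta_mmap);
        return 0;
}
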
10062diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10063index a3890da..f6a408e 100644
10064--- a/arch/sparc/include/asm/pgalloc_32.h
10065+++ b/arch/sparc/include/asm/pgalloc_32.h
10066@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10067 }
10068
10069 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10070+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10071
10072 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10073 unsigned long address)
10074diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10075index 5e31871..13469c6 100644
10076--- a/arch/sparc/include/asm/pgalloc_64.h
10077+++ b/arch/sparc/include/asm/pgalloc_64.h
10078@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10079 }
10080
10081 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10082+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10083
10084 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10085 {
10086@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10087 }
10088
10089 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10090+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10091
10092 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10093 {
10094diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10095index 59ba6f6..4518128 100644
10096--- a/arch/sparc/include/asm/pgtable.h
10097+++ b/arch/sparc/include/asm/pgtable.h
10098@@ -5,4 +5,8 @@
10099 #else
10100 #include <asm/pgtable_32.h>
10101 #endif
10102+
10103+#define ktla_ktva(addr) (addr)
10104+#define ktva_ktla(addr) (addr)
10105+
10106 #endif
10107diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10108index b9b91ae..950b91e 100644
10109--- a/arch/sparc/include/asm/pgtable_32.h
10110+++ b/arch/sparc/include/asm/pgtable_32.h
10111@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10112 #define PAGE_SHARED SRMMU_PAGE_SHARED
10113 #define PAGE_COPY SRMMU_PAGE_COPY
10114 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10115+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10116+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10117+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10118 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10119
10120 /* Top-level page directory - dummy used by init-mm.
10121@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10122
10123 /* xwr */
10124 #define __P000 PAGE_NONE
10125-#define __P001 PAGE_READONLY
10126-#define __P010 PAGE_COPY
10127-#define __P011 PAGE_COPY
10128+#define __P001 PAGE_READONLY_NOEXEC
10129+#define __P010 PAGE_COPY_NOEXEC
10130+#define __P011 PAGE_COPY_NOEXEC
10131 #define __P100 PAGE_READONLY
10132 #define __P101 PAGE_READONLY
10133 #define __P110 PAGE_COPY
10134 #define __P111 PAGE_COPY
10135
10136 #define __S000 PAGE_NONE
10137-#define __S001 PAGE_READONLY
10138-#define __S010 PAGE_SHARED
10139-#define __S011 PAGE_SHARED
10140+#define __S001 PAGE_READONLY_NOEXEC
10141+#define __S010 PAGE_SHARED_NOEXEC
10142+#define __S011 PAGE_SHARED_NOEXEC
10143 #define __S100 PAGE_READONLY
10144 #define __S101 PAGE_READONLY
10145 #define __S110 PAGE_SHARED
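
The __P/__S tables above are indexed "xwr": bit 2 is execute, bit 1 write, bit 0 read, with __P* covering MAP_PRIVATE and __S* MAP_SHARED mappings. The change routes every combination without PROT_EXEC to a *_NOEXEC protection, so the SRMMU enforces non-executable data pages. A small decoder for the index, using the standard mmap PROT_* values:

#include <stdio.h>

#define PROT_READ   0x1
#define PROT_WRITE  0x2
#define PROT_EXEC   0x4

int main(void)
{
        int prot = PROT_READ | PROT_WRITE;      /* typical heap mapping */
        int x = !!(prot & PROT_EXEC);
        int w = !!(prot & PROT_WRITE);
        int r = !!(prot & PROT_READ);

        /* private rw data resolves to __P011 -> PAGE_COPY_NOEXEC above */
        printf("__P%d%d%d\n", x, w, r);
        return 0;
}
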
10146diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10147index 79da178..c2eede8 100644
10148--- a/arch/sparc/include/asm/pgtsrmmu.h
10149+++ b/arch/sparc/include/asm/pgtsrmmu.h
10150@@ -115,6 +115,11 @@
10151 SRMMU_EXEC | SRMMU_REF)
10152 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10153 SRMMU_EXEC | SRMMU_REF)
10154+
10155+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10156+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10157+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10158+
10159 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10160 SRMMU_DIRTY | SRMMU_REF)
10161
10162diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10163index 29d64b1..4272fe8 100644
10164--- a/arch/sparc/include/asm/setup.h
10165+++ b/arch/sparc/include/asm/setup.h
10166@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10167 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10168
10169 /* init_64.c */
10170-extern atomic_t dcpage_flushes;
10171-extern atomic_t dcpage_flushes_xcall;
10172+extern atomic_unchecked_t dcpage_flushes;
10173+extern atomic_unchecked_t dcpage_flushes_xcall;
10174
10175 extern int sysctl_tsb_ratio;
10176 #endif
10177diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10178index 9689176..63c18ea 100644
10179--- a/arch/sparc/include/asm/spinlock_64.h
10180+++ b/arch/sparc/include/asm/spinlock_64.h
10181@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10182
10183 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10184
10185-static void inline arch_read_lock(arch_rwlock_t *lock)
10186+static inline void arch_read_lock(arch_rwlock_t *lock)
10187 {
10188 unsigned long tmp1, tmp2;
10189
10190 __asm__ __volatile__ (
10191 "1: ldsw [%2], %0\n"
10192 " brlz,pn %0, 2f\n"
10193-"4: add %0, 1, %1\n"
10194+"4: addcc %0, 1, %1\n"
10195+
10196+#ifdef CONFIG_PAX_REFCOUNT
10197+" tvs %%icc, 6\n"
10198+#endif
10199+
10200 " cas [%2], %0, %1\n"
10201 " cmp %0, %1\n"
10202 " bne,pn %%icc, 1b\n"
10203@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10204 " .previous"
10205 : "=&r" (tmp1), "=&r" (tmp2)
10206 : "r" (lock)
10207- : "memory");
10208+ : "memory", "cc");
10209 }
10210
10211-static int inline arch_read_trylock(arch_rwlock_t *lock)
10212+static inline int arch_read_trylock(arch_rwlock_t *lock)
10213 {
10214 int tmp1, tmp2;
10215
10216@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10217 "1: ldsw [%2], %0\n"
10218 " brlz,a,pn %0, 2f\n"
10219 " mov 0, %0\n"
10220-" add %0, 1, %1\n"
10221+" addcc %0, 1, %1\n"
10222+
10223+#ifdef CONFIG_PAX_REFCOUNT
10224+" tvs %%icc, 6\n"
10225+#endif
10226+
10227 " cas [%2], %0, %1\n"
10228 " cmp %0, %1\n"
10229 " bne,pn %%icc, 1b\n"
10230@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10231 return tmp1;
10232 }
10233
10234-static void inline arch_read_unlock(arch_rwlock_t *lock)
10235+static inline void arch_read_unlock(arch_rwlock_t *lock)
10236 {
10237 unsigned long tmp1, tmp2;
10238
10239 __asm__ __volatile__(
10240 "1: lduw [%2], %0\n"
10241-" sub %0, 1, %1\n"
10242+" subcc %0, 1, %1\n"
10243+
10244+#ifdef CONFIG_PAX_REFCOUNT
10245+" tvs %%icc, 6\n"
10246+#endif
10247+
10248 " cas [%2], %0, %1\n"
10249 " cmp %0, %1\n"
10250 " bne,pn %%xcc, 1b\n"
10251@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10252 : "memory");
10253 }
10254
10255-static void inline arch_write_lock(arch_rwlock_t *lock)
10256+static inline void arch_write_lock(arch_rwlock_t *lock)
10257 {
10258 unsigned long mask, tmp1, tmp2;
10259
10260@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10261 : "memory");
10262 }
10263
10264-static void inline arch_write_unlock(arch_rwlock_t *lock)
10265+static inline void arch_write_unlock(arch_rwlock_t *lock)
10266 {
10267 __asm__ __volatile__(
10268 " stw %%g0, [%0]"
10269@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10270 : "memory");
10271 }
10272
10273-static int inline arch_write_trylock(arch_rwlock_t *lock)
10274+static inline int arch_write_trylock(arch_rwlock_t *lock)
10275 {
10276 unsigned long mask, tmp1, tmp2, result;
10277
10278diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10279index 025c984..a216504 100644
10280--- a/arch/sparc/include/asm/thread_info_32.h
10281+++ b/arch/sparc/include/asm/thread_info_32.h
10282@@ -49,6 +49,8 @@ struct thread_info {
10283 unsigned long w_saved;
10284
10285 struct restart_block restart_block;
10286+
10287+ unsigned long lowest_stack;
10288 };
10289
10290 /*
10291diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10292index 798f027..b009941 100644
10293--- a/arch/sparc/include/asm/thread_info_64.h
10294+++ b/arch/sparc/include/asm/thread_info_64.h
10295@@ -63,6 +63,8 @@ struct thread_info {
10296 struct pt_regs *kern_una_regs;
10297 unsigned int kern_una_insn;
10298
10299+ unsigned long lowest_stack;
10300+
10301 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10302 __attribute__ ((aligned(64)));
10303 };
10304@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10305 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10306 /* flag bit 4 is available */
10307 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10308-/* flag bit 6 is available */
10309+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10310 #define TIF_32BIT 7 /* 32-bit binary */
10311 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10312 #define TIF_SECCOMP 9 /* secure computing */
10313 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10314 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10315+
10316 /* NOTE: Thread flags >= 12 should be ones we have no interest
10317 * in using in assembly, else we can't use the mask as
10318 * an immediate value in instructions such as andcc.
10319@@ -215,12 +218,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10320 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10321 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10322 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10323+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10324
10325 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10326 _TIF_DO_NOTIFY_RESUME_MASK | \
10327 _TIF_NEED_RESCHED)
10328 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10329
10330+#define _TIF_WORK_SYSCALL \
10331+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10332+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10333+
10334 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10335
10336 /*
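
_TIF_WORK_SYSCALL above folds the five existing trace-related flags plus the new TIF_GRSEC_SETXID into one mask; the syscalls.S hunks later in this patch then test it with a single andcc. Note the comment in this header: all flags involved stay below bit 12, so the mask still fits in the 13-bit signed immediate that andcc accepts. A compressed C model of the test (bit numbers copied from the hunk, flag list abridged):

#include <stdio.h>

#define TIF_GRSEC_SETXID        6
#define TIF_SECCOMP             9
#define _TIF_GRSEC_SETXID       (1 << TIF_GRSEC_SETXID)
#define _TIF_SECCOMP            (1 << TIF_SECCOMP)
/* ...remaining trace flags elided; same pattern as in the header... */
#define _TIF_WORK_SYSCALL       (_TIF_GRSEC_SETXID | _TIF_SECCOMP)

int main(void)
{
        unsigned long flags = _TIF_GRSEC_SETXID;

        if (flags & _TIF_WORK_SYSCALL)  /* one andcc in the asm */
                puts("slow path: syscall_trace_enter()");
        return 0;
}
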
10337diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10338index bd56c28..4b63d83 100644
10339--- a/arch/sparc/include/asm/uaccess.h
10340+++ b/arch/sparc/include/asm/uaccess.h
10341@@ -1,5 +1,6 @@
10342 #ifndef ___ASM_SPARC_UACCESS_H
10343 #define ___ASM_SPARC_UACCESS_H
10344+
10345 #if defined(__sparc__) && defined(__arch64__)
10346 #include <asm/uaccess_64.h>
10347 #else
10348diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10349index 9634d08..f55fe4f 100644
10350--- a/arch/sparc/include/asm/uaccess_32.h
10351+++ b/arch/sparc/include/asm/uaccess_32.h
10352@@ -250,27 +250,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10353
10354 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10355 {
10356- if (n && __access_ok((unsigned long) to, n))
10357+ if ((long)n < 0)
10358+ return n;
10359+
10360+ if (n && __access_ok((unsigned long) to, n)) {
10361+ if (!__builtin_constant_p(n))
10362+ check_object_size(from, n, true);
10363 return __copy_user(to, (__force void __user *) from, n);
10364- else
10365+ } else
10366 return n;
10367 }
10368
10369 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10370 {
10371+ if ((long)n < 0)
10372+ return n;
10373+
10374+ if (!__builtin_constant_p(n))
10375+ check_object_size(from, n, true);
10376+
10377 return __copy_user(to, (__force void __user *) from, n);
10378 }
10379
10380 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10381 {
10382- if (n && __access_ok((unsigned long) from, n))
10383+ if ((long)n < 0)
10384+ return n;
10385+
10386+ if (n && __access_ok((unsigned long) from, n)) {
10387+ if (!__builtin_constant_p(n))
10388+ check_object_size(to, n, false);
10389 return __copy_user((__force void __user *) to, from, n);
10390- else
10391+ } else
10392 return n;
10393 }
10394
10395 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10396 {
10397+ if ((long)n < 0)
10398+ return n;
10399+
10400 return __copy_user((__force void __user *) to, from, n);
10401 }
10402
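
The (long)n < 0 guards added above catch size arguments whose sign bit is set, typically the result of a length underflow, before __copy_user can interpret them as multi-gigabyte copies; non-constant sizes are additionally validated against the source or destination object via check_object_size(). A sketch of the sign-check idiom on its own (copy_checked() is a made-up name; the return convention mirrors copy_*_user, which reports bytes left uncopied):

#include <stdio.h>

static unsigned long copy_checked(void *dst, const void *src, unsigned long n)
{
        if ((long)n < 0)        /* sign bit set: almost certainly underflow */
                return n;       /* "n bytes not copied", like copy_*_user */
        /* ...__copy_user() would do the real work here... */
        return 0;
}

int main(void)
{
        char buf[16];
        unsigned long bad = 4UL - 8UL;  /* wraps to 0xffff...fffc */

        printf("left uncopied: %lu\n", copy_checked(buf, "data", bad));
        return 0;
}
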
10403diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10404index c990a5e..f17b9c1 100644
10405--- a/arch/sparc/include/asm/uaccess_64.h
10406+++ b/arch/sparc/include/asm/uaccess_64.h
10407@@ -10,6 +10,7 @@
10408 #include <linux/compiler.h>
10409 #include <linux/string.h>
10410 #include <linux/thread_info.h>
10411+#include <linux/kernel.h>
10412 #include <asm/asi.h>
10413 #include <asm/spitfire.h>
10414 #include <asm-generic/uaccess-unaligned.h>
10415@@ -214,8 +215,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10416 static inline unsigned long __must_check
10417 copy_from_user(void *to, const void __user *from, unsigned long size)
10418 {
10419- unsigned long ret = ___copy_from_user(to, from, size);
10420+ unsigned long ret;
10421
10422+ if ((long)size < 0 || size > INT_MAX)
10423+ return size;
10424+
10425+ if (!__builtin_constant_p(size))
10426+ check_object_size(to, size, false);
10427+
10428+ ret = ___copy_from_user(to, from, size);
10429 if (unlikely(ret))
10430 ret = copy_from_user_fixup(to, from, size);
10431
10432@@ -231,8 +239,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10433 static inline unsigned long __must_check
10434 copy_to_user(void __user *to, const void *from, unsigned long size)
10435 {
10436- unsigned long ret = ___copy_to_user(to, from, size);
10437+ unsigned long ret;
10438
10439+ if ((long)size < 0 || size > INT_MAX)
10440+ return size;
10441+
10442+ if (!__builtin_constant_p(size))
10443+ check_object_size(from, size, true);
10444+
10445+ ret = ___copy_to_user(to, from, size);
10446 if (unlikely(ret))
10447 ret = copy_to_user_fixup(to, from, size);
10448 return ret;
10449diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10450index 7cf9c6e..6206648 100644
10451--- a/arch/sparc/kernel/Makefile
10452+++ b/arch/sparc/kernel/Makefile
10453@@ -4,7 +4,7 @@
10454 #
10455
10456 asflags-y := -ansi
10457-ccflags-y := -Werror
10458+#ccflags-y := -Werror
10459
10460 extra-y := head_$(BITS).o
10461
10462diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10463index 50e7b62..79fae35 100644
10464--- a/arch/sparc/kernel/process_32.c
10465+++ b/arch/sparc/kernel/process_32.c
10466@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10467
10468 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10469 r->psr, r->pc, r->npc, r->y, print_tainted());
10470- printk("PC: <%pS>\n", (void *) r->pc);
10471+ printk("PC: <%pA>\n", (void *) r->pc);
10472 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10473 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10474 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10475 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10476 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10477 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10478- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10479+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10480
10481 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10482 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10483@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10484 rw = (struct reg_window32 *) fp;
10485 pc = rw->ins[7];
10486 printk("[%08lx : ", pc);
10487- printk("%pS ] ", (void *) pc);
10488+ printk("%pA ] ", (void *) pc);
10489 fp = rw->ins[6];
10490 } while (++count < 16);
10491 printk("\n");
10492diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10493index 0be7bf9..2b1cba8 100644
10494--- a/arch/sparc/kernel/process_64.c
10495+++ b/arch/sparc/kernel/process_64.c
10496@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10497 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10498 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10499 if (regs->tstate & TSTATE_PRIV)
10500- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10501+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10502 }
10503
10504 void show_regs(struct pt_regs *regs)
10505@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10506
10507 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10508 regs->tpc, regs->tnpc, regs->y, print_tainted());
10509- printk("TPC: <%pS>\n", (void *) regs->tpc);
10510+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10511 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10512 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10513 regs->u_regs[3]);
10514@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10515 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10516 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10517 regs->u_regs[15]);
10518- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10519+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10520 show_regwindow(regs);
10521 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10522 }
10523@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10524 ((tp && tp->task) ? tp->task->pid : -1));
10525
10526 if (gp->tstate & TSTATE_PRIV) {
10527- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10528+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10529 (void *) gp->tpc,
10530 (void *) gp->o7,
10531 (void *) gp->i7,
10532diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10533index 79cc0d1..ec62734 100644
10534--- a/arch/sparc/kernel/prom_common.c
10535+++ b/arch/sparc/kernel/prom_common.c
10536@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10537
10538 unsigned int prom_early_allocated __initdata;
10539
10540-static struct of_pdt_ops prom_sparc_ops __initdata = {
10541+static struct of_pdt_ops prom_sparc_ops __initconst = {
10542 .nextprop = prom_common_nextprop,
10543 .getproplen = prom_getproplen,
10544 .getproperty = prom_getproperty,
10545diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10546index 9ddc492..27a5619 100644
10547--- a/arch/sparc/kernel/ptrace_64.c
10548+++ b/arch/sparc/kernel/ptrace_64.c
10549@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10550 return ret;
10551 }
10552
10553+#ifdef CONFIG_GRKERNSEC_SETXID
10554+extern void gr_delayed_cred_worker(void);
10555+#endif
10556+
10557 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10558 {
10559 int ret = 0;
10560@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10561 if (test_thread_flag(TIF_NOHZ))
10562 user_exit();
10563
10564+#ifdef CONFIG_GRKERNSEC_SETXID
10565+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10566+ gr_delayed_cred_worker();
10567+#endif
10568+
10569 if (test_thread_flag(TIF_SYSCALL_TRACE))
10570 ret = tracehook_report_syscall_entry(regs);
10571
10572@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10573 if (test_thread_flag(TIF_NOHZ))
10574 user_exit();
10575
10576+#ifdef CONFIG_GRKERNSEC_SETXID
10577+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10578+ gr_delayed_cred_worker();
10579+#endif
10580+
10581 audit_syscall_exit(regs);
10582
10583 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
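
The TIF_GRSEC_SETXID hook added on both syscall entry and exit implements deferred credential switching: whichever path runs first performs the update, and test_and_clear guarantees it happens exactly once per flagging. The same pattern in portable C11 atomics (names invented for the sketch; gr_delayed_cred_worker() itself lives elsewhere in grsecurity and is not shown in this hunk):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int setxid_pending;

static void syscall_boundary(void)
{
        /* test_and_clear_thread_flag() equivalent: exactly-once pickup */
        if (atomic_exchange(&setxid_pending, 0))
                puts("applying delayed credential change");
}

int main(void)
{
        atomic_store(&setxid_pending, 1);       /* setuid() flags the task */
        syscall_boundary();                     /* runs the worker once */
        syscall_boundary();                     /* no-op */
        return 0;
}
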
10584diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10585index da6f1a7..e5dea8f 100644
10586--- a/arch/sparc/kernel/smp_64.c
10587+++ b/arch/sparc/kernel/smp_64.c
10588@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10589 return;
10590
10591 #ifdef CONFIG_DEBUG_DCFLUSH
10592- atomic_inc(&dcpage_flushes);
10593+ atomic_inc_unchecked(&dcpage_flushes);
10594 #endif
10595
10596 this_cpu = get_cpu();
10597@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10598 xcall_deliver(data0, __pa(pg_addr),
10599 (u64) pg_addr, cpumask_of(cpu));
10600 #ifdef CONFIG_DEBUG_DCFLUSH
10601- atomic_inc(&dcpage_flushes_xcall);
10602+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10603 #endif
10604 }
10605 }
10606@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10607 preempt_disable();
10608
10609 #ifdef CONFIG_DEBUG_DCFLUSH
10610- atomic_inc(&dcpage_flushes);
10611+ atomic_inc_unchecked(&dcpage_flushes);
10612 #endif
10613 data0 = 0;
10614 pg_addr = page_address(page);
10615@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10616 xcall_deliver(data0, __pa(pg_addr),
10617 (u64) pg_addr, cpu_online_mask);
10618 #ifdef CONFIG_DEBUG_DCFLUSH
10619- atomic_inc(&dcpage_flushes_xcall);
10620+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10621 #endif
10622 }
10623 __local_flush_dcache_page(page);
10624diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10625index 646988d..b88905f 100644
10626--- a/arch/sparc/kernel/sys_sparc_32.c
10627+++ b/arch/sparc/kernel/sys_sparc_32.c
10628@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10629 if (len > TASK_SIZE - PAGE_SIZE)
10630 return -ENOMEM;
10631 if (!addr)
10632- addr = TASK_UNMAPPED_BASE;
10633+ addr = current->mm->mmap_base;
10634
10635 info.flags = 0;
10636 info.length = len;
10637diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10638index c85403d..6af95c9 100644
10639--- a/arch/sparc/kernel/sys_sparc_64.c
10640+++ b/arch/sparc/kernel/sys_sparc_64.c
10641@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10642 struct vm_area_struct * vma;
10643 unsigned long task_size = TASK_SIZE;
10644 int do_color_align;
10645+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10646 struct vm_unmapped_area_info info;
10647
10648 if (flags & MAP_FIXED) {
10649 /* We do not accept a shared mapping if it would violate
10650 * cache aliasing constraints.
10651 */
10652- if ((flags & MAP_SHARED) &&
10653+ if ((filp || (flags & MAP_SHARED)) &&
10654 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10655 return -EINVAL;
10656 return addr;
10657@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10658 if (filp || (flags & MAP_SHARED))
10659 do_color_align = 1;
10660
10661+#ifdef CONFIG_PAX_RANDMMAP
10662+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10663+#endif
10664+
10665 if (addr) {
10666 if (do_color_align)
10667 addr = COLOR_ALIGN(addr, pgoff);
10668@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10669 addr = PAGE_ALIGN(addr);
10670
10671 vma = find_vma(mm, addr);
10672- if (task_size - len >= addr &&
10673- (!vma || addr + len <= vma->vm_start))
10674+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10675 return addr;
10676 }
10677
10678 info.flags = 0;
10679 info.length = len;
10680- info.low_limit = TASK_UNMAPPED_BASE;
10681+ info.low_limit = mm->mmap_base;
10682 info.high_limit = min(task_size, VA_EXCLUDE_START);
10683 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10684 info.align_offset = pgoff << PAGE_SHIFT;
10685+ info.threadstack_offset = offset;
10686 addr = vm_unmapped_area(&info);
10687
10688 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10689 VM_BUG_ON(addr != -ENOMEM);
10690 info.low_limit = VA_EXCLUDE_END;
10691+
10692+#ifdef CONFIG_PAX_RANDMMAP
10693+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10694+ info.low_limit += mm->delta_mmap;
10695+#endif
10696+
10697 info.high_limit = task_size;
10698 addr = vm_unmapped_area(&info);
10699 }
10700@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10701 unsigned long task_size = STACK_TOP32;
10702 unsigned long addr = addr0;
10703 int do_color_align;
10704+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10705 struct vm_unmapped_area_info info;
10706
10707 /* This should only ever run for 32-bit processes. */
10708@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10709 /* We do not accept a shared mapping if it would violate
10710 * cache aliasing constraints.
10711 */
10712- if ((flags & MAP_SHARED) &&
10713+ if ((filp || (flags & MAP_SHARED)) &&
10714 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10715 return -EINVAL;
10716 return addr;
10717@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10718 if (filp || (flags & MAP_SHARED))
10719 do_color_align = 1;
10720
10721+#ifdef CONFIG_PAX_RANDMMAP
10722+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10723+#endif
10724+
10725 /* requesting a specific address */
10726 if (addr) {
10727 if (do_color_align)
10728@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10729 addr = PAGE_ALIGN(addr);
10730
10731 vma = find_vma(mm, addr);
10732- if (task_size - len >= addr &&
10733- (!vma || addr + len <= vma->vm_start))
10734+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10735 return addr;
10736 }
10737
10738@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10739 info.high_limit = mm->mmap_base;
10740 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10741 info.align_offset = pgoff << PAGE_SHIFT;
10742+ info.threadstack_offset = offset;
10743 addr = vm_unmapped_area(&info);
10744
10745 /*
10746@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10747 VM_BUG_ON(addr != -ENOMEM);
10748 info.flags = 0;
10749 info.low_limit = TASK_UNMAPPED_BASE;
10750+
10751+#ifdef CONFIG_PAX_RANDMMAP
10752+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10753+ info.low_limit += mm->delta_mmap;
10754+#endif
10755+
10756 info.high_limit = STACK_TOP32;
10757 addr = vm_unmapped_area(&info);
10758 }
10759@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10760 EXPORT_SYMBOL(get_fb_unmapped_area);
10761
10762 /* Essentially the same as PowerPC. */
10763-static unsigned long mmap_rnd(void)
10764+static unsigned long mmap_rnd(struct mm_struct *mm)
10765 {
10766 unsigned long rnd = 0UL;
10767
10768+#ifdef CONFIG_PAX_RANDMMAP
10769+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10770+#endif
10771+
10772 if (current->flags & PF_RANDOMIZE) {
10773 unsigned long val = get_random_int();
10774 if (test_thread_flag(TIF_32BIT))
10775@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10776
10777 void arch_pick_mmap_layout(struct mm_struct *mm)
10778 {
10779- unsigned long random_factor = mmap_rnd();
10780+ unsigned long random_factor = mmap_rnd(mm);
10781 unsigned long gap;
10782
10783 /*
10784@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10785 gap == RLIM_INFINITY ||
10786 sysctl_legacy_va_layout) {
10787 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10788+
10789+#ifdef CONFIG_PAX_RANDMMAP
10790+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10791+ mm->mmap_base += mm->delta_mmap;
10792+#endif
10793+
10794 mm->get_unmapped_area = arch_get_unmapped_area;
10795 } else {
10796 /* We know it's 32-bit */
10797@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10798 gap = (task_size / 6 * 5);
10799
10800 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10801+
10802+#ifdef CONFIG_PAX_RANDMMAP
10803+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10804+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10805+#endif
10806+
10807 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10808 }
10809 }
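
With PAX_RANDMMAP, the hunks above bias both search limits: the bottom-up base and the legacy-layout base each gain mm->delta_mmap on top of the PF_RANDOMIZE factor, and the explicit-address fast path is skipped so a fixed hint cannot sidestep randomization. A back-of-the-envelope sketch of the base computation (all constants illustrative, not the kernel's values):

#include <stdio.h>

#define TASK_UNMAPPED_BASE      0x0000010000000000UL    /* illustrative */

int main(void)
{
        unsigned long random_factor = 0x3fff0000UL; /* stands in for mmap_rnd() */
        unsigned long delta_mmap    = 0x20000000UL; /* per-mm PaX delta (assumed) */
        unsigned long mmap_base = TASK_UNMAPPED_BASE + random_factor + delta_mmap;

        printf("mmap_base = %#lx\n", mmap_base);
        return 0;
}
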
10810diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10811index bb00089..e0ea580 100644
10812--- a/arch/sparc/kernel/syscalls.S
10813+++ b/arch/sparc/kernel/syscalls.S
10814@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10815 #endif
10816 .align 32
10817 1: ldx [%g6 + TI_FLAGS], %l5
10818- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10819+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10820 be,pt %icc, rtrap
10821 nop
10822 call syscall_trace_leave
10823@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10824
10825 srl %i3, 0, %o3 ! IEU0
10826 srl %i2, 0, %o2 ! IEU0 Group
10827- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10828+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10829 bne,pn %icc, linux_syscall_trace32 ! CTI
10830 mov %i0, %l5 ! IEU1
10831 5: call %l7 ! CTI Group brk forced
10832@@ -218,7 +218,7 @@ linux_sparc_syscall:
10833
10834 mov %i3, %o3 ! IEU1
10835 mov %i4, %o4 ! IEU0 Group
10836- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10837+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10838 bne,pn %icc, linux_syscall_trace ! CTI Group
10839 mov %i0, %l5 ! IEU0
10840 2: call %l7 ! CTI Group brk forced
10841@@ -233,7 +233,7 @@ ret_sys_call:
10842
10843 cmp %o0, -ERESTART_RESTARTBLOCK
10844 bgeu,pn %xcc, 1f
10845- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10846+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10847 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10848
10849 2:
10850diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10851index 6fd386c5..6907d81 100644
10852--- a/arch/sparc/kernel/traps_32.c
10853+++ b/arch/sparc/kernel/traps_32.c
10854@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10855 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10856 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10857
10858+extern void gr_handle_kernel_exploit(void);
10859+
10860 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10861 {
10862 static int die_counter;
10863@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10864 count++ < 30 &&
10865 (((unsigned long) rw) >= PAGE_OFFSET) &&
10866 !(((unsigned long) rw) & 0x7)) {
10867- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10868+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10869 (void *) rw->ins[7]);
10870 rw = (struct reg_window32 *)rw->ins[6];
10871 }
10872 }
10873 printk("Instruction DUMP:");
10874 instruction_dump ((unsigned long *) regs->pc);
10875- if(regs->psr & PSR_PS)
10876+ if(regs->psr & PSR_PS) {
10877+ gr_handle_kernel_exploit();
10878 do_exit(SIGKILL);
10879+ }
10880 do_exit(SIGSEGV);
10881 }
10882
10883diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10884index 981a769..d906eda 100644
10885--- a/arch/sparc/kernel/traps_64.c
10886+++ b/arch/sparc/kernel/traps_64.c
10887@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10888 i + 1,
10889 p->trapstack[i].tstate, p->trapstack[i].tpc,
10890 p->trapstack[i].tnpc, p->trapstack[i].tt);
10891- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10892+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10893 }
10894 }
10895
10896@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10897
10898 lvl -= 0x100;
10899 if (regs->tstate & TSTATE_PRIV) {
10900+
10901+#ifdef CONFIG_PAX_REFCOUNT
10902+ if (lvl == 6)
10903+ pax_report_refcount_overflow(regs);
10904+#endif
10905+
10906 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10907 die_if_kernel(buffer, regs);
10908 }
10909@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10910 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10911 {
10912 char buffer[32];
10913-
10914+
10915 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10916 0, lvl, SIGTRAP) == NOTIFY_STOP)
10917 return;
10918
10919+#ifdef CONFIG_PAX_REFCOUNT
10920+ if (lvl == 6)
10921+ pax_report_refcount_overflow(regs);
10922+#endif
10923+
10924 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10925
10926 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10927@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
10928 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
10929 printk("%s" "ERROR(%d): ",
10930 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
10931- printk("TPC<%pS>\n", (void *) regs->tpc);
10932+ printk("TPC<%pA>\n", (void *) regs->tpc);
10933 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
10934 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
10935 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
10936@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10937 smp_processor_id(),
10938 (type & 0x1) ? 'I' : 'D',
10939 regs->tpc);
10940- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
10941+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
10942 panic("Irrecoverable Cheetah+ parity error.");
10943 }
10944
10945@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10946 smp_processor_id(),
10947 (type & 0x1) ? 'I' : 'D',
10948 regs->tpc);
10949- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
10950+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
10951 }
10952
10953 struct sun4v_error_entry {
10954@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
10955 /*0x38*/u64 reserved_5;
10956 };
10957
10958-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10959-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10960+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10961+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10962
10963 static const char *sun4v_err_type_to_str(u8 type)
10964 {
10965@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
10966 }
10967
10968 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10969- int cpu, const char *pfx, atomic_t *ocnt)
10970+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
10971 {
10972 u64 *raw_ptr = (u64 *) ent;
10973 u32 attrs;
10974@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10975
10976 show_regs(regs);
10977
10978- if ((cnt = atomic_read(ocnt)) != 0) {
10979- atomic_set(ocnt, 0);
10980+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
10981+ atomic_set_unchecked(ocnt, 0);
10982 wmb();
10983 printk("%s: Queue overflowed %d times.\n",
10984 pfx, cnt);
10985@@ -2048,7 +2059,7 @@ out:
10986 */
10987 void sun4v_resum_overflow(struct pt_regs *regs)
10988 {
10989- atomic_inc(&sun4v_resum_oflow_cnt);
10990+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
10991 }
10992
10993 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
10994@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
10995 /* XXX Actually even this can make not that much sense. Perhaps
10996 * XXX we should just pull the plug and panic directly from here?
10997 */
10998- atomic_inc(&sun4v_nonresum_oflow_cnt);
10999+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11000 }
11001
11002 static void sun4v_tlb_error(struct pt_regs *regs)
11003@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11004
11005 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11006 regs->tpc, tl);
11007- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11008+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11009 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11010- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11011+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11012 (void *) regs->u_regs[UREG_I7]);
11013 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11014 "pte[%lx] error[%lx]\n",
11015@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11016
11017 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11018 regs->tpc, tl);
11019- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11020+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11021 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11022- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11023+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11024 (void *) regs->u_regs[UREG_I7]);
11025 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11026 "pte[%lx] error[%lx]\n",
11027@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11028 fp = (unsigned long)sf->fp + STACK_BIAS;
11029 }
11030
11031- printk(" [%016lx] %pS\n", pc, (void *) pc);
11032+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11033 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11034 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11035 int index = tsk->curr_ret_stack;
11036 if (tsk->ret_stack && index >= graph) {
11037 pc = tsk->ret_stack[index - graph].ret;
11038- printk(" [%016lx] %pS\n", pc, (void *) pc);
11039+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11040 graph++;
11041 }
11042 }
11043@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11044 return (struct reg_window *) (fp + STACK_BIAS);
11045 }
11046
11047+extern void gr_handle_kernel_exploit(void);
11048+
11049 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11050 {
11051 static int die_counter;
11052@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11053 while (rw &&
11054 count++ < 30 &&
11055 kstack_valid(tp, (unsigned long) rw)) {
11056- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11057+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11058 (void *) rw->ins[7]);
11059
11060 rw = kernel_stack_up(rw);
11061@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11062 }
11063 user_instruction_dump ((unsigned int __user *) regs->tpc);
11064 }
11065- if (regs->tstate & TSTATE_PRIV)
11066+ if (regs->tstate & TSTATE_PRIV) {
11067+ gr_handle_kernel_exploit();
11068 do_exit(SIGKILL);
11069+ }
11070 do_exit(SIGSEGV);
11071 }
11072 EXPORT_SYMBOL(die_if_kernel);
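
This is where the tvs traps planted in the atomic and rwlock code land: a software trap N arrives as trap level 0x100 + N, bad_trap() subtracts 0x100, and the new check reports a refcount overflow for lvl == 6 before the usual die path runs. A compressed model of that dispatch:

#include <stdio.h>

static void handle_bad_trap(long lvl, int from_kernel)
{
        lvl -= 0x100;                           /* as bad_trap() does */
        if (from_kernel && lvl == 6) {
                puts("PAX: refcount overflow"); /* pax_report_refcount_overflow() */
                return;
        }
        printf("Kernel bad sw trap %lx\n", lvl);
}

int main(void)
{
        handle_bad_trap(0x106, 1);      /* what "tvs %icc, 6" delivers */
        return 0;
}
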
11073diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11074index 62098a8..547ab2c 100644
11075--- a/arch/sparc/kernel/unaligned_64.c
11076+++ b/arch/sparc/kernel/unaligned_64.c
11077@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11078 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11079
11080 if (__ratelimit(&ratelimit)) {
11081- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11082+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11083 regs->tpc, (void *) regs->tpc);
11084 }
11085 }
11086diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11087index 3269b02..64f5231 100644
11088--- a/arch/sparc/lib/Makefile
11089+++ b/arch/sparc/lib/Makefile
11090@@ -2,7 +2,7 @@
11091 #
11092
11093 asflags-y := -ansi -DST_DIV0=0x02
11094-ccflags-y := -Werror
11095+#ccflags-y := -Werror
11096
11097 lib-$(CONFIG_SPARC32) += ashrdi3.o
11098 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11099diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11100index 05dac43..76f8ed4 100644
11101--- a/arch/sparc/lib/atomic_64.S
11102+++ b/arch/sparc/lib/atomic_64.S
11103@@ -15,11 +15,22 @@
11104 * a value and does the barriers.
11105 */
11106
11107-#define ATOMIC_OP(op) \
11108-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11109+#ifdef CONFIG_PAX_REFCOUNT
11110+#define __REFCOUNT_OP(op) op##cc
11111+#define __OVERFLOW_IOP tvs %icc, 6;
11112+#define __OVERFLOW_XOP tvs %xcc, 6;
11113+#else
11114+#define __REFCOUNT_OP(op) op
11115+#define __OVERFLOW_IOP
11116+#define __OVERFLOW_XOP
11117+#endif
11118+
11119+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11120+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11121 BACKOFF_SETUP(%o2); \
11122 1: lduw [%o1], %g1; \
11123- op %g1, %o0, %g7; \
11124+ asm_op %g1, %o0, %g7; \
11125+ post_op \
11126 cas [%o1], %g1, %g7; \
11127 cmp %g1, %g7; \
11128 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11129@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11130 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11131 ENDPROC(atomic_##op); \
11132
11133-#define ATOMIC_OP_RETURN(op) \
11134-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11135+#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
11136+ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11137+
11138+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11139+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11140 BACKOFF_SETUP(%o2); \
11141 1: lduw [%o1], %g1; \
11142- op %g1, %o0, %g7; \
11143+ asm_op %g1, %o0, %g7; \
11144+ post_op \
11145 cas [%o1], %g1, %g7; \
11146 cmp %g1, %g7; \
11147 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11148@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11149 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11150 ENDPROC(atomic_##op##_return);
11151
11152+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
11153+ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11154+
11155 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11156
11157 ATOMIC_OPS(add)
11158@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11159
11160 #undef ATOMIC_OPS
11161 #undef ATOMIC_OP_RETURN
11162+#undef __ATOMIC_OP_RETURN
11163 #undef ATOMIC_OP
11164+#undef __ATOMIC_OP
11165
11166-#define ATOMIC64_OP(op) \
11167-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11168+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11169+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11170 BACKOFF_SETUP(%o2); \
11171 1: ldx [%o1], %g1; \
11172- op %g1, %o0, %g7; \
11173+ asm_op %g1, %o0, %g7; \
11174+ post_op \
11175 casx [%o1], %g1, %g7; \
11176 cmp %g1, %g7; \
11177 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11178@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11179 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11180 ENDPROC(atomic64_##op); \
11181
11182-#define ATOMIC64_OP_RETURN(op) \
11183-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11184+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
11185+ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11186+
11187+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11188+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11189 BACKOFF_SETUP(%o2); \
11190 1: ldx [%o1], %g1; \
11191- op %g1, %o0, %g7; \
11192+ asm_op %g1, %o0, %g7; \
11193+ post_op \
11194 casx [%o1], %g1, %g7; \
11195 cmp %g1, %g7; \
11196 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11197@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11198 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11199 ENDPROC(atomic64_##op##_return);
11200
11201+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
11202+ __ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11203+
11204 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11205
11206 ATOMIC64_OPS(add)
11207@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11208
11209 #undef ATOMIC64_OPS
11210 #undef ATOMIC64_OP_RETURN
11211+#undef __ATOMIC64_OP_RETURN
11212 #undef ATOMIC64_OP
11213+#undef __ATOMIC64_OP
11214+#undef __OVERFLOW_XOP
11215+#undef __OVERFLOW_IOP
11216+#undef __REFCOUNT_OP
11217
11218 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11219 BACKOFF_SETUP(%o2)
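
Each op now expands to four entry points, checked and _unchecked in both plain and _return flavors, via token pasting. A quick preprocessor demonstration of the names generated for "add" (stub macro, no SPARC toolchain needed); this is also the naming scheme the ksyms.c exports below must match:

#include <stdio.h>

#define STR(x)  #x
#define NAMES(op) \
        STR(atomic_##op) "\n" STR(atomic_##op##_unchecked) "\n" \
        STR(atomic_##op##_return) "\n" STR(atomic_##op##_return_unchecked) "\n"

int main(void)
{
        fputs(NAMES(add), stdout);      /* the four symbols for "add" */
        return 0;
}
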
11220diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11221index 1d649a9..fbc5bfc 100644
11222--- a/arch/sparc/lib/ksyms.c
11223+++ b/arch/sparc/lib/ksyms.c
11224@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11225 /* Atomic counter implementation. */
11226 #define ATOMIC_OP(op) \
11227 EXPORT_SYMBOL(atomic_##op); \
11228-EXPORT_SYMBOL(atomic64_##op);
11229+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11230+EXPORT_SYMBOL(atomic64_##op); \
11231+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11232
11233 #define ATOMIC_OP_RETURN(op) \
11234 EXPORT_SYMBOL(atomic_##op##_return); \
11235@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11236 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11237
11238 ATOMIC_OPS(add)
11239+EXPORT_SYMBOL(atomic_add_return_unchecked);
11240+EXPORT_SYMBOL(atomic64_add_return_unchecked);
11241 ATOMIC_OPS(sub)
11242
11243 #undef ATOMIC_OPS
11244diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11245index 30c3ecc..736f015 100644
11246--- a/arch/sparc/mm/Makefile
11247+++ b/arch/sparc/mm/Makefile
11248@@ -2,7 +2,7 @@
11249 #
11250
11251 asflags-y := -ansi
11252-ccflags-y := -Werror
11253+#ccflags-y := -Werror
11254
11255 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11256 obj-y += fault_$(BITS).o
11257diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11258index 70d8171..274c6c0 100644
11259--- a/arch/sparc/mm/fault_32.c
11260+++ b/arch/sparc/mm/fault_32.c
11261@@ -21,6 +21,9 @@
11262 #include <linux/perf_event.h>
11263 #include <linux/interrupt.h>
11264 #include <linux/kdebug.h>
11265+#include <linux/slab.h>
11266+#include <linux/pagemap.h>
11267+#include <linux/compiler.h>
11268
11269 #include <asm/page.h>
11270 #include <asm/pgtable.h>
11271@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11272 return safe_compute_effective_address(regs, insn);
11273 }
11274
11275+#ifdef CONFIG_PAX_PAGEEXEC
11276+#ifdef CONFIG_PAX_DLRESOLVE
11277+static void pax_emuplt_close(struct vm_area_struct *vma)
11278+{
11279+ vma->vm_mm->call_dl_resolve = 0UL;
11280+}
11281+
11282+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11283+{
11284+ unsigned int *kaddr;
11285+
11286+ vmf->page = alloc_page(GFP_HIGHUSER);
11287+ if (!vmf->page)
11288+ return VM_FAULT_OOM;
11289+
11290+ kaddr = kmap(vmf->page);
11291+ memset(kaddr, 0, PAGE_SIZE);
11292+ kaddr[0] = 0x9DE3BFA8U; /* save */
11293+ flush_dcache_page(vmf->page);
11294+ kunmap(vmf->page);
11295+ return VM_FAULT_MAJOR;
11296+}
11297+
11298+static const struct vm_operations_struct pax_vm_ops = {
11299+ .close = pax_emuplt_close,
11300+ .fault = pax_emuplt_fault
11301+};
11302+
11303+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11304+{
11305+ int ret;
11306+
11307+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11308+ vma->vm_mm = current->mm;
11309+ vma->vm_start = addr;
11310+ vma->vm_end = addr + PAGE_SIZE;
11311+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11312+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11313+ vma->vm_ops = &pax_vm_ops;
11314+
11315+ ret = insert_vm_struct(current->mm, vma);
11316+ if (ret)
11317+ return ret;
11318+
11319+ ++current->mm->total_vm;
11320+ return 0;
11321+}
11322+#endif
11323+
11324+/*
11325+ * PaX: decide what to do with offenders (regs->pc = fault address)
11326+ *
11327+ * returns 1 when task should be killed
11328+ * 2 when patched PLT trampoline was detected
11329+ * 3 when unpatched PLT trampoline was detected
11330+ */
11331+static int pax_handle_fetch_fault(struct pt_regs *regs)
11332+{
11333+
11334+#ifdef CONFIG_PAX_EMUPLT
11335+ int err;
11336+
11337+ do { /* PaX: patched PLT emulation #1 */
11338+ unsigned int sethi1, sethi2, jmpl;
11339+
11340+ err = get_user(sethi1, (unsigned int *)regs->pc);
11341+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11342+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11343+
11344+ if (err)
11345+ break;
11346+
11347+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11348+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11349+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11350+ {
11351+ unsigned int addr;
11352+
11353+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11354+ addr = regs->u_regs[UREG_G1];
11355+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11356+ regs->pc = addr;
11357+ regs->npc = addr+4;
11358+ return 2;
11359+ }
11360+ } while (0);
11361+
11362+ do { /* PaX: patched PLT emulation #2 */
11363+ unsigned int ba;
11364+
11365+ err = get_user(ba, (unsigned int *)regs->pc);
11366+
11367+ if (err)
11368+ break;
11369+
11370+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11371+ unsigned int addr;
11372+
11373+ if ((ba & 0xFFC00000U) == 0x30800000U)
11374+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11375+ else
11376+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11377+ regs->pc = addr;
11378+ regs->npc = addr+4;
11379+ return 2;
11380+ }
11381+ } while (0);
11382+
11383+ do { /* PaX: patched PLT emulation #3 */
11384+ unsigned int sethi, bajmpl, nop;
11385+
11386+ err = get_user(sethi, (unsigned int *)regs->pc);
11387+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11388+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11389+
11390+ if (err)
11391+ break;
11392+
11393+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11394+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11395+ nop == 0x01000000U)
11396+ {
11397+ unsigned int addr;
11398+
11399+ addr = (sethi & 0x003FFFFFU) << 10;
11400+ regs->u_regs[UREG_G1] = addr;
11401+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11402+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11403+ else
11404+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11405+ regs->pc = addr;
11406+ regs->npc = addr+4;
11407+ return 2;
11408+ }
11409+ } while (0);
11410+
11411+ do { /* PaX: unpatched PLT emulation step 1 */
11412+ unsigned int sethi, ba, nop;
11413+
11414+ err = get_user(sethi, (unsigned int *)regs->pc);
11415+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11416+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11417+
11418+ if (err)
11419+ break;
11420+
11421+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11422+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11423+ nop == 0x01000000U)
11424+ {
11425+ unsigned int addr, save, call;
11426+
11427+ if ((ba & 0xFFC00000U) == 0x30800000U)
11428+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11429+ else
11430+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11431+
11432+ err = get_user(save, (unsigned int *)addr);
11433+ err |= get_user(call, (unsigned int *)(addr+4));
11434+ err |= get_user(nop, (unsigned int *)(addr+8));
11435+ if (err)
11436+ break;
11437+
11438+#ifdef CONFIG_PAX_DLRESOLVE
11439+ if (save == 0x9DE3BFA8U &&
11440+ (call & 0xC0000000U) == 0x40000000U &&
11441+ nop == 0x01000000U)
11442+ {
11443+ struct vm_area_struct *vma;
11444+ unsigned long call_dl_resolve;
11445+
11446+ down_read(&current->mm->mmap_sem);
11447+ call_dl_resolve = current->mm->call_dl_resolve;
11448+ up_read(&current->mm->mmap_sem);
11449+ if (likely(call_dl_resolve))
11450+ goto emulate;
11451+
11452+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11453+
11454+ down_write(&current->mm->mmap_sem);
11455+ if (current->mm->call_dl_resolve) {
11456+ call_dl_resolve = current->mm->call_dl_resolve;
11457+ up_write(&current->mm->mmap_sem);
11458+ if (vma)
11459+ kmem_cache_free(vm_area_cachep, vma);
11460+ goto emulate;
11461+ }
11462+
11463+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11464+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11465+ up_write(&current->mm->mmap_sem);
11466+ if (vma)
11467+ kmem_cache_free(vm_area_cachep, vma);
11468+ return 1;
11469+ }
11470+
11471+ if (pax_insert_vma(vma, call_dl_resolve)) {
11472+ up_write(&current->mm->mmap_sem);
11473+ kmem_cache_free(vm_area_cachep, vma);
11474+ return 1;
11475+ }
11476+
11477+ current->mm->call_dl_resolve = call_dl_resolve;
11478+ up_write(&current->mm->mmap_sem);
11479+
11480+emulate:
11481+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11482+ regs->pc = call_dl_resolve;
11483+ regs->npc = addr+4;
11484+ return 3;
11485+ }
11486+#endif
11487+
11488+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11489+ if ((save & 0xFFC00000U) == 0x05000000U &&
11490+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11491+ nop == 0x01000000U)
11492+ {
11493+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11494+ regs->u_regs[UREG_G2] = addr + 4;
11495+ addr = (save & 0x003FFFFFU) << 10;
11496+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11497+ regs->pc = addr;
11498+ regs->npc = addr+4;
11499+ return 3;
11500+ }
11501+ }
11502+ } while (0);
11503+
11504+ do { /* PaX: unpatched PLT emulation step 2 */
11505+ unsigned int save, call, nop;
11506+
11507+ err = get_user(save, (unsigned int *)(regs->pc-4));
11508+ err |= get_user(call, (unsigned int *)regs->pc);
11509+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11510+ if (err)
11511+ break;
11512+
11513+ if (save == 0x9DE3BFA8U &&
11514+ (call & 0xC0000000U) == 0x40000000U &&
11515+ nop == 0x01000000U)
11516+ {
11517+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11518+
11519+ regs->u_regs[UREG_RETPC] = regs->pc;
11520+ regs->pc = dl_resolve;
11521+ regs->npc = dl_resolve+4;
11522+ return 3;
11523+ }
11524+ } while (0);
11525+#endif
11526+
11527+ return 1;
11528+}
11529+
11530+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11531+{
11532+ unsigned long i;
11533+
11534+ printk(KERN_ERR "PAX: bytes at PC: ");
11535+ for (i = 0; i < 8; i++) {
11536+ unsigned int c;
11537+ if (get_user(c, (unsigned int *)pc+i))
11538+ printk(KERN_CONT "???????? ");
11539+ else
11540+ printk(KERN_CONT "%08x ", c);
11541+ }
11542+ printk("\n");
11543+}
11544+#endif
11545+
11546 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11547 int text_fault)
11548 {
11549@@ -226,6 +500,24 @@ good_area:
11550 if (!(vma->vm_flags & VM_WRITE))
11551 goto bad_area;
11552 } else {
11553+
11554+#ifdef CONFIG_PAX_PAGEEXEC
11555+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11556+ up_read(&mm->mmap_sem);
11557+ switch (pax_handle_fetch_fault(regs)) {
11558+
11559+#ifdef CONFIG_PAX_EMUPLT
11560+ case 2:
11561+ case 3:
11562+ return;
11563+#endif
11564+
11565+ }
11566+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11567+ do_group_exit(SIGKILL);
11568+ }
11569+#endif
11570+
11571 /* Allow reads even for write-only mappings */
11572 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11573 goto bad_area;
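
The 32-bit emulation above keeps recovering signed branch and jmpl displacements from SPARC instruction words with the branch-free idiom ((insn | ~field_mask) ^ sign_bit) + sign_bit. Below is a minimal userspace sketch of that trick for the 13-bit simm13 field, checked against conventional shift-based sign extension (the function names are illustrative, not from the patch; the shift variant assumes the arithmetic right shift GCC and Clang provide):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Branch-free sign extension as used in the PLT emulation above:
 * OR in every bit above the field, XOR the sign bit, add it back. */
static uint32_t simm13_pax(uint32_t insn)
{
	return ((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
}

/* Conventional sign extension of the low 13 bits, for comparison. */
static uint32_t simm13_shift(uint32_t insn)
{
	return (uint32_t)((int32_t)(insn << 19) >> 19);
}

int main(void)
{
	uint32_t v;

	for (v = 0; v < 0x2000U; v++)	/* all 13-bit field values */
		assert(simm13_pax(v) == simm13_shift(v));
	printf("simm13: 0x7ff -> %d, 0x1000 -> %d\n",
	       (int)simm13_pax(0x07FFU), (int)simm13_pax(0x1000U));
	return 0;
}

The same pattern with wider masks handles the 19- and 22-bit branch displacements matched above.
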
11574diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11575index 4798232..f76e3aa 100644
11576--- a/arch/sparc/mm/fault_64.c
11577+++ b/arch/sparc/mm/fault_64.c
11578@@ -22,6 +22,9 @@
11579 #include <linux/kdebug.h>
11580 #include <linux/percpu.h>
11581 #include <linux/context_tracking.h>
11582+#include <linux/slab.h>
11583+#include <linux/pagemap.h>
11584+#include <linux/compiler.h>
11585
11586 #include <asm/page.h>
11587 #include <asm/pgtable.h>
11588@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11589 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11590 regs->tpc);
11591 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11592- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11593+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11594 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11595 dump_stack();
11596 unhandled_fault(regs->tpc, current, regs);
11597@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11598 show_regs(regs);
11599 }
11600
11601+#ifdef CONFIG_PAX_PAGEEXEC
11602+#ifdef CONFIG_PAX_DLRESOLVE
11603+static void pax_emuplt_close(struct vm_area_struct *vma)
11604+{
11605+ vma->vm_mm->call_dl_resolve = 0UL;
11606+}
11607+
11608+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11609+{
11610+ unsigned int *kaddr;
11611+
11612+ vmf->page = alloc_page(GFP_HIGHUSER);
11613+ if (!vmf->page)
11614+ return VM_FAULT_OOM;
11615+
11616+ kaddr = kmap(vmf->page);
11617+ memset(kaddr, 0, PAGE_SIZE);
11618+ kaddr[0] = 0x9DE3BFA8U; /* save */
11619+ flush_dcache_page(vmf->page);
11620+ kunmap(vmf->page);
11621+ return VM_FAULT_MAJOR;
11622+}
11623+
11624+static const struct vm_operations_struct pax_vm_ops = {
11625+ .close = pax_emuplt_close,
11626+ .fault = pax_emuplt_fault
11627+};
11628+
11629+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11630+{
11631+ int ret;
11632+
11633+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11634+ vma->vm_mm = current->mm;
11635+ vma->vm_start = addr;
11636+ vma->vm_end = addr + PAGE_SIZE;
11637+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11638+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11639+ vma->vm_ops = &pax_vm_ops;
11640+
11641+ ret = insert_vm_struct(current->mm, vma);
11642+ if (ret)
11643+ return ret;
11644+
11645+ ++current->mm->total_vm;
11646+ return 0;
11647+}
11648+#endif
11649+
11650+/*
11651+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11652+ *
11653+ * returns 1 when task should be killed
11654+ * 2 when patched PLT trampoline was detected
11655+ * 3 when unpatched PLT trampoline was detected
11656+ */
11657+static int pax_handle_fetch_fault(struct pt_regs *regs)
11658+{
11659+
11660+#ifdef CONFIG_PAX_EMUPLT
11661+ int err;
11662+
11663+ do { /* PaX: patched PLT emulation #1 */
11664+ unsigned int sethi1, sethi2, jmpl;
11665+
11666+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11667+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11668+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11669+
11670+ if (err)
11671+ break;
11672+
11673+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11674+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11675+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11676+ {
11677+ unsigned long addr;
11678+
11679+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11680+ addr = regs->u_regs[UREG_G1];
11681+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11682+
11683+ if (test_thread_flag(TIF_32BIT))
11684+ addr &= 0xFFFFFFFFUL;
11685+
11686+ regs->tpc = addr;
11687+ regs->tnpc = addr+4;
11688+ return 2;
11689+ }
11690+ } while (0);
11691+
11692+ do { /* PaX: patched PLT emulation #2 */
11693+ unsigned int ba;
11694+
11695+ err = get_user(ba, (unsigned int *)regs->tpc);
11696+
11697+ if (err)
11698+ break;
11699+
11700+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11701+ unsigned long addr;
11702+
11703+ if ((ba & 0xFFC00000U) == 0x30800000U)
11704+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11705+ else
11706+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11707+
11708+ if (test_thread_flag(TIF_32BIT))
11709+ addr &= 0xFFFFFFFFUL;
11710+
11711+ regs->tpc = addr;
11712+ regs->tnpc = addr+4;
11713+ return 2;
11714+ }
11715+ } while (0);
11716+
11717+ do { /* PaX: patched PLT emulation #3 */
11718+ unsigned int sethi, bajmpl, nop;
11719+
11720+ err = get_user(sethi, (unsigned int *)regs->tpc);
11721+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11722+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11723+
11724+ if (err)
11725+ break;
11726+
11727+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11728+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11729+ nop == 0x01000000U)
11730+ {
11731+ unsigned long addr;
11732+
11733+ addr = (sethi & 0x003FFFFFU) << 10;
11734+ regs->u_regs[UREG_G1] = addr;
11735+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11736+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11737+ else
11738+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11739+
11740+ if (test_thread_flag(TIF_32BIT))
11741+ addr &= 0xFFFFFFFFUL;
11742+
11743+ regs->tpc = addr;
11744+ regs->tnpc = addr+4;
11745+ return 2;
11746+ }
11747+ } while (0);
11748+
11749+ do { /* PaX: patched PLT emulation #4 */
11750+ unsigned int sethi, mov1, call, mov2;
11751+
11752+ err = get_user(sethi, (unsigned int *)regs->tpc);
11753+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11754+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11755+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11756+
11757+ if (err)
11758+ break;
11759+
11760+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11761+ mov1 == 0x8210000FU &&
11762+ (call & 0xC0000000U) == 0x40000000U &&
11763+ mov2 == 0x9E100001U)
11764+ {
11765+ unsigned long addr;
11766+
11767+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11768+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11769+
11770+ if (test_thread_flag(TIF_32BIT))
11771+ addr &= 0xFFFFFFFFUL;
11772+
11773+ regs->tpc = addr;
11774+ regs->tnpc = addr+4;
11775+ return 2;
11776+ }
11777+ } while (0);
11778+
11779+ do { /* PaX: patched PLT emulation #5 */
11780+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11781+
11782+ err = get_user(sethi, (unsigned int *)regs->tpc);
11783+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11784+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11785+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11786+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11787+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11788+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11789+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11790+
11791+ if (err)
11792+ break;
11793+
11794+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11795+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11796+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11797+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11798+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11799+ sllx == 0x83287020U &&
11800+ jmpl == 0x81C04005U &&
11801+ nop == 0x01000000U)
11802+ {
11803+ unsigned long addr;
11804+
11805+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11806+ regs->u_regs[UREG_G1] <<= 32;
11807+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11808+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11809+ regs->tpc = addr;
11810+ regs->tnpc = addr+4;
11811+ return 2;
11812+ }
11813+ } while (0);
11814+
11815+ do { /* PaX: patched PLT emulation #6 */
11816+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11817+
11818+ err = get_user(sethi, (unsigned int *)regs->tpc);
11819+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11820+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11821+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11822+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11823+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11824+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11825+
11826+ if (err)
11827+ break;
11828+
11829+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11830+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11831+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11832+ sllx == 0x83287020U &&
11833+ (or & 0xFFFFE000U) == 0x8A116000U &&
11834+ jmpl == 0x81C04005U &&
11835+ nop == 0x01000000U)
11836+ {
11837+ unsigned long addr;
11838+
11839+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11840+ regs->u_regs[UREG_G1] <<= 32;
11841+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11842+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11843+ regs->tpc = addr;
11844+ regs->tnpc = addr+4;
11845+ return 2;
11846+ }
11847+ } while (0);
11848+
11849+ do { /* PaX: unpatched PLT emulation step 1 */
11850+ unsigned int sethi, ba, nop;
11851+
11852+ err = get_user(sethi, (unsigned int *)regs->tpc);
11853+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11854+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11855+
11856+ if (err)
11857+ break;
11858+
11859+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11860+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11861+ nop == 0x01000000U)
11862+ {
11863+ unsigned long addr;
11864+ unsigned int save, call;
11865+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11866+
11867+ if ((ba & 0xFFC00000U) == 0x30800000U)
11868+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11869+ else
11870+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11871+
11872+ if (test_thread_flag(TIF_32BIT))
11873+ addr &= 0xFFFFFFFFUL;
11874+
11875+ err = get_user(save, (unsigned int *)addr);
11876+ err |= get_user(call, (unsigned int *)(addr+4));
11877+ err |= get_user(nop, (unsigned int *)(addr+8));
11878+ if (err)
11879+ break;
11880+
11881+#ifdef CONFIG_PAX_DLRESOLVE
11882+ if (save == 0x9DE3BFA8U &&
11883+ (call & 0xC0000000U) == 0x40000000U &&
11884+ nop == 0x01000000U)
11885+ {
11886+ struct vm_area_struct *vma;
11887+ unsigned long call_dl_resolve;
11888+
11889+ down_read(&current->mm->mmap_sem);
11890+ call_dl_resolve = current->mm->call_dl_resolve;
11891+ up_read(&current->mm->mmap_sem);
11892+ if (likely(call_dl_resolve))
11893+ goto emulate;
11894+
11895+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11896+
11897+ down_write(&current->mm->mmap_sem);
11898+ if (current->mm->call_dl_resolve) {
11899+ call_dl_resolve = current->mm->call_dl_resolve;
11900+ up_write(&current->mm->mmap_sem);
11901+ if (vma)
11902+ kmem_cache_free(vm_area_cachep, vma);
11903+ goto emulate;
11904+ }
11905+
11906+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11907+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11908+ up_write(&current->mm->mmap_sem);
11909+ if (vma)
11910+ kmem_cache_free(vm_area_cachep, vma);
11911+ return 1;
11912+ }
11913+
11914+ if (pax_insert_vma(vma, call_dl_resolve)) {
11915+ up_write(&current->mm->mmap_sem);
11916+ kmem_cache_free(vm_area_cachep, vma);
11917+ return 1;
11918+ }
11919+
11920+ current->mm->call_dl_resolve = call_dl_resolve;
11921+ up_write(&current->mm->mmap_sem);
11922+
11923+emulate:
11924+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11925+ regs->tpc = call_dl_resolve;
11926+ regs->tnpc = addr+4;
11927+ return 3;
11928+ }
11929+#endif
11930+
11931+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11932+ if ((save & 0xFFC00000U) == 0x05000000U &&
11933+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11934+ nop == 0x01000000U)
11935+ {
11936+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11937+ regs->u_regs[UREG_G2] = addr + 4;
11938+ addr = (save & 0x003FFFFFU) << 10;
11939+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11940+
11941+ if (test_thread_flag(TIF_32BIT))
11942+ addr &= 0xFFFFFFFFUL;
11943+
11944+ regs->tpc = addr;
11945+ regs->tnpc = addr+4;
11946+ return 3;
11947+ }
11948+
11949+ /* PaX: 64-bit PLT stub */
11950+ err = get_user(sethi1, (unsigned int *)addr);
11951+ err |= get_user(sethi2, (unsigned int *)(addr+4));
11952+ err |= get_user(or1, (unsigned int *)(addr+8));
11953+ err |= get_user(or2, (unsigned int *)(addr+12));
11954+ err |= get_user(sllx, (unsigned int *)(addr+16));
11955+ err |= get_user(add, (unsigned int *)(addr+20));
11956+ err |= get_user(jmpl, (unsigned int *)(addr+24));
11957+ err |= get_user(nop, (unsigned int *)(addr+28));
11958+ if (err)
11959+ break;
11960+
11961+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
11962+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11963+ (or1 & 0xFFFFE000U) == 0x88112000U &&
11964+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11965+ sllx == 0x89293020U &&
11966+ add == 0x8A010005U &&
11967+ jmpl == 0x89C14000U &&
11968+ nop == 0x01000000U)
11969+ {
11970+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11971+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11972+ regs->u_regs[UREG_G4] <<= 32;
11973+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11974+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
11975+ regs->u_regs[UREG_G4] = addr + 24;
11976+ addr = regs->u_regs[UREG_G5];
11977+ regs->tpc = addr;
11978+ regs->tnpc = addr+4;
11979+ return 3;
11980+ }
11981+ }
11982+ } while (0);
11983+
11984+#ifdef CONFIG_PAX_DLRESOLVE
11985+ do { /* PaX: unpatched PLT emulation step 2 */
11986+ unsigned int save, call, nop;
11987+
11988+ err = get_user(save, (unsigned int *)(regs->tpc-4));
11989+ err |= get_user(call, (unsigned int *)regs->tpc);
11990+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
11991+ if (err)
11992+ break;
11993+
11994+ if (save == 0x9DE3BFA8U &&
11995+ (call & 0xC0000000U) == 0x40000000U &&
11996+ nop == 0x01000000U)
11997+ {
11998+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11999+
12000+ if (test_thread_flag(TIF_32BIT))
12001+ dl_resolve &= 0xFFFFFFFFUL;
12002+
12003+ regs->u_regs[UREG_RETPC] = regs->tpc;
12004+ regs->tpc = dl_resolve;
12005+ regs->tnpc = dl_resolve+4;
12006+ return 3;
12007+ }
12008+ } while (0);
12009+#endif
12010+
12011+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12012+ unsigned int sethi, ba, nop;
12013+
12014+ err = get_user(sethi, (unsigned int *)regs->tpc);
12015+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12016+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12017+
12018+ if (err)
12019+ break;
12020+
12021+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12022+ (ba & 0xFFF00000U) == 0x30600000U &&
12023+ nop == 0x01000000U)
12024+ {
12025+ unsigned long addr;
12026+
12027+ addr = (sethi & 0x003FFFFFU) << 10;
12028+ regs->u_regs[UREG_G1] = addr;
12029+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12030+
12031+ if (test_thread_flag(TIF_32BIT))
12032+ addr &= 0xFFFFFFFFUL;
12033+
12034+ regs->tpc = addr;
12035+ regs->tnpc = addr+4;
12036+ return 2;
12037+ }
12038+ } while (0);
12039+
12040+#endif
12041+
12042+ return 1;
12043+}
12044+
12045+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12046+{
12047+ unsigned long i;
12048+
12049+ printk(KERN_ERR "PAX: bytes at PC: ");
12050+ for (i = 0; i < 8; i++) {
12051+ unsigned int c;
12052+ if (get_user(c, (unsigned int *)pc+i))
12053+ printk(KERN_CONT "???????? ");
12054+ else
12055+ printk(KERN_CONT "%08x ", c);
12056+ }
12057+ printk("\n");
12058+}
12059+#endif
12060+
12061 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12062 {
12063 enum ctx_state prev_state = exception_enter();
12064@@ -353,6 +816,29 @@ retry:
12065 if (!vma)
12066 goto bad_area;
12067
12068+#ifdef CONFIG_PAX_PAGEEXEC
12069+ /* PaX: detect ITLB misses on non-exec pages */
12070+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12071+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12072+ {
12073+ if (address != regs->tpc)
12074+ goto good_area;
12075+
12076+ up_read(&mm->mmap_sem);
12077+ switch (pax_handle_fetch_fault(regs)) {
12078+
12079+#ifdef CONFIG_PAX_EMUPLT
12080+ case 2:
12081+ case 3:
12082+ return;
12083+#endif
12084+
12085+ }
12086+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12087+ do_group_exit(SIGKILL);
12088+ }
12089+#endif
12090+
12091 /* Pure DTLB misses do not tell us whether the fault causing
12092 * load/store/atomic was a write or not, it only says that there
12093 * was no match. So in such a case we (carefully) read the
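
Patched PLT emulations #5 and #6 above recognize the sequence used to load a full 64-bit PLT target: each sethi deposits a 22-bit immediate into bits 31..10 of its destination, each or fills in the low 10 bits, and sllx lifts the high pair up by 32 before the two halves are summed. A small sketch of the arithmetic only; the "instruction" words below are made-up field values, not valid SPARC encodings:

#include <stdint.h>
#include <stdio.h>

/* sethi places its imm22 field into bits 31..10 of the destination. */
static uint64_t sethi_imm(uint32_t insn)
{
	return (uint64_t)(insn & 0x003FFFFFU) << 10;
}

/* or with a small immediate fills in the low 10 bits. */
static uint64_t or_imm(uint32_t insn)
{
	return insn & 0x000003FFU;
}

int main(void)
{
	/* Hypothetical field values: the first pair builds the upper
	 * 32 bits, the second the lower 32, as in emulation #5. */
	uint64_t g1 = sethi_imm(0x0001234U) | or_imm(0x155U);
	uint64_t g5 = sethi_imm(0x00ABCDEU) | or_imm(0x2AAU);
	uint64_t target = (g1 << 32) + g5;

	printf("target = %#llx\n", (unsigned long long)target);
	return 0;
}
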
12094diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12095index d329537..2c3746a 100644
12096--- a/arch/sparc/mm/hugetlbpage.c
12097+++ b/arch/sparc/mm/hugetlbpage.c
12098@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12099 unsigned long addr,
12100 unsigned long len,
12101 unsigned long pgoff,
12102- unsigned long flags)
12103+ unsigned long flags,
12104+ unsigned long offset)
12105 {
12106+ struct mm_struct *mm = current->mm;
12107 unsigned long task_size = TASK_SIZE;
12108 struct vm_unmapped_area_info info;
12109
12110@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12111
12112 info.flags = 0;
12113 info.length = len;
12114- info.low_limit = TASK_UNMAPPED_BASE;
12115+ info.low_limit = mm->mmap_base;
12116 info.high_limit = min(task_size, VA_EXCLUDE_START);
12117 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12118 info.align_offset = 0;
12119+ info.threadstack_offset = offset;
12120 addr = vm_unmapped_area(&info);
12121
12122 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12123 VM_BUG_ON(addr != -ENOMEM);
12124 info.low_limit = VA_EXCLUDE_END;
12125+
12126+#ifdef CONFIG_PAX_RANDMMAP
12127+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12128+ info.low_limit += mm->delta_mmap;
12129+#endif
12130+
12131 info.high_limit = task_size;
12132 addr = vm_unmapped_area(&info);
12133 }
12134@@ -55,7 +64,8 @@ static unsigned long
12135 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12136 const unsigned long len,
12137 const unsigned long pgoff,
12138- const unsigned long flags)
12139+ const unsigned long flags,
12140+ const unsigned long offset)
12141 {
12142 struct mm_struct *mm = current->mm;
12143 unsigned long addr = addr0;
12144@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12145 info.high_limit = mm->mmap_base;
12146 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12147 info.align_offset = 0;
12148+ info.threadstack_offset = offset;
12149 addr = vm_unmapped_area(&info);
12150
12151 /*
12152@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12153 VM_BUG_ON(addr != -ENOMEM);
12154 info.flags = 0;
12155 info.low_limit = TASK_UNMAPPED_BASE;
12156+
12157+#ifdef CONFIG_PAX_RANDMMAP
12158+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12159+ info.low_limit += mm->delta_mmap;
12160+#endif
12161+
12162 info.high_limit = STACK_TOP32;
12163 addr = vm_unmapped_area(&info);
12164 }
12165@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12166 struct mm_struct *mm = current->mm;
12167 struct vm_area_struct *vma;
12168 unsigned long task_size = TASK_SIZE;
12169+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12170
12171 if (test_thread_flag(TIF_32BIT))
12172 task_size = STACK_TOP32;
12173@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12174 return addr;
12175 }
12176
12177+#ifdef CONFIG_PAX_RANDMMAP
12178+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12179+#endif
12180+
12181 if (addr) {
12182 addr = ALIGN(addr, HPAGE_SIZE);
12183 vma = find_vma(mm, addr);
12184- if (task_size - len >= addr &&
12185- (!vma || addr + len <= vma->vm_start))
12186+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12187 return addr;
12188 }
12189 if (mm->get_unmapped_area == arch_get_unmapped_area)
12190 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12191- pgoff, flags);
12192+ pgoff, flags, offset);
12193 else
12194 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12195- pgoff, flags);
12196+ pgoff, flags, offset);
12197 }
12198
12199 pte_t *huge_pte_alloc(struct mm_struct *mm,
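
The hugetlbpage hunk above starts the bottom-up search at mm->mmap_base and, when that window is exhausted and the search restarts from the lower region, lifts low_limit by mm->delta_mmap so a PAX_RANDMMAP task never falls back to a fully predictable floor. A toy model of that retry logic; everything here (search(), the window constants, delta_mmap) is a stand-in, not kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct window { uint64_t low, high; };

/* Stand-in for vm_unmapped_area(); the primary window is made
 * deliberately "full" so the fallback path is exercised. */
static bool search(struct window w, uint64_t len, uint64_t *addr)
{
	if (w.low + len > 0x70000000ULL)
		return false;
	*addr = w.low;
	return true;
}

int main(void)
{
	bool randmmap = true;			/* MF_PAX_RANDMMAP stand-in */
	uint64_t delta_mmap = 0x00200000ULL;	/* hypothetical random delta */
	uint64_t mmap_base  = 0x7F000000ULL;	/* randomized mmap floor */
	uint64_t len = 0x400000, addr;
	struct window w = { mmap_base, 0xF0000000ULL };

	if (!search(w, len, &addr)) {		/* primary window exhausted */
		w.low  = 0x10000000ULL;		/* TASK_UNMAPPED_BASE stand-in */
		if (randmmap)
			w.low += delta_mmap;	/* keep the randomized floor */
		w.high = 0xF0000000ULL;
		if (!search(w, len, &addr))
			return 1;
	}
	printf("placed at %#llx\n", (unsigned long long)addr);
	return 0;
}
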
12200diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12201index 3ea267c..93f0659 100644
12202--- a/arch/sparc/mm/init_64.c
12203+++ b/arch/sparc/mm/init_64.c
12204@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12205 int num_kernel_image_mappings;
12206
12207 #ifdef CONFIG_DEBUG_DCFLUSH
12208-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12209+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12210 #ifdef CONFIG_SMP
12211-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12212+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12213 #endif
12214 #endif
12215
12216@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12217 {
12218 BUG_ON(tlb_type == hypervisor);
12219 #ifdef CONFIG_DEBUG_DCFLUSH
12220- atomic_inc(&dcpage_flushes);
12221+ atomic_inc_unchecked(&dcpage_flushes);
12222 #endif
12223
12224 #ifdef DCACHE_ALIASING_POSSIBLE
12225@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12226
12227 #ifdef CONFIG_DEBUG_DCFLUSH
12228 seq_printf(m, "DCPageFlushes\t: %d\n",
12229- atomic_read(&dcpage_flushes));
12230+ atomic_read_unchecked(&dcpage_flushes));
12231 #ifdef CONFIG_SMP
12232 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12233- atomic_read(&dcpage_flushes_xcall));
12234+ atomic_read_unchecked(&dcpage_flushes_xcall));
12235 #endif /* CONFIG_SMP */
12236 #endif /* CONFIG_DEBUG_DCFLUSH */
12237 }
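
The counters above become atomic_unchecked_t because PaX's REFCOUNT hardening makes plain atomic_t trap on signed overflow; pure statistics such as these D-cache flush counts must not take the kernel down when they wrap, so they opt out, and (as the tile hunk below shows) architectures without the instrumentation simply alias the _unchecked operations to the normal ones. A userspace model of the checked/unchecked split, with an explicit saturation test standing in for the instrumented asm:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* "Checked" increment: refuse to wrap past INT_MAX, a stand-in for
 * the overflow trap REFCOUNT inserts around atomic_inc(). */
static int checked_inc(_Atomic int *v)
{
	int old = atomic_load(v);
	do {
		if (old == INT_MAX)
			return -1;	/* would overflow: reject */
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));
	return 0;
}

/* "Unchecked" increment: plain wrap-around, fine for statistics. */
static void unchecked_inc(_Atomic int *v)
{
	atomic_fetch_add(v, 1);
}

int main(void)
{
	_Atomic int refs = INT_MAX, stats = INT_MAX;

	if (checked_inc(&refs) < 0)
		puts("checked counter refused to overflow");
	unchecked_inc(&stats);	/* wraps to INT_MIN, harmless here */
	printf("stats counter wrapped to %d\n", atomic_load(&stats));
	return 0;
}
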
12238diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12239index 7cca418..53fc030 100644
12240--- a/arch/tile/Kconfig
12241+++ b/arch/tile/Kconfig
12242@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12243
12244 config KEXEC
12245 bool "kexec system call"
12246+ depends on !GRKERNSEC_KMEM
12247 ---help---
12248 kexec is a system call that implements the ability to shutdown your
12249 current kernel, and to start another kernel. It is like a reboot
12250diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12251index 7b11c5f..755a026 100644
12252--- a/arch/tile/include/asm/atomic_64.h
12253+++ b/arch/tile/include/asm/atomic_64.h
12254@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12255
12256 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12257
12258+#define atomic64_read_unchecked(v) atomic64_read(v)
12259+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12260+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12261+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12262+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12263+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12264+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12265+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12266+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12267+
12268 /* Define this to indicate that cmpxchg is an efficient operation. */
12269 #define __HAVE_ARCH_CMPXCHG
12270
12271diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12272index 6160761..00cac88 100644
12273--- a/arch/tile/include/asm/cache.h
12274+++ b/arch/tile/include/asm/cache.h
12275@@ -15,11 +15,12 @@
12276 #ifndef _ASM_TILE_CACHE_H
12277 #define _ASM_TILE_CACHE_H
12278
12279+#include <linux/const.h>
12280 #include <arch/chip.h>
12281
12282 /* bytes per L1 data cache line */
12283 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12284-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12285+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12286
12287 /* bytes per L2 cache line */
12288 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
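
The cache.h hunks here and below replace (1 << L1_CACHE_SHIFT) with (_AC(1,UL) << L1_CACHE_SHIFT): _AC, following the kernel's <uapi/linux/const.h>, pastes the UL suffix onto the literal in C but would drop it under __ASSEMBLY__, so the header yields an unsigned long in C expressions yet still assembles. A condensed copy of the idea:

#include <stdio.h>

/* Same trick as the kernel's <uapi/linux/const.h>: in C, paste the
 * suffix onto the literal; under __ASSEMBLY__ the suffix is dropped
 * because assemblers do not understand "1UL". */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)

int main(void)
{
	/* The UL literal keeps shifts and masks in unsigned long
	 * instead of letting them happen in (possibly 32-bit) int. */
	printf("line size %lu, mask %#lx\n",
	       L1_CACHE_BYTES, ~(L1_CACHE_BYTES - 1));
	return 0;
}
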
12289diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12290index b6cde32..c0cb736 100644
12291--- a/arch/tile/include/asm/uaccess.h
12292+++ b/arch/tile/include/asm/uaccess.h
12293@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12294 const void __user *from,
12295 unsigned long n)
12296 {
12297- int sz = __compiletime_object_size(to);
12298+ size_t sz = __compiletime_object_size(to);
12299
12300- if (likely(sz == -1 || sz >= n))
12301+ if (likely(sz == (size_t)-1 || sz >= n))
12302 n = _copy_from_user(to, from, n);
12303 else
12304 copy_from_user_overflow();
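
The uaccess.h fix matters because __compiletime_object_size() wraps GCC's __builtin_object_size(), whose "size unknown" answer is (size_t)-1; keeping the result in a size_t avoids both truncating very large object sizes through an int and comparing the sentinel with the wrong type. A quick demonstration of the builtin's contract (object_size() is a helper invented for the example):

#include <stdio.h>

/* Launder the pointer through a noinline call so the compiler cannot
 * see the underlying object: the builtin then reports "unknown". */
__attribute__((noinline))
static size_t object_size(char *p)
{
	return __builtin_object_size(p, 0);
}

int main(void)
{
	char buf[64];

	printf("known size:   %zu\n", __builtin_object_size(buf, 0));
	printf("unknown size: %zu (sentinel is (size_t)-1: %d)\n",
	       object_size(buf), object_size(buf) == (size_t)-1);
	return 0;
}
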
12305diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12306index 3270e00..a77236e 100644
12307--- a/arch/tile/mm/hugetlbpage.c
12308+++ b/arch/tile/mm/hugetlbpage.c
12309@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12310 info.high_limit = TASK_SIZE;
12311 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12312 info.align_offset = 0;
12313+ info.threadstack_offset = 0;
12314 return vm_unmapped_area(&info);
12315 }
12316
12317@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12318 info.high_limit = current->mm->mmap_base;
12319 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12320 info.align_offset = 0;
12321+ info.threadstack_offset = 0;
12322 addr = vm_unmapped_area(&info);
12323
12324 /*
12325diff --git a/arch/um/Makefile b/arch/um/Makefile
12326index e4b1a96..16162f8 100644
12327--- a/arch/um/Makefile
12328+++ b/arch/um/Makefile
12329@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12330 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12331 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12332
12333+ifdef CONSTIFY_PLUGIN
12334+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12335+endif
12336+
12337 #This will adjust *FLAGS accordingly to the platform.
12338 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12339
12340diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12341index 19e1bdd..3665b77 100644
12342--- a/arch/um/include/asm/cache.h
12343+++ b/arch/um/include/asm/cache.h
12344@@ -1,6 +1,7 @@
12345 #ifndef __UM_CACHE_H
12346 #define __UM_CACHE_H
12347
12348+#include <linux/const.h>
12349
12350 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12351 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12352@@ -12,6 +13,6 @@
12353 # define L1_CACHE_SHIFT 5
12354 #endif
12355
12356-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12357+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12358
12359 #endif
12360diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12361index 2e0a6b1..a64d0f5 100644
12362--- a/arch/um/include/asm/kmap_types.h
12363+++ b/arch/um/include/asm/kmap_types.h
12364@@ -8,6 +8,6 @@
12365
12366 /* No more #include "asm/arch/kmap_types.h" ! */
12367
12368-#define KM_TYPE_NR 14
12369+#define KM_TYPE_NR 15
12370
12371 #endif
12372diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12373index 71c5d13..4c7b9f1 100644
12374--- a/arch/um/include/asm/page.h
12375+++ b/arch/um/include/asm/page.h
12376@@ -14,6 +14,9 @@
12377 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12378 #define PAGE_MASK (~(PAGE_SIZE-1))
12379
12380+#define ktla_ktva(addr) (addr)
12381+#define ktva_ktla(addr) (addr)
12382+
12383 #ifndef __ASSEMBLY__
12384
12385 struct page;
12386diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12387index 0032f92..cd151e0 100644
12388--- a/arch/um/include/asm/pgtable-3level.h
12389+++ b/arch/um/include/asm/pgtable-3level.h
12390@@ -58,6 +58,7 @@
12391 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12392 #define pud_populate(mm, pud, pmd) \
12393 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12394+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12395
12396 #ifdef CONFIG_64BIT
12397 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12398diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12399index f17bca8..48adb87 100644
12400--- a/arch/um/kernel/process.c
12401+++ b/arch/um/kernel/process.c
12402@@ -356,22 +356,6 @@ int singlestepping(void * t)
12403 return 2;
12404 }
12405
12406-/*
12407- * Only x86 and x86_64 have an arch_align_stack().
12408- * All other arches have "#define arch_align_stack(x) (x)"
12409- * in their asm/exec.h
12410- * As this is included in UML from asm-um/system-generic.h,
12411- * we can use it to behave as the subarch does.
12412- */
12413-#ifndef arch_align_stack
12414-unsigned long arch_align_stack(unsigned long sp)
12415-{
12416- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12417- sp -= get_random_int() % 8192;
12418- return sp & ~0xf;
12419-}
12420-#endif
12421-
12422 unsigned long get_wchan(struct task_struct *p)
12423 {
12424 unsigned long stack_page, sp, ip;
12425diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12426index ad8f795..2c7eec6 100644
12427--- a/arch/unicore32/include/asm/cache.h
12428+++ b/arch/unicore32/include/asm/cache.h
12429@@ -12,8 +12,10 @@
12430 #ifndef __UNICORE_CACHE_H__
12431 #define __UNICORE_CACHE_H__
12432
12433-#define L1_CACHE_SHIFT (5)
12434-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12435+#include <linux/const.h>
12436+
12437+#define L1_CACHE_SHIFT 5
12438+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12439
12440 /*
12441 * Memory returned by kmalloc() may be used for DMA, so we must make
12442diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12443index 0dc9d01..98df103 100644
12444--- a/arch/x86/Kconfig
12445+++ b/arch/x86/Kconfig
12446@@ -130,7 +130,7 @@ config X86
12447 select RTC_LIB
12448 select HAVE_DEBUG_STACKOVERFLOW
12449 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12450- select HAVE_CC_STACKPROTECTOR
12451+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12452 select GENERIC_CPU_AUTOPROBE
12453 select HAVE_ARCH_AUDITSYSCALL
12454 select ARCH_SUPPORTS_ATOMIC_RMW
12455@@ -263,7 +263,7 @@ config X86_HT
12456
12457 config X86_32_LAZY_GS
12458 def_bool y
12459- depends on X86_32 && !CC_STACKPROTECTOR
12460+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12461
12462 config ARCH_HWEIGHT_CFLAGS
12463 string
12464@@ -601,6 +601,7 @@ config SCHED_OMIT_FRAME_POINTER
12465
12466 menuconfig HYPERVISOR_GUEST
12467 bool "Linux guest support"
12468+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12469 ---help---
12470 Say Y here to enable options for running Linux under various hyper-
12471 visors. This option enables basic hypervisor detection and platform
12472@@ -978,6 +979,7 @@ config VM86
12473
12474 config X86_16BIT
12475 bool "Enable support for 16-bit segments" if EXPERT
12476+ depends on !GRKERNSEC
12477 default y
12478 ---help---
12479 This option is required by programs like Wine to run 16-bit
12480@@ -1151,6 +1153,7 @@ choice
12481
12482 config NOHIGHMEM
12483 bool "off"
12484+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12485 ---help---
12486 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12487 However, the address space of 32-bit x86 processors is only 4
12488@@ -1187,6 +1190,7 @@ config NOHIGHMEM
12489
12490 config HIGHMEM4G
12491 bool "4GB"
12492+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12493 ---help---
12494 Select this if you have a 32-bit processor and between 1 and 4
12495 gigabytes of physical RAM.
12496@@ -1239,7 +1243,7 @@ config PAGE_OFFSET
12497 hex
12498 default 0xB0000000 if VMSPLIT_3G_OPT
12499 default 0x80000000 if VMSPLIT_2G
12500- default 0x78000000 if VMSPLIT_2G_OPT
12501+ default 0x70000000 if VMSPLIT_2G_OPT
12502 default 0x40000000 if VMSPLIT_1G
12503 default 0xC0000000
12504 depends on X86_32
12505@@ -1680,6 +1684,7 @@ source kernel/Kconfig.hz
12506
12507 config KEXEC
12508 bool "kexec system call"
12509+ depends on !GRKERNSEC_KMEM
12510 ---help---
12511 kexec is a system call that implements the ability to shutdown your
12512 current kernel, and to start another kernel. It is like a reboot
12513@@ -1865,7 +1870,9 @@ config X86_NEED_RELOCS
12514
12515 config PHYSICAL_ALIGN
12516 hex "Alignment value to which kernel should be aligned"
12517- default "0x200000"
12518+ default "0x1000000"
12519+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12520+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12521 range 0x2000 0x1000000 if X86_32
12522 range 0x200000 0x1000000 if X86_64
12523 ---help---
12524@@ -1948,6 +1955,7 @@ config COMPAT_VDSO
12525 def_bool n
12526 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12527 depends on X86_32 || IA32_EMULATION
12528+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12529 ---help---
12530 Certain buggy versions of glibc will crash if they are
12531 presented with a 32-bit vDSO that is not mapped at the address
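
The PHYSICAL_ALIGN ranges above track the large-page sizes KERNEXEC uses when mapping the kernel image: at least 2 MiB with PAE, 4 MiB without, with a 16 MiB default. Rounding a load address to such a power-of-two boundary is the usual mask arithmetic, the same computation the add/not/and sequences in the head_32.S and head_64.S hunks further down perform; for reference (align_up() is an illustrative name):

#include <stdio.h>

/* Round addr up to a power-of-two alignment boundary. */
static unsigned long align_up(unsigned long addr, unsigned long align)
{
	return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
	/* 2 MiB large pages under PAE, 4 MiB without, 16 MiB default. */
	printf("%#lx\n", align_up(0x1234567UL, 0x200000UL));
	printf("%#lx\n", align_up(0x1234567UL, 0x400000UL));
	printf("%#lx\n", align_up(0x1234567UL, 0x1000000UL));
	return 0;
}
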
12532diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12533index 6983314..54ad7e8 100644
12534--- a/arch/x86/Kconfig.cpu
12535+++ b/arch/x86/Kconfig.cpu
12536@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12537
12538 config X86_F00F_BUG
12539 def_bool y
12540- depends on M586MMX || M586TSC || M586 || M486
12541+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12542
12543 config X86_INVD_BUG
12544 def_bool y
12545@@ -327,7 +327,7 @@ config X86_INVD_BUG
12546
12547 config X86_ALIGNMENT_16
12548 def_bool y
12549- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12550+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12551
12552 config X86_INTEL_USERCOPY
12553 def_bool y
12554@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12555 # generates cmov.
12556 config X86_CMOV
12557 def_bool y
12558- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12559+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12560
12561 config X86_MINIMUM_CPU_FAMILY
12562 int
12563diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12564index 61bd2ad..50b625d 100644
12565--- a/arch/x86/Kconfig.debug
12566+++ b/arch/x86/Kconfig.debug
12567@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12568 config DEBUG_RODATA
12569 bool "Write protect kernel read-only data structures"
12570 default y
12571- depends on DEBUG_KERNEL
12572+ depends on DEBUG_KERNEL && BROKEN
12573 ---help---
12574 Mark the kernel read-only data as write-protected in the pagetables,
12575 in order to catch accidental (and incorrect) writes to such const
12576@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12577
12578 config DEBUG_SET_MODULE_RONX
12579 bool "Set loadable kernel module data as NX and text as RO"
12580- depends on MODULES
12581+ depends on MODULES && BROKEN
12582 ---help---
12583 This option helps catch unintended modifications to loadable
12584 kernel module's text and read-only data. It also prevents execution
12585diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12586index 920e616..ac3d4df 100644
12587--- a/arch/x86/Makefile
12588+++ b/arch/x86/Makefile
12589@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12590 # CPU-specific tuning. Anything which can be shared with UML should go here.
12591 include $(srctree)/arch/x86/Makefile_32.cpu
12592 KBUILD_CFLAGS += $(cflags-y)
12593-
12594- # temporary until string.h is fixed
12595- KBUILD_CFLAGS += -ffreestanding
12596 else
12597 BITS := 64
12598 UTS_MACHINE := x86_64
12599@@ -107,6 +104,9 @@ else
12600 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12601 endif
12602
12603+# temporary until string.h is fixed
12604+KBUILD_CFLAGS += -ffreestanding
12605+
12606 # Make sure compiler does not have buggy stack-protector support.
12607 ifdef CONFIG_CC_STACKPROTECTOR
12608 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12609@@ -180,6 +180,7 @@ archheaders:
12610 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12611
12612 archprepare:
12613+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12614 ifeq ($(CONFIG_KEXEC_FILE),y)
12615 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12616 endif
12617@@ -263,3 +264,9 @@ define archhelp
12618 echo ' FDARGS="..." arguments for the booted kernel'
12619 echo ' FDINITRD=file initrd for the booted kernel'
12620 endef
12621+
12622+define OLD_LD
12623+
12624+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12625+*** Please upgrade your binutils to 2.18 or newer.
12626+endef
12627diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12628index 3db07f3..9d81d0f 100644
12629--- a/arch/x86/boot/Makefile
12630+++ b/arch/x86/boot/Makefile
12631@@ -56,6 +56,9 @@ clean-files += cpustr.h
12632 # ---------------------------------------------------------------------------
12633
12634 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12635+ifdef CONSTIFY_PLUGIN
12636+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12637+endif
12638 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12639 GCOV_PROFILE := n
12640
12641diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12642index 878e4b9..20537ab 100644
12643--- a/arch/x86/boot/bitops.h
12644+++ b/arch/x86/boot/bitops.h
12645@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12646 u8 v;
12647 const u32 *p = (const u32 *)addr;
12648
12649- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12650+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12651 return v;
12652 }
12653
12654@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12655
12656 static inline void set_bit(int nr, void *addr)
12657 {
12658- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12659+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12660 }
12661
12662 #endif /* BOOT_BITOPS_H */
12663diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12664index bd49ec6..94c7f58 100644
12665--- a/arch/x86/boot/boot.h
12666+++ b/arch/x86/boot/boot.h
12667@@ -84,7 +84,7 @@ static inline void io_delay(void)
12668 static inline u16 ds(void)
12669 {
12670 u16 seg;
12671- asm("movw %%ds,%0" : "=rm" (seg));
12672+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12673 return seg;
12674 }
12675
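
The boot-code hunks from here through cpucheck.c add volatile to asm statements. GCC treats a non-volatile asm with outputs as a pure function of its inputs, so it may merge two identical statements or delete one whose result goes unused; that is wrong for instructions with side effects (wrmsr, segment loads) or values the compiler cannot see changing. A minimal x86 illustration, compiled with optimization enabled to observe the folding:

#include <stdio.h>

/* Without volatile, two calls with identical (empty) inputs may be
 * folded into a single rdtsc by the optimizer. */
static inline unsigned long long tsc_nonvolatile(void)
{
	unsigned int lo, hi;
	asm("rdtsc" : "=a"(lo), "=d"(hi));
	return ((unsigned long long)hi << 32) | lo;
}

/* volatile pins each execution: every call issues its own rdtsc. */
static inline unsigned long long tsc_volatile(void)
{
	unsigned int lo, hi;
	asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
	return ((unsigned long long)hi << 32) | lo;
}

int main(void)
{
	printf("non-volatile delta: %llu\n",
	       tsc_nonvolatile() - tsc_nonvolatile());	/* may fold to 0 */
	printf("volatile delta:     %llu\n",
	       tsc_volatile() - tsc_volatile());
	return 0;
}
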
12676diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12677index 8bd44e8..6b111e9 100644
12678--- a/arch/x86/boot/compressed/Makefile
12679+++ b/arch/x86/boot/compressed/Makefile
12680@@ -28,6 +28,9 @@ KBUILD_CFLAGS += $(cflags-y)
12681 KBUILD_CFLAGS += -mno-mmx -mno-sse
12682 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12683 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12684+ifdef CONSTIFY_PLUGIN
12685+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12686+endif
12687
12688 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12689 GCOV_PROFILE := n
12690diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12691index a53440e..c3dbf1e 100644
12692--- a/arch/x86/boot/compressed/efi_stub_32.S
12693+++ b/arch/x86/boot/compressed/efi_stub_32.S
12694@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12695 * parameter 2, ..., param n. To make things easy, we save the return
12696 * address of efi_call_phys in a global variable.
12697 */
12698- popl %ecx
12699- movl %ecx, saved_return_addr(%edx)
12700- /* get the function pointer into ECX*/
12701- popl %ecx
12702- movl %ecx, efi_rt_function_ptr(%edx)
12703+ popl saved_return_addr(%edx)
12704+ popl efi_rt_function_ptr(%edx)
12705
12706 /*
12707 * 3. Call the physical function.
12708 */
12709- call *%ecx
12710+ call *efi_rt_function_ptr(%edx)
12711
12712 /*
12713 * 4. Balance the stack. And because EAX contain the return value,
12714@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12715 1: popl %edx
12716 subl $1b, %edx
12717
12718- movl efi_rt_function_ptr(%edx), %ecx
12719- pushl %ecx
12720+ pushl efi_rt_function_ptr(%edx)
12721
12722 /*
12723 * 10. Push the saved return address onto the stack and return.
12724 */
12725- movl saved_return_addr(%edx), %ecx
12726- pushl %ecx
12727- ret
12728+ jmpl *saved_return_addr(%edx)
12729 ENDPROC(efi_call_phys)
12730 .previous
12731
12732diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12733index 630384a..278e788 100644
12734--- a/arch/x86/boot/compressed/efi_thunk_64.S
12735+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12736@@ -189,8 +189,8 @@ efi_gdt64:
12737 .long 0 /* Filled out by user */
12738 .word 0
12739 .quad 0x0000000000000000 /* NULL descriptor */
12740- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12741- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12742+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12743+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12744 .quad 0x0080890000000000 /* TS descriptor */
12745 .quad 0x0000000000000000 /* TS continued */
12746 efi_gdt64_end:
12747diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12748index 1d7fbbc..36ecd58 100644
12749--- a/arch/x86/boot/compressed/head_32.S
12750+++ b/arch/x86/boot/compressed/head_32.S
12751@@ -140,10 +140,10 @@ preferred_addr:
12752 addl %eax, %ebx
12753 notl %eax
12754 andl %eax, %ebx
12755- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12756+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12757 jge 1f
12758 #endif
12759- movl $LOAD_PHYSICAL_ADDR, %ebx
12760+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12761 1:
12762
12763 /* Target address to relocate to for decompression */
12764diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12765index 6b1766c..ad465c9 100644
12766--- a/arch/x86/boot/compressed/head_64.S
12767+++ b/arch/x86/boot/compressed/head_64.S
12768@@ -94,10 +94,10 @@ ENTRY(startup_32)
12769 addl %eax, %ebx
12770 notl %eax
12771 andl %eax, %ebx
12772- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12773+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12774 jge 1f
12775 #endif
12776- movl $LOAD_PHYSICAL_ADDR, %ebx
12777+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12778 1:
12779
12780 /* Target address to relocate to for decompression */
12781@@ -322,10 +322,10 @@ preferred_addr:
12782 addq %rax, %rbp
12783 notq %rax
12784 andq %rax, %rbp
12785- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12786+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12787 jge 1f
12788 #endif
12789- movq $LOAD_PHYSICAL_ADDR, %rbp
12790+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12791 1:
12792
12793 /* Target address to relocate to for decompression */
12794@@ -434,8 +434,8 @@ gdt:
12795 .long gdt
12796 .word 0
12797 .quad 0x0000000000000000 /* NULL descriptor */
12798- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12799- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12800+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12801+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12802 .quad 0x0080890000000000 /* TS descriptor */
12803 .quad 0x0000000000000000 /* TS continued */
12804 gdt_end:
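
Both GDT hunks change the descriptors' access bytes from 0x9a/0x92 to 0x9b/0x93, pre-setting the "accessed" flag (bit 40 of the 8-byte descriptor). The CPU otherwise sets that bit itself the first time the selector is loaded, a hardware write that would fault once KERNEXEC maps the GDT read-only. A short decoder for the field:

#include <stdint.h>
#include <stdio.h>

/* Bits 47..40 of a segment descriptor form the access byte; bit 40
 * (bit 0 of that byte) is the "accessed" flag the CPU sets on load. */
static void show(const char *name, uint64_t desc)
{
	uint8_t access = (uint8_t)(desc >> 40);

	printf("%s: access=%02x accessed=%d\n", name, access, access & 1);
}

int main(void)
{
	show("old __KERNEL_CS", 0x00af9a000000ffffULL);
	show("new __KERNEL_CS", 0x00af9b000000ffffULL);
	show("old __KERNEL_DS", 0x00cf92000000ffffULL);
	show("new __KERNEL_DS", 0x00cf93000000ffffULL);
	return 0;
}

Running it shows each old/new pair differs only in that one bit.
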
12805diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12806index a950864..c710239 100644
12807--- a/arch/x86/boot/compressed/misc.c
12808+++ b/arch/x86/boot/compressed/misc.c
12809@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12810 * Calculate the delta between where vmlinux was linked to load
12811 * and where it was actually loaded.
12812 */
12813- delta = min_addr - LOAD_PHYSICAL_ADDR;
12814+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12815 if (!delta) {
12816 debug_putstr("No relocation needed... ");
12817 return;
12818@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12819 Elf32_Ehdr ehdr;
12820 Elf32_Phdr *phdrs, *phdr;
12821 #endif
12822- void *dest;
12823+ void *dest, *prev;
12824 int i;
12825
12826 memcpy(&ehdr, output, sizeof(ehdr));
12827@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12828 case PT_LOAD:
12829 #ifdef CONFIG_RELOCATABLE
12830 dest = output;
12831- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12832+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12833 #else
12834 dest = (void *)(phdr->p_paddr);
12835 #endif
12836 memcpy(dest,
12837 output + phdr->p_offset,
12838 phdr->p_filesz);
12839+ if (i)
12840+ memset(prev, 0xff, dest - prev);
12841+ prev = dest + phdr->p_filesz;
12842 break;
12843 default: /* Ignore other PT_* */ break;
12844 }
12845@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12846 error("Destination address too large");
12847 #endif
12848 #ifndef CONFIG_RELOCATABLE
12849- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12850+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12851 error("Wrong destination address");
12852 #endif
12853
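
The parse_elf() change above remembers where each copied PT_LOAD segment ends (prev) and fills the slack up to the next segment with 0xff, so alignment gaps in the decompressed image hold neither stale decompressor output nor predictable zero padding. Note the hunk's if (i) guard relies on the first program header being a PT_LOAD, since prev starts out uninitialized; the toy model below initializes prev explicitly and is otherwise the same loop:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct seg { uint32_t off; uint32_t len; };	/* stand-in for Elf32_Phdr */

int main(void)
{
	uint8_t image[64] = { 0 };
	const struct seg segs[] = { { 0, 10 }, { 24, 8 }, { 48, 4 } };
	uint8_t *prev = NULL;
	size_t i;

	for (i = 0; i < sizeof(segs) / sizeof(segs[0]); i++) {
		uint8_t *dest = image + segs[i].off;

		memset(dest, 0xAA, segs[i].len);	/* "copy" the segment */
		if (i)					/* poison the gap */
			memset(prev, 0xff, (size_t)(dest - prev));
		prev = dest + segs[i].len;
	}
	for (i = 0; i < sizeof(image); i++)
		printf("%02x%c", image[i], (i % 16 == 15) ? '\n' : ' ');
	return 0;
}
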
12854diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12855index 1fd7d57..0f7d096 100644
12856--- a/arch/x86/boot/cpucheck.c
12857+++ b/arch/x86/boot/cpucheck.c
12858@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12859 u32 ecx = MSR_K7_HWCR;
12860 u32 eax, edx;
12861
12862- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12863+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12864 eax &= ~(1 << 15);
12865- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12866+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12867
12868 get_cpuflags(); /* Make sure it really did something */
12869 err = check_cpuflags();
12870@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12871 u32 ecx = MSR_VIA_FCR;
12872 u32 eax, edx;
12873
12874- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12875+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12876 eax |= (1<<1)|(1<<7);
12877- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12878+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12879
12880 set_bit(X86_FEATURE_CX8, cpu.flags);
12881 err = check_cpuflags();
12882@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12883 u32 eax, edx;
12884 u32 level = 1;
12885
12886- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12887- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12888- asm("cpuid"
12889+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12890+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12891+ asm volatile("cpuid"
12892 : "+a" (level), "=d" (cpu.flags[0])
12893 : : "ecx", "ebx");
12894- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12895+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12896
12897 err = check_cpuflags();
12898 } else if (err == 0x01 &&
12899diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12900index 16ef025..91e033b 100644
12901--- a/arch/x86/boot/header.S
12902+++ b/arch/x86/boot/header.S
12903@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12904 # single linked list of
12905 # struct setup_data
12906
12907-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12908+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12909
12910 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12911+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12912+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12913+#else
12914 #define VO_INIT_SIZE (VO__end - VO__text)
12915+#endif
12916 #if ZO_INIT_SIZE > VO_INIT_SIZE
12917 #define INIT_SIZE ZO_INIT_SIZE
12918 #else
12919diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12920index db75d07..8e6d0af 100644
12921--- a/arch/x86/boot/memory.c
12922+++ b/arch/x86/boot/memory.c
12923@@ -19,7 +19,7 @@
12924
12925 static int detect_memory_e820(void)
12926 {
12927- int count = 0;
12928+ unsigned int count = 0;
12929 struct biosregs ireg, oreg;
12930 struct e820entry *desc = boot_params.e820_map;
12931 static struct e820entry buf; /* static so it is zeroed */
12932diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
12933index ba3e100..6501b8f 100644
12934--- a/arch/x86/boot/video-vesa.c
12935+++ b/arch/x86/boot/video-vesa.c
12936@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
12937
12938 boot_params.screen_info.vesapm_seg = oreg.es;
12939 boot_params.screen_info.vesapm_off = oreg.di;
12940+ boot_params.screen_info.vesapm_size = oreg.cx;
12941 }
12942
12943 /*
12944diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
12945index 43eda28..5ab5fdb 100644
12946--- a/arch/x86/boot/video.c
12947+++ b/arch/x86/boot/video.c
12948@@ -96,7 +96,7 @@ static void store_mode_params(void)
12949 static unsigned int get_entry(void)
12950 {
12951 char entry_buf[4];
12952- int i, len = 0;
12953+ unsigned int i, len = 0;
12954 int key;
12955 unsigned int v;
12956
12957diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
12958index 9105655..41779c1 100644
12959--- a/arch/x86/crypto/aes-x86_64-asm_64.S
12960+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
12961@@ -8,6 +8,8 @@
12962 * including this sentence is retained in full.
12963 */
12964
12965+#include <asm/alternative-asm.h>
12966+
12967 .extern crypto_ft_tab
12968 .extern crypto_it_tab
12969 .extern crypto_fl_tab
12970@@ -70,6 +72,8 @@
12971 je B192; \
12972 leaq 32(r9),r9;
12973
12974+#define ret pax_force_retaddr; ret
12975+
12976 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
12977 movq r1,r2; \
12978 movq r3,r4; \
12979diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
12980index 477e9d7..c92c7d8 100644
12981--- a/arch/x86/crypto/aesni-intel_asm.S
12982+++ b/arch/x86/crypto/aesni-intel_asm.S
12983@@ -31,6 +31,7 @@
12984
12985 #include <linux/linkage.h>
12986 #include <asm/inst.h>
12987+#include <asm/alternative-asm.h>
12988
12989 #ifdef __x86_64__
12990 .data
12991@@ -205,7 +206,7 @@ enc: .octa 0x2
12992 * num_initial_blocks = b mod 4
12993 * encrypt the initial num_initial_blocks blocks and apply ghash on
12994 * the ciphertext
12995-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12996+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12997 * are clobbered
12998 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
12999 */
13000@@ -214,8 +215,8 @@ enc: .octa 0x2
13001 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13002 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13003 mov arg7, %r10 # %r10 = AAD
13004- mov arg8, %r12 # %r12 = aadLen
13005- mov %r12, %r11
13006+ mov arg8, %r15 # %r15 = aadLen
13007+ mov %r15, %r11
13008 pxor %xmm\i, %xmm\i
13009 _get_AAD_loop\num_initial_blocks\operation:
13010 movd (%r10), \TMP1
13011@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13012 psrldq $4, %xmm\i
13013 pxor \TMP1, %xmm\i
13014 add $4, %r10
13015- sub $4, %r12
13016+ sub $4, %r15
13017 jne _get_AAD_loop\num_initial_blocks\operation
13018 cmp $16, %r11
13019 je _get_AAD_loop2_done\num_initial_blocks\operation
13020- mov $16, %r12
13021+ mov $16, %r15
13022 _get_AAD_loop2\num_initial_blocks\operation:
13023 psrldq $4, %xmm\i
13024- sub $4, %r12
13025- cmp %r11, %r12
13026+ sub $4, %r15
13027+ cmp %r11, %r15
13028 jne _get_AAD_loop2\num_initial_blocks\operation
13029 _get_AAD_loop2_done\num_initial_blocks\operation:
13030 movdqa SHUF_MASK(%rip), %xmm14
13031@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13032 * num_initial_blocks = b mod 4
13033 * encrypt the initial num_initial_blocks blocks and apply ghash on
13034 * the ciphertext
13035-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13036+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13037 * are clobbered
13038 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13039 */
13040@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13041 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13042 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13043 mov arg7, %r10 # %r10 = AAD
13044- mov arg8, %r12 # %r12 = aadLen
13045- mov %r12, %r11
13046+ mov arg8, %r15 # %r15 = aadLen
13047+ mov %r15, %r11
13048 pxor %xmm\i, %xmm\i
13049 _get_AAD_loop\num_initial_blocks\operation:
13050 movd (%r10), \TMP1
13051@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13052 psrldq $4, %xmm\i
13053 pxor \TMP1, %xmm\i
13054 add $4, %r10
13055- sub $4, %r12
13056+ sub $4, %r15
13057 jne _get_AAD_loop\num_initial_blocks\operation
13058 cmp $16, %r11
13059 je _get_AAD_loop2_done\num_initial_blocks\operation
13060- mov $16, %r12
13061+ mov $16, %r15
13062 _get_AAD_loop2\num_initial_blocks\operation:
13063 psrldq $4, %xmm\i
13064- sub $4, %r12
13065- cmp %r11, %r12
13066+ sub $4, %r15
13067+ cmp %r11, %r15
13068 jne _get_AAD_loop2\num_initial_blocks\operation
13069 _get_AAD_loop2_done\num_initial_blocks\operation:
13070 movdqa SHUF_MASK(%rip), %xmm14
13071@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13072 *
13073 *****************************************************************************/
13074 ENTRY(aesni_gcm_dec)
13075- push %r12
13076+ push %r15
13077 push %r13
13078 push %r14
13079 mov %rsp, %r14
13080@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13081 */
13082 sub $VARIABLE_OFFSET, %rsp
13083 and $~63, %rsp # align rsp to 64 bytes
13084- mov %arg6, %r12
13085- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13086+ mov %arg6, %r15
13087+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13088 movdqa SHUF_MASK(%rip), %xmm2
13089 PSHUFB_XMM %xmm2, %xmm13
13090
13091@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13092 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13093 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13094 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13095- mov %r13, %r12
13096- and $(3<<4), %r12
13097+ mov %r13, %r15
13098+ and $(3<<4), %r15
13099 jz _initial_num_blocks_is_0_decrypt
13100- cmp $(2<<4), %r12
13101+ cmp $(2<<4), %r15
13102 jb _initial_num_blocks_is_1_decrypt
13103 je _initial_num_blocks_is_2_decrypt
13104 _initial_num_blocks_is_3_decrypt:
13105@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13106 sub $16, %r11
13107 add %r13, %r11
13108 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13109- lea SHIFT_MASK+16(%rip), %r12
13110- sub %r13, %r12
13111+ lea SHIFT_MASK+16(%rip), %r15
13112+ sub %r13, %r15
13113 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13114 # (%r13 is the number of bytes in plaintext mod 16)
13115- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13116+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13117 	PSHUFB_XMM %xmm2, %xmm1		# right shift 16-%r13 bytes
13118
13119 movdqa %xmm1, %xmm2
13120 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13121- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13122+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13123 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13124 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13125 pand %xmm1, %xmm2
13126@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13127 sub $1, %r13
13128 jne _less_than_8_bytes_left_decrypt
13129 _multiple_of_16_bytes_decrypt:
13130- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13131- shl $3, %r12 # convert into number of bits
13132- movd %r12d, %xmm15 # len(A) in %xmm15
13133+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13134+ shl $3, %r15 # convert into number of bits
13135+ movd %r15d, %xmm15 # len(A) in %xmm15
13136 shl $3, %arg4 # len(C) in bits (*128)
13137 MOVQ_R64_XMM %arg4, %xmm1
13138 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13139@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13140 mov %r14, %rsp
13141 pop %r14
13142 pop %r13
13143- pop %r12
13144+ pop %r15
13145+ pax_force_retaddr
13146 ret
13147 ENDPROC(aesni_gcm_dec)
13148
13149@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13150 * poly = x^128 + x^127 + x^126 + x^121 + 1
13151 ***************************************************************************/
13152 ENTRY(aesni_gcm_enc)
13153- push %r12
13154+ push %r15
13155 push %r13
13156 push %r14
13157 mov %rsp, %r14
13158@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13159 #
13160 sub $VARIABLE_OFFSET, %rsp
13161 and $~63, %rsp
13162- mov %arg6, %r12
13163- movdqu (%r12), %xmm13
13164+ mov %arg6, %r15
13165+ movdqu (%r15), %xmm13
13166 movdqa SHUF_MASK(%rip), %xmm2
13167 PSHUFB_XMM %xmm2, %xmm13
13168
13169@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13170 movdqa %xmm13, HashKey(%rsp)
13171 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13172 and $-16, %r13
13173- mov %r13, %r12
13174+ mov %r13, %r15
13175
13176 # Encrypt first few blocks
13177
13178- and $(3<<4), %r12
13179+ and $(3<<4), %r15
13180 jz _initial_num_blocks_is_0_encrypt
13181- cmp $(2<<4), %r12
13182+ cmp $(2<<4), %r15
13183 jb _initial_num_blocks_is_1_encrypt
13184 je _initial_num_blocks_is_2_encrypt
13185 _initial_num_blocks_is_3_encrypt:
13186@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13187 sub $16, %r11
13188 add %r13, %r11
13189 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13190- lea SHIFT_MASK+16(%rip), %r12
13191- sub %r13, %r12
13192+ lea SHIFT_MASK+16(%rip), %r15
13193+ sub %r13, %r15
13194 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13195 # (%r13 is the number of bytes in plaintext mod 16)
13196- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13197+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13198 	PSHUFB_XMM %xmm2, %xmm1		# shift right 16-r13 bytes
13199 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13200- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13201+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13202 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13203 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13204 movdqa SHUF_MASK(%rip), %xmm10
13205@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13206 sub $1, %r13
13207 jne _less_than_8_bytes_left_encrypt
13208 _multiple_of_16_bytes_encrypt:
13209- mov arg8, %r12 # %r12 = addLen (number of bytes)
13210- shl $3, %r12
13211- movd %r12d, %xmm15 # len(A) in %xmm15
13212+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13213+ shl $3, %r15
13214+ movd %r15d, %xmm15 # len(A) in %xmm15
13215 shl $3, %arg4 # len(C) in bits (*128)
13216 MOVQ_R64_XMM %arg4, %xmm1
13217 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13218@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13219 mov %r14, %rsp
13220 pop %r14
13221 pop %r13
13222- pop %r12
13223+ pop %r15
13224+ pax_force_retaddr
13225 ret
13226 ENDPROC(aesni_gcm_enc)
13227
13228@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13229 pxor %xmm1, %xmm0
13230 movaps %xmm0, (TKEYP)
13231 add $0x10, TKEYP
13232+ pax_force_retaddr
13233 ret
13234 ENDPROC(_key_expansion_128)
13235 ENDPROC(_key_expansion_256a)
13236@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13237 shufps $0b01001110, %xmm2, %xmm1
13238 movaps %xmm1, 0x10(TKEYP)
13239 add $0x20, TKEYP
13240+ pax_force_retaddr
13241 ret
13242 ENDPROC(_key_expansion_192a)
13243
13244@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13245
13246 movaps %xmm0, (TKEYP)
13247 add $0x10, TKEYP
13248+ pax_force_retaddr
13249 ret
13250 ENDPROC(_key_expansion_192b)
13251
13252@@ -1781,6 +1787,7 @@ _key_expansion_256b:
13253 pxor %xmm1, %xmm2
13254 movaps %xmm2, (TKEYP)
13255 add $0x10, TKEYP
13256+ pax_force_retaddr
13257 ret
13258 ENDPROC(_key_expansion_256b)
13259
13260@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13261 #ifndef __x86_64__
13262 popl KEYP
13263 #endif
13264+ pax_force_retaddr
13265 ret
13266 ENDPROC(aesni_set_key)
13267
13268@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13269 popl KLEN
13270 popl KEYP
13271 #endif
13272+ pax_force_retaddr
13273 ret
13274 ENDPROC(aesni_enc)
13275
13276@@ -1974,6 +1983,7 @@ _aesni_enc1:
13277 AESENC KEY STATE
13278 movaps 0x70(TKEYP), KEY
13279 AESENCLAST KEY STATE
13280+ pax_force_retaddr
13281 ret
13282 ENDPROC(_aesni_enc1)
13283
13284@@ -2083,6 +2093,7 @@ _aesni_enc4:
13285 AESENCLAST KEY STATE2
13286 AESENCLAST KEY STATE3
13287 AESENCLAST KEY STATE4
13288+ pax_force_retaddr
13289 ret
13290 ENDPROC(_aesni_enc4)
13291
13292@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13293 popl KLEN
13294 popl KEYP
13295 #endif
13296+ pax_force_retaddr
13297 ret
13298 ENDPROC(aesni_dec)
13299
13300@@ -2164,6 +2176,7 @@ _aesni_dec1:
13301 AESDEC KEY STATE
13302 movaps 0x70(TKEYP), KEY
13303 AESDECLAST KEY STATE
13304+ pax_force_retaddr
13305 ret
13306 ENDPROC(_aesni_dec1)
13307
13308@@ -2273,6 +2286,7 @@ _aesni_dec4:
13309 AESDECLAST KEY STATE2
13310 AESDECLAST KEY STATE3
13311 AESDECLAST KEY STATE4
13312+ pax_force_retaddr
13313 ret
13314 ENDPROC(_aesni_dec4)
13315
13316@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13317 popl KEYP
13318 popl LEN
13319 #endif
13320+ pax_force_retaddr
13321 ret
13322 ENDPROC(aesni_ecb_enc)
13323
13324@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13325 popl KEYP
13326 popl LEN
13327 #endif
13328+ pax_force_retaddr
13329 ret
13330 ENDPROC(aesni_ecb_dec)
13331
13332@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13333 popl LEN
13334 popl IVP
13335 #endif
13336+ pax_force_retaddr
13337 ret
13338 ENDPROC(aesni_cbc_enc)
13339
13340@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13341 popl LEN
13342 popl IVP
13343 #endif
13344+ pax_force_retaddr
13345 ret
13346 ENDPROC(aesni_cbc_dec)
13347
13348@@ -2550,6 +2568,7 @@ _aesni_inc_init:
13349 mov $1, TCTR_LOW
13350 MOVQ_R64_XMM TCTR_LOW INC
13351 MOVQ_R64_XMM CTR TCTR_LOW
13352+ pax_force_retaddr
13353 ret
13354 ENDPROC(_aesni_inc_init)
13355
13356@@ -2579,6 +2598,7 @@ _aesni_inc:
13357 .Linc_low:
13358 movaps CTR, IV
13359 PSHUFB_XMM BSWAP_MASK IV
13360+ pax_force_retaddr
13361 ret
13362 ENDPROC(_aesni_inc)
13363
13364@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13365 .Lctr_enc_ret:
13366 movups IV, (IVP)
13367 .Lctr_enc_just_ret:
13368+ pax_force_retaddr
13369 ret
13370 ENDPROC(aesni_ctr_enc)
13371
13372@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13373 pxor INC, STATE4
13374 movdqu STATE4, 0x70(OUTP)
13375
13376+ pax_force_retaddr
13377 ret
13378 ENDPROC(aesni_xts_crypt8)
13379
13380diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13381index 246c670..466e2d6 100644
13382--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13383+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13384@@ -21,6 +21,7 @@
13385 */
13386
13387 #include <linux/linkage.h>
13388+#include <asm/alternative-asm.h>
13389
13390 .file "blowfish-x86_64-asm.S"
13391 .text
13392@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13393 jnz .L__enc_xor;
13394
13395 write_block();
13396+ pax_force_retaddr
13397 ret;
13398 .L__enc_xor:
13399 xor_block();
13400+ pax_force_retaddr
13401 ret;
13402 ENDPROC(__blowfish_enc_blk)
13403
13404@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13405
13406 movq %r11, %rbp;
13407
13408+ pax_force_retaddr
13409 ret;
13410 ENDPROC(blowfish_dec_blk)
13411
13412@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13413
13414 popq %rbx;
13415 popq %rbp;
13416+ pax_force_retaddr
13417 ret;
13418
13419 .L__enc_xor4:
13420@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13421
13422 popq %rbx;
13423 popq %rbp;
13424+ pax_force_retaddr
13425 ret;
13426 ENDPROC(__blowfish_enc_blk_4way)
13427
13428@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13429 popq %rbx;
13430 popq %rbp;
13431
13432+ pax_force_retaddr
13433 ret;
13434 ENDPROC(blowfish_dec_blk_4way)
13435diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13436index ce71f92..1dce7ec 100644
13437--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13438+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13439@@ -16,6 +16,7 @@
13440 */
13441
13442 #include <linux/linkage.h>
13443+#include <asm/alternative-asm.h>
13444
13445 #define CAMELLIA_TABLE_BYTE_LEN 272
13446
13447@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13448 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13449 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13450 %rcx, (%r9));
13451+ pax_force_retaddr
13452 ret;
13453 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13454
13455@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13456 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13457 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13458 %rax, (%r9));
13459+ pax_force_retaddr
13460 ret;
13461 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13462
13463@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13464 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13465 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13466
13467+ pax_force_retaddr
13468 ret;
13469
13470 .align 8
13471@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13472 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13473 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13474
13475+ pax_force_retaddr
13476 ret;
13477
13478 .align 8
13479@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13480 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13481 %xmm8, %rsi);
13482
13483+ pax_force_retaddr
13484 ret;
13485 ENDPROC(camellia_ecb_enc_16way)
13486
13487@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13488 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13489 %xmm8, %rsi);
13490
13491+ pax_force_retaddr
13492 ret;
13493 ENDPROC(camellia_ecb_dec_16way)
13494
13495@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13496 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13497 %xmm8, %rsi);
13498
13499+ pax_force_retaddr
13500 ret;
13501 ENDPROC(camellia_cbc_dec_16way)
13502
13503@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13504 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13505 %xmm8, %rsi);
13506
13507+ pax_force_retaddr
13508 ret;
13509 ENDPROC(camellia_ctr_16way)
13510
13511@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13512 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13513 %xmm8, %rsi);
13514
13515+ pax_force_retaddr
13516 ret;
13517 ENDPROC(camellia_xts_crypt_16way)
13518
13519diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13520index 0e0b886..5a3123c 100644
13521--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13522+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13523@@ -11,6 +11,7 @@
13524 */
13525
13526 #include <linux/linkage.h>
13527+#include <asm/alternative-asm.h>
13528
13529 #define CAMELLIA_TABLE_BYTE_LEN 272
13530
13531@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13532 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13533 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13534 %rcx, (%r9));
13535+ pax_force_retaddr
13536 ret;
13537 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13538
13539@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13540 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13541 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13542 %rax, (%r9));
13543+ pax_force_retaddr
13544 ret;
13545 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13546
13547@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13548 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13549 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13550
13551+ pax_force_retaddr
13552 ret;
13553
13554 .align 8
13555@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13556 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13557 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13558
13559+ pax_force_retaddr
13560 ret;
13561
13562 .align 8
13563@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13564
13565 vzeroupper;
13566
13567+ pax_force_retaddr
13568 ret;
13569 ENDPROC(camellia_ecb_enc_32way)
13570
13571@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13572
13573 vzeroupper;
13574
13575+ pax_force_retaddr
13576 ret;
13577 ENDPROC(camellia_ecb_dec_32way)
13578
13579@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13580
13581 vzeroupper;
13582
13583+ pax_force_retaddr
13584 ret;
13585 ENDPROC(camellia_cbc_dec_32way)
13586
13587@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13588
13589 vzeroupper;
13590
13591+ pax_force_retaddr
13592 ret;
13593 ENDPROC(camellia_ctr_32way)
13594
13595@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13596
13597 vzeroupper;
13598
13599+ pax_force_retaddr
13600 ret;
13601 ENDPROC(camellia_xts_crypt_32way)
13602
13603diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13604index 310319c..db3d7b5 100644
13605--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13606+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13607@@ -21,6 +21,7 @@
13608 */
13609
13610 #include <linux/linkage.h>
13611+#include <asm/alternative-asm.h>
13612
13613 .file "camellia-x86_64-asm_64.S"
13614 .text
13615@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13616 enc_outunpack(mov, RT1);
13617
13618 movq RRBP, %rbp;
13619+ pax_force_retaddr
13620 ret;
13621
13622 .L__enc_xor:
13623 enc_outunpack(xor, RT1);
13624
13625 movq RRBP, %rbp;
13626+ pax_force_retaddr
13627 ret;
13628 ENDPROC(__camellia_enc_blk)
13629
13630@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13631 dec_outunpack();
13632
13633 movq RRBP, %rbp;
13634+ pax_force_retaddr
13635 ret;
13636 ENDPROC(camellia_dec_blk)
13637
13638@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13639
13640 movq RRBP, %rbp;
13641 popq %rbx;
13642+ pax_force_retaddr
13643 ret;
13644
13645 .L__enc2_xor:
13646@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13647
13648 movq RRBP, %rbp;
13649 popq %rbx;
13650+ pax_force_retaddr
13651 ret;
13652 ENDPROC(__camellia_enc_blk_2way)
13653
13654@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13655
13656 movq RRBP, %rbp;
13657 movq RXOR, %rbx;
13658+ pax_force_retaddr
13659 ret;
13660 ENDPROC(camellia_dec_blk_2way)
13661diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13662index c35fd5d..2d8c7db 100644
13663--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13664+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13665@@ -24,6 +24,7 @@
13666 */
13667
13668 #include <linux/linkage.h>
13669+#include <asm/alternative-asm.h>
13670
13671 .file "cast5-avx-x86_64-asm_64.S"
13672
13673@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13674 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13675 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13676
13677+ pax_force_retaddr
13678 ret;
13679 ENDPROC(__cast5_enc_blk16)
13680
13681@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13682 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13683 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13684
13685+ pax_force_retaddr
13686 ret;
13687
13688 .L__skip_dec:
13689@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13690 vmovdqu RR4, (6*4*4)(%r11);
13691 vmovdqu RL4, (7*4*4)(%r11);
13692
13693+ pax_force_retaddr
13694 ret;
13695 ENDPROC(cast5_ecb_enc_16way)
13696
13697@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13698 vmovdqu RR4, (6*4*4)(%r11);
13699 vmovdqu RL4, (7*4*4)(%r11);
13700
13701+ pax_force_retaddr
13702 ret;
13703 ENDPROC(cast5_ecb_dec_16way)
13704
13705@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13706 * %rdx: src
13707 */
13708
13709- pushq %r12;
13710+ pushq %r14;
13711
13712 movq %rsi, %r11;
13713- movq %rdx, %r12;
13714+ movq %rdx, %r14;
13715
13716 vmovdqu (0*16)(%rdx), RL1;
13717 vmovdqu (1*16)(%rdx), RR1;
13718@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13719 call __cast5_dec_blk16;
13720
13721 /* xor with src */
13722- vmovq (%r12), RX;
13723+ vmovq (%r14), RX;
13724 vpshufd $0x4f, RX, RX;
13725 vpxor RX, RR1, RR1;
13726- vpxor 0*16+8(%r12), RL1, RL1;
13727- vpxor 1*16+8(%r12), RR2, RR2;
13728- vpxor 2*16+8(%r12), RL2, RL2;
13729- vpxor 3*16+8(%r12), RR3, RR3;
13730- vpxor 4*16+8(%r12), RL3, RL3;
13731- vpxor 5*16+8(%r12), RR4, RR4;
13732- vpxor 6*16+8(%r12), RL4, RL4;
13733+ vpxor 0*16+8(%r14), RL1, RL1;
13734+ vpxor 1*16+8(%r14), RR2, RR2;
13735+ vpxor 2*16+8(%r14), RL2, RL2;
13736+ vpxor 3*16+8(%r14), RR3, RR3;
13737+ vpxor 4*16+8(%r14), RL3, RL3;
13738+ vpxor 5*16+8(%r14), RR4, RR4;
13739+ vpxor 6*16+8(%r14), RL4, RL4;
13740
13741 vmovdqu RR1, (0*16)(%r11);
13742 vmovdqu RL1, (1*16)(%r11);
13743@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13744 vmovdqu RR4, (6*16)(%r11);
13745 vmovdqu RL4, (7*16)(%r11);
13746
13747- popq %r12;
13748+ popq %r14;
13749
13750+ pax_force_retaddr
13751 ret;
13752 ENDPROC(cast5_cbc_dec_16way)
13753
13754@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13755 * %rcx: iv (big endian, 64bit)
13756 */
13757
13758- pushq %r12;
13759+ pushq %r14;
13760
13761 movq %rsi, %r11;
13762- movq %rdx, %r12;
13763+ movq %rdx, %r14;
13764
13765 vpcmpeqd RTMP, RTMP, RTMP;
13766 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13767@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13768 call __cast5_enc_blk16;
13769
13770 /* dst = src ^ iv */
13771- vpxor (0*16)(%r12), RR1, RR1;
13772- vpxor (1*16)(%r12), RL1, RL1;
13773- vpxor (2*16)(%r12), RR2, RR2;
13774- vpxor (3*16)(%r12), RL2, RL2;
13775- vpxor (4*16)(%r12), RR3, RR3;
13776- vpxor (5*16)(%r12), RL3, RL3;
13777- vpxor (6*16)(%r12), RR4, RR4;
13778- vpxor (7*16)(%r12), RL4, RL4;
13779+ vpxor (0*16)(%r14), RR1, RR1;
13780+ vpxor (1*16)(%r14), RL1, RL1;
13781+ vpxor (2*16)(%r14), RR2, RR2;
13782+ vpxor (3*16)(%r14), RL2, RL2;
13783+ vpxor (4*16)(%r14), RR3, RR3;
13784+ vpxor (5*16)(%r14), RL3, RL3;
13785+ vpxor (6*16)(%r14), RR4, RR4;
13786+ vpxor (7*16)(%r14), RL4, RL4;
13787 vmovdqu RR1, (0*16)(%r11);
13788 vmovdqu RL1, (1*16)(%r11);
13789 vmovdqu RR2, (2*16)(%r11);
13790@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13791 vmovdqu RR4, (6*16)(%r11);
13792 vmovdqu RL4, (7*16)(%r11);
13793
13794- popq %r12;
13795+ popq %r14;
13796
13797+ pax_force_retaddr
13798 ret;
13799 ENDPROC(cast5_ctr_16way)
13800diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13801index e3531f8..e123f35 100644
13802--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13803+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13804@@ -24,6 +24,7 @@
13805 */
13806
13807 #include <linux/linkage.h>
13808+#include <asm/alternative-asm.h>
13809 #include "glue_helper-asm-avx.S"
13810
13811 .file "cast6-avx-x86_64-asm_64.S"
13812@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13813 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13814 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13815
13816+ pax_force_retaddr
13817 ret;
13818 ENDPROC(__cast6_enc_blk8)
13819
13820@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13821 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13822 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13823
13824+ pax_force_retaddr
13825 ret;
13826 ENDPROC(__cast6_dec_blk8)
13827
13828@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13829
13830 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13831
13832+ pax_force_retaddr
13833 ret;
13834 ENDPROC(cast6_ecb_enc_8way)
13835
13836@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13837
13838 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13839
13840+ pax_force_retaddr
13841 ret;
13842 ENDPROC(cast6_ecb_dec_8way)
13843
13844@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13845 * %rdx: src
13846 */
13847
13848- pushq %r12;
13849+ pushq %r14;
13850
13851 movq %rsi, %r11;
13852- movq %rdx, %r12;
13853+ movq %rdx, %r14;
13854
13855 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13856
13857 call __cast6_dec_blk8;
13858
13859- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13860+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13861
13862- popq %r12;
13863+ popq %r14;
13864
13865+ pax_force_retaddr
13866 ret;
13867 ENDPROC(cast6_cbc_dec_8way)
13868
13869@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13870 * %rcx: iv (little endian, 128bit)
13871 */
13872
13873- pushq %r12;
13874+ pushq %r14;
13875
13876 movq %rsi, %r11;
13877- movq %rdx, %r12;
13878+ movq %rdx, %r14;
13879
13880 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13881 RD2, RX, RKR, RKM);
13882
13883 call __cast6_enc_blk8;
13884
13885- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13886+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13887
13888- popq %r12;
13889+ popq %r14;
13890
13891+ pax_force_retaddr
13892 ret;
13893 ENDPROC(cast6_ctr_8way)
13894
13895@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13896 /* dst <= regs xor IVs(in dst) */
13897 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13898
13899+ pax_force_retaddr
13900 ret;
13901 ENDPROC(cast6_xts_enc_8way)
13902
13903@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13904 /* dst <= regs xor IVs(in dst) */
13905 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13906
13907+ pax_force_retaddr
13908 ret;
13909 ENDPROC(cast6_xts_dec_8way)
13910diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13911index 26d49eb..c0a8c84 100644
13912--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13913+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13914@@ -45,6 +45,7 @@
13915
13916 #include <asm/inst.h>
13917 #include <linux/linkage.h>
13918+#include <asm/alternative-asm.h>
13919
13920 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13921
13922@@ -309,6 +310,7 @@ do_return:
13923 popq %rsi
13924 popq %rdi
13925 popq %rbx
13926+ pax_force_retaddr
13927 ret
13928
13929 ################################################################
13930diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13931index 5d1e007..098cb4f 100644
13932--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
13933+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13934@@ -18,6 +18,7 @@
13935
13936 #include <linux/linkage.h>
13937 #include <asm/inst.h>
13938+#include <asm/alternative-asm.h>
13939
13940 .data
13941
13942@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
13943 psrlq $1, T2
13944 pxor T2, T1
13945 pxor T1, DATA
13946+ pax_force_retaddr
13947 ret
13948 ENDPROC(__clmul_gf128mul_ble)
13949
13950@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
13951 call __clmul_gf128mul_ble
13952 PSHUFB_XMM BSWAP DATA
13953 movups DATA, (%rdi)
13954+ pax_force_retaddr
13955 ret
13956 ENDPROC(clmul_ghash_mul)
13957
13958@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
13959 PSHUFB_XMM BSWAP DATA
13960 movups DATA, (%rdi)
13961 .Lupdate_just_ret:
13962+ pax_force_retaddr
13963 ret
13964 ENDPROC(clmul_ghash_update)
13965diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
13966index 9279e0b..c4b3d2c 100644
13967--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
13968+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
13969@@ -1,4 +1,5 @@
13970 #include <linux/linkage.h>
13971+#include <asm/alternative-asm.h>
13972
13973 # enter salsa20_encrypt_bytes
13974 ENTRY(salsa20_encrypt_bytes)
13975@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
13976 add %r11,%rsp
13977 mov %rdi,%rax
13978 mov %rsi,%rdx
13979+ pax_force_retaddr
13980 ret
13981 # bytesatleast65:
13982 ._bytesatleast65:
13983@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
13984 add %r11,%rsp
13985 mov %rdi,%rax
13986 mov %rsi,%rdx
13987+ pax_force_retaddr
13988 ret
13989 ENDPROC(salsa20_keysetup)
13990
13991@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
13992 add %r11,%rsp
13993 mov %rdi,%rax
13994 mov %rsi,%rdx
13995+ pax_force_retaddr
13996 ret
13997 ENDPROC(salsa20_ivsetup)
13998diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13999index 2f202f4..d9164d6 100644
14000--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14001+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14002@@ -24,6 +24,7 @@
14003 */
14004
14005 #include <linux/linkage.h>
14006+#include <asm/alternative-asm.h>
14007 #include "glue_helper-asm-avx.S"
14008
14009 .file "serpent-avx-x86_64-asm_64.S"
14010@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14011 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14012 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14013
14014+ pax_force_retaddr
14015 ret;
14016 ENDPROC(__serpent_enc_blk8_avx)
14017
14018@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14019 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14020 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14021
14022+ pax_force_retaddr
14023 ret;
14024 ENDPROC(__serpent_dec_blk8_avx)
14025
14026@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14027
14028 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14029
14030+ pax_force_retaddr
14031 ret;
14032 ENDPROC(serpent_ecb_enc_8way_avx)
14033
14034@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14035
14036 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14037
14038+ pax_force_retaddr
14039 ret;
14040 ENDPROC(serpent_ecb_dec_8way_avx)
14041
14042@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14043
14044 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14045
14046+ pax_force_retaddr
14047 ret;
14048 ENDPROC(serpent_cbc_dec_8way_avx)
14049
14050@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14051
14052 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14053
14054+ pax_force_retaddr
14055 ret;
14056 ENDPROC(serpent_ctr_8way_avx)
14057
14058@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14059 /* dst <= regs xor IVs(in dst) */
14060 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14061
14062+ pax_force_retaddr
14063 ret;
14064 ENDPROC(serpent_xts_enc_8way_avx)
14065
14066@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14067 /* dst <= regs xor IVs(in dst) */
14068 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14069
14070+ pax_force_retaddr
14071 ret;
14072 ENDPROC(serpent_xts_dec_8way_avx)
14073diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14074index b222085..abd483c 100644
14075--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14076+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14077@@ -15,6 +15,7 @@
14078 */
14079
14080 #include <linux/linkage.h>
14081+#include <asm/alternative-asm.h>
14082 #include "glue_helper-asm-avx2.S"
14083
14084 .file "serpent-avx2-asm_64.S"
14085@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14086 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14087 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14088
14089+ pax_force_retaddr
14090 ret;
14091 ENDPROC(__serpent_enc_blk16)
14092
14093@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14094 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14095 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14096
14097+ pax_force_retaddr
14098 ret;
14099 ENDPROC(__serpent_dec_blk16)
14100
14101@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14102
14103 vzeroupper;
14104
14105+ pax_force_retaddr
14106 ret;
14107 ENDPROC(serpent_ecb_enc_16way)
14108
14109@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14110
14111 vzeroupper;
14112
14113+ pax_force_retaddr
14114 ret;
14115 ENDPROC(serpent_ecb_dec_16way)
14116
14117@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14118
14119 vzeroupper;
14120
14121+ pax_force_retaddr
14122 ret;
14123 ENDPROC(serpent_cbc_dec_16way)
14124
14125@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14126
14127 vzeroupper;
14128
14129+ pax_force_retaddr
14130 ret;
14131 ENDPROC(serpent_ctr_16way)
14132
14133@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14134
14135 vzeroupper;
14136
14137+ pax_force_retaddr
14138 ret;
14139 ENDPROC(serpent_xts_enc_16way)
14140
14141@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14142
14143 vzeroupper;
14144
14145+ pax_force_retaddr
14146 ret;
14147 ENDPROC(serpent_xts_dec_16way)
14148diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14149index acc066c..1559cc4 100644
14150--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14151+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14152@@ -25,6 +25,7 @@
14153 */
14154
14155 #include <linux/linkage.h>
14156+#include <asm/alternative-asm.h>
14157
14158 .file "serpent-sse2-x86_64-asm_64.S"
14159 .text
14160@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14161 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14162 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14163
14164+ pax_force_retaddr
14165 ret;
14166
14167 .L__enc_xor8:
14168 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14169 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14170
14171+ pax_force_retaddr
14172 ret;
14173 ENDPROC(__serpent_enc_blk_8way)
14174
14175@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14176 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14177 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14178
14179+ pax_force_retaddr
14180 ret;
14181 ENDPROC(serpent_dec_blk_8way)
14182diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14183index a410950..9dfe7ad 100644
14184--- a/arch/x86/crypto/sha1_ssse3_asm.S
14185+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14186@@ -29,6 +29,7 @@
14187 */
14188
14189 #include <linux/linkage.h>
14190+#include <asm/alternative-asm.h>
14191
14192 #define CTX %rdi // arg1
14193 #define BUF %rsi // arg2
14194@@ -75,9 +76,9 @@
14195
14196 push %rbx
14197 push %rbp
14198- push %r12
14199+ push %r14
14200
14201- mov %rsp, %r12
14202+ mov %rsp, %r14
14203 sub $64, %rsp # allocate workspace
14204 and $~15, %rsp # align stack
14205
14206@@ -99,11 +100,12 @@
14207 xor %rax, %rax
14208 rep stosq
14209
14210- mov %r12, %rsp # deallocate workspace
14211+ mov %r14, %rsp # deallocate workspace
14212
14213- pop %r12
14214+ pop %r14
14215 pop %rbp
14216 pop %rbx
14217+ pax_force_retaddr
14218 ret
14219
14220 ENDPROC(\name)
14221diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14222index 642f156..51a513c 100644
14223--- a/arch/x86/crypto/sha256-avx-asm.S
14224+++ b/arch/x86/crypto/sha256-avx-asm.S
14225@@ -49,6 +49,7 @@
14226
14227 #ifdef CONFIG_AS_AVX
14228 #include <linux/linkage.h>
14229+#include <asm/alternative-asm.h>
14230
14231 ## assume buffers not aligned
14232 #define VMOVDQ vmovdqu
14233@@ -460,6 +461,7 @@ done_hash:
14234 popq %r13
14235 popq %rbp
14236 popq %rbx
14237+ pax_force_retaddr
14238 ret
14239 ENDPROC(sha256_transform_avx)
14240
14241diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14242index 9e86944..3795e6a 100644
14243--- a/arch/x86/crypto/sha256-avx2-asm.S
14244+++ b/arch/x86/crypto/sha256-avx2-asm.S
14245@@ -50,6 +50,7 @@
14246
14247 #ifdef CONFIG_AS_AVX2
14248 #include <linux/linkage.h>
14249+#include <asm/alternative-asm.h>
14250
14251 ## assume buffers not aligned
14252 #define VMOVDQ vmovdqu
14253@@ -720,6 +721,7 @@ done_hash:
14254 popq %r12
14255 popq %rbp
14256 popq %rbx
14257+ pax_force_retaddr
14258 ret
14259 ENDPROC(sha256_transform_rorx)
14260
14261diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14262index f833b74..8c62a9e 100644
14263--- a/arch/x86/crypto/sha256-ssse3-asm.S
14264+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14265@@ -47,6 +47,7 @@
14266 ########################################################################
14267
14268 #include <linux/linkage.h>
14269+#include <asm/alternative-asm.h>
14270
14271 ## assume buffers not aligned
14272 #define MOVDQ movdqu
14273@@ -471,6 +472,7 @@ done_hash:
14274 popq %rbp
14275 popq %rbx
14276
14277+ pax_force_retaddr
14278 ret
14279 ENDPROC(sha256_transform_ssse3)
14280
14281diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14282index 974dde9..a823ff9 100644
14283--- a/arch/x86/crypto/sha512-avx-asm.S
14284+++ b/arch/x86/crypto/sha512-avx-asm.S
14285@@ -49,6 +49,7 @@
14286
14287 #ifdef CONFIG_AS_AVX
14288 #include <linux/linkage.h>
14289+#include <asm/alternative-asm.h>
14290
14291 .text
14292
14293@@ -364,6 +365,7 @@ updateblock:
14294 mov frame_RSPSAVE(%rsp), %rsp
14295
14296 nowork:
14297+ pax_force_retaddr
14298 ret
14299 ENDPROC(sha512_transform_avx)
14300
14301diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14302index 568b961..ed20c37 100644
14303--- a/arch/x86/crypto/sha512-avx2-asm.S
14304+++ b/arch/x86/crypto/sha512-avx2-asm.S
14305@@ -51,6 +51,7 @@
14306
14307 #ifdef CONFIG_AS_AVX2
14308 #include <linux/linkage.h>
14309+#include <asm/alternative-asm.h>
14310
14311 .text
14312
14313@@ -678,6 +679,7 @@ done_hash:
14314
14315 # Restore Stack Pointer
14316 mov frame_RSPSAVE(%rsp), %rsp
14317+ pax_force_retaddr
14318 ret
14319 ENDPROC(sha512_transform_rorx)
14320
14321diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14322index fb56855..6edd768 100644
14323--- a/arch/x86/crypto/sha512-ssse3-asm.S
14324+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14325@@ -48,6 +48,7 @@
14326 ########################################################################
14327
14328 #include <linux/linkage.h>
14329+#include <asm/alternative-asm.h>
14330
14331 .text
14332
14333@@ -363,6 +364,7 @@ updateblock:
14334 mov frame_RSPSAVE(%rsp), %rsp
14335
14336 nowork:
14337+ pax_force_retaddr
14338 ret
14339 ENDPROC(sha512_transform_ssse3)
14340
14341diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14342index 0505813..b067311 100644
14343--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14344+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14345@@ -24,6 +24,7 @@
14346 */
14347
14348 #include <linux/linkage.h>
14349+#include <asm/alternative-asm.h>
14350 #include "glue_helper-asm-avx.S"
14351
14352 .file "twofish-avx-x86_64-asm_64.S"
14353@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14354 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14355 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14356
14357+ pax_force_retaddr
14358 ret;
14359 ENDPROC(__twofish_enc_blk8)
14360
14361@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14362 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14363 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14364
14365+ pax_force_retaddr
14366 ret;
14367 ENDPROC(__twofish_dec_blk8)
14368
14369@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14370
14371 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14372
14373+ pax_force_retaddr
14374 ret;
14375 ENDPROC(twofish_ecb_enc_8way)
14376
14377@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14378
14379 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14380
14381+ pax_force_retaddr
14382 ret;
14383 ENDPROC(twofish_ecb_dec_8way)
14384
14385@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14386 * %rdx: src
14387 */
14388
14389- pushq %r12;
14390+ pushq %r14;
14391
14392 movq %rsi, %r11;
14393- movq %rdx, %r12;
14394+ movq %rdx, %r14;
14395
14396 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14397
14398 call __twofish_dec_blk8;
14399
14400- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14401+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14402
14403- popq %r12;
14404+ popq %r14;
14405
14406+ pax_force_retaddr
14407 ret;
14408 ENDPROC(twofish_cbc_dec_8way)
14409
14410@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14411 * %rcx: iv (little endian, 128bit)
14412 */
14413
14414- pushq %r12;
14415+ pushq %r14;
14416
14417 movq %rsi, %r11;
14418- movq %rdx, %r12;
14419+ movq %rdx, %r14;
14420
14421 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14422 RD2, RX0, RX1, RY0);
14423
14424 call __twofish_enc_blk8;
14425
14426- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14427+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14428
14429- popq %r12;
14430+ popq %r14;
14431
14432+ pax_force_retaddr
14433 ret;
14434 ENDPROC(twofish_ctr_8way)
14435
14436@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14437 /* dst <= regs xor IVs(in dst) */
14438 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14439
14440+ pax_force_retaddr
14441 ret;
14442 ENDPROC(twofish_xts_enc_8way)
14443
14444@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14445 /* dst <= regs xor IVs(in dst) */
14446 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14447
14448+ pax_force_retaddr
14449 ret;
14450 ENDPROC(twofish_xts_dec_8way)
14451diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14452index 1c3b7ce..02f578d 100644
14453--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14454+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14455@@ -21,6 +21,7 @@
14456 */
14457
14458 #include <linux/linkage.h>
14459+#include <asm/alternative-asm.h>
14460
14461 .file "twofish-x86_64-asm-3way.S"
14462 .text
14463@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14464 popq %r13;
14465 popq %r14;
14466 popq %r15;
14467+ pax_force_retaddr
14468 ret;
14469
14470 .L__enc_xor3:
14471@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14472 popq %r13;
14473 popq %r14;
14474 popq %r15;
14475+ pax_force_retaddr
14476 ret;
14477 ENDPROC(__twofish_enc_blk_3way)
14478
14479@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14480 popq %r13;
14481 popq %r14;
14482 popq %r15;
14483+ pax_force_retaddr
14484 ret;
14485 ENDPROC(twofish_dec_blk_3way)
14486diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14487index a039d21..524b8b2 100644
14488--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14489+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14490@@ -22,6 +22,7 @@
14491
14492 #include <linux/linkage.h>
14493 #include <asm/asm-offsets.h>
14494+#include <asm/alternative-asm.h>
14495
14496 #define a_offset 0
14497 #define b_offset 4
14498@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14499
14500 popq R1
14501 movq $1,%rax
14502+ pax_force_retaddr
14503 ret
14504 ENDPROC(twofish_enc_blk)
14505
14506@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14507
14508 popq R1
14509 movq $1,%rax
14510+ pax_force_retaddr
14511 ret
14512 ENDPROC(twofish_dec_blk)
14513diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14514index ae6aad1..719d6d9 100644
14515--- a/arch/x86/ia32/ia32_aout.c
14516+++ b/arch/x86/ia32/ia32_aout.c
14517@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14518 unsigned long dump_start, dump_size;
14519 struct user32 dump;
14520
14521+ memset(&dump, 0, sizeof(dump));
14522+
14523 fs = get_fs();
14524 set_fs(KERNEL_DS);
14525 has_dumped = 1;
14526diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14527index f9e181a..300544c 100644
14528--- a/arch/x86/ia32/ia32_signal.c
14529+++ b/arch/x86/ia32/ia32_signal.c
14530@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14531 if (__get_user(set.sig[0], &frame->sc.oldmask)
14532 || (_COMPAT_NSIG_WORDS > 1
14533 && __copy_from_user((((char *) &set.sig) + 4),
14534- &frame->extramask,
14535+ frame->extramask,
14536 sizeof(frame->extramask))))
14537 goto badframe;
14538
14539@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14540 sp -= frame_size;
14541 /* Align the stack pointer according to the i386 ABI,
14542 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14543- sp = ((sp + 4) & -16ul) - 4;
14544+ sp = ((sp - 12) & -16ul) - 4;
14545 return (void __user *) sp;
14546 }
14547
14548@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14549 } else {
14550 /* Return stub is in 32bit vsyscall page */
14551 if (current->mm->context.vdso)
14552- restorer = current->mm->context.vdso +
14553- selected_vdso32->sym___kernel_sigreturn;
14554+ restorer = (void __force_user *)(current->mm->context.vdso +
14555+ selected_vdso32->sym___kernel_sigreturn);
14556 else
14557- restorer = &frame->retcode;
14558+ restorer = frame->retcode;
14559 }
14560
14561 put_user_try {
14562@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14563 * These are actually not used anymore, but left because some
14564 * gdb versions depend on them as a marker.
14565 */
14566- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14567+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14568 } put_user_catch(err);
14569
14570 if (err)
14571@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14572 0xb8,
14573 __NR_ia32_rt_sigreturn,
14574 0x80cd,
14575- 0,
14576+ 0
14577 };
14578
14579 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14580@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14581
14582 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14583 restorer = ksig->ka.sa.sa_restorer;
14584+ else if (current->mm->context.vdso)
14585+ /* Return stub is in 32bit vsyscall page */
14586+ restorer = (void __force_user *)(current->mm->context.vdso +
14587+ selected_vdso32->sym___kernel_rt_sigreturn);
14588 else
14589- restorer = current->mm->context.vdso +
14590- selected_vdso32->sym___kernel_rt_sigreturn;
14591+ restorer = frame->retcode;
14592 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14593
14594 /*
14595 * Not actually used anymore, but left because some gdb
14596 * versions need it.
14597 */
14598- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14599+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14600 } put_user_catch(err);
14601
14602 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
14603diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14604index 82e8a1d..4e998d5 100644
14605--- a/arch/x86/ia32/ia32entry.S
14606+++ b/arch/x86/ia32/ia32entry.S
14607@@ -15,8 +15,10 @@
14608 #include <asm/irqflags.h>
14609 #include <asm/asm.h>
14610 #include <asm/smap.h>
14611+#include <asm/pgtable.h>
14612 #include <linux/linkage.h>
14613 #include <linux/err.h>
14614+#include <asm/alternative-asm.h>
14615
14616 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14617 #include <linux/elf-em.h>
14618@@ -62,12 +64,12 @@
14619 */
14620 .macro LOAD_ARGS32 offset, _r9=0
14621 .if \_r9
14622- movl \offset+16(%rsp),%r9d
14623+ movl \offset+R9(%rsp),%r9d
14624 .endif
14625- movl \offset+40(%rsp),%ecx
14626- movl \offset+48(%rsp),%edx
14627- movl \offset+56(%rsp),%esi
14628- movl \offset+64(%rsp),%edi
14629+ movl \offset+RCX(%rsp),%ecx
14630+ movl \offset+RDX(%rsp),%edx
14631+ movl \offset+RSI(%rsp),%esi
14632+ movl \offset+RDI(%rsp),%edi
14633 movl %eax,%eax /* zero extension */
14634 .endm
14635
14636@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14637 ENDPROC(native_irq_enable_sysexit)
14638 #endif
14639
14640+ .macro pax_enter_kernel_user
14641+ pax_set_fptr_mask
14642+#ifdef CONFIG_PAX_MEMORY_UDEREF
14643+ call pax_enter_kernel_user
14644+#endif
14645+ .endm
14646+
14647+ .macro pax_exit_kernel_user
14648+#ifdef CONFIG_PAX_MEMORY_UDEREF
14649+ call pax_exit_kernel_user
14650+#endif
14651+#ifdef CONFIG_PAX_RANDKSTACK
14652+ pushq %rax
14653+ pushq %r11
14654+ call pax_randomize_kstack
14655+ popq %r11
14656+ popq %rax
14657+#endif
14658+ .endm
14659+
14660+ .macro pax_erase_kstack
14661+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14662+ call pax_erase_kstack
14663+#endif
14664+ .endm
14665+
14666 /*
14667 * 32bit SYSENTER instruction entry.
14668 *
14669@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14670 CFI_REGISTER rsp,rbp
14671 SWAPGS_UNSAFE_STACK
14672 movq PER_CPU_VAR(kernel_stack), %rsp
14673- addq $(KERNEL_STACK_OFFSET),%rsp
14674- /*
14675- * No need to follow this irqs on/off section: the syscall
14676- * disabled irqs, here we enable it straight after entry:
14677- */
14678- ENABLE_INTERRUPTS(CLBR_NONE)
14679 movl %ebp,%ebp /* zero extension */
14680 pushq_cfi $__USER32_DS
14681 /*CFI_REL_OFFSET ss,0*/
14682@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
14683 CFI_REL_OFFSET rsp,0
14684 pushfq_cfi
14685 /*CFI_REL_OFFSET rflags,0*/
14686- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14687- CFI_REGISTER rip,r10
14688+ orl $X86_EFLAGS_IF,(%rsp)
14689+ GET_THREAD_INFO(%r11)
14690+ movl TI_sysenter_return(%r11), %r11d
14691+ CFI_REGISTER rip,r11
14692 pushq_cfi $__USER32_CS
14693 /*CFI_REL_OFFSET cs,0*/
14694 movl %eax, %eax
14695- pushq_cfi %r10
14696+ pushq_cfi %r11
14697 CFI_REL_OFFSET rip,0
14698 pushq_cfi %rax
14699 cld
14700 SAVE_ARGS 0,1,0
14701+ pax_enter_kernel_user
14702+
14703+#ifdef CONFIG_PAX_RANDKSTACK
14704+ pax_erase_kstack
14705+#endif
14706+
14707+ /*
14708+ * No need to follow this irqs on/off section: the syscall
14709+ * disabled irqs, here we enable it straight after entry:
14710+ */
14711+ ENABLE_INTERRUPTS(CLBR_NONE)
14712 /* no need to do an access_ok check here because rbp has been
14713 32bit zero extended */
14714+
14715+#ifdef CONFIG_PAX_MEMORY_UDEREF
14716+ addq pax_user_shadow_base,%rbp
14717+ ASM_PAX_OPEN_USERLAND
14718+#endif
14719+
14720 ASM_STAC
14721 1: movl (%rbp),%ebp
14722 _ASM_EXTABLE(1b,ia32_badarg)
14723 ASM_CLAC
14724
14725+#ifdef CONFIG_PAX_MEMORY_UDEREF
14726+ ASM_PAX_CLOSE_USERLAND
14727+#endif
14728+
14729 /*
14730 * Sysenter doesn't filter flags, so we need to clear NT
14731 * ourselves. To save a few cycles, we can check whether
14732@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
14733 jnz sysenter_fix_flags
14734 sysenter_flags_fixed:
14735
14736- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14737- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14738+ GET_THREAD_INFO(%r11)
14739+ orl $TS_COMPAT,TI_status(%r11)
14740+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14741 CFI_REMEMBER_STATE
14742 jnz sysenter_tracesys
14743 cmpq $(IA32_NR_syscalls-1),%rax
14744@@ -172,15 +218,18 @@ sysenter_do_call:
14745 sysenter_dispatch:
14746 call *ia32_sys_call_table(,%rax,8)
14747 movq %rax,RAX-ARGOFFSET(%rsp)
14748+ GET_THREAD_INFO(%r11)
14749 DISABLE_INTERRUPTS(CLBR_NONE)
14750 TRACE_IRQS_OFF
14751- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14752+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14753 jnz sysexit_audit
14754 sysexit_from_sys_call:
14755- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14756+ pax_exit_kernel_user
14757+ pax_erase_kstack
14758+ andl $~TS_COMPAT,TI_status(%r11)
14759 /* clear IF, that popfq doesn't enable interrupts early */
14760- andl $~0x200,EFLAGS-R11(%rsp)
14761- movl RIP-R11(%rsp),%edx /* User %eip */
14762+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
14763+ movl RIP(%rsp),%edx /* User %eip */
14764 CFI_REGISTER rip,rdx
14765 RESTORE_ARGS 0,24,0,0,0,0
14766 xorq %r8,%r8
14767@@ -205,6 +254,9 @@ sysexit_from_sys_call:
14768 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14769 movl %eax,%edi /* 1st arg: syscall number */
14770 call __audit_syscall_entry
14771+
14772+ pax_erase_kstack
14773+
14774 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14775 cmpq $(IA32_NR_syscalls-1),%rax
14776 ja ia32_badsys
14777@@ -216,7 +268,7 @@ sysexit_from_sys_call:
14778 .endm
14779
14780 .macro auditsys_exit exit
14781- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14782+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14783 jnz ia32_ret_from_sys_call
14784 TRACE_IRQS_ON
14785 ENABLE_INTERRUPTS(CLBR_NONE)
14786@@ -227,11 +279,12 @@ sysexit_from_sys_call:
14787 1: setbe %al /* 1 if error, 0 if not */
14788 movzbl %al,%edi /* zero-extend that into %edi */
14789 call __audit_syscall_exit
14790+ GET_THREAD_INFO(%r11)
14791 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14792 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14793 DISABLE_INTERRUPTS(CLBR_NONE)
14794 TRACE_IRQS_OFF
14795- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14796+ testl %edi,TI_flags(%r11)
14797 jz \exit
14798 CLEAR_RREGS -ARGOFFSET
14799 jmp int_with_check
14800@@ -253,7 +306,7 @@ sysenter_fix_flags:
14801
14802 sysenter_tracesys:
14803 #ifdef CONFIG_AUDITSYSCALL
14804- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14805+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14806 jz sysenter_auditsys
14807 #endif
14808 SAVE_REST
14809@@ -265,6 +318,9 @@ sysenter_tracesys:
14810 RESTORE_REST
14811 cmpq $(IA32_NR_syscalls-1),%rax
14812 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14813+
14814+ pax_erase_kstack
14815+
14816 jmp sysenter_do_call
14817 CFI_ENDPROC
14818 ENDPROC(ia32_sysenter_target)
14819@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14820 ENTRY(ia32_cstar_target)
14821 CFI_STARTPROC32 simple
14822 CFI_SIGNAL_FRAME
14823- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14824+ CFI_DEF_CFA rsp,0
14825 CFI_REGISTER rip,rcx
14826 /*CFI_REGISTER rflags,r11*/
14827 SWAPGS_UNSAFE_STACK
14828 movl %esp,%r8d
14829 CFI_REGISTER rsp,r8
14830 movq PER_CPU_VAR(kernel_stack),%rsp
14831+ SAVE_ARGS 8*6,0,0
14832+ pax_enter_kernel_user
14833+
14834+#ifdef CONFIG_PAX_RANDKSTACK
14835+ pax_erase_kstack
14836+#endif
14837+
14838 /*
14839 * No need to follow this irqs on/off section: the syscall
14840 * disabled irqs and here we enable it straight after entry:
14841 */
14842 ENABLE_INTERRUPTS(CLBR_NONE)
14843- SAVE_ARGS 8,0,0
14844 movl %eax,%eax /* zero extension */
14845 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14846 movq %rcx,RIP-ARGOFFSET(%rsp)
14847@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14848 /* no need to do an access_ok check here because r8 has been
14849 32bit zero extended */
14850 /* hardware stack frame is complete now */
14851+
14852+#ifdef CONFIG_PAX_MEMORY_UDEREF
14853+ ASM_PAX_OPEN_USERLAND
14854+ movq pax_user_shadow_base,%r8
14855+ addq RSP-ARGOFFSET(%rsp),%r8
14856+#endif
14857+
14858 ASM_STAC
14859 1: movl (%r8),%r9d
14860 _ASM_EXTABLE(1b,ia32_badarg)
14861 ASM_CLAC
14862- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14863- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14864+
14865+#ifdef CONFIG_PAX_MEMORY_UDEREF
14866+ ASM_PAX_CLOSE_USERLAND
14867+#endif
14868+
14869+ GET_THREAD_INFO(%r11)
14870+ orl $TS_COMPAT,TI_status(%r11)
14871+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14872 CFI_REMEMBER_STATE
14873 jnz cstar_tracesys
14874 cmpq $IA32_NR_syscalls-1,%rax
14875@@ -335,13 +410,16 @@ cstar_do_call:
14876 cstar_dispatch:
14877 call *ia32_sys_call_table(,%rax,8)
14878 movq %rax,RAX-ARGOFFSET(%rsp)
14879+ GET_THREAD_INFO(%r11)
14880 DISABLE_INTERRUPTS(CLBR_NONE)
14881 TRACE_IRQS_OFF
14882- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14883+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14884 jnz sysretl_audit
14885 sysretl_from_sys_call:
14886- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14887- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14888+ pax_exit_kernel_user
14889+ pax_erase_kstack
14890+ andl $~TS_COMPAT,TI_status(%r11)
14891+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14892 movl RIP-ARGOFFSET(%rsp),%ecx
14893 CFI_REGISTER rip,rcx
14894 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14895@@ -368,7 +446,7 @@ sysretl_audit:
14896
14897 cstar_tracesys:
14898 #ifdef CONFIG_AUDITSYSCALL
14899- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14900+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14901 jz cstar_auditsys
14902 #endif
14903 xchgl %r9d,%ebp
14904@@ -382,11 +460,19 @@ cstar_tracesys:
14905 xchgl %ebp,%r9d
14906 cmpq $(IA32_NR_syscalls-1),%rax
14907 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14908+
14909+ pax_erase_kstack
14910+
14911 jmp cstar_do_call
14912 END(ia32_cstar_target)
14913
14914 ia32_badarg:
14915 ASM_CLAC
14916+
14917+#ifdef CONFIG_PAX_MEMORY_UDEREF
14918+ ASM_PAX_CLOSE_USERLAND
14919+#endif
14920+
14921 movq $-EFAULT,%rax
14922 jmp ia32_sysret
14923 CFI_ENDPROC
14924@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
14925 CFI_REL_OFFSET rip,RIP-RIP
14926 PARAVIRT_ADJUST_EXCEPTION_FRAME
14927 SWAPGS
14928- /*
14929- * No need to follow this irqs on/off section: the syscall
14930- * disabled irqs and here we enable it straight after entry:
14931- */
14932- ENABLE_INTERRUPTS(CLBR_NONE)
14933 movl %eax,%eax
14934 pushq_cfi %rax
14935 cld
14936 /* note the registers are not zero extended to the sf.
14937 this could be a problem. */
14938 SAVE_ARGS 0,1,0
14939- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14940- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14941+ pax_enter_kernel_user
14942+
14943+#ifdef CONFIG_PAX_RANDKSTACK
14944+ pax_erase_kstack
14945+#endif
14946+
14947+ /*
14948+ * No need to follow this irqs on/off section: the syscall
14949+ * disabled irqs and here we enable it straight after entry:
14950+ */
14951+ ENABLE_INTERRUPTS(CLBR_NONE)
14952+ GET_THREAD_INFO(%r11)
14953+ orl $TS_COMPAT,TI_status(%r11)
14954+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14955 jnz ia32_tracesys
14956 cmpq $(IA32_NR_syscalls-1),%rax
14957 ja ia32_badsys
14958@@ -458,6 +551,9 @@ ia32_tracesys:
14959 RESTORE_REST
14960 cmpq $(IA32_NR_syscalls-1),%rax
14961 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
14962+
14963+ pax_erase_kstack
14964+
14965 jmp ia32_do_call
14966 END(ia32_syscall)
14967
14968diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
14969index 8e0ceec..af13504 100644
14970--- a/arch/x86/ia32/sys_ia32.c
14971+++ b/arch/x86/ia32/sys_ia32.c
14972@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
14973 */
14974 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
14975 {
14976- typeof(ubuf->st_uid) uid = 0;
14977- typeof(ubuf->st_gid) gid = 0;
14978+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
14979+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
14980 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
14981 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
14982 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
14983diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
14984index 372231c..51b537d 100644
14985--- a/arch/x86/include/asm/alternative-asm.h
14986+++ b/arch/x86/include/asm/alternative-asm.h
14987@@ -18,6 +18,45 @@
14988 .endm
14989 #endif
14990
14991+#ifdef KERNEXEC_PLUGIN
14992+ .macro pax_force_retaddr_bts rip=0
14993+ btsq $63,\rip(%rsp)
14994+ .endm
14995+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14996+ .macro pax_force_retaddr rip=0, reload=0
14997+ btsq $63,\rip(%rsp)
14998+ .endm
14999+ .macro pax_force_fptr ptr
15000+ btsq $63,\ptr
15001+ .endm
15002+ .macro pax_set_fptr_mask
15003+ .endm
15004+#endif
15005+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15006+ .macro pax_force_retaddr rip=0, reload=0
15007+ .if \reload
15008+ pax_set_fptr_mask
15009+ .endif
15010+ orq %r12,\rip(%rsp)
15011+ .endm
15012+ .macro pax_force_fptr ptr
15013+ orq %r12,\ptr
15014+ .endm
15015+ .macro pax_set_fptr_mask
15016+ movabs $0x8000000000000000,%r12
15017+ .endm
15018+#endif
15019+#else
15020+ .macro pax_force_retaddr rip=0, reload=0
15021+ .endm
15022+ .macro pax_force_fptr ptr
15023+ .endm
15024+ .macro pax_force_retaddr_bts rip=0
15025+ .endm
15026+ .macro pax_set_fptr_mask
15027+ .endm
15028+#endif
15029+
15030 .macro altinstruction_entry orig alt feature orig_len alt_len
15031 .long \orig - .
15032 .long \alt - .
15033diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15034index 473bdbe..b1e3377 100644
15035--- a/arch/x86/include/asm/alternative.h
15036+++ b/arch/x86/include/asm/alternative.h
15037@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15038 ".pushsection .discard,\"aw\",@progbits\n" \
15039 DISCARD_ENTRY(1) \
15040 ".popsection\n" \
15041- ".pushsection .altinstr_replacement, \"ax\"\n" \
15042+ ".pushsection .altinstr_replacement, \"a\"\n" \
15043 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15044 ".popsection"
15045
15046@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15047 DISCARD_ENTRY(1) \
15048 DISCARD_ENTRY(2) \
15049 ".popsection\n" \
15050- ".pushsection .altinstr_replacement, \"ax\"\n" \
15051+ ".pushsection .altinstr_replacement, \"a\"\n" \
15052 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15053 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15054 ".popsection"
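
Both hunks above drop the execute flag from .altinstr_replacement ("ax" becomes "a"). The replacement instructions are only ever copied over the original code by apply_alternatives(); they are never executed in place, so under KERNEXEC there is no reason to keep an extra executable mapping alive. A standalone illustration that bytes placed in a custom allocatable, non-executable section are still perfectly good data, assuming a GNU toolchain:

#include <stdio.h>

/* park three bytes in a custom allocatable section */
asm(".pushsection .demo_replacement, \"a\", @progbits\n"
    ".globl demo_bytes\n"
    "demo_bytes: .byte 0x90, 0x90, 0xc3\n"      /* nop; nop; ret */
    ".popsection\n");

extern const unsigned char demo_bytes[3];

int main(void)
{
        /* readable as data; jumping here would likely fault, just as a
         * kernel would if it executed .altinstr_replacement in place */
        printf("%02x %02x %02x\n", demo_bytes[0], demo_bytes[1], demo_bytes[2]);
        return 0;
}
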
15055diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15056index 465b309..ab7e51f 100644
15057--- a/arch/x86/include/asm/apic.h
15058+++ b/arch/x86/include/asm/apic.h
15059@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15060
15061 #ifdef CONFIG_X86_LOCAL_APIC
15062
15063-extern unsigned int apic_verbosity;
15064+extern int apic_verbosity;
15065 extern int local_apic_timer_c2_ok;
15066
15067 extern int disable_apic;
15068diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15069index 20370c6..a2eb9b0 100644
15070--- a/arch/x86/include/asm/apm.h
15071+++ b/arch/x86/include/asm/apm.h
15072@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15073 __asm__ __volatile__(APM_DO_ZERO_SEGS
15074 "pushl %%edi\n\t"
15075 "pushl %%ebp\n\t"
15076- "lcall *%%cs:apm_bios_entry\n\t"
15077+ "lcall *%%ss:apm_bios_entry\n\t"
15078 "setc %%al\n\t"
15079 "popl %%ebp\n\t"
15080 "popl %%edi\n\t"
15081@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15082 __asm__ __volatile__(APM_DO_ZERO_SEGS
15083 "pushl %%edi\n\t"
15084 "pushl %%ebp\n\t"
15085- "lcall *%%cs:apm_bios_entry\n\t"
15086+ "lcall *%%ss:apm_bios_entry\n\t"
15087 "setc %%bl\n\t"
15088 "popl %%ebp\n\t"
15089 "popl %%edi\n\t"
15090diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15091index 5e5cd12..51cdc93 100644
15092--- a/arch/x86/include/asm/atomic.h
15093+++ b/arch/x86/include/asm/atomic.h
15094@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15095 }
15096
15097 /**
15098+ * atomic_read_unchecked - read atomic variable
15099+ * @v: pointer of type atomic_unchecked_t
15100+ *
15101+ * Atomically reads the value of @v.
15102+ */
15103+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15104+{
15105+ return ACCESS_ONCE((v)->counter);
15106+}
15107+
15108+/**
15109 * atomic_set - set atomic variable
15110 * @v: pointer of type atomic_t
15111 * @i: required value
15112@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15113 }
15114
15115 /**
15116+ * atomic_set_unchecked - set atomic variable
15117+ * @v: pointer of type atomic_unchecked_t
15118+ * @i: required value
15119+ *
15120+ * Atomically sets the value of @v to @i.
15121+ */
15122+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15123+{
15124+ v->counter = i;
15125+}
15126+
15127+/**
15128 * atomic_add - add integer to atomic variable
15129 * @i: integer value to add
15130 * @v: pointer of type atomic_t
15131@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15132 */
15133 static inline void atomic_add(int i, atomic_t *v)
15134 {
15135- asm volatile(LOCK_PREFIX "addl %1,%0"
15136+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15137+
15138+#ifdef CONFIG_PAX_REFCOUNT
15139+ "jno 0f\n"
15140+ LOCK_PREFIX "subl %1,%0\n"
15141+ "int $4\n0:\n"
15142+ _ASM_EXTABLE(0b, 0b)
15143+#endif
15144+
15145+ : "+m" (v->counter)
15146+ : "ir" (i));
15147+}
15148+
15149+/**
15150+ * atomic_add_unchecked - add integer to atomic variable
15151+ * @i: integer value to add
15152+ * @v: pointer of type atomic_unchecked_t
15153+ *
15154+ * Atomically adds @i to @v.
15155+ */
15156+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15157+{
15158+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15159 : "+m" (v->counter)
15160 : "ir" (i));
15161 }
15162@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15163 */
15164 static inline void atomic_sub(int i, atomic_t *v)
15165 {
15166- asm volatile(LOCK_PREFIX "subl %1,%0"
15167+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15168+
15169+#ifdef CONFIG_PAX_REFCOUNT
15170+ "jno 0f\n"
15171+ LOCK_PREFIX "addl %1,%0\n"
15172+ "int $4\n0:\n"
15173+ _ASM_EXTABLE(0b, 0b)
15174+#endif
15175+
15176+ : "+m" (v->counter)
15177+ : "ir" (i));
15178+}
15179+
15180+/**
15181+ * atomic_sub_unchecked - subtract integer from atomic variable
15182+ * @i: integer value to subtract
15183+ * @v: pointer of type atomic_unchecked_t
15184+ *
15185+ * Atomically subtracts @i from @v.
15186+ */
15187+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15188+{
15189+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15190 : "+m" (v->counter)
15191 : "ir" (i));
15192 }
15193@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15194 */
15195 static inline int atomic_sub_and_test(int i, atomic_t *v)
15196 {
15197- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15198+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15199 }
15200
15201 /**
15202@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15203 */
15204 static inline void atomic_inc(atomic_t *v)
15205 {
15206- asm volatile(LOCK_PREFIX "incl %0"
15207+ asm volatile(LOCK_PREFIX "incl %0\n"
15208+
15209+#ifdef CONFIG_PAX_REFCOUNT
15210+ "jno 0f\n"
15211+ LOCK_PREFIX "decl %0\n"
15212+ "int $4\n0:\n"
15213+ _ASM_EXTABLE(0b, 0b)
15214+#endif
15215+
15216+ : "+m" (v->counter));
15217+}
15218+
15219+/**
15220+ * atomic_inc_unchecked - increment atomic variable
15221+ * @v: pointer of type atomic_unchecked_t
15222+ *
15223+ * Atomically increments @v by 1.
15224+ */
15225+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15226+{
15227+ asm volatile(LOCK_PREFIX "incl %0\n"
15228 : "+m" (v->counter));
15229 }
15230
15231@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15232 */
15233 static inline void atomic_dec(atomic_t *v)
15234 {
15235- asm volatile(LOCK_PREFIX "decl %0"
15236+ asm volatile(LOCK_PREFIX "decl %0\n"
15237+
15238+#ifdef CONFIG_PAX_REFCOUNT
15239+ "jno 0f\n"
15240+ LOCK_PREFIX "incl %0\n"
15241+ "int $4\n0:\n"
15242+ _ASM_EXTABLE(0b, 0b)
15243+#endif
15244+
15245+ : "+m" (v->counter));
15246+}
15247+
15248+/**
15249+ * atomic_dec_unchecked - decrement atomic variable
15250+ * @v: pointer of type atomic_unchecked_t
15251+ *
15252+ * Atomically decrements @v by 1.
15253+ */
15254+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15255+{
15256+ asm volatile(LOCK_PREFIX "decl %0\n"
15257 : "+m" (v->counter));
15258 }
15259
15260@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15261 */
15262 static inline int atomic_dec_and_test(atomic_t *v)
15263 {
15264- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15265+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15266 }
15267
15268 /**
15269@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15270 */
15271 static inline int atomic_inc_and_test(atomic_t *v)
15272 {
15273- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15274+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15275+}
15276+
15277+/**
15278+ * atomic_inc_and_test_unchecked - increment and test
15279+ * @v: pointer of type atomic_unchecked_t
15280+ *
15281+ * Atomically increments @v by 1
15282+ * and returns true if the result is zero, or false for all
15283+ * other cases.
15284+ */
15285+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15286+{
15287+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15288 }
15289
15290 /**
15291@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15292 */
15293 static inline int atomic_add_negative(int i, atomic_t *v)
15294 {
15295- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15296+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15297 }
15298
15299 /**
15300@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15301 *
15302 * Atomically adds @i to @v and returns @i + @v
15303 */
15304-static inline int atomic_add_return(int i, atomic_t *v)
15305+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15306+{
15307+ return i + xadd_check_overflow(&v->counter, i);
15308+}
15309+
15310+/**
15311+ * atomic_add_return_unchecked - add integer and return
15312+ * @i: integer value to add
15313+ * @v: pointer of type atomic_unchecked_t
15314+ *
15315+ * Atomically adds @i to @v and returns @i + @v
15316+ */
15317+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15318 {
15319 return i + xadd(&v->counter, i);
15320 }
15321@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15322 *
15323 * Atomically subtracts @i from @v and returns @v - @i
15324 */
15325-static inline int atomic_sub_return(int i, atomic_t *v)
15326+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15327 {
15328 return atomic_add_return(-i, v);
15329 }
15330
15331 #define atomic_inc_return(v) (atomic_add_return(1, v))
15332+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15333+{
15334+ return atomic_add_return_unchecked(1, v);
15335+}
15336 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15337
15338-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15339+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15340+{
15341+ return cmpxchg(&v->counter, old, new);
15342+}
15343+
15344+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15345 {
15346 return cmpxchg(&v->counter, old, new);
15347 }
15348@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15349 return xchg(&v->counter, new);
15350 }
15351
15352+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15353+{
15354+ return xchg(&v->counter, new);
15355+}
15356+
15357 /**
15358 * __atomic_add_unless - add unless the number is already a given value
15359 * @v: pointer of type atomic_t
15360@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15361 */
15362 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15363 {
15364- int c, old;
15365+ int c, old, new;
15366 c = atomic_read(v);
15367 for (;;) {
15368- if (unlikely(c == (u)))
15369+ if (unlikely(c == u))
15370 break;
15371- old = atomic_cmpxchg((v), c, c + (a));
15372+
15373+ asm volatile("addl %2,%0\n"
15374+
15375+#ifdef CONFIG_PAX_REFCOUNT
15376+ "jno 0f\n"
15377+ "subl %2,%0\n"
15378+ "int $4\n0:\n"
15379+ _ASM_EXTABLE(0b, 0b)
15380+#endif
15381+
15382+ : "=r" (new)
15383+ : "0" (c), "ir" (a));
15384+
15385+ old = atomic_cmpxchg(v, c, new);
15386 if (likely(old == c))
15387 break;
15388 c = old;
15389@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15390 }
15391
15392 /**
15393+ * atomic_inc_not_zero_hint - increment if not null
15394+ * @v: pointer of type atomic_t
15395+ * @hint: probable value of the atomic before the increment
15396+ *
15397+ * This version of atomic_inc_not_zero() gives a hint of probable
15398+ * value of the atomic. This helps processor to not read the memory
15399+ * before doing the atomic read/modify/write cycle, lowering
15400+ * number of bus transactions on some arches.
15401+ *
15402+ * Returns: 0 if increment was not done, 1 otherwise.
15403+ */
15404+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15405+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15406+{
15407+ int val, c = hint, new;
15408+
15409+ /* sanity test, should be removed by compiler if hint is a constant */
15410+ if (!hint)
15411+ return __atomic_add_unless(v, 1, 0);
15412+
15413+ do {
15414+ asm volatile("incl %0\n"
15415+
15416+#ifdef CONFIG_PAX_REFCOUNT
15417+ "jno 0f\n"
15418+ "decl %0\n"
15419+ "int $4\n0:\n"
15420+ _ASM_EXTABLE(0b, 0b)
15421+#endif
15422+
15423+ : "=r" (new)
15424+ : "0" (c));
15425+
15426+ val = atomic_cmpxchg(v, c, new);
15427+ if (val == c)
15428+ return 1;
15429+ c = val;
15430+ } while (c);
15431+
15432+ return 0;
15433+}
15434+
15435+/**
15436 * atomic_inc_short - increment of a short integer
15437 * @v: pointer to type int
15438 *
15439@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15440 }
15441
15442 /* These are x86-specific, used by some header files */
15443-#define atomic_clear_mask(mask, addr) \
15444- asm volatile(LOCK_PREFIX "andl %0,%1" \
15445- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15446+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15447+{
15448+ asm volatile(LOCK_PREFIX "andl %1,%0"
15449+ : "+m" (v->counter)
15450+ : "r" (~(mask))
15451+ : "memory");
15452+}
15453
15454-#define atomic_set_mask(mask, addr) \
15455- asm volatile(LOCK_PREFIX "orl %0,%1" \
15456- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15457- : "memory")
15458+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15459+{
15460+ asm volatile(LOCK_PREFIX "andl %1,%0"
15461+ : "+m" (v->counter)
15462+ : "r" (~(mask))
15463+ : "memory");
15464+}
15465+
15466+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15467+{
15468+ asm volatile(LOCK_PREFIX "orl %1,%0"
15469+ : "+m" (v->counter)
15470+ : "r" (mask)
15471+ : "memory");
15472+}
15473+
15474+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15475+{
15476+ asm volatile(LOCK_PREFIX "orl %1,%0"
15477+ : "+m" (v->counter)
15478+ : "r" (mask)
15479+ : "memory");
15480+}
15481
15482 #ifdef CONFIG_X86_32
15483 # include <asm/atomic64_32.h>
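
The PAX_REFCOUNT pattern repeated throughout this file always has the same shape: perform the locked operation, jno past the fixup while the OF flag is clear, otherwise undo the operation and raise int $4 (#OF), with an exception-table entry pointing the recovery path back at the following instruction. A userspace sketch of the detect-and-undo half; the trap is replaced by a captured flag since there is no exception table here:

#include <stdio.h>
#include <limits.h>

/* returns 1 if the increment stuck, 0 if it overflowed and was undone */
static int refcount_inc_checked(int *counter)
{
        unsigned char overflowed;

        asm volatile("lock incl %0\n\t"
                     "seto %1\n\t"              /* capture OF before it changes */
                     "jno 1f\n\t"
                     "lock decl %0\n\t"         /* overflow: undo the increment */
                     "1:"
                     : "+m" (*counter), "=q" (overflowed)
                     : : "memory", "cc");
        return !overflowed;
}

int main(void)
{
        int c = INT_MAX;

        printf("ok=%d c=%d\n", refcount_inc_checked(&c), c);   /* ok=0 c=2147483647 */
        c = 1;
        printf("ok=%d c=%d\n", refcount_inc_checked(&c), c);   /* ok=1 c=2 */
        return 0;
}

As in the kernel version, a racing reader can glimpse the wrapped value in the window between the operation and its undo; the patch evidently accepts that narrow window, since the #OF path ends the offender anyway.
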
15484diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15485index b154de7..bf18a5a 100644
15486--- a/arch/x86/include/asm/atomic64_32.h
15487+++ b/arch/x86/include/asm/atomic64_32.h
15488@@ -12,6 +12,14 @@ typedef struct {
15489 u64 __aligned(8) counter;
15490 } atomic64_t;
15491
15492+#ifdef CONFIG_PAX_REFCOUNT
15493+typedef struct {
15494+ u64 __aligned(8) counter;
15495+} atomic64_unchecked_t;
15496+#else
15497+typedef atomic64_t atomic64_unchecked_t;
15498+#endif
15499+
15500 #define ATOMIC64_INIT(val) { (val) }
15501
15502 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15503@@ -37,21 +45,31 @@ typedef struct {
15504 ATOMIC64_DECL_ONE(sym##_386)
15505
15506 ATOMIC64_DECL_ONE(add_386);
15507+ATOMIC64_DECL_ONE(add_unchecked_386);
15508 ATOMIC64_DECL_ONE(sub_386);
15509+ATOMIC64_DECL_ONE(sub_unchecked_386);
15510 ATOMIC64_DECL_ONE(inc_386);
15511+ATOMIC64_DECL_ONE(inc_unchecked_386);
15512 ATOMIC64_DECL_ONE(dec_386);
15513+ATOMIC64_DECL_ONE(dec_unchecked_386);
15514 #endif
15515
15516 #define alternative_atomic64(f, out, in...) \
15517 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15518
15519 ATOMIC64_DECL(read);
15520+ATOMIC64_DECL(read_unchecked);
15521 ATOMIC64_DECL(set);
15522+ATOMIC64_DECL(set_unchecked);
15523 ATOMIC64_DECL(xchg);
15524 ATOMIC64_DECL(add_return);
15525+ATOMIC64_DECL(add_return_unchecked);
15526 ATOMIC64_DECL(sub_return);
15527+ATOMIC64_DECL(sub_return_unchecked);
15528 ATOMIC64_DECL(inc_return);
15529+ATOMIC64_DECL(inc_return_unchecked);
15530 ATOMIC64_DECL(dec_return);
15531+ATOMIC64_DECL(dec_return_unchecked);
15532 ATOMIC64_DECL(dec_if_positive);
15533 ATOMIC64_DECL(inc_not_zero);
15534 ATOMIC64_DECL(add_unless);
15535@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15536 }
15537
15538 /**
15539+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15540+ * @v: pointer to type atomic64_unchecked_t
15541+ * @o: expected value
15542+ * @n: new value
15543+ *
15544+ * Atomically sets @v to @n if it was equal to @o and returns
15545+ * the old value.
15546+ */
15547+
15548+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15549+{
15550+ return cmpxchg64(&v->counter, o, n);
15551+}
15552+
15553+/**
15554 * atomic64_xchg - xchg atomic64 variable
15555 * @v: pointer to type atomic64_t
15556 * @n: value to assign
15557@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15558 }
15559
15560 /**
15561+ * atomic64_set_unchecked - set atomic64 variable
15562+ * @v: pointer to type atomic64_unchecked_t
15563+ * @n: value to assign
15564+ *
15565+ * Atomically sets the value of @v to @n.
15566+ */
15567+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15568+{
15569+ unsigned high = (unsigned)(i >> 32);
15570+ unsigned low = (unsigned)i;
15571+ alternative_atomic64(set, /* no output */,
15572+ "S" (v), "b" (low), "c" (high)
15573+ : "eax", "edx", "memory");
15574+}
15575+
15576+/**
15577 * atomic64_read - read atomic64 variable
15578 * @v: pointer to type atomic64_t
15579 *
15580@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15581 }
15582
15583 /**
15584+ * atomic64_read_unchecked - read atomic64 variable
15585+ * @v: pointer to type atomic64_unchecked_t
15586+ *
15587+ * Atomically reads the value of @v and returns it.
15588+ */
15589+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15590+{
15591+ long long r;
15592+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15593+ return r;
15594+ }
15595+
15596+/**
15597 * atomic64_add_return - add and return
15598 * @i: integer value to add
15599 * @v: pointer to type atomic64_t
15600@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15601 return i;
15602 }
15603
15604+/**
15605+ * atomic64_add_return_unchecked - add and return
15606+ * @i: integer value to add
15607+ * @v: pointer to type atomic64_unchecked_t
15608+ *
15609+ * Atomically adds @i to @v and returns @i + *@v
15610+ */
15611+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15612+{
15613+ alternative_atomic64(add_return_unchecked,
15614+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15615+ ASM_NO_INPUT_CLOBBER("memory"));
15616+ return i;
15617+}
15618+
15619 /*
15620 * Other variants with different arithmetic operators:
15621 */
15622@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15623 return a;
15624 }
15625
15626+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15627+{
15628+ long long a;
15629+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15630+ "S" (v) : "memory", "ecx");
15631+ return a;
15632+}
15633+
15634 static inline long long atomic64_dec_return(atomic64_t *v)
15635 {
15636 long long a;
15637@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15638 }
15639
15640 /**
15641+ * atomic64_add_unchecked - add integer to atomic64 variable
15642+ * @i: integer value to add
15643+ * @v: pointer to type atomic64_unchecked_t
15644+ *
15645+ * Atomically adds @i to @v.
15646+ */
15647+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15648+{
15649+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15650+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15651+ ASM_NO_INPUT_CLOBBER("memory"));
15652+ return i;
15653+}
15654+
15655+/**
15656 * atomic64_sub - subtract the atomic64 variable
15657 * @i: integer value to subtract
15658 * @v: pointer to type atomic64_t
15659diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15660index f8d273e..02f39f3 100644
15661--- a/arch/x86/include/asm/atomic64_64.h
15662+++ b/arch/x86/include/asm/atomic64_64.h
15663@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15664 }
15665
15666 /**
15667+ * atomic64_read_unchecked - read atomic64 variable
15668+ * @v: pointer of type atomic64_unchecked_t
15669+ *
15670+ * Atomically reads the value of @v.
15671+ * Doesn't imply a read memory barrier.
15672+ */
15673+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15674+{
15675+ return ACCESS_ONCE((v)->counter);
15676+}
15677+
15678+/**
15679 * atomic64_set - set atomic64 variable
15680 * @v: pointer to type atomic64_t
15681 * @i: required value
15682@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15683 }
15684
15685 /**
15686+ * atomic64_set_unchecked - set atomic64 variable
15687+ * @v: pointer to type atomic64_unchecked_t
15688+ * @i: required value
15689+ *
15690+ * Atomically sets the value of @v to @i.
15691+ */
15692+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15693+{
15694+ v->counter = i;
15695+}
15696+
15697+/**
15698 * atomic64_add - add integer to atomic64 variable
15699 * @i: integer value to add
15700 * @v: pointer to type atomic64_t
15701@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15702 */
15703 static inline void atomic64_add(long i, atomic64_t *v)
15704 {
15705+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15706+
15707+#ifdef CONFIG_PAX_REFCOUNT
15708+ "jno 0f\n"
15709+ LOCK_PREFIX "subq %1,%0\n"
15710+ "int $4\n0:\n"
15711+ _ASM_EXTABLE(0b, 0b)
15712+#endif
15713+
15714+ : "=m" (v->counter)
15715+ : "er" (i), "m" (v->counter));
15716+}
15717+
15718+/**
15719+ * atomic64_add_unchecked - add integer to atomic64 variable
15720+ * @i: integer value to add
15721+ * @v: pointer to type atomic64_unchecked_t
15722+ *
15723+ * Atomically adds @i to @v.
15724+ */
15725+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15726+{
15727 asm volatile(LOCK_PREFIX "addq %1,%0"
15728 : "=m" (v->counter)
15729 : "er" (i), "m" (v->counter));
15730@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15731 */
15732 static inline void atomic64_sub(long i, atomic64_t *v)
15733 {
15734- asm volatile(LOCK_PREFIX "subq %1,%0"
15735+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15736+
15737+#ifdef CONFIG_PAX_REFCOUNT
15738+ "jno 0f\n"
15739+ LOCK_PREFIX "addq %1,%0\n"
15740+ "int $4\n0:\n"
15741+ _ASM_EXTABLE(0b, 0b)
15742+#endif
15743+
15744+ : "=m" (v->counter)
15745+ : "er" (i), "m" (v->counter));
15746+}
15747+
15748+/**
15749+ * atomic64_sub_unchecked - subtract the atomic64 variable
15750+ * @i: integer value to subtract
15751+ * @v: pointer to type atomic64_unchecked_t
15752+ *
15753+ * Atomically subtracts @i from @v.
15754+ */
15755+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15756+{
15757+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15758 : "=m" (v->counter)
15759 : "er" (i), "m" (v->counter));
15760 }
15761@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15762 */
15763 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15764 {
15765- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15766+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15767 }
15768
15769 /**
15770@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15771 */
15772 static inline void atomic64_inc(atomic64_t *v)
15773 {
15774+ asm volatile(LOCK_PREFIX "incq %0\n"
15775+
15776+#ifdef CONFIG_PAX_REFCOUNT
15777+ "jno 0f\n"
15778+ LOCK_PREFIX "decq %0\n"
15779+ "int $4\n0:\n"
15780+ _ASM_EXTABLE(0b, 0b)
15781+#endif
15782+
15783+ : "=m" (v->counter)
15784+ : "m" (v->counter));
15785+}
15786+
15787+/**
15788+ * atomic64_inc_unchecked - increment atomic64 variable
15789+ * @v: pointer to type atomic64_unchecked_t
15790+ *
15791+ * Atomically increments @v by 1.
15792+ */
15793+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15794+{
15795 asm volatile(LOCK_PREFIX "incq %0"
15796 : "=m" (v->counter)
15797 : "m" (v->counter));
15798@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15799 */
15800 static inline void atomic64_dec(atomic64_t *v)
15801 {
15802- asm volatile(LOCK_PREFIX "decq %0"
15803+ asm volatile(LOCK_PREFIX "decq %0\n"
15804+
15805+#ifdef CONFIG_PAX_REFCOUNT
15806+ "jno 0f\n"
15807+ LOCK_PREFIX "incq %0\n"
15808+ "int $4\n0:\n"
15809+ _ASM_EXTABLE(0b, 0b)
15810+#endif
15811+
15812+ : "=m" (v->counter)
15813+ : "m" (v->counter));
15814+}
15815+
15816+/**
15817+ * atomic64_dec_unchecked - decrement atomic64 variable
15818+ * @v: pointer to type atomic64_unchecked_t
15819+ *
15820+ * Atomically decrements @v by 1.
15821+ */
15822+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15823+{
15824+ asm volatile(LOCK_PREFIX "decq %0\n"
15825 : "=m" (v->counter)
15826 : "m" (v->counter));
15827 }
15828@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15829 */
15830 static inline int atomic64_dec_and_test(atomic64_t *v)
15831 {
15832- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15833+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15834 }
15835
15836 /**
15837@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15838 */
15839 static inline int atomic64_inc_and_test(atomic64_t *v)
15840 {
15841- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15842+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15843 }
15844
15845 /**
15846@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15847 */
15848 static inline int atomic64_add_negative(long i, atomic64_t *v)
15849 {
15850- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15851+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15852 }
15853
15854 /**
15855@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15856 */
15857 static inline long atomic64_add_return(long i, atomic64_t *v)
15858 {
15859+ return i + xadd_check_overflow(&v->counter, i);
15860+}
15861+
15862+/**
15863+ * atomic64_add_return_unchecked - add and return
15864+ * @i: integer value to add
15865+ * @v: pointer to type atomic64_unchecked_t
15866+ *
15867+ * Atomically adds @i to @v and returns @i + @v
15868+ */
15869+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15870+{
15871 return i + xadd(&v->counter, i);
15872 }
15873
15874@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15875 }
15876
15877 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15878+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15879+{
15880+ return atomic64_add_return_unchecked(1, v);
15881+}
15882 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15883
15884 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15885@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15886 return cmpxchg(&v->counter, old, new);
15887 }
15888
15889+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15890+{
15891+ return cmpxchg(&v->counter, old, new);
15892+}
15893+
15894 static inline long atomic64_xchg(atomic64_t *v, long new)
15895 {
15896 return xchg(&v->counter, new);
15897@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15898 */
15899 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15900 {
15901- long c, old;
15902+ long c, old, new;
15903 c = atomic64_read(v);
15904 for (;;) {
15905- if (unlikely(c == (u)))
15906+ if (unlikely(c == u))
15907 break;
15908- old = atomic64_cmpxchg((v), c, c + (a));
15909+
15910+ asm volatile("add %2,%0\n"
15911+
15912+#ifdef CONFIG_PAX_REFCOUNT
15913+ "jno 0f\n"
15914+ "sub %2,%0\n"
15915+ "int $4\n0:\n"
15916+ _ASM_EXTABLE(0b, 0b)
15917+#endif
15918+
15919+ : "=r" (new)
15920+ : "0" (c), "ir" (a));
15921+
15922+ old = atomic64_cmpxchg(v, c, new);
15923 if (likely(old == c))
15924 break;
15925 c = old;
15926 }
15927- return c != (u);
15928+ return c != u;
15929 }
15930
15931 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
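
atomic64_add_unless() keeps the usual read/compute/cmpxchg retry loop; the hunk only moves the addition into an asm block so the PAX_REFCOUNT overflow check covers the computed value before cmpxchg publishes it. The loop's shape in standalone form, with GCC/Clang builtins standing in for the kernel's cmpxchg():

#include <stdio.h>

/* add @a to *@v unless *@v == @u; returns 1 if the add happened */
static int add_unless(long *v, long a, long u)
{
        long c = __atomic_load_n(v, __ATOMIC_RELAXED);

        for (;;) {
                long expected = c;

                if (c == u)             /* forbidden value: leave it alone */
                        break;
                if (__atomic_compare_exchange_n(v, &expected, c + a, 0,
                                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                        break;          /* we installed c + a */
                c = expected;           /* lost a race: retry with the fresh value */
        }
        return c != u;
}

int main(void)
{
        long v = 5;

        printf("%d v=%ld\n", add_unless(&v, 1, 5), v);  /* 0 v=5 */
        printf("%d v=%ld\n", add_unless(&v, 1, 0), v);  /* 1 v=6 */
        return 0;
}
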
15932diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
15933index 2ab1eb3..1e8cc5d 100644
15934--- a/arch/x86/include/asm/barrier.h
15935+++ b/arch/x86/include/asm/barrier.h
15936@@ -57,7 +57,7 @@
15937 do { \
15938 compiletime_assert_atomic_type(*p); \
15939 smp_mb(); \
15940- ACCESS_ONCE(*p) = (v); \
15941+ ACCESS_ONCE_RW(*p) = (v); \
15942 } while (0)
15943
15944 #define smp_load_acquire(p) \
15945@@ -74,7 +74,7 @@ do { \
15946 do { \
15947 compiletime_assert_atomic_type(*p); \
15948 barrier(); \
15949- ACCESS_ONCE(*p) = (v); \
15950+ ACCESS_ONCE_RW(*p) = (v); \
15951 } while (0)
15952
15953 #define smp_load_acquire(p) \
15954diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
15955index cfe3b95..d01b118 100644
15956--- a/arch/x86/include/asm/bitops.h
15957+++ b/arch/x86/include/asm/bitops.h
15958@@ -50,7 +50,7 @@
15959 * a mask operation on a byte.
15960 */
15961 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
15962-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
15963+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
15964 #define CONST_MASK(nr) (1 << ((nr) & 7))
15965
15966 /**
15967@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
15968 */
15969 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
15970 {
15971- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
15972+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
15973 }
15974
15975 /**
15976@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
15977 */
15978 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
15979 {
15980- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
15981+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
15982 }
15983
15984 /**
15985@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
15986 */
15987 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
15988 {
15989- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
15990+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
15991 }
15992
15993 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
15994@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
15995 *
15996 * Undefined if no bit exists, so code should check against 0 first.
15997 */
15998-static inline unsigned long __ffs(unsigned long word)
15999+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16000 {
16001 asm("rep; bsf %1,%0"
16002 : "=r" (word)
16003@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16004 *
16005 * Undefined if no zero exists, so code should check against ~0UL first.
16006 */
16007-static inline unsigned long ffz(unsigned long word)
16008+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16009 {
16010 asm("rep; bsf %1,%0"
16011 : "=r" (word)
16012@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16013 *
16014 * Undefined if no set bit exists, so code should check against 0 first.
16015 */
16016-static inline unsigned long __fls(unsigned long word)
16017+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16018 {
16019 asm("bsr %1,%0"
16020 : "=r" (word)
16021@@ -434,7 +434,7 @@ static inline int ffs(int x)
16022 * set bit if value is nonzero. The last (most significant) bit is
16023 * at position 32.
16024 */
16025-static inline int fls(int x)
16026+static inline int __intentional_overflow(-1) fls(int x)
16027 {
16028 int r;
16029
16030@@ -476,7 +476,7 @@ static inline int fls(int x)
16031 * at position 64.
16032 */
16033 #ifdef CONFIG_X86_64
16034-static __always_inline int fls64(__u64 x)
16035+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16036 {
16037 int bitpos = -1;
16038 /*
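
The bitops changes are annotation-only: __intentional_overflow(-1) tells the size_overflow plugin that these bit-scan helpers return values that may look like overflows on purpose, and the rep; bsf bodies (decoded as TZCNT on CPUs that have it, plain BSF elsewhere) are untouched. A standalone check of the instruction against the compiler builtin:

#include <stdio.h>

static unsigned long my_ffs(unsigned long word) /* mirrors __ffs() above */
{
        /* undefined for word == 0, exactly like the kernel helper */
        asm("rep; bsf %1,%0"
            : "=r" (word)
            : "rm" (word));
        return word;
}

int main(void)
{
        unsigned long x = 0x48;         /* bits 3 and 6 set */

        printf("%lu == %d\n", my_ffs(x), __builtin_ctzl(x));    /* 3 == 3 */
        return 0;
}
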
16039diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16040index 4fa687a..60f2d39 100644
16041--- a/arch/x86/include/asm/boot.h
16042+++ b/arch/x86/include/asm/boot.h
16043@@ -6,10 +6,15 @@
16044 #include <uapi/asm/boot.h>
16045
16046 /* Physical address where kernel should be loaded. */
16047-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16048+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16049 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16050 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16051
16052+#ifndef __ASSEMBLY__
16053+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16054+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16055+#endif
16056+
16057 /* Minimum kernel alignment, as a power of two */
16058 #ifdef CONFIG_X86_64
16059 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
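
LOAD_PHYSICAL_ADDR stops being a compile-time constant and becomes the address of __LOAD_PHYSICAL_ADDR, a symbol presumably supplied by the patched linker scripts (they are not part of this hunk), so relocated code sees its real load address instead of the configured one. The extern-array idiom — take a linker symbol's address, never its contents — in standalone form using the classic libc symbols:

#include <stdio.h>

extern char etext[], edata[], end[];    /* defined by the linker, not by C code */

int main(void)
{
        /* only the addresses mean anything; the arrays have no real extent */
        printf("etext=%p edata=%p end=%p\n",
               (void *)etext, (void *)edata, (void *)end);
        return 0;
}
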
16060diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16061index 48f99f1..d78ebf9 100644
16062--- a/arch/x86/include/asm/cache.h
16063+++ b/arch/x86/include/asm/cache.h
16064@@ -5,12 +5,13 @@
16065
16066 /* L1 cache line size */
16067 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16068-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16069+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16070
16071 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16072+#define __read_only __attribute__((__section__(".data..read_only")))
16073
16074 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16075-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16076+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16077
16078 #ifdef CONFIG_X86_VSMP
16079 #ifdef CONFIG_SMP
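
__read_only only places a variable in .data..read_only; the actual protection comes from the PaX linker script and page-table setup elsewhere in the patch, which map that section read-only once init is done. Compiled standalone the section is merely a name, which is enough to show the mechanics:

#include <stdio.h>

#define __read_only __attribute__((__section__(".data..read_only")))

static int tuning_knob __read_only = 42;        /* write-protected after init, in-kernel */

int main(void)
{
        printf("tuning_knob = %d\n", tuning_knob);
        return 0;
}

The companion change, wrapping the cache-size constants in _AC(1,UL), makes them unsigned long in C while leaving them bare integers in assembly, so size arithmetic against them cannot silently truncate.
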
16080diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16081index 76659b6..72b8439 100644
16082--- a/arch/x86/include/asm/calling.h
16083+++ b/arch/x86/include/asm/calling.h
16084@@ -82,107 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16085 #define RSP 152
16086 #define SS 160
16087
16088-#define ARGOFFSET R11
16089-#define SWFRAME ORIG_RAX
16090+#define ARGOFFSET R15
16091
16092 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16093- subq $9*8+\addskip, %rsp
16094- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16095- movq_cfi rdi, 8*8
16096- movq_cfi rsi, 7*8
16097- movq_cfi rdx, 6*8
16098+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16099+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16100+ movq_cfi rdi, RDI
16101+ movq_cfi rsi, RSI
16102+ movq_cfi rdx, RDX
16103
16104 .if \save_rcx
16105- movq_cfi rcx, 5*8
16106+ movq_cfi rcx, RCX
16107 .endif
16108
16109 .if \rax_enosys
16110- movq $-ENOSYS, 4*8(%rsp)
16111+ movq $-ENOSYS, RAX(%rsp)
16112 .else
16113- movq_cfi rax, 4*8
16114+ movq_cfi rax, RAX
16115 .endif
16116
16117 .if \save_r891011
16118- movq_cfi r8, 3*8
16119- movq_cfi r9, 2*8
16120- movq_cfi r10, 1*8
16121- movq_cfi r11, 0*8
16122+ movq_cfi r8, R8
16123+ movq_cfi r9, R9
16124+ movq_cfi r10, R10
16125+ movq_cfi r11, R11
16126 .endif
16127
16128+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16129+ movq_cfi r12, R12
16130+#endif
16131+
16132 .endm
16133
16134-#define ARG_SKIP (9*8)
16135+#define ARG_SKIP ORIG_RAX
16136
16137 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16138 rstor_r8910=1, rstor_rdx=1
16139+
16140+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16141+ movq_cfi_restore R12, r12
16142+#endif
16143+
16144 .if \rstor_r11
16145- movq_cfi_restore 0*8, r11
16146+ movq_cfi_restore R11, r11
16147 .endif
16148
16149 .if \rstor_r8910
16150- movq_cfi_restore 1*8, r10
16151- movq_cfi_restore 2*8, r9
16152- movq_cfi_restore 3*8, r8
16153+ movq_cfi_restore R10, r10
16154+ movq_cfi_restore R9, r9
16155+ movq_cfi_restore R8, r8
16156 .endif
16157
16158 .if \rstor_rax
16159- movq_cfi_restore 4*8, rax
16160+ movq_cfi_restore RAX, rax
16161 .endif
16162
16163 .if \rstor_rcx
16164- movq_cfi_restore 5*8, rcx
16165+ movq_cfi_restore RCX, rcx
16166 .endif
16167
16168 .if \rstor_rdx
16169- movq_cfi_restore 6*8, rdx
16170+ movq_cfi_restore RDX, rdx
16171 .endif
16172
16173- movq_cfi_restore 7*8, rsi
16174- movq_cfi_restore 8*8, rdi
16175+ movq_cfi_restore RSI, rsi
16176+ movq_cfi_restore RDI, rdi
16177
16178- .if ARG_SKIP+\addskip > 0
16179- addq $ARG_SKIP+\addskip, %rsp
16180- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16181+ .if ORIG_RAX+\addskip > 0
16182+ addq $ORIG_RAX+\addskip, %rsp
16183+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16184 .endif
16185 .endm
16186
16187- .macro LOAD_ARGS offset, skiprax=0
16188- movq \offset(%rsp), %r11
16189- movq \offset+8(%rsp), %r10
16190- movq \offset+16(%rsp), %r9
16191- movq \offset+24(%rsp), %r8
16192- movq \offset+40(%rsp), %rcx
16193- movq \offset+48(%rsp), %rdx
16194- movq \offset+56(%rsp), %rsi
16195- movq \offset+64(%rsp), %rdi
16196+ .macro LOAD_ARGS skiprax=0
16197+ movq R11(%rsp), %r11
16198+ movq R10(%rsp), %r10
16199+ movq R9(%rsp), %r9
16200+ movq R8(%rsp), %r8
16201+ movq RCX(%rsp), %rcx
16202+ movq RDX(%rsp), %rdx
16203+ movq RSI(%rsp), %rsi
16204+ movq RDI(%rsp), %rdi
16205 .if \skiprax
16206 .else
16207- movq \offset+72(%rsp), %rax
16208+ movq ORIG_RAX(%rsp), %rax
16209 .endif
16210 .endm
16211
16212-#define REST_SKIP (6*8)
16213-
16214 .macro SAVE_REST
16215- subq $REST_SKIP, %rsp
16216- CFI_ADJUST_CFA_OFFSET REST_SKIP
16217- movq_cfi rbx, 5*8
16218- movq_cfi rbp, 4*8
16219- movq_cfi r12, 3*8
16220- movq_cfi r13, 2*8
16221- movq_cfi r14, 1*8
16222- movq_cfi r15, 0*8
16223+ movq_cfi rbx, RBX
16224+ movq_cfi rbp, RBP
16225+
16226+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16227+ movq_cfi r12, R12
16228+#endif
16229+
16230+ movq_cfi r13, R13
16231+ movq_cfi r14, R14
16232+ movq_cfi r15, R15
16233 .endm
16234
16235 .macro RESTORE_REST
16236- movq_cfi_restore 0*8, r15
16237- movq_cfi_restore 1*8, r14
16238- movq_cfi_restore 2*8, r13
16239- movq_cfi_restore 3*8, r12
16240- movq_cfi_restore 4*8, rbp
16241- movq_cfi_restore 5*8, rbx
16242- addq $REST_SKIP, %rsp
16243- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16244+ movq_cfi_restore R15, r15
16245+ movq_cfi_restore R14, r14
16246+ movq_cfi_restore R13, r13
16247+
16248+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16249+ movq_cfi_restore R12, r12
16250+#endif
16251+
16252+ movq_cfi_restore RBP, rbp
16253+ movq_cfi_restore RBX, rbx
16254 .endm
16255
16256 .macro SAVE_ALL
16257diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16258index f50de69..2b0a458 100644
16259--- a/arch/x86/include/asm/checksum_32.h
16260+++ b/arch/x86/include/asm/checksum_32.h
16261@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16262 int len, __wsum sum,
16263 int *src_err_ptr, int *dst_err_ptr);
16264
16265+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16266+ int len, __wsum sum,
16267+ int *src_err_ptr, int *dst_err_ptr);
16268+
16269+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16270+ int len, __wsum sum,
16271+ int *src_err_ptr, int *dst_err_ptr);
16272+
16273 /*
16274 * Note: when you get a NULL pointer exception here this means someone
16275 * passed in an incorrect kernel address to one of these functions.
16276@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16277
16278 might_sleep();
16279 stac();
16280- ret = csum_partial_copy_generic((__force void *)src, dst,
16281+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16282 len, sum, err_ptr, NULL);
16283 clac();
16284
16285@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16286 might_sleep();
16287 if (access_ok(VERIFY_WRITE, dst, len)) {
16288 stac();
16289- ret = csum_partial_copy_generic(src, (__force void *)dst,
16290+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16291 len, sum, NULL, err_ptr);
16292 clac();
16293 return ret;
16294diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16295index 99c105d7..2f667ac 100644
16296--- a/arch/x86/include/asm/cmpxchg.h
16297+++ b/arch/x86/include/asm/cmpxchg.h
16298@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16299 __compiletime_error("Bad argument size for cmpxchg");
16300 extern void __xadd_wrong_size(void)
16301 __compiletime_error("Bad argument size for xadd");
16302+extern void __xadd_check_overflow_wrong_size(void)
16303+ __compiletime_error("Bad argument size for xadd_check_overflow");
16304 extern void __add_wrong_size(void)
16305 __compiletime_error("Bad argument size for add");
16306+extern void __add_check_overflow_wrong_size(void)
16307+ __compiletime_error("Bad argument size for add_check_overflow");
16308
16309 /*
16310 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16311@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16312 __ret; \
16313 })
16314
16315+#ifdef CONFIG_PAX_REFCOUNT
16316+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16317+ ({ \
16318+ __typeof__ (*(ptr)) __ret = (arg); \
16319+ switch (sizeof(*(ptr))) { \
16320+ case __X86_CASE_L: \
16321+ asm volatile (lock #op "l %0, %1\n" \
16322+ "jno 0f\n" \
16323+ "mov %0,%1\n" \
16324+ "int $4\n0:\n" \
16325+ _ASM_EXTABLE(0b, 0b) \
16326+ : "+r" (__ret), "+m" (*(ptr)) \
16327+ : : "memory", "cc"); \
16328+ break; \
16329+ case __X86_CASE_Q: \
16330+ asm volatile (lock #op "q %q0, %1\n" \
16331+ "jno 0f\n" \
16332+ "mov %0,%1\n" \
16333+ "int $4\n0:\n" \
16334+ _ASM_EXTABLE(0b, 0b) \
16335+ : "+r" (__ret), "+m" (*(ptr)) \
16336+ : : "memory", "cc"); \
16337+ break; \
16338+ default: \
16339+ __ ## op ## _check_overflow_wrong_size(); \
16340+ } \
16341+ __ret; \
16342+ })
16343+#else
16344+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16345+#endif
16346+
16347 /*
16348 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16349 * Since this is generally used to protect other memory information, we
16350@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16351 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16352 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16353
16354+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16355+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16356+
16357 #define __add(ptr, inc, lock) \
16358 ({ \
16359 __typeof__ (*(ptr)) __ret = (inc); \
16360diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16361index 59c6c40..5e0b22c 100644
16362--- a/arch/x86/include/asm/compat.h
16363+++ b/arch/x86/include/asm/compat.h
16364@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16365 typedef u32 compat_uint_t;
16366 typedef u32 compat_ulong_t;
16367 typedef u64 __attribute__((aligned(4))) compat_u64;
16368-typedef u32 compat_uptr_t;
16369+typedef u32 __user compat_uptr_t;
16370
16371 struct compat_timespec {
16372 compat_time_t tv_sec;
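
Tagging compat_uptr_t with __user means sparse (and the checker-aware PaX plugins) now flag any attempt to treat a compat pointer as directly dereferenceable; correct code has to go through compat_ptr() and the usual copy_from_user()/copy_to_user() accessors. Outside the kernel, __user boils down to a checker-only attribute along these lines (cut down from include/linux/compiler.h):

#include <stdio.h>

#ifdef __CHECKER__                      /* defined when sparse is running */
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

static void __user *user_buf;           /* sparse: tagged, direct deref rejected */

int main(void)
{
        printf("user_buf slot at %p\n", (void *)&user_buf);
        return 0;
}
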
16373diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16374index aede2c3..40d7a8f 100644
16375--- a/arch/x86/include/asm/cpufeature.h
16376+++ b/arch/x86/include/asm/cpufeature.h
16377@@ -212,7 +212,7 @@
16378 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16379 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16380 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16381-
16382+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16383
16384 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16385 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16386@@ -220,7 +220,7 @@
16387 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16388 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16389 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16390-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16391+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16392 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16393 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16394 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16395@@ -388,6 +388,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16396 #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
16397 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16398 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16399+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16400
16401 #if __GNUC__ >= 4
16402 extern void warn_pre_alternatives(void);
16403@@ -439,7 +440,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16404
16405 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16406 t_warn:
16407- warn_pre_alternatives();
16408+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16409+ warn_pre_alternatives();
16410 return false;
16411 #endif
16412
16413@@ -459,7 +461,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16414 ".section .discard,\"aw\",@progbits\n"
16415 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16416 ".previous\n"
16417- ".section .altinstr_replacement,\"ax\"\n"
16418+ ".section .altinstr_replacement,\"a\"\n"
16419 "3: movb $1,%0\n"
16420 "4:\n"
16421 ".previous\n"
16422@@ -496,7 +498,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16423 " .byte 2b - 1b\n" /* src len */
16424 " .byte 4f - 3f\n" /* repl len */
16425 ".previous\n"
16426- ".section .altinstr_replacement,\"ax\"\n"
16427+ ".section .altinstr_replacement,\"a\"\n"
16428 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16429 "4:\n"
16430 ".previous\n"
16431@@ -529,7 +531,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16432 ".section .discard,\"aw\",@progbits\n"
16433 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16434 ".previous\n"
16435- ".section .altinstr_replacement,\"ax\"\n"
16436+ ".section .altinstr_replacement,\"a\"\n"
16437 "3: movb $0,%0\n"
16438 "4:\n"
16439 ".previous\n"
16440@@ -543,7 +545,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16441 ".section .discard,\"aw\",@progbits\n"
16442 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16443 ".previous\n"
16444- ".section .altinstr_replacement,\"ax\"\n"
16445+ ".section .altinstr_replacement,\"a\"\n"
16446 "5: movb $1,%0\n"
16447 "6:\n"
16448 ".previous\n"
16449diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16450index a94b82e..59ecefa 100644
16451--- a/arch/x86/include/asm/desc.h
16452+++ b/arch/x86/include/asm/desc.h
16453@@ -4,6 +4,7 @@
16454 #include <asm/desc_defs.h>
16455 #include <asm/ldt.h>
16456 #include <asm/mmu.h>
16457+#include <asm/pgtable.h>
16458
16459 #include <linux/smp.h>
16460 #include <linux/percpu.h>
16461@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16462
16463 desc->type = (info->read_exec_only ^ 1) << 1;
16464 desc->type |= info->contents << 2;
16465+ desc->type |= info->seg_not_present ^ 1;
16466
16467 desc->s = 1;
16468 desc->dpl = 0x3;
16469@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16470 }
16471
16472 extern struct desc_ptr idt_descr;
16473-extern gate_desc idt_table[];
16474-extern struct desc_ptr debug_idt_descr;
16475-extern gate_desc debug_idt_table[];
16476-
16477-struct gdt_page {
16478- struct desc_struct gdt[GDT_ENTRIES];
16479-} __attribute__((aligned(PAGE_SIZE)));
16480-
16481-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16482+extern gate_desc idt_table[IDT_ENTRIES];
16483+extern const struct desc_ptr debug_idt_descr;
16484+extern gate_desc debug_idt_table[IDT_ENTRIES];
16485
16486+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16487 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16488 {
16489- return per_cpu(gdt_page, cpu).gdt;
16490+ return cpu_gdt_table[cpu];
16491 }
16492
16493 #ifdef CONFIG_X86_64
16494@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16495 unsigned long base, unsigned dpl, unsigned flags,
16496 unsigned short seg)
16497 {
16498- gate->a = (seg << 16) | (base & 0xffff);
16499- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16500+ gate->gate.offset_low = base;
16501+ gate->gate.seg = seg;
16502+ gate->gate.reserved = 0;
16503+ gate->gate.type = type;
16504+ gate->gate.s = 0;
16505+ gate->gate.dpl = dpl;
16506+ gate->gate.p = 1;
16507+ gate->gate.offset_high = base >> 16;
16508 }
16509
16510 #endif
16511@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16512
16513 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16514 {
16515+ pax_open_kernel();
16516 memcpy(&idt[entry], gate, sizeof(*gate));
16517+ pax_close_kernel();
16518 }
16519
16520 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16521 {
16522+ pax_open_kernel();
16523 memcpy(&ldt[entry], desc, 8);
16524+ pax_close_kernel();
16525 }
16526
16527 static inline void
16528@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16529 default: size = sizeof(*gdt); break;
16530 }
16531
16532+ pax_open_kernel();
16533 memcpy(&gdt[entry], desc, size);
16534+ pax_close_kernel();
16535 }
16536
16537 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16538@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16539
16540 static inline void native_load_tr_desc(void)
16541 {
16542+ pax_open_kernel();
16543 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16544+ pax_close_kernel();
16545 }
16546
16547 static inline void native_load_gdt(const struct desc_ptr *dtr)
16548@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16549 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16550 unsigned int i;
16551
16552+ pax_open_kernel();
16553 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16554 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16555+ pax_close_kernel();
16556 }
16557
16558 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16559@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16560 preempt_enable();
16561 }
16562
16563-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16564+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16565 {
16566 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16567 }
16568@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16569 }
16570
16571 #ifdef CONFIG_X86_64
16572-static inline void set_nmi_gate(int gate, void *addr)
16573+static inline void set_nmi_gate(int gate, const void *addr)
16574 {
16575 gate_desc s;
16576
16577@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16578 #endif
16579
16580 #ifdef CONFIG_TRACING
16581-extern struct desc_ptr trace_idt_descr;
16582-extern gate_desc trace_idt_table[];
16583+extern const struct desc_ptr trace_idt_descr;
16584+extern gate_desc trace_idt_table[IDT_ENTRIES];
16585 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16586 {
16587 write_idt_entry(trace_idt_table, entry, gate);
16588 }
16589
16590-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16591+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16592 unsigned dpl, unsigned ist, unsigned seg)
16593 {
16594 gate_desc s;
16595@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16596 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16597 #endif
16598
16599-static inline void _set_gate(int gate, unsigned type, void *addr,
16600+static inline void _set_gate(int gate, unsigned type, const void *addr,
16601 unsigned dpl, unsigned ist, unsigned seg)
16602 {
16603 gate_desc s;
16604@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16605 #define set_intr_gate(n, addr) \
16606 do { \
16607 BUG_ON((unsigned)n > 0xFF); \
16608- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16609+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16610 __KERNEL_CS); \
16611- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16612+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16613 0, 0, __KERNEL_CS); \
16614 } while (0)
16615
16616@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16617 /*
16618 * This routine sets up an interrupt gate at directory privilege level 3.
16619 */
16620-static inline void set_system_intr_gate(unsigned int n, void *addr)
16621+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16622 {
16623 BUG_ON((unsigned)n > 0xFF);
16624 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16625 }
16626
16627-static inline void set_system_trap_gate(unsigned int n, void *addr)
16628+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16629 {
16630 BUG_ON((unsigned)n > 0xFF);
16631 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16632 }
16633
16634-static inline void set_trap_gate(unsigned int n, void *addr)
16635+static inline void set_trap_gate(unsigned int n, const void *addr)
16636 {
16637 BUG_ON((unsigned)n > 0xFF);
16638 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16639@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16640 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16641 {
16642 BUG_ON((unsigned)n > 0xFF);
16643- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16644+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16645 }
16646
16647-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16648+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16649 {
16650 BUG_ON((unsigned)n > 0xFF);
16651 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16652 }
16653
16654-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16655+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16656 {
16657 BUG_ON((unsigned)n > 0xFF);
16658 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16659@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16660 else
16661 load_idt((const struct desc_ptr *)&idt_descr);
16662 }
16663+
16664+#ifdef CONFIG_X86_32
16665+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16666+{
16667+ struct desc_struct d;
16668+
16669+ if (likely(limit))
16670+ limit = (limit - 1UL) >> PAGE_SHIFT;
16671+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16672+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16673+}
16674+#endif
16675+
16676 #endif /* _ASM_X86_DESC_H */
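
pax_open_kernel()/pax_close_kernel(), used above to bracket every write to the GDT, IDT, LDT and TR, are defined elsewhere in the patch; the hunks here only show that descriptor tables become read-only by default. A plausible minimal reading of what such a bracket must do is clear and restore CR0.WP with preemption off — a kernel-context, ring-0-only sketch (the real PaX helpers also handle details such as PCID that this hunk does not show):

#define X86_CR0_WP (1UL << 16)

/* caller must keep preemption/interrupts off so the CPU cannot be lost
 * while write protection is disabled on it */
static inline unsigned long open_kernel_writes(void)
{
        unsigned long cr0;

        asm volatile("mov %%cr0, %0" : "=r" (cr0));
        asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~X86_CR0_WP) : "memory");
        return cr0;
}

static inline void close_kernel_writes(unsigned long cr0)
{
        asm volatile("mov %0, %%cr0" : : "r" (cr0) : "memory");
}
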
16677diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16678index 278441f..b95a174 100644
16679--- a/arch/x86/include/asm/desc_defs.h
16680+++ b/arch/x86/include/asm/desc_defs.h
16681@@ -31,6 +31,12 @@ struct desc_struct {
16682 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16683 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16684 };
16685+ struct {
16686+ u16 offset_low;
16687+ u16 seg;
16688+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16689+ unsigned offset_high: 16;
16690+ } gate;
16691 };
16692 } __attribute__((packed));
16693
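
The anonymous "gate" struct added to the union gives pack_gate() named fields in place of the hand-shifted u32 halves it used before. A cut-down standalone version showing the same 8-byte layout:

#include <stdio.h>
#include <stdint.h>

struct gate_demo {                      /* mirrors the new "gate" member */
        uint16_t offset_low;
        uint16_t seg;
        unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
        unsigned offset_high: 16;
} __attribute__((packed));

int main(void)
{
        struct gate_demo g = {
                .offset_low  = 0x5678,  /* handler address, low half */
                .seg         = 0x60,    /* code segment selector */
                .type        = 0xe,     /* 32-bit interrupt gate */
                .p           = 1,       /* present */
                .offset_high = 0x1234,  /* handler address, high half */
        };

        printf("sizeof(gate) = %zu\n", sizeof g);       /* 8: one 32-bit IDT entry */
        return 0;
}
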
16694diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16695index ced283a..ffe04cc 100644
16696--- a/arch/x86/include/asm/div64.h
16697+++ b/arch/x86/include/asm/div64.h
16698@@ -39,7 +39,7 @@
16699 __mod; \
16700 })
16701
16702-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16703+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16704 {
16705 union {
16706 u64 v64;
16707diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16708index ca3347a..1a5082a 100644
16709--- a/arch/x86/include/asm/elf.h
16710+++ b/arch/x86/include/asm/elf.h
16711@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16712
16713 #include <asm/vdso.h>
16714
16715-#ifdef CONFIG_X86_64
16716-extern unsigned int vdso64_enabled;
16717-#endif
16718 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16719 extern unsigned int vdso32_enabled;
16720 #endif
16721@@ -249,7 +246,25 @@ extern int force_personality32;
16722 the loader. We need to make sure that it is out of the way of the program
16723 that it will "exec", and that there is sufficient room for the brk. */
16724
16725+#ifdef CONFIG_PAX_SEGMEXEC
16726+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16727+#else
16728 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16729+#endif
16730+
16731+#ifdef CONFIG_PAX_ASLR
16732+#ifdef CONFIG_X86_32
16733+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16734+
16735+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16736+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16737+#else
16738+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16739+
16740+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16741+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16742+#endif
16743+#endif
16744
16745 /* This yields a mask that user programs can use to figure out what
16746 instruction set this CPU supports. This could be done in user space,
16747@@ -298,17 +313,13 @@ do { \
16748
16749 #define ARCH_DLINFO \
16750 do { \
16751- if (vdso64_enabled) \
16752- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16753- (unsigned long __force)current->mm->context.vdso); \
16754+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16755 } while (0)
16756
16757 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16758 #define ARCH_DLINFO_X32 \
16759 do { \
16760- if (vdso64_enabled) \
16761- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16762- (unsigned long __force)current->mm->context.vdso); \
16763+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16764 } while (0)
16765
16766 #define AT_SYSINFO 32
16767@@ -323,10 +334,10 @@ else \
16768
16769 #endif /* !CONFIG_X86_32 */
16770
16771-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16772+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16773
16774 #define VDSO_ENTRY \
16775- ((unsigned long)current->mm->context.vdso + \
16776+ (current->mm->context.vdso + \
16777 selected_vdso32->sym___kernel_vsyscall)
16778
16779 struct linux_binprm;
16780@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16781 int uses_interp);
16782 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16783
16784-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16785-#define arch_randomize_brk arch_randomize_brk
16786-
16787 /*
16788 * True on X86_32 or when emulating IA32 on X86_64
16789 */
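
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are the number of random bits PaX feeds into mmap and stack placement: 16 on plain i386, 15 under SEGMEXEC (the address space is halved), and TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 for native 64-bit tasks, i.e. 47 - 12 - 3 = 32 bits of page-granular entropy assuming the usual 47-bit user address space. A compilable check of that arithmetic (TASK_SIZE_MAX_SHIFT = 47 is an assumption here; the patch takes it from CONFIG_TASK_SIZE_MAX_SHIFT):

#include <stdio.h>

#define PAGE_SHIFT          12
#define TASK_SIZE_MAX_SHIFT 47   /* assumed: usual x86_64 user VA size */

int main(void)
{
    printf("i386:          16 bits of mmap randomization\n");
    printf("i386+SEGMEXEC: 15 bits\n");
    printf("x86_64 native: %d bits\n",
           TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3);   /* 32 */
    return 0;
}
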
16790diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16791index 77a99ac..39ff7f5 100644
16792--- a/arch/x86/include/asm/emergency-restart.h
16793+++ b/arch/x86/include/asm/emergency-restart.h
16794@@ -1,6 +1,6 @@
16795 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16796 #define _ASM_X86_EMERGENCY_RESTART_H
16797
16798-extern void machine_emergency_restart(void);
16799+extern void machine_emergency_restart(void) __noreturn;
16800
16801 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16802diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16803index 1c7eefe..d0e4702 100644
16804--- a/arch/x86/include/asm/floppy.h
16805+++ b/arch/x86/include/asm/floppy.h
16806@@ -229,18 +229,18 @@ static struct fd_routine_l {
16807 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16808 } fd_routine[] = {
16809 {
16810- request_dma,
16811- free_dma,
16812- get_dma_residue,
16813- dma_mem_alloc,
16814- hard_dma_setup
16815+ ._request_dma = request_dma,
16816+ ._free_dma = free_dma,
16817+ ._get_dma_residue = get_dma_residue,
16818+ ._dma_mem_alloc = dma_mem_alloc,
16819+ ._dma_setup = hard_dma_setup
16820 },
16821 {
16822- vdma_request_dma,
16823- vdma_nop,
16824- vdma_get_dma_residue,
16825- vdma_mem_alloc,
16826- vdma_dma_setup
16827+ ._request_dma = vdma_request_dma,
16828+ ._free_dma = vdma_nop,
16829+ ._get_dma_residue = vdma_get_dma_residue,
16830+ ._dma_mem_alloc = vdma_mem_alloc,
16831+ ._dma_setup = vdma_dma_setup
16832 }
16833 };
16834
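
The fd_routine[] hunk only swaps positional initializers for C99 designated initializers; behaviour is unchanged. The point is uniformity: grsecurity's constification plugin and reviewers can match fields by name, and the table stays correct if struct fd_routine_l is ever reordered. The pattern in miniature:

#include <stdio.h>

struct ops {
    int  (*request)(int ch);
    void (*release)(int ch);
};

static int  my_request(int ch) { return ch; }
static void my_release(int ch) { (void)ch; }

/* Designated initializers bind by field name, not position, so the
 * table stays correct even if struct ops is reordered or grows. */
static const struct ops table[] = {
    { .request = my_request, .release = my_release },
};

int main(void)
{
    printf("%d\n", table[0].request(2));
    return 0;
}
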
16835diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16836index e97622f..d0ba77a 100644
16837--- a/arch/x86/include/asm/fpu-internal.h
16838+++ b/arch/x86/include/asm/fpu-internal.h
16839@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16840 #define user_insn(insn, output, input...) \
16841 ({ \
16842 int err; \
16843+ pax_open_userland(); \
16844 asm volatile(ASM_STAC "\n" \
16845- "1:" #insn "\n\t" \
16846+ "1:" \
16847+ __copyuser_seg \
16848+ #insn "\n\t" \
16849 "2: " ASM_CLAC "\n" \
16850 ".section .fixup,\"ax\"\n" \
16851 "3: movl $-1,%[err]\n" \
16852@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16853 _ASM_EXTABLE(1b, 3b) \
16854 : [err] "=r" (err), output \
16855 : "0"(0), input); \
16856+ pax_close_userland(); \
16857 err; \
16858 })
16859
16860@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16861 "fnclex\n\t"
16862 "emms\n\t"
16863 "fildl %P[addr]" /* set F?P to defined value */
16864- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16865+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16866 }
16867
16868 return fpu_restore_checking(&tsk->thread.fpu);
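
user_insn() is bracketed with pax_open_userland()/pax_close_userland() and the FPU instruction gets a __copyuser_seg prefix: under UDEREF, user memory is only reachable through a dedicated segment override (i386) or a temporarily opened mapping (amd64), so every instruction touching user addresses must be wrapped. The restore_fpu_checking() hunk likewise switches the dummy fildl operand to a per-CPU init_tss field, a kernel address that stays valid regardless of which user mapping is live. A shape-only sketch of the bracketing, with hypothetical no-op stubs standing in for the PaX helpers:

#include <stdio.h>

/* Hypothetical stubs; the real helpers flip segment limits (i386) or
 * the user shadow mapping (amd64). */
static void pax_open_userland(void)  { }
static void pax_close_userland(void) { }

/* Bracket an expression that touches user memory, as user_insn() does
 * around the FPU save/restore instructions. GCC statement expression. */
#define user_op(expr)                \
    ({                               \
        __typeof__(expr) __ret;      \
        pax_open_userland();         \
        __ret = (expr);              \
        pax_close_userland();        \
        __ret;                       \
    })

int main(void)
{
    printf("%d\n", user_op(1 + 1));
    return 0;
}
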
16869diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16870index b4c1f54..e290c08 100644
16871--- a/arch/x86/include/asm/futex.h
16872+++ b/arch/x86/include/asm/futex.h
16873@@ -12,6 +12,7 @@
16874 #include <asm/smap.h>
16875
16876 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16877+ typecheck(u32 __user *, uaddr); \
16878 asm volatile("\t" ASM_STAC "\n" \
16879 "1:\t" insn "\n" \
16880 "2:\t" ASM_CLAC "\n" \
16881@@ -20,15 +21,16 @@
16882 "\tjmp\t2b\n" \
16883 "\t.previous\n" \
16884 _ASM_EXTABLE(1b, 3b) \
16885- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16886+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16887 : "i" (-EFAULT), "0" (oparg), "1" (0))
16888
16889 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16890+ typecheck(u32 __user *, uaddr); \
16891 asm volatile("\t" ASM_STAC "\n" \
16892 "1:\tmovl %2, %0\n" \
16893 "\tmovl\t%0, %3\n" \
16894 "\t" insn "\n" \
16895- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16896+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16897 "\tjnz\t1b\n" \
16898 "3:\t" ASM_CLAC "\n" \
16899 "\t.section .fixup,\"ax\"\n" \
16900@@ -38,7 +40,7 @@
16901 _ASM_EXTABLE(1b, 4b) \
16902 _ASM_EXTABLE(2b, 4b) \
16903 : "=&a" (oldval), "=&r" (ret), \
16904- "+m" (*uaddr), "=&r" (tem) \
16905+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16906 : "r" (oparg), "i" (-EFAULT), "1" (0))
16907
16908 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16909@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16910
16911 pagefault_disable();
16912
16913+ pax_open_userland();
16914 switch (op) {
16915 case FUTEX_OP_SET:
16916- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16917+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16918 break;
16919 case FUTEX_OP_ADD:
16920- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16921+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16922 uaddr, oparg);
16923 break;
16924 case FUTEX_OP_OR:
16925@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16926 default:
16927 ret = -ENOSYS;
16928 }
16929+ pax_close_userland();
16930
16931 pagefault_enable();
16932
16933diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
16934index 9662290..49ca5e5 100644
16935--- a/arch/x86/include/asm/hw_irq.h
16936+++ b/arch/x86/include/asm/hw_irq.h
16937@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
16938 #endif /* CONFIG_X86_LOCAL_APIC */
16939
16940 /* Statistics */
16941-extern atomic_t irq_err_count;
16942-extern atomic_t irq_mis_count;
16943+extern atomic_unchecked_t irq_err_count;
16944+extern atomic_unchecked_t irq_mis_count;
16945
16946 /* EISA */
16947 extern void eisa_set_level_irq(unsigned int irq);
16948diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
16949index ccffa53..3c90c87 100644
16950--- a/arch/x86/include/asm/i8259.h
16951+++ b/arch/x86/include/asm/i8259.h
16952@@ -62,7 +62,7 @@ struct legacy_pic {
16953 void (*init)(int auto_eoi);
16954 int (*irq_pending)(unsigned int irq);
16955 void (*make_irq)(unsigned int irq);
16956-};
16957+} __do_const;
16958
16959 extern struct legacy_pic *legacy_pic;
16960 extern struct legacy_pic null_legacy_pic;
16961diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
16962index 34a5b93..27e40a6 100644
16963--- a/arch/x86/include/asm/io.h
16964+++ b/arch/x86/include/asm/io.h
16965@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
16966 "m" (*(volatile type __force *)addr) barrier); }
16967
16968 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
16969-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
16970-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
16971+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
16972+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
16973
16974 build_mmio_read(__readb, "b", unsigned char, "=q", )
16975-build_mmio_read(__readw, "w", unsigned short, "=r", )
16976-build_mmio_read(__readl, "l", unsigned int, "=r", )
16977+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
16978+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
16979
16980 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
16981 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
16982@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
16983 * this function
16984 */
16985
16986-static inline phys_addr_t virt_to_phys(volatile void *address)
16987+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
16988 {
16989 return __pa(address);
16990 }
16991@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
16992 return ioremap_nocache(offset, size);
16993 }
16994
16995-extern void iounmap(volatile void __iomem *addr);
16996+extern void iounmap(const volatile void __iomem *addr);
16997
16998 extern void set_iounmap_nonlazy(void);
16999
17000@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17001
17002 #include <linux/vmalloc.h>
17003
17004+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17005+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17006+{
17007+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17008+}
17009+
17010+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17011+{
17012+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17013+}
17014+
17015 /*
17016 * Convert a virtual cached pointer to an uncached pointer
17017 */
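
valid_phys_addr_range()/valid_mmap_phys_addr_range() cap /dev/mem-style accesses at the CPU's reported physical address width. With boot_cpu_data.x86_phys_bits = 36 and PAGE_SHIFT = 12, the highest acceptable pfn is 1 << 24, i.e. a 64 GiB cutoff. A compilable sketch of the same check (phys_bits is a parameter here instead of boot_cpu_data, and addr is 64-bit so the example also runs on 32-bit hosts):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

/* Mirror of the patch's check: the rounded-up end pfn must lie below
 * 1 << (phys_bits - PAGE_SHIFT). */
static int valid_phys_addr_range(unsigned long long addr, size_t count,
                                 unsigned int phys_bits)
{
    unsigned long long end_pfn = (addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
    return end_pfn < (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
    /* 36 physical bits => accesses must end below 64 GiB. */
    printf("%d\n", valid_phys_addr_range((1ULL << 36) - 8192, 4096, 36)); /* 1 */
    printf("%d\n", valid_phys_addr_range(1ULL << 36, 4096, 36));          /* 0 */
    return 0;
}
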
17018diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17019index 0a8b519..80e7d5b 100644
17020--- a/arch/x86/include/asm/irqflags.h
17021+++ b/arch/x86/include/asm/irqflags.h
17022@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17023 sti; \
17024 sysexit
17025
17026+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17027+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17028+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17029+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17030+
17031 #else
17032 #define INTERRUPT_RETURN iret
17033 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17034diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17035index 4421b5d..8543006 100644
17036--- a/arch/x86/include/asm/kprobes.h
17037+++ b/arch/x86/include/asm/kprobes.h
17038@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17039 #define RELATIVEJUMP_SIZE 5
17040 #define RELATIVECALL_OPCODE 0xe8
17041 #define RELATIVE_ADDR_SIZE 4
17042-#define MAX_STACK_SIZE 64
17043-#define MIN_STACK_SIZE(ADDR) \
17044- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17045- THREAD_SIZE - (unsigned long)(ADDR))) \
17046- ? (MAX_STACK_SIZE) \
17047- : (((unsigned long)current_thread_info()) + \
17048- THREAD_SIZE - (unsigned long)(ADDR)))
17049+#define MAX_STACK_SIZE 64UL
17050+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17051
17052 #define flush_insn_slot(p) do { } while (0)
17053
17054diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
17055index d89c6b8..e711c69 100644
17056--- a/arch/x86/include/asm/kvm_host.h
17057+++ b/arch/x86/include/asm/kvm_host.h
17058@@ -51,7 +51,7 @@
17059 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
17060
17061 #define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
17062-#define CR3_PCID_INVD (1UL << 63)
17063+#define CR3_PCID_INVD (1ULL << 63)
17064 #define CR4_RESERVED_BITS \
17065 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
17066 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
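
The CR3_PCID_INVD change fixes a 32-bit truncation bug: with a 32-bit unsigned long (i386 KVM builds), 1UL << 63 is undefined behaviour, while 1ULL << 63 always yields bit 63 of a 64-bit constant. Demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* On an ILP32 target unsigned long is 32 bits wide, so (1UL << 63)
     * is undefined behaviour; the ULL constant is 64-bit everywhere. */
    uint64_t cr3_pcid_invd = 1ULL << 63;

    printf("CR3_PCID_INVD = %#018llx\n", (unsigned long long)cr3_pcid_invd);
    printf("unsigned long here is %zu bits\n", sizeof(unsigned long) * 8);
    return 0;
}
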
17067diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17068index 4ad6560..75c7bdd 100644
17069--- a/arch/x86/include/asm/local.h
17070+++ b/arch/x86/include/asm/local.h
17071@@ -10,33 +10,97 @@ typedef struct {
17072 atomic_long_t a;
17073 } local_t;
17074
17075+typedef struct {
17076+ atomic_long_unchecked_t a;
17077+} local_unchecked_t;
17078+
17079 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17080
17081 #define local_read(l) atomic_long_read(&(l)->a)
17082+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17083 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17084+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17085
17086 static inline void local_inc(local_t *l)
17087 {
17088- asm volatile(_ASM_INC "%0"
17089+ asm volatile(_ASM_INC "%0\n"
17090+
17091+#ifdef CONFIG_PAX_REFCOUNT
17092+ "jno 0f\n"
17093+ _ASM_DEC "%0\n"
17094+ "int $4\n0:\n"
17095+ _ASM_EXTABLE(0b, 0b)
17096+#endif
17097+
17098+ : "+m" (l->a.counter));
17099+}
17100+
17101+static inline void local_inc_unchecked(local_unchecked_t *l)
17102+{
17103+ asm volatile(_ASM_INC "%0\n"
17104 : "+m" (l->a.counter));
17105 }
17106
17107 static inline void local_dec(local_t *l)
17108 {
17109- asm volatile(_ASM_DEC "%0"
17110+ asm volatile(_ASM_DEC "%0\n"
17111+
17112+#ifdef CONFIG_PAX_REFCOUNT
17113+ "jno 0f\n"
17114+ _ASM_INC "%0\n"
17115+ "int $4\n0:\n"
17116+ _ASM_EXTABLE(0b, 0b)
17117+#endif
17118+
17119+ : "+m" (l->a.counter));
17120+}
17121+
17122+static inline void local_dec_unchecked(local_unchecked_t *l)
17123+{
17124+ asm volatile(_ASM_DEC "%0\n"
17125 : "+m" (l->a.counter));
17126 }
17127
17128 static inline void local_add(long i, local_t *l)
17129 {
17130- asm volatile(_ASM_ADD "%1,%0"
17131+ asm volatile(_ASM_ADD "%1,%0\n"
17132+
17133+#ifdef CONFIG_PAX_REFCOUNT
17134+ "jno 0f\n"
17135+ _ASM_SUB "%1,%0\n"
17136+ "int $4\n0:\n"
17137+ _ASM_EXTABLE(0b, 0b)
17138+#endif
17139+
17140+ : "+m" (l->a.counter)
17141+ : "ir" (i));
17142+}
17143+
17144+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17145+{
17146+ asm volatile(_ASM_ADD "%1,%0\n"
17147 : "+m" (l->a.counter)
17148 : "ir" (i));
17149 }
17150
17151 static inline void local_sub(long i, local_t *l)
17152 {
17153- asm volatile(_ASM_SUB "%1,%0"
17154+ asm volatile(_ASM_SUB "%1,%0\n"
17155+
17156+#ifdef CONFIG_PAX_REFCOUNT
17157+ "jno 0f\n"
17158+ _ASM_ADD "%1,%0\n"
17159+ "int $4\n0:\n"
17160+ _ASM_EXTABLE(0b, 0b)
17161+#endif
17162+
17163+ : "+m" (l->a.counter)
17164+ : "ir" (i));
17165+}
17166+
17167+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17168+{
17169+ asm volatile(_ASM_SUB "%1,%0\n"
17170 : "+m" (l->a.counter)
17171 : "ir" (i));
17172 }
17173@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17174 */
17175 static inline int local_sub_and_test(long i, local_t *l)
17176 {
17177- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17178+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17179 }
17180
17181 /**
17182@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17183 */
17184 static inline int local_dec_and_test(local_t *l)
17185 {
17186- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17187+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17188 }
17189
17190 /**
17191@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17192 */
17193 static inline int local_inc_and_test(local_t *l)
17194 {
17195- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17196+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17197 }
17198
17199 /**
17200@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17201 */
17202 static inline int local_add_negative(long i, local_t *l)
17203 {
17204- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17205+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17206 }
17207
17208 /**
17209@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17210 static inline long local_add_return(long i, local_t *l)
17211 {
17212 long __i = i;
17213+ asm volatile(_ASM_XADD "%0, %1\n"
17214+
17215+#ifdef CONFIG_PAX_REFCOUNT
17216+ "jno 0f\n"
17217+ _ASM_MOV "%0,%1\n"
17218+ "int $4\n0:\n"
17219+ _ASM_EXTABLE(0b, 0b)
17220+#endif
17221+
17222+ : "+r" (i), "+m" (l->a.counter)
17223+ : : "memory");
17224+ return i + __i;
17225+}
17226+
17227+/**
17228+ * local_add_return_unchecked - add and return
17229+ * @i: integer value to add
17230+ * @l: pointer to type local_unchecked_t
17231+ *
17232+ * Atomically adds @i to @l and returns @i + @l
17233+ */
17234+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17235+{
17236+ long __i = i;
17237 asm volatile(_ASM_XADD "%0, %1;"
17238 : "+r" (i), "+m" (l->a.counter)
17239 : : "memory");
17240@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17241
17242 #define local_cmpxchg(l, o, n) \
17243 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17244+#define local_cmpxchg_unchecked(l, o, n) \
17245+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17246 /* Always has a lock prefix */
17247 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17248
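
Every PAX_REFCOUNT instrumentation above follows the same pattern: do the arithmetic, jno over the recovery code if the signed result did not overflow, otherwise undo the operation and execute int $4 so the overflow-exception handler can kill the task; the _ASM_EXTABLE(0b, 0b) entry lets execution resume in place afterwards. The *_unchecked variants keep plain wrap-around semantics for counters where that is intended. A userspace sketch of the jno/undo shape, setting a flag instead of trapping (int $4 is not usable from ring 3); x86-64 GCC/Clang inline asm:

#include <limits.h>
#include <stdio.h>

/* The kernel raises int $4 on overflow and lets the #OF handler
 * undo-and-kill; here we undo the add and report instead. */
static int checked_add(long *counter, long i)
{
    int of = 0;
    asm volatile("addq %2, %0\n\t"
                 "jno 1f\n\t"
                 "subq %2, %0\n\t"   /* undo, like the kernel's _ASM_SUB */
                 "movl $1, %1\n"
                 "1:"
                 : "+m" (*counter), "+r" (of)
                 : "er" (i)
                 : "cc");
    return of;
}

int main(void)
{
    long c = LONG_MAX;                    /* the next increment overflows */
    int of = checked_add(&c, 1);
    printf("overflow=%d, counter undone: %d\n", of, c == LONG_MAX);
    return 0;
}
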
17249diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17250new file mode 100644
17251index 0000000..2bfd3ba
17252--- /dev/null
17253+++ b/arch/x86/include/asm/mman.h
17254@@ -0,0 +1,15 @@
17255+#ifndef _X86_MMAN_H
17256+#define _X86_MMAN_H
17257+
17258+#include <uapi/asm/mman.h>
17259+
17260+#ifdef __KERNEL__
17261+#ifndef __ASSEMBLY__
17262+#ifdef CONFIG_X86_32
17263+#define arch_mmap_check i386_mmap_check
17264+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17265+#endif
17266+#endif
17267+#endif
17268+
17269+#endif /* X86_MMAN_H */
17270diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17271index 876e74e..e20bfb1 100644
17272--- a/arch/x86/include/asm/mmu.h
17273+++ b/arch/x86/include/asm/mmu.h
17274@@ -9,7 +9,7 @@
17275 * we put the segment information here.
17276 */
17277 typedef struct {
17278- void *ldt;
17279+ struct desc_struct *ldt;
17280 int size;
17281
17282 #ifdef CONFIG_X86_64
17283@@ -18,7 +18,19 @@ typedef struct {
17284 #endif
17285
17286 struct mutex lock;
17287- void __user *vdso;
17288+ unsigned long vdso;
17289+
17290+#ifdef CONFIG_X86_32
17291+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17292+ unsigned long user_cs_base;
17293+ unsigned long user_cs_limit;
17294+
17295+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17296+ cpumask_t cpu_user_cs_mask;
17297+#endif
17298+
17299+#endif
17300+#endif
17301 } mm_context_t;
17302
17303 #ifdef CONFIG_SMP
17304diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17305index 4b75d59..8ffacb6 100644
17306--- a/arch/x86/include/asm/mmu_context.h
17307+++ b/arch/x86/include/asm/mmu_context.h
17308@@ -27,6 +27,20 @@ void destroy_context(struct mm_struct *mm);
17309
17310 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17311 {
17312+
17313+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17314+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17315+ unsigned int i;
17316+ pgd_t *pgd;
17317+
17318+ pax_open_kernel();
17319+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17320+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17321+ set_pgd_batched(pgd+i, native_make_pgd(0));
17322+ pax_close_kernel();
17323+ }
17324+#endif
17325+
17326 #ifdef CONFIG_SMP
17327 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17328 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17329@@ -37,16 +51,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17330 struct task_struct *tsk)
17331 {
17332 unsigned cpu = smp_processor_id();
17333+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17334+ int tlbstate = TLBSTATE_OK;
17335+#endif
17336
17337 if (likely(prev != next)) {
17338 #ifdef CONFIG_SMP
17339+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17340+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17341+#endif
17342 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17343 this_cpu_write(cpu_tlbstate.active_mm, next);
17344 #endif
17345 cpumask_set_cpu(cpu, mm_cpumask(next));
17346
17347 /* Re-load page tables */
17348+#ifdef CONFIG_PAX_PER_CPU_PGD
17349+ pax_open_kernel();
17350+
17351+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17352+ if (static_cpu_has(X86_FEATURE_PCID))
17353+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17354+ else
17355+#endif
17356+
17357+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17358+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17359+ pax_close_kernel();
17360+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17361+
17362+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17363+ if (static_cpu_has(X86_FEATURE_PCID)) {
17364+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17365+ u64 descriptor[2];
17366+ descriptor[0] = PCID_USER;
17367+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17368+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17369+ descriptor[0] = PCID_KERNEL;
17370+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17371+ }
17372+ } else {
17373+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17374+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17375+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17376+ else
17377+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17378+ }
17379+ } else
17380+#endif
17381+
17382+ load_cr3(get_cpu_pgd(cpu, kernel));
17383+#else
17384 load_cr3(next->pgd);
17385+#endif
17386 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17387
17388 /* Stop flush ipis for the previous mm */
17389@@ -64,9 +121,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17390 */
17391 if (unlikely(prev->context.ldt != next->context.ldt))
17392 load_LDT_nolock(&next->context);
17393+
17394+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17395+ if (!(__supported_pte_mask & _PAGE_NX)) {
17396+ smp_mb__before_atomic();
17397+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17398+ smp_mb__after_atomic();
17399+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17400+ }
17401+#endif
17402+
17403+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17404+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17405+ prev->context.user_cs_limit != next->context.user_cs_limit))
17406+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17407+#ifdef CONFIG_SMP
17408+ else if (unlikely(tlbstate != TLBSTATE_OK))
17409+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17410+#endif
17411+#endif
17412+
17413 }
17414+ else {
17415+
17416+#ifdef CONFIG_PAX_PER_CPU_PGD
17417+ pax_open_kernel();
17418+
17419+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17420+ if (static_cpu_has(X86_FEATURE_PCID))
17421+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17422+ else
17423+#endif
17424+
17425+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17426+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17427+ pax_close_kernel();
17428+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17429+
17430+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17431+ if (static_cpu_has(X86_FEATURE_PCID)) {
17432+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17433+ u64 descriptor[2];
17434+ descriptor[0] = PCID_USER;
17435+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17436+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17437+ descriptor[0] = PCID_KERNEL;
17438+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17439+ }
17440+ } else {
17441+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17442+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17443+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17444+ else
17445+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17446+ }
17447+ } else
17448+#endif
17449+
17450+ load_cr3(get_cpu_pgd(cpu, kernel));
17451+#endif
17452+
17453 #ifdef CONFIG_SMP
17454- else {
17455 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17456 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17457
17458@@ -83,12 +198,29 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17459 * tlb flush IPI delivery. We must reload CR3
17460 * to make sure to use no freed page tables.
17461 */
17462+
17463+#ifndef CONFIG_PAX_PER_CPU_PGD
17464 load_cr3(next->pgd);
17465 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17466+#endif
17467+
17468 load_LDT_nolock(&next->context);
17469+
17470+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17471+ if (!(__supported_pte_mask & _PAGE_NX))
17472+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17473+#endif
17474+
17475+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17476+#ifdef CONFIG_PAX_PAGEEXEC
17477+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17478+#endif
17479+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17480+#endif
17481+
17482 }
17483+#endif
17484 }
17485-#endif
17486 }
17487
17488 #define activate_mm(prev, next) \
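
Under PAX_PER_CPU_PGD + UDEREF, switch_mm() maintains two page directories per CPU, a kernel view and a user view, distinguished by PCID_KERNEL (0) and PCID_USER (1). On INVPCID-capable hardware, stale user translations are flushed with a single-context invalidation instead of a full CR3 reload; otherwise CR3 is written twice, optionally with the no-flush bit to preserve kernel TLB entries. The INVPCID descriptor is two 64-bit words: the PCID in the low word, a linear address (unused for single-context invalidation) in the second. A sketch matching the patch's hand-encoded __ASM_INVPCID (privileged: executing this outside the kernel faults):

#include <stdint.h>

#define INVPCID_SINGLE_CONTEXT 1UL
#define PCID_USER              1UL

/* Flush all non-global TLB entries tagged with the given PCID. The
 * .byte sequence encodes invpcid (%rdx),%rax, spelled out because
 * assemblers of the era lacked the mnemonic. Kernel context only. */
static inline void invpcid_single_context(uint64_t pcid)
{
    struct { uint64_t pcid, addr; } desc = { pcid, 0 };

    asm volatile(".byte 0x66,0x0f,0x38,0x82,0x02"
                 : : "d" (&desc), "a" (INVPCID_SINGLE_CONTEXT) : "memory");
}

/* e.g. invpcid_single_context(PCID_USER) after cloning the user pgd. */
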
17489diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17490index e3b7819..b257c64 100644
17491--- a/arch/x86/include/asm/module.h
17492+++ b/arch/x86/include/asm/module.h
17493@@ -5,6 +5,7 @@
17494
17495 #ifdef CONFIG_X86_64
17496 /* X86_64 does not define MODULE_PROC_FAMILY */
17497+#define MODULE_PROC_FAMILY ""
17498 #elif defined CONFIG_M486
17499 #define MODULE_PROC_FAMILY "486 "
17500 #elif defined CONFIG_M586
17501@@ -57,8 +58,20 @@
17502 #error unknown processor family
17503 #endif
17504
17505-#ifdef CONFIG_X86_32
17506-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17507+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17508+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17509+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17510+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17511+#else
17512+#define MODULE_PAX_KERNEXEC ""
17513 #endif
17514
17515+#ifdef CONFIG_PAX_MEMORY_UDEREF
17516+#define MODULE_PAX_UDEREF "UDEREF "
17517+#else
17518+#define MODULE_PAX_UDEREF ""
17519+#endif
17520+
17521+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17522+
17523 #endif /* _ASM_X86_MODULE_H */
17524diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17525index 5f2fc44..106caa6 100644
17526--- a/arch/x86/include/asm/nmi.h
17527+++ b/arch/x86/include/asm/nmi.h
17528@@ -36,26 +36,35 @@ enum {
17529
17530 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17531
17532+struct nmiaction;
17533+
17534+struct nmiwork {
17535+ const struct nmiaction *action;
17536+ u64 max_duration;
17537+ struct irq_work irq_work;
17538+};
17539+
17540 struct nmiaction {
17541 struct list_head list;
17542 nmi_handler_t handler;
17543- u64 max_duration;
17544- struct irq_work irq_work;
17545 unsigned long flags;
17546 const char *name;
17547-};
17548+ struct nmiwork *work;
17549+} __do_const;
17550
17551 #define register_nmi_handler(t, fn, fg, n, init...) \
17552 ({ \
17553- static struct nmiaction init fn##_na = { \
17554+ static struct nmiwork fn##_nw; \
17555+ static const struct nmiaction init fn##_na = { \
17556 .handler = (fn), \
17557 .name = (n), \
17558 .flags = (fg), \
17559+ .work = &fn##_nw, \
17560 }; \
17561 __register_nmi_handler((t), &fn##_na); \
17562 })
17563
17564-int __register_nmi_handler(unsigned int, struct nmiaction *);
17565+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17566
17567 void unregister_nmi_handler(unsigned int, const char *);
17568
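
The nmiaction rework is the standard constification split seen throughout the patch: the registration record (handler, name, flags) becomes const and can live in read-only memory, while the fields the NMI path actually writes (max_duration, the irq_work) move into a separate mutable struct nmiwork reached through a pointer. The pattern in miniature:

#include <stdio.h>

struct work {                  /* mutable runtime state */
    unsigned long long max_duration;
};

struct action {                /* immutable registration record */
    int         (*handler)(void);
    const char   *name;
    struct work  *work;        /* only the pointee is writable */
};

static int ping(void) { return 1; }

static struct work         ping_work;
static const struct action ping_action = {
    .handler = ping, .name = "ping", .work = &ping_work,
};

int main(void)
{
    ping_action.work->max_duration = 42;  /* fine: work is mutable    */
    /* ping_action.handler = 0;          error: the object is const  */
    printf("%s -> %d\n", ping_action.name, ping_action.handler());
    return 0;
}
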
17569diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17570index 802dde3..9183e68 100644
17571--- a/arch/x86/include/asm/page.h
17572+++ b/arch/x86/include/asm/page.h
17573@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17574 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17575
17576 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17577+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17578
17579 #define __boot_va(x) __va(x)
17580 #define __boot_pa(x) __pa(x)
17581@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17582 * virt_to_page(kaddr) returns a valid pointer if and only if
17583 * virt_addr_valid(kaddr) returns true.
17584 */
17585-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17586 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17587 extern bool __virt_addr_valid(unsigned long kaddr);
17588 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17589
17590+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17591+#define virt_to_page(kaddr) \
17592+ ({ \
17593+ const void *__kaddr = (const void *)(kaddr); \
17594+ BUG_ON(!virt_addr_valid(__kaddr)); \
17595+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17596+ })
17597+#else
17598+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17599+#endif
17600+
17601 #endif /* __ASSEMBLY__ */
17602
17603 #include <asm-generic/memory_model.h>
17604diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17605index b3bebf9..e1f5d95 100644
17606--- a/arch/x86/include/asm/page_64.h
17607+++ b/arch/x86/include/asm/page_64.h
17608@@ -7,9 +7,9 @@
17609
17610 /* duplicated to the one in bootmem.h */
17611 extern unsigned long max_pfn;
17612-extern unsigned long phys_base;
17613+extern const unsigned long phys_base;
17614
17615-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17616+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17617 {
17618 unsigned long y = x - __START_KERNEL_map;
17619
17620diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17621index 32444ae..1a1624b 100644
17622--- a/arch/x86/include/asm/paravirt.h
17623+++ b/arch/x86/include/asm/paravirt.h
17624@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17625 return (pmd_t) { ret };
17626 }
17627
17628-static inline pmdval_t pmd_val(pmd_t pmd)
17629+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17630 {
17631 pmdval_t ret;
17632
17633@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17634 val);
17635 }
17636
17637+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17638+{
17639+ pgdval_t val = native_pgd_val(pgd);
17640+
17641+ if (sizeof(pgdval_t) > sizeof(long))
17642+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17643+ val, (u64)val >> 32);
17644+ else
17645+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17646+ val);
17647+}
17648+
17649 static inline void pgd_clear(pgd_t *pgdp)
17650 {
17651 set_pgd(pgdp, __pgd(0));
17652@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17653 pv_mmu_ops.set_fixmap(idx, phys, flags);
17654 }
17655
17656+#ifdef CONFIG_PAX_KERNEXEC
17657+static inline unsigned long pax_open_kernel(void)
17658+{
17659+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17660+}
17661+
17662+static inline unsigned long pax_close_kernel(void)
17663+{
17664+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17665+}
17666+#else
17667+static inline unsigned long pax_open_kernel(void) { return 0; }
17668+static inline unsigned long pax_close_kernel(void) { return 0; }
17669+#endif
17670+
17671 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17672
17673 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17674@@ -906,7 +933,7 @@ extern void default_banner(void);
17675
17676 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17677 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17678-#define PARA_INDIRECT(addr) *%cs:addr
17679+#define PARA_INDIRECT(addr) *%ss:addr
17680 #endif
17681
17682 #define INTERRUPT_RETURN \
17683@@ -981,6 +1008,21 @@ extern void default_banner(void);
17684 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17685 CLBR_NONE, \
17686 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17687+
17688+#define GET_CR0_INTO_RDI \
17689+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17690+ mov %rax,%rdi
17691+
17692+#define SET_RDI_INTO_CR0 \
17693+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17694+
17695+#define GET_CR3_INTO_RDI \
17696+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17697+ mov %rax,%rdi
17698+
17699+#define SET_RDI_INTO_CR3 \
17700+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17701+
17702 #endif /* CONFIG_X86_32 */
17703
17704 #endif /* __ASSEMBLY__ */
17705diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17706index 7549b8b..f0edfda 100644
17707--- a/arch/x86/include/asm/paravirt_types.h
17708+++ b/arch/x86/include/asm/paravirt_types.h
17709@@ -84,7 +84,7 @@ struct pv_init_ops {
17710 */
17711 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17712 unsigned long addr, unsigned len);
17713-};
17714+} __no_const __no_randomize_layout;
17715
17716
17717 struct pv_lazy_ops {
17718@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17719 void (*enter)(void);
17720 void (*leave)(void);
17721 void (*flush)(void);
17722-};
17723+} __no_randomize_layout;
17724
17725 struct pv_time_ops {
17726 unsigned long long (*sched_clock)(void);
17727 unsigned long long (*steal_clock)(int cpu);
17728 unsigned long (*get_tsc_khz)(void);
17729-};
17730+} __no_const __no_randomize_layout;
17731
17732 struct pv_cpu_ops {
17733 /* hooks for various privileged instructions */
17734@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17735
17736 void (*start_context_switch)(struct task_struct *prev);
17737 void (*end_context_switch)(struct task_struct *next);
17738-};
17739+} __no_const __no_randomize_layout;
17740
17741 struct pv_irq_ops {
17742 /*
17743@@ -215,7 +215,7 @@ struct pv_irq_ops {
17744 #ifdef CONFIG_X86_64
17745 void (*adjust_exception_frame)(void);
17746 #endif
17747-};
17748+} __no_randomize_layout;
17749
17750 struct pv_apic_ops {
17751 #ifdef CONFIG_X86_LOCAL_APIC
17752@@ -223,7 +223,7 @@ struct pv_apic_ops {
17753 unsigned long start_eip,
17754 unsigned long start_esp);
17755 #endif
17756-};
17757+} __no_const __no_randomize_layout;
17758
17759 struct pv_mmu_ops {
17760 unsigned long (*read_cr2)(void);
17761@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17762 struct paravirt_callee_save make_pud;
17763
17764 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17765+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17766 #endif /* PAGETABLE_LEVELS == 4 */
17767 #endif /* PAGETABLE_LEVELS >= 3 */
17768
17769@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17770 an mfn. We can tell which is which from the index. */
17771 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17772 phys_addr_t phys, pgprot_t flags);
17773-};
17774+
17775+#ifdef CONFIG_PAX_KERNEXEC
17776+ unsigned long (*pax_open_kernel)(void);
17777+ unsigned long (*pax_close_kernel)(void);
17778+#endif
17779+
17780+} __no_randomize_layout;
17781
17782 struct arch_spinlock;
17783 #ifdef CONFIG_SMP
17784@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17785 struct pv_lock_ops {
17786 struct paravirt_callee_save lock_spinning;
17787 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17788-};
17789+} __no_randomize_layout;
17790
17791 /* This contains all the paravirt structures: we get a convenient
17792 * number for each function using the offset which we use to indicate
17793- * what to patch. */
17794+ * what to patch.
17795+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17796+ */
17797+
17798 struct paravirt_patch_template {
17799 struct pv_init_ops pv_init_ops;
17800 struct pv_time_ops pv_time_ops;
17801@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17802 struct pv_apic_ops pv_apic_ops;
17803 struct pv_mmu_ops pv_mmu_ops;
17804 struct pv_lock_ops pv_lock_ops;
17805-};
17806+} __no_randomize_layout;
17807
17808 extern struct pv_info pv_info;
17809 extern struct pv_init_ops pv_init_ops;
17810diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17811index c4412e9..90e88c5 100644
17812--- a/arch/x86/include/asm/pgalloc.h
17813+++ b/arch/x86/include/asm/pgalloc.h
17814@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17815 pmd_t *pmd, pte_t *pte)
17816 {
17817 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17818+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17819+}
17820+
17821+static inline void pmd_populate_user(struct mm_struct *mm,
17822+ pmd_t *pmd, pte_t *pte)
17823+{
17824+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17825 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17826 }
17827
17828@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17829
17830 #ifdef CONFIG_X86_PAE
17831 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17832+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17833+{
17834+ pud_populate(mm, pudp, pmd);
17835+}
17836 #else /* !CONFIG_X86_PAE */
17837 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17838 {
17839 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17840 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17841 }
17842+
17843+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17844+{
17845+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17846+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17847+}
17848 #endif /* CONFIG_X86_PAE */
17849
17850 #if PAGETABLE_LEVELS > 3
17851@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17852 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17853 }
17854
17855+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17856+{
17857+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17858+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17859+}
17860+
17861 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17862 {
17863 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
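
The pgalloc hunks split table population into kernel and user flavours: kernel page tables are linked with _KERNPG_TABLE, which is _PAGE_TABLE without the _PAGE_USER bit, so kernel mappings are never reachable at user privilege through an intermediate entry. A compilable check of that relationship, using the standard x86 PTE flag values:

#include <stdio.h>

/* x86 page-table entry attribute bits. */
#define _PAGE_PRESENT  0x001UL
#define _PAGE_RW       0x002UL
#define _PAGE_USER     0x004UL
#define _PAGE_ACCESSED 0x020UL
#define _PAGE_DIRTY    0x040UL

#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE   (_KERNPG_TABLE | _PAGE_USER)

int main(void)
{
    printf("_KERNPG_TABLE = %#lx, _PAGE_TABLE = %#lx, diff is _PAGE_USER: %d\n",
           _KERNPG_TABLE, _PAGE_TABLE,
           (_PAGE_TABLE ^ _KERNPG_TABLE) == _PAGE_USER);   /* 0x63, 0x67, 1 */
    return 0;
}
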
17864diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17865index 206a87f..1623b06 100644
17866--- a/arch/x86/include/asm/pgtable-2level.h
17867+++ b/arch/x86/include/asm/pgtable-2level.h
17868@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17869
17870 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17871 {
17872+ pax_open_kernel();
17873 *pmdp = pmd;
17874+ pax_close_kernel();
17875 }
17876
17877 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17878diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17879index 81bb91b..9392125 100644
17880--- a/arch/x86/include/asm/pgtable-3level.h
17881+++ b/arch/x86/include/asm/pgtable-3level.h
17882@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17883
17884 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17885 {
17886+ pax_open_kernel();
17887 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17888+ pax_close_kernel();
17889 }
17890
17891 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17892 {
17893+ pax_open_kernel();
17894 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17895+ pax_close_kernel();
17896 }
17897
17898 /*
17899diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17900index e8a5454..1539359 100644
17901--- a/arch/x86/include/asm/pgtable.h
17902+++ b/arch/x86/include/asm/pgtable.h
17903@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17904
17905 #ifndef __PAGETABLE_PUD_FOLDED
17906 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17907+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17908 #define pgd_clear(pgd) native_pgd_clear(pgd)
17909 #endif
17910
17911@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17912
17913 #define arch_end_context_switch(prev) do {} while(0)
17914
17915+#define pax_open_kernel() native_pax_open_kernel()
17916+#define pax_close_kernel() native_pax_close_kernel()
17917 #endif /* CONFIG_PARAVIRT */
17918
17919+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17920+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17921+
17922+#ifdef CONFIG_PAX_KERNEXEC
17923+static inline unsigned long native_pax_open_kernel(void)
17924+{
17925+ unsigned long cr0;
17926+
17927+ preempt_disable();
17928+ barrier();
17929+ cr0 = read_cr0() ^ X86_CR0_WP;
17930+ BUG_ON(cr0 & X86_CR0_WP);
17931+ write_cr0(cr0);
17932+ barrier();
17933+ return cr0 ^ X86_CR0_WP;
17934+}
17935+
17936+static inline unsigned long native_pax_close_kernel(void)
17937+{
17938+ unsigned long cr0;
17939+
17940+ barrier();
17941+ cr0 = read_cr0() ^ X86_CR0_WP;
17942+ BUG_ON(!(cr0 & X86_CR0_WP));
17943+ write_cr0(cr0);
17944+ barrier();
17945+ preempt_enable_no_resched();
17946+ return cr0 ^ X86_CR0_WP;
17947+}
17948+#else
17949+static inline unsigned long native_pax_open_kernel(void) { return 0; }
17950+static inline unsigned long native_pax_close_kernel(void) { return 0; }
17951+#endif
17952+
17953 /*
17954 * The following only work if pte_present() is true.
17955 * Undefined behaviour if not..
17956 */
17957+static inline int pte_user(pte_t pte)
17958+{
17959+ return pte_val(pte) & _PAGE_USER;
17960+}
17961+
17962 static inline int pte_dirty(pte_t pte)
17963 {
17964 return pte_flags(pte) & _PAGE_DIRTY;
17965@@ -161,6 +203,11 @@ static inline unsigned long pud_pfn(pud_t pud)
17966 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
17967 }
17968
17969+static inline unsigned long pgd_pfn(pgd_t pgd)
17970+{
17971+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
17972+}
17973+
17974 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
17975
17976 static inline int pmd_large(pmd_t pte)
17977@@ -214,9 +261,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
17978 return pte_clear_flags(pte, _PAGE_RW);
17979 }
17980
17981+static inline pte_t pte_mkread(pte_t pte)
17982+{
17983+ return __pte(pte_val(pte) | _PAGE_USER);
17984+}
17985+
17986 static inline pte_t pte_mkexec(pte_t pte)
17987 {
17988- return pte_clear_flags(pte, _PAGE_NX);
17989+#ifdef CONFIG_X86_PAE
17990+ if (__supported_pte_mask & _PAGE_NX)
17991+ return pte_clear_flags(pte, _PAGE_NX);
17992+ else
17993+#endif
17994+ return pte_set_flags(pte, _PAGE_USER);
17995+}
17996+
17997+static inline pte_t pte_exprotect(pte_t pte)
17998+{
17999+#ifdef CONFIG_X86_PAE
18000+ if (__supported_pte_mask & _PAGE_NX)
18001+ return pte_set_flags(pte, _PAGE_NX);
18002+ else
18003+#endif
18004+ return pte_clear_flags(pte, _PAGE_USER);
18005 }
18006
18007 static inline pte_t pte_mkdirty(pte_t pte)
18008@@ -446,6 +513,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18009 #endif
18010
18011 #ifndef __ASSEMBLY__
18012+
18013+#ifdef CONFIG_PAX_PER_CPU_PGD
18014+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18015+enum cpu_pgd_type {kernel = 0, user = 1};
18016+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18017+{
18018+ return cpu_pgd[cpu][type];
18019+}
18020+#endif
18021+
18022 #include <linux/mm_types.h>
18023 #include <linux/mmdebug.h>
18024 #include <linux/log2.h>
18025@@ -592,7 +669,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18026 * Currently stuck as a macro due to indirect forward reference to
18027 * linux/mmzone.h's __section_mem_map_addr() definition:
18028 */
18029-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18030+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18031
18032 /* Find an entry in the second-level page table.. */
18033 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18034@@ -632,7 +709,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18035 * Currently stuck as a macro due to indirect forward reference to
18036 * linux/mmzone.h's __section_mem_map_addr() definition:
18037 */
18038-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18039+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18040
18041 /* to find an entry in a page-table-directory. */
18042 static inline unsigned long pud_index(unsigned long address)
18043@@ -647,7 +724,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18044
18045 static inline int pgd_bad(pgd_t pgd)
18046 {
18047- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18048+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18049 }
18050
18051 static inline int pgd_none(pgd_t pgd)
18052@@ -670,7 +747,12 @@ static inline int pgd_none(pgd_t pgd)
18053 * pgd_offset() returns a (pgd_t *)
18054 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18055 */
18056-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18057+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18058+
18059+#ifdef CONFIG_PAX_PER_CPU_PGD
18060+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18061+#endif
18062+
18063 /*
18064 * a shortcut which implies the use of the kernel's pgd, instead
18065 * of a process's
18066@@ -681,6 +763,23 @@ static inline int pgd_none(pgd_t pgd)
18067 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18068 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18069
18070+#ifdef CONFIG_X86_32
18071+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18072+#else
18073+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18074+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18075+
18076+#ifdef CONFIG_PAX_MEMORY_UDEREF
18077+#ifdef __ASSEMBLY__
18078+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18079+#else
18080+extern unsigned long pax_user_shadow_base;
18081+extern pgdval_t clone_pgd_mask;
18082+#endif
18083+#endif
18084+
18085+#endif
18086+
18087 #ifndef __ASSEMBLY__
18088
18089 extern int direct_gbpages;
18090@@ -847,11 +946,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18091 * dst and src can be on the same page, but the range must not overlap,
18092 * and must not cross a page boundary.
18093 */
18094-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18095+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18096 {
18097- memcpy(dst, src, count * sizeof(pgd_t));
18098+ pax_open_kernel();
18099+ while (count--)
18100+ *dst++ = *src++;
18101+ pax_close_kernel();
18102 }
18103
18104+#ifdef CONFIG_PAX_PER_CPU_PGD
18105+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18106+#endif
18107+
18108+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18109+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18110+#else
18111+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18112+#endif
18113+
18114 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18115 static inline int page_level_shift(enum pg_level level)
18116 {
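
native_pax_open_kernel()/native_pax_close_kernel() implement KERNEXEC's write window by toggling CR0.WP: with WP clear, supervisor-mode writes bypass read-only page protections, so the kernel can briefly patch otherwise read-only data (page tables, GDT entries) between the two calls. Preemption stays disabled across the window, and XOR-ing against X86_CR0_WP both flips the bit and makes an unbalanced open/close trip the BUG_ON. A shape-only model with hypothetical stand-ins for the privileged CR0 accessors:

#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)

/* Hypothetical stand-ins for the privileged read_cr0()/write_cr0(). */
static unsigned long fake_cr0 = X86_CR0_WP;
static unsigned long read_cr0(void)    { return fake_cr0; }
static void write_cr0(unsigned long v) { fake_cr0 = v; }

static unsigned long pax_open_kernel(void)
{
    unsigned long cr0 = read_cr0() ^ X86_CR0_WP;
    assert(!(cr0 & X86_CR0_WP));   /* WP was set: we are the opener     */
    write_cr0(cr0);                /* WP now clear: RO pages writable   */
    return cr0 ^ X86_CR0_WP;
}

static unsigned long pax_close_kernel(void)
{
    unsigned long cr0 = read_cr0() ^ X86_CR0_WP;
    assert(cr0 & X86_CR0_WP);      /* WP was clear: the window was open */
    write_cr0(cr0);                /* WP set again                      */
    return cr0 ^ X86_CR0_WP;
}

int main(void)
{
    pax_open_kernel();
    /* ... patch read-only kernel data here ... */
    pax_close_kernel();
    printf("WP restored: %d\n", !!(fake_cr0 & X86_CR0_WP));  /* 1 */
    return 0;
}
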
18117diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18118index b6c0b40..3535d47 100644
18119--- a/arch/x86/include/asm/pgtable_32.h
18120+++ b/arch/x86/include/asm/pgtable_32.h
18121@@ -25,9 +25,6 @@
18122 struct mm_struct;
18123 struct vm_area_struct;
18124
18125-extern pgd_t swapper_pg_dir[1024];
18126-extern pgd_t initial_page_table[1024];
18127-
18128 static inline void pgtable_cache_init(void) { }
18129 static inline void check_pgt_cache(void) { }
18130 void paging_init(void);
18131@@ -45,6 +42,12 @@ void paging_init(void);
18132 # include <asm/pgtable-2level.h>
18133 #endif
18134
18135+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18136+extern pgd_t initial_page_table[PTRS_PER_PGD];
18137+#ifdef CONFIG_X86_PAE
18138+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18139+#endif
18140+
18141 #if defined(CONFIG_HIGHPTE)
18142 #define pte_offset_map(dir, address) \
18143 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18144@@ -59,12 +62,17 @@ void paging_init(void);
18145 /* Clear a kernel PTE and flush it from the TLB */
18146 #define kpte_clear_flush(ptep, vaddr) \
18147 do { \
18148+ pax_open_kernel(); \
18149 pte_clear(&init_mm, (vaddr), (ptep)); \
18150+ pax_close_kernel(); \
18151 __flush_tlb_one((vaddr)); \
18152 } while (0)
18153
18154 #endif /* !__ASSEMBLY__ */
18155
18156+#define HAVE_ARCH_UNMAPPED_AREA
18157+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18158+
18159 /*
18160 * kern_addr_valid() is (1) for FLATMEM and (0) for
18161 * SPARSEMEM and DISCONTIGMEM
18162diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18163index 9fb2f2b..b04b4bf 100644
18164--- a/arch/x86/include/asm/pgtable_32_types.h
18165+++ b/arch/x86/include/asm/pgtable_32_types.h
18166@@ -8,7 +8,7 @@
18167 */
18168 #ifdef CONFIG_X86_PAE
18169 # include <asm/pgtable-3level_types.h>
18170-# define PMD_SIZE (1UL << PMD_SHIFT)
18171+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18172 # define PMD_MASK (~(PMD_SIZE - 1))
18173 #else
18174 # include <asm/pgtable-2level_types.h>
18175@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18176 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18177 #endif
18178
18179+#ifdef CONFIG_PAX_KERNEXEC
18180+#ifndef __ASSEMBLY__
18181+extern unsigned char MODULES_EXEC_VADDR[];
18182+extern unsigned char MODULES_EXEC_END[];
18183+#endif
18184+#include <asm/boot.h>
18185+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18186+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18187+#else
18188+#define ktla_ktva(addr) (addr)
18189+#define ktva_ktla(addr) (addr)
18190+#endif
18191+
18192 #define MODULES_VADDR VMALLOC_START
18193 #define MODULES_END VMALLOC_END
18194 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
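
With KERNEXEC on i386 the kernel text is mapped at two addresses, and ktla_ktva()/ktva_ktla() convert between the kernel text linear address and the kernel virtual address by the constant offset LOAD_PHYSICAL_ADDR + PAGE_OFFSET; without KERNEXEC (and on amd64, per the pgtable_64_types.h hunk below) both are the identity. A worked round-trip, assuming the common 16 MiB load address and 3G/1G split:

#include <stdio.h>

/* Assumed example values: 16 MiB load address, PAGE_OFFSET for a
 * 3G/1G split; the real values come from the kernel config. */
#define LOAD_PHYSICAL_ADDR 0x1000000UL
#define PAGE_OFFSET        0xC0000000UL

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
    unsigned long la = 0x100000UL;
    unsigned long va = ktla_ktva(la);
    printf("ktla_ktva(%#lx) = %#lx, round-trip ok: %d\n",
           la, va, ktva_ktla(va) == la);   /* 0xc1100000, 1 */
    return 0;
}
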
18195diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18196index 4572b2f..4430113 100644
18197--- a/arch/x86/include/asm/pgtable_64.h
18198+++ b/arch/x86/include/asm/pgtable_64.h
18199@@ -16,11 +16,16 @@
18200
18201 extern pud_t level3_kernel_pgt[512];
18202 extern pud_t level3_ident_pgt[512];
18203+extern pud_t level3_vmalloc_start_pgt[512];
18204+extern pud_t level3_vmalloc_end_pgt[512];
18205+extern pud_t level3_vmemmap_pgt[512];
18206+extern pud_t level2_vmemmap_pgt[512];
18207 extern pmd_t level2_kernel_pgt[512];
18208 extern pmd_t level2_fixmap_pgt[512];
18209-extern pmd_t level2_ident_pgt[512];
18210+extern pmd_t level2_ident_pgt[512*2];
18211 extern pte_t level1_fixmap_pgt[512];
18212-extern pgd_t init_level4_pgt[];
18213+extern pte_t level1_vsyscall_pgt[512];
18214+extern pgd_t init_level4_pgt[512];
18215
18216 #define swapper_pg_dir init_level4_pgt
18217
18218@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18219
18220 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18221 {
18222+ pax_open_kernel();
18223 *pmdp = pmd;
18224+ pax_close_kernel();
18225 }
18226
18227 static inline void native_pmd_clear(pmd_t *pmd)
18228@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18229
18230 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18231 {
18232+ pax_open_kernel();
18233 *pudp = pud;
18234+ pax_close_kernel();
18235 }
18236
18237 static inline void native_pud_clear(pud_t *pud)
18238@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18239
18240 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18241 {
18242+ pax_open_kernel();
18243+ *pgdp = pgd;
18244+ pax_close_kernel();
18245+}
18246+
18247+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18248+{
18249 *pgdp = pgd;
18250 }
18251
18252diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18253index 602b602..acb53ed 100644
18254--- a/arch/x86/include/asm/pgtable_64_types.h
18255+++ b/arch/x86/include/asm/pgtable_64_types.h
18256@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18257 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18258 #define MODULES_END _AC(0xffffffffff000000, UL)
18259 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18260+#define MODULES_EXEC_VADDR MODULES_VADDR
18261+#define MODULES_EXEC_END MODULES_END
18262 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18263 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18264 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18265 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18266
18267+#define ktla_ktva(addr) (addr)
18268+#define ktva_ktla(addr) (addr)
18269+
18270 #define EARLY_DYNAMIC_PAGE_TABLES 64
18271
18272 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18273diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18274index 25bcd4a..bf3f815 100644
18275--- a/arch/x86/include/asm/pgtable_types.h
18276+++ b/arch/x86/include/asm/pgtable_types.h
18277@@ -110,8 +110,10 @@
18278
18279 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18280 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18281-#else
18282+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18283 #define _PAGE_NX (_AT(pteval_t, 0))
18284+#else
18285+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18286 #endif
18287
18288 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
18289@@ -167,6 +169,9 @@ enum page_cache_mode {
18290 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18291 _PAGE_ACCESSED)
18292
18293+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18294+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18295+
18296 #define __PAGE_KERNEL_EXEC \
18297 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18298 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18299@@ -174,7 +179,7 @@ enum page_cache_mode {
18300 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18301 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18302 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18303-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18304+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18305 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18306 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18307 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18308@@ -220,7 +225,7 @@ enum page_cache_mode {
18309 #ifdef CONFIG_X86_64
18310 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18311 #else
18312-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18313+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18314 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18315 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18316 #endif
18317@@ -259,7 +264,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18318 {
18319 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18320 }
18321+#endif
18322
18323+#if PAGETABLE_LEVELS == 3
18324+#include <asm-generic/pgtable-nopud.h>
18325+#endif
18326+
18327+#if PAGETABLE_LEVELS == 2
18328+#include <asm-generic/pgtable-nopmd.h>
18329+#endif
18330+
18331+#ifndef __ASSEMBLY__
18332 #if PAGETABLE_LEVELS > 3
18333 typedef struct { pudval_t pud; } pud_t;
18334
18335@@ -273,8 +288,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18336 return pud.pud;
18337 }
18338 #else
18339-#include <asm-generic/pgtable-nopud.h>
18340-
18341 static inline pudval_t native_pud_val(pud_t pud)
18342 {
18343 return native_pgd_val(pud.pgd);
18344@@ -294,8 +307,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18345 return pmd.pmd;
18346 }
18347 #else
18348-#include <asm-generic/pgtable-nopmd.h>
18349-
18350 static inline pmdval_t native_pmd_val(pmd_t pmd)
18351 {
18352 return native_pgd_val(pmd.pud.pgd);
18353@@ -402,7 +413,6 @@ typedef struct page *pgtable_t;
18354
18355 extern pteval_t __supported_pte_mask;
18356 extern void set_nx(void);
18357-extern int nx_enabled;
18358
18359 #define pgprot_writecombine pgprot_writecombine
18360 extern pgprot_t pgprot_writecombine(pgprot_t prot);
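
Annotation: the _PAGE_NX hunk is the subtle one. 32-bit non-PAE page tables have no hardware NX bit, so unless KMEMCHECK or soft-dirty already claims the spare software bit, the patch encodes non-executability in _PAGE_BIT_HIDDEN. The MMU ignores that bit; SEGMEXEC/KERNEXEC enforce the policy by other means, but recording it in the PTE keeps generic protection tests meaningful. The __PAGE_KERNEL_VSYSCALL downgrade from RX to RO is consistent with running the vsyscall page in emulated rather than directly executed mode. A sketch of such a test, assuming the stock pte_flags() accessor (the helper name is illustrative):

static inline int pte_is_exec(pte_t pte)
{
	/* works whether _PAGE_NX is the hardware bit, the hidden soft bit, or 0 */
	return (pte_flags(pte) & _PAGE_NX) == 0;
}
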
18361diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18362index 8f327184..368fb29 100644
18363--- a/arch/x86/include/asm/preempt.h
18364+++ b/arch/x86/include/asm/preempt.h
18365@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18366 */
18367 static __always_inline bool __preempt_count_dec_and_test(void)
18368 {
18369- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18370+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18371 }
18372
18373 /*
18374diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18375index a092a0c..8e9640b 100644
18376--- a/arch/x86/include/asm/processor.h
18377+++ b/arch/x86/include/asm/processor.h
18378@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18379 /* Index into per_cpu list: */
18380 u16 cpu_index;
18381 u32 microcode;
18382-};
18383+} __randomize_layout;
18384
18385 #define X86_VENDOR_INTEL 0
18386 #define X86_VENDOR_CYRIX 1
18387@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18388 : "memory");
18389 }
18390
18391+/* invpcid (%rdx),%rax */
18392+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18393+
18394+#define INVPCID_SINGLE_ADDRESS 0UL
18395+#define INVPCID_SINGLE_CONTEXT 1UL
18396+#define INVPCID_ALL_GLOBAL 2UL
18397+#define INVPCID_ALL_NONGLOBAL 3UL
18398+
18399+#define PCID_KERNEL 0UL
18400+#define PCID_USER 1UL
18401+#define PCID_NOFLUSH (1UL << 63)
18402+
18403 static inline void load_cr3(pgd_t *pgdir)
18404 {
18405- write_cr3(__pa(pgdir));
18406+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18407 }
18408
18409 #ifdef CONFIG_X86_32
18410@@ -282,7 +294,7 @@ struct tss_struct {
18411
18412 } ____cacheline_aligned;
18413
18414-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18415+extern struct tss_struct init_tss[NR_CPUS];
18416
18417 /*
18418 * Save the original ist values for checking stack pointers during debugging
18419@@ -479,6 +491,7 @@ struct thread_struct {
18420 unsigned short ds;
18421 unsigned short fsindex;
18422 unsigned short gsindex;
18423+ unsigned short ss;
18424 #endif
18425 #ifdef CONFIG_X86_32
18426 unsigned long ip;
18427@@ -588,29 +601,8 @@ static inline void load_sp0(struct tss_struct *tss,
18428 extern unsigned long mmu_cr4_features;
18429 extern u32 *trampoline_cr4_features;
18430
18431-static inline void set_in_cr4(unsigned long mask)
18432-{
18433- unsigned long cr4;
18434-
18435- mmu_cr4_features |= mask;
18436- if (trampoline_cr4_features)
18437- *trampoline_cr4_features = mmu_cr4_features;
18438- cr4 = read_cr4();
18439- cr4 |= mask;
18440- write_cr4(cr4);
18441-}
18442-
18443-static inline void clear_in_cr4(unsigned long mask)
18444-{
18445- unsigned long cr4;
18446-
18447- mmu_cr4_features &= ~mask;
18448- if (trampoline_cr4_features)
18449- *trampoline_cr4_features = mmu_cr4_features;
18450- cr4 = read_cr4();
18451- cr4 &= ~mask;
18452- write_cr4(cr4);
18453-}
18454+extern void set_in_cr4(unsigned long mask);
18455+extern void clear_in_cr4(unsigned long mask);
18456
18457 typedef struct {
18458 unsigned long seg;
18459@@ -838,11 +830,18 @@ static inline void spin_lock_prefetch(const void *x)
18460 */
18461 #define TASK_SIZE PAGE_OFFSET
18462 #define TASK_SIZE_MAX TASK_SIZE
18463+
18464+#ifdef CONFIG_PAX_SEGMEXEC
18465+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18466+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18467+#else
18468 #define STACK_TOP TASK_SIZE
18469-#define STACK_TOP_MAX STACK_TOP
18470+#endif
18471+
18472+#define STACK_TOP_MAX TASK_SIZE
18473
18474 #define INIT_THREAD { \
18475- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18476+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18477 .vm86_info = NULL, \
18478 .sysenter_cs = __KERNEL_CS, \
18479 .io_bitmap_ptr = NULL, \
18480@@ -856,7 +855,7 @@ static inline void spin_lock_prefetch(const void *x)
18481 */
18482 #define INIT_TSS { \
18483 .x86_tss = { \
18484- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18485+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18486 .ss0 = __KERNEL_DS, \
18487 .ss1 = __KERNEL_CS, \
18488 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18489@@ -867,11 +866,7 @@ static inline void spin_lock_prefetch(const void *x)
18490 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18491
18492 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18493-#define KSTK_TOP(info) \
18494-({ \
18495- unsigned long *__ptr = (unsigned long *)(info); \
18496- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18497-})
18498+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18499
18500 /*
18501 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18502@@ -886,7 +881,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18503 #define task_pt_regs(task) \
18504 ({ \
18505 struct pt_regs *__regs__; \
18506- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18507+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18508 __regs__ - 1; \
18509 })
18510
18511@@ -902,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18512 * particular problem by preventing anything from being mapped
18513 * at the maximum canonical address.
18514 */
18515-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18516+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18517
18518 /* This decides where the kernel will search for a free chunk of vm
18519 * space during mmap's.
18520 */
18521 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18522- 0xc0000000 : 0xFFFFe000)
18523+ 0xc0000000 : 0xFFFFf000)
18524
18525 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18526 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18527@@ -919,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18528 #define STACK_TOP_MAX TASK_SIZE_MAX
18529
18530 #define INIT_THREAD { \
18531- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18532+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18533 }
18534
18535 #define INIT_TSS { \
18536- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18537+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18538 }
18539
18540 /*
18541@@ -951,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18542 */
18543 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18544
18545+#ifdef CONFIG_PAX_SEGMEXEC
18546+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18547+#endif
18548+
18549 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18550
18551 /* Get/set a process' ability to use the timestamp counter instruction */
18552@@ -995,7 +994,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18553 return 0;
18554 }
18555
18556-extern unsigned long arch_align_stack(unsigned long sp);
18557+#define arch_align_stack(x) ((x) & ~0xfUL)
18558 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18559
18560 void default_idle(void);
18561@@ -1005,6 +1004,6 @@ bool xen_set_default_idle(void);
18562 #define xen_set_default_idle 0
18563 #endif
18564
18565-void stop_this_cpu(void *dummy);
18566+void stop_this_cpu(void *dummy) __noreturn;
18567 void df_debug(struct pt_regs *regs, long error_code);
18568 #endif /* _ASM_X86_PROCESSOR_H */
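
Annotation: the PCID block added above establishes the convention the rest of the patch builds on: the kernel address space is tagged PCID 0, the UDEREF shadow of userland PCID 1, and CR3 bit 63 (PCID_NOFLUSH) asks the CPU to keep the cached translations for the PCID being loaded. That is why load_cr3() now ORs in PCID_KERNEL. A sketch of a tagged CR3 load assembled from the same pieces (the helper is illustrative):

static inline void load_cr3_tagged(pgd_t *pgdir, unsigned long pcid, bool flush)
{
	unsigned long cr3 = __pa(pgdir) | pcid;

	if (!flush)
		cr3 |= PCID_NOFLUSH;	/* retain this PCID's cached TLB entries */
	native_write_cr3(cr3);
}
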
18569diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18570index 86fc2bb..bd5049a 100644
18571--- a/arch/x86/include/asm/ptrace.h
18572+++ b/arch/x86/include/asm/ptrace.h
18573@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18574 }
18575
18576 /*
18577- * user_mode_vm(regs) determines whether a register set came from user mode.
18578+ * user_mode(regs) determines whether a register set came from user mode.
18579 * This is true if V8086 mode was enabled OR if the register set was from
18580 * protected mode with RPL-3 CS value. This tricky test checks that with
18581 * one comparison. Many places in the kernel can bypass this full check
18582- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18583+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18584+ * be used.
18585 */
18586-static inline int user_mode(struct pt_regs *regs)
18587+static inline int user_mode_novm(struct pt_regs *regs)
18588 {
18589 #ifdef CONFIG_X86_32
18590 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18591 #else
18592- return !!(regs->cs & 3);
18593+ return !!(regs->cs & SEGMENT_RPL_MASK);
18594 #endif
18595 }
18596
18597-static inline int user_mode_vm(struct pt_regs *regs)
18598+static inline int user_mode(struct pt_regs *regs)
18599 {
18600 #ifdef CONFIG_X86_32
18601 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18602 USER_RPL;
18603 #else
18604- return user_mode(regs);
18605+ return user_mode_novm(regs);
18606 #endif
18607 }
18608
18609@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18610 #ifdef CONFIG_X86_64
18611 static inline bool user_64bit_mode(struct pt_regs *regs)
18612 {
18613+ unsigned long cs = regs->cs & 0xffff;
18614 #ifndef CONFIG_PARAVIRT
18615 /*
18616 * On non-paravirt systems, this is the only long mode CPL 3
18617 * selector. We do not allow long mode selectors in the LDT.
18618 */
18619- return regs->cs == __USER_CS;
18620+ return cs == __USER_CS;
18621 #else
18622 /* Headers are too twisted for this to go in paravirt.h. */
18623- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18624+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18625 #endif
18626 }
18627
18628@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18629 * Traps from the kernel do not save sp and ss.
18630 * Use the helper function to retrieve sp.
18631 */
18632- if (offset == offsetof(struct pt_regs, sp) &&
18633- regs->cs == __KERNEL_CS)
18634- return kernel_stack_pointer(regs);
18635+ if (offset == offsetof(struct pt_regs, sp)) {
18636+ unsigned long cs = regs->cs & 0xffff;
18637+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18638+ return kernel_stack_pointer(regs);
18639+ }
18640 #endif
18641 return *(unsigned long *)((unsigned long)regs + offset);
18642 }
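
Annotation: the ptrace.h hunk swaps the historical names so the common spelling is the safe one: the full test that also catches V8086 mode is now user_mode(), and the cheap RPL-only test becomes user_mode_novm(). Masking the saved selector to 16 bits matters because pt_regs stores cs in a word-sized slot whose upper bits are not guaranteed to be zero on every entry path. The 32-bit flavour of the test boils down to this sketch (the constants are the stock asm/segment.h ones):

static inline int cs_came_from_user(unsigned long saved_cs)
{
	unsigned long cs = saved_cs & 0xffff;		/* the selector proper */

	return (cs & SEGMENT_RPL_MASK) == USER_RPL;	/* RPL 3 means ring 3 */
}
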
18643diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18644index ae0e241..e80b10b 100644
18645--- a/arch/x86/include/asm/qrwlock.h
18646+++ b/arch/x86/include/asm/qrwlock.h
18647@@ -7,8 +7,8 @@
18648 #define queue_write_unlock queue_write_unlock
18649 static inline void queue_write_unlock(struct qrwlock *lock)
18650 {
18651- barrier();
18652- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18653+ barrier();
18654+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18655 }
18656 #endif
18657
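
Annotation: ACCESS_ONCE_RW() exists because elsewhere in this patch plain ACCESS_ONCE() becomes a const-qualified access, so an accidental write through it fails to compile; deliberate writes such as this unlock store must say so explicitly. Approximately as follows (reconstructed from memory of the patch's compiler.h hunk, so treat it as an approximation):

#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))	/* reads only */
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))		/* writes allowed */
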
18658diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18659index 9c6b890..5305f53 100644
18660--- a/arch/x86/include/asm/realmode.h
18661+++ b/arch/x86/include/asm/realmode.h
18662@@ -22,16 +22,14 @@ struct real_mode_header {
18663 #endif
18664 /* APM/BIOS reboot */
18665 u32 machine_real_restart_asm;
18666-#ifdef CONFIG_X86_64
18667 u32 machine_real_restart_seg;
18668-#endif
18669 };
18670
18671 /* This must match data at trampoline_32/64.S */
18672 struct trampoline_header {
18673 #ifdef CONFIG_X86_32
18674 u32 start;
18675- u16 gdt_pad;
18676+ u16 boot_cs;
18677 u16 gdt_limit;
18678 u32 gdt_base;
18679 #else
18680diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18681index a82c4f1..ac45053 100644
18682--- a/arch/x86/include/asm/reboot.h
18683+++ b/arch/x86/include/asm/reboot.h
18684@@ -6,13 +6,13 @@
18685 struct pt_regs;
18686
18687 struct machine_ops {
18688- void (*restart)(char *cmd);
18689- void (*halt)(void);
18690- void (*power_off)(void);
18691+ void (* __noreturn restart)(char *cmd);
18692+ void (* __noreturn halt)(void);
18693+ void (* __noreturn power_off)(void);
18694 void (*shutdown)(void);
18695 void (*crash_shutdown)(struct pt_regs *);
18696- void (*emergency_restart)(void);
18697-};
18698+ void (* __noreturn emergency_restart)(void);
18699+} __no_const;
18700
18701 extern struct machine_ops machine_ops;
18702
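
Annotation: __no_const is consumed by grsecurity's constify GCC plugin, which otherwise forces structures made up entirely of function pointers to be const and therefore read-only after boot. machine_ops is legitimately reassigned at runtime (Xen and EFI install their own reboot paths), so it opts out, while the __noreturn annotations record that restart/halt/power_off never return to their caller. The shape of the trade-off, as a sketch (my_ops is hypothetical):

struct my_ops {
	void (*doit)(void);
} __no_const;			/* stays writable despite being all function pointers */

static struct my_ops ops;	/* can still be re-pointed at runtime */
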
18703diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18704index 8f7866a..e442f20 100644
18705--- a/arch/x86/include/asm/rmwcc.h
18706+++ b/arch/x86/include/asm/rmwcc.h
18707@@ -3,7 +3,34 @@
18708
18709 #ifdef CC_HAVE_ASM_GOTO
18710
18711-#define __GEN_RMWcc(fullop, var, cc, ...) \
18712+#ifdef CONFIG_PAX_REFCOUNT
18713+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18714+do { \
18715+ asm_volatile_goto (fullop \
18716+ ";jno 0f\n" \
18717+ fullantiop \
18718+ ";int $4\n0:\n" \
18719+ _ASM_EXTABLE(0b, 0b) \
18720+ ";j" cc " %l[cc_label]" \
18721+ : : "m" (var), ## __VA_ARGS__ \
18722+ : "memory" : cc_label); \
18723+ return 0; \
18724+cc_label: \
18725+ return 1; \
18726+} while (0)
18727+#else
18728+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18729+do { \
18730+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18731+ : : "m" (var), ## __VA_ARGS__ \
18732+ : "memory" : cc_label); \
18733+ return 0; \
18734+cc_label: \
18735+ return 1; \
18736+} while (0)
18737+#endif
18738+
18739+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18740 do { \
18741 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18742 : : "m" (var), ## __VA_ARGS__ \
18743@@ -13,15 +40,46 @@ cc_label: \
18744 return 1; \
18745 } while (0)
18746
18747-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18748- __GEN_RMWcc(op " " arg0, var, cc)
18749+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18750+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18751
18752-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18753- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18754+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18755+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18756+
18757+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18758+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18759+
18760+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18761+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18762
18763 #else /* !CC_HAVE_ASM_GOTO */
18764
18765-#define __GEN_RMWcc(fullop, var, cc, ...) \
18766+#ifdef CONFIG_PAX_REFCOUNT
18767+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18768+do { \
18769+ char c; \
18770+ asm volatile (fullop \
18771+ ";jno 0f\n" \
18772+ fullantiop \
18773+ ";int $4\n0:\n" \
18774+ _ASM_EXTABLE(0b, 0b) \
18775+ "; set" cc " %1" \
18776+ : "+m" (var), "=qm" (c) \
18777+ : __VA_ARGS__ : "memory"); \
18778+ return c != 0; \
18779+} while (0)
18780+#else
18781+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18782+do { \
18783+ char c; \
18784+ asm volatile (fullop "; set" cc " %1" \
18785+ : "+m" (var), "=qm" (c) \
18786+ : __VA_ARGS__ : "memory"); \
18787+ return c != 0; \
18788+} while (0)
18789+#endif
18790+
18791+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18792 do { \
18793 char c; \
18794 asm volatile (fullop "; set" cc " %1" \
18795@@ -30,11 +88,17 @@ do { \
18796 return c != 0; \
18797 } while (0)
18798
18799-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18800- __GEN_RMWcc(op " " arg0, var, cc)
18801+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18802+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18803+
18804+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18805+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18806+
18807+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18808+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18809
18810-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18811- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18812+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18813+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18814
18815 #endif /* CC_HAVE_ASM_GOTO */
18816
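
Annotation: this rewrite is the heart of PAX_REFCOUNT at the rmwcc level. Each checked macro now carries an "anti-op" (incl to undo decl, sub to undo add): the emitted sequence performs the operation, jno falls through only when the Overflow Flag is set, the anti-op rolls the value back, and int $4 raises the #OF trap that PaX's handler turns into a refcount-overflow report; the self-referencing _ASM_EXTABLE entry lets execution resume after the trap. The *_unchecked variants keep wrapping semantics for counters that are allowed to wrap. A compilable userspace sketch of the same detect-and-undo dance, signalling through a return value instead of int $4:

#include <limits.h>
#include <stdio.h>

/* increment with overflow detection and rollback (x86, GCC or Clang) */
static int checked_inc(int *v)
{
	int ovf = 0;

	asm volatile("incl %0\n\t"
		     "jno 1f\n\t"		/* OF clear: the increment stands */
		     "decl %0\n\t"		/* anti-op: undo the increment */
		     "movl $1, %1\n"		/* record the overflow */
		     "1:"
		     : "+m" (*v), "+r" (ovf)
		     :
		     : "cc");
	return ovf;
}

int main(void)
{
	int x = INT_MAX;

	/* prints: overflow=1 value=2147483647 (the wrap was undone) */
	printf("overflow=%d value=%d\n", checked_inc(&x), x);
	return 0;
}
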
18817diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18818index cad82c9..2e5c5c1 100644
18819--- a/arch/x86/include/asm/rwsem.h
18820+++ b/arch/x86/include/asm/rwsem.h
18821@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18822 {
18823 asm volatile("# beginning down_read\n\t"
18824 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18825+
18826+#ifdef CONFIG_PAX_REFCOUNT
18827+ "jno 0f\n"
18828+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18829+ "int $4\n0:\n"
18830+ _ASM_EXTABLE(0b, 0b)
18831+#endif
18832+
18833 /* adds 0x00000001 */
18834 " jns 1f\n"
18835 " call call_rwsem_down_read_failed\n"
18836@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18837 "1:\n\t"
18838 " mov %1,%2\n\t"
18839 " add %3,%2\n\t"
18840+
18841+#ifdef CONFIG_PAX_REFCOUNT
18842+ "jno 0f\n"
18843+ "sub %3,%2\n"
18844+ "int $4\n0:\n"
18845+ _ASM_EXTABLE(0b, 0b)
18846+#endif
18847+
18848 " jle 2f\n\t"
18849 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18850 " jnz 1b\n\t"
18851@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18852 long tmp;
18853 asm volatile("# beginning down_write\n\t"
18854 LOCK_PREFIX " xadd %1,(%2)\n\t"
18855+
18856+#ifdef CONFIG_PAX_REFCOUNT
18857+ "jno 0f\n"
18858+ "mov %1,(%2)\n"
18859+ "int $4\n0:\n"
18860+ _ASM_EXTABLE(0b, 0b)
18861+#endif
18862+
18863 /* adds 0xffff0001, returns the old value */
18864 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18865 /* was the active mask 0 before? */
18866@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18867 long tmp;
18868 asm volatile("# beginning __up_read\n\t"
18869 LOCK_PREFIX " xadd %1,(%2)\n\t"
18870+
18871+#ifdef CONFIG_PAX_REFCOUNT
18872+ "jno 0f\n"
18873+ "mov %1,(%2)\n"
18874+ "int $4\n0:\n"
18875+ _ASM_EXTABLE(0b, 0b)
18876+#endif
18877+
18878 /* subtracts 1, returns the old value */
18879 " jns 1f\n\t"
18880 " call call_rwsem_wake\n" /* expects old value in %edx */
18881@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18882 long tmp;
18883 asm volatile("# beginning __up_write\n\t"
18884 LOCK_PREFIX " xadd %1,(%2)\n\t"
18885+
18886+#ifdef CONFIG_PAX_REFCOUNT
18887+ "jno 0f\n"
18888+ "mov %1,(%2)\n"
18889+ "int $4\n0:\n"
18890+ _ASM_EXTABLE(0b, 0b)
18891+#endif
18892+
18893 /* subtracts 0xffff0001, returns the old value */
18894 " jns 1f\n\t"
18895 " call call_rwsem_wake\n" /* expects old value in %edx */
18896@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18897 {
18898 asm volatile("# beginning __downgrade_write\n\t"
18899 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18900+
18901+#ifdef CONFIG_PAX_REFCOUNT
18902+ "jno 0f\n"
18903+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18904+ "int $4\n0:\n"
18905+ _ASM_EXTABLE(0b, 0b)
18906+#endif
18907+
18908 /*
18909 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18910 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18911@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18912 */
18913 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18914 {
18915- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18916+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18917+
18918+#ifdef CONFIG_PAX_REFCOUNT
18919+ "jno 0f\n"
18920+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18921+ "int $4\n0:\n"
18922+ _ASM_EXTABLE(0b, 0b)
18923+#endif
18924+
18925 : "+m" (sem->count)
18926 : "er" (delta));
18927 }
18928@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18929 */
18930 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
18931 {
18932- return delta + xadd(&sem->count, delta);
18933+ return delta + xadd_check_overflow(&sem->count, delta);
18934 }
18935
18936 #endif /* __KERNEL__ */
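
Annotation: the rwsem fast paths apply the same jno/int $4 pattern, with one wrinkle worth noting. For the xadd-based paths the undo is not more arithmetic but a plain store, mov %1,(%2): xadd leaves the pre-add value in its register operand, and restoring it cannot itself overflow. A userspace sketch (x86-64):

#include <limits.h>
#include <stdio.h>

/* exchange-and-add with rollback on signed overflow, as in __up_write above */
static long xadd_checked(long *count, long delta, int *ovf)
{
	long old = delta;

	asm volatile("lock xaddq %0, %1\n\t"	/* %0 gets the old value, memory gets old+delta */
		     "jno 1f\n\t"
		     "movq %0, %1\n\t"		/* rollback: store the old value back */
		     "movl $1, %2\n"
		     "1:"
		     : "+r" (old), "+m" (*count), "+r" (*ovf)
		     :
		     : "cc");
	return old;
}

int main(void)
{
	long c = LONG_MAX;
	int ovf = 0;

	xadd_checked(&c, 1, &ovf);
	printf("ovf=%d c=%ld\n", ovf, c);	/* ovf=1, c is unchanged */
	return 0;
}
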
18937diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
18938index db257a5..b91bc77 100644
18939--- a/arch/x86/include/asm/segment.h
18940+++ b/arch/x86/include/asm/segment.h
18941@@ -73,10 +73,15 @@
18942 * 26 - ESPFIX small SS
18943 * 27 - per-cpu [ offset to per-cpu data area ]
18944 * 28 - stack_canary-20 [ for stack protector ]
18945- * 29 - unused
18946- * 30 - unused
18947+ * 29 - PCI BIOS CS
18948+ * 30 - PCI BIOS DS
18949 * 31 - TSS for double fault handler
18950 */
18951+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
18952+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
18953+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
18954+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
18955+
18956 #define GDT_ENTRY_TLS_MIN 6
18957 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
18958
18959@@ -88,6 +93,8 @@
18960
18961 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
18962
18963+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
18964+
18965 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
18966
18967 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
18968@@ -113,6 +120,12 @@
18969 #define __KERNEL_STACK_CANARY 0
18970 #endif
18971
18972+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
18973+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
18974+
18975+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
18976+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
18977+
18978 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
18979
18980 /*
18981@@ -140,7 +153,7 @@
18982 */
18983
18984 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
18985-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
18986+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
18987
18988
18989 #else
18990@@ -164,6 +177,8 @@
18991 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
18992 #define __USER32_DS __USER_DS
18993
18994+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
18995+
18996 #define GDT_ENTRY_TSS 8 /* needs two entries */
18997 #define GDT_ENTRY_LDT 10 /* needs two entries */
18998 #define GDT_ENTRY_TLS_MIN 12
18999@@ -172,6 +187,8 @@
19000 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19001 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19002
19003+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19004+
19005 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19006 #define FS_TLS 0
19007 #define GS_TLS 1
19008@@ -179,12 +196,14 @@
19009 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19010 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19011
19012-#define GDT_ENTRIES 16
19013+#define GDT_ENTRIES 17
19014
19015 #endif
19016
19017 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19018+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19019 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19020+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19021 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19022 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19023 #ifndef CONFIG_PARAVIRT
19024@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
19025 {
19026 unsigned long __limit;
19027 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19028- return __limit + 1;
19029+ return __limit;
19030 }
19031
19032 #endif /* !__ASSEMBLY__ */
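
Annotation: two separate things happen in segment.h. New descriptors are carved out (an alternate kernel code segment for KERNEXEC, a kernel data segment for UDEREF, dedicated PCI BIOS segments), and every selector follows the x86 rule selector = index*8 | RPL, which is why user-mode entries add 3. Independently, get_limit() loses a long-standing off-by-one: lsl already yields the limit as the offset of the last valid byte, so returning __limit + 1 wrapped a flat 4 GiB segment's 0xffffffff to 0. The wrap in miniature:

#include <stdio.h>

int main(void)
{
	unsigned int lsl_result = 0xffffffffu;	/* flat 4 GiB segment: limit is the last byte */

	printf("old get_limit(): %u\n", lsl_result + 1);	/* wraps to 0 */
	printf("new get_limit(): %u\n", lsl_result);		/* 4294967295 */
	return 0;
}
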
19033diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19034index 8d3120f..352b440 100644
19035--- a/arch/x86/include/asm/smap.h
19036+++ b/arch/x86/include/asm/smap.h
19037@@ -25,11 +25,40 @@
19038
19039 #include <asm/alternative-asm.h>
19040
19041+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19042+#define ASM_PAX_OPEN_USERLAND \
19043+ 661: jmp 663f; \
19044+ .pushsection .altinstr_replacement, "a" ; \
19045+ 662: pushq %rax; nop; \
19046+ .popsection ; \
19047+ .pushsection .altinstructions, "a" ; \
19048+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19049+ .popsection ; \
19050+ call __pax_open_userland; \
19051+ popq %rax; \
19052+ 663:
19053+
19054+#define ASM_PAX_CLOSE_USERLAND \
19055+ 661: jmp 663f; \
19056+ .pushsection .altinstr_replacement, "a" ; \
19057+ 662: pushq %rax; nop; \
19058+ .popsection; \
19059+ .pushsection .altinstructions, "a" ; \
19060+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19061+ .popsection; \
19062+ call __pax_close_userland; \
19063+ popq %rax; \
19064+ 663:
19065+#else
19066+#define ASM_PAX_OPEN_USERLAND
19067+#define ASM_PAX_CLOSE_USERLAND
19068+#endif
19069+
19070 #ifdef CONFIG_X86_SMAP
19071
19072 #define ASM_CLAC \
19073 661: ASM_NOP3 ; \
19074- .pushsection .altinstr_replacement, "ax" ; \
19075+ .pushsection .altinstr_replacement, "a" ; \
19076 662: __ASM_CLAC ; \
19077 .popsection ; \
19078 .pushsection .altinstructions, "a" ; \
19079@@ -38,7 +67,7 @@
19080
19081 #define ASM_STAC \
19082 661: ASM_NOP3 ; \
19083- .pushsection .altinstr_replacement, "ax" ; \
19084+ .pushsection .altinstr_replacement, "a" ; \
19085 662: __ASM_STAC ; \
19086 .popsection ; \
19087 .pushsection .altinstructions, "a" ; \
19088@@ -56,6 +85,37 @@
19089
19090 #include <asm/alternative.h>
19091
19092+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19093+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19094+
19095+extern void __pax_open_userland(void);
19096+static __always_inline unsigned long pax_open_userland(void)
19097+{
19098+
19099+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19100+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19101+ :
19102+ : [open] "i" (__pax_open_userland)
19103+ : "memory", "rax");
19104+#endif
19105+
19106+ return 0;
19107+}
19108+
19109+extern void __pax_close_userland(void);
19110+static __always_inline unsigned long pax_close_userland(void)
19111+{
19112+
19113+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19114+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19115+ :
19116+ : [close] "i" (__pax_close_userland)
19117+ : "memory", "rax");
19118+#endif
19119+
19120+ return 0;
19121+}
19122+
19123 #ifdef CONFIG_X86_SMAP
19124
19125 static __always_inline void clac(void)
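
Annotation: the ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND stubs and their C counterparts lean on the kernel's alternatives machinery. The site assembles to a short jmp (asm) or a 5-byte NOP (C), and apply_alternatives() patches in the call to __pax_open_userland()/__pax_close_userland() only on CPUs flagged X86_FEATURE_STRONGUDEREF; %rax is saved around the call because the helper may clobber it. The "ax" to "a" section-flag change fits the same model: replacement bytes are only ever copied over the live site, never executed in place, so the section needs allocation but not execute rights. Each patch site is described by a record along these lines (a sketch in the spirit of the 3.19-era struct alt_instr, not guaranteed byte-for-byte):

#include <stdint.h>

struct alt_instr_sketch {
	int32_t  instr_offset;		/* the original site, relative to this record */
	int32_t  repl_offset;		/* the replacement bytes, relative */
	uint16_t cpuid;			/* feature bit, e.g. X86_FEATURE_STRONGUDEREF */
	uint8_t  instrlen;		/* bytes available at the site */
	uint8_t  replacementlen;	/* bytes to copy in */
};
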
19126diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19127index 8cd1cc3..827e09e 100644
19128--- a/arch/x86/include/asm/smp.h
19129+++ b/arch/x86/include/asm/smp.h
19130@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19131 /* cpus sharing the last level cache: */
19132 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19133 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19134-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19135+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19136
19137 static inline struct cpumask *cpu_sibling_mask(int cpu)
19138 {
19139@@ -78,7 +78,7 @@ struct smp_ops {
19140
19141 void (*send_call_func_ipi)(const struct cpumask *mask);
19142 void (*send_call_func_single_ipi)(int cpu);
19143-};
19144+} __no_const;
19145
19146 /* Globals due to paravirt */
19147 extern void set_cpu_sibling_map(int cpu);
19148@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19149 extern int safe_smp_processor_id(void);
19150
19151 #elif defined(CONFIG_X86_64_SMP)
19152-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19153-
19154-#define stack_smp_processor_id() \
19155-({ \
19156- struct thread_info *ti; \
19157- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19158- ti->cpu; \
19159-})
19160+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19161+#define stack_smp_processor_id() raw_smp_processor_id()
19162 #define safe_smp_processor_id() smp_processor_id()
19163
19164 #endif
19165diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19166index 6a99859..03cb807 100644
19167--- a/arch/x86/include/asm/stackprotector.h
19168+++ b/arch/x86/include/asm/stackprotector.h
19169@@ -47,7 +47,7 @@
19170 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19171 */
19172 #define GDT_STACK_CANARY_INIT \
19173- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19174+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19175
19176 /*
19177 * Initialize the stackprotector canary value.
19178@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19179
19180 static inline void load_stack_canary_segment(void)
19181 {
19182-#ifdef CONFIG_X86_32
19183+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19184 asm volatile ("mov %0, %%gs" : : "r" (0));
19185 #endif
19186 }
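
Annotation: the one-nybble change from 0x18 to 0x17 is a limit fix. An x86 descriptor limit names the last addressable byte, not the size, and the canary segment must cover struct stack_canary exactly: 20 pad bytes plus the canary word that GCC's i386 stack protector reads at %gs:20, 24 (0x18) bytes in all, hence a limit of 0x17. The old value let the segment reach one byte too far, which the patch tightens now that UDEREF leans on segment limits. The layout being covered (i386 assumed):

struct stack_canary_sketch {		/* mirrors the stock struct stack_canary */
	char		__pad[20];
	unsigned long	canary;		/* 4 bytes on i386, read as %gs:20 */
};
/* descriptor limit = sizeof(struct stack_canary_sketch) - 1 = 0x17 */
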
19187diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19188index 70bbe39..4ae2bd4 100644
19189--- a/arch/x86/include/asm/stacktrace.h
19190+++ b/arch/x86/include/asm/stacktrace.h
19191@@ -11,28 +11,20 @@
19192
19193 extern int kstack_depth_to_print;
19194
19195-struct thread_info;
19196+struct task_struct;
19197 struct stacktrace_ops;
19198
19199-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19200- unsigned long *stack,
19201- unsigned long bp,
19202- const struct stacktrace_ops *ops,
19203- void *data,
19204- unsigned long *end,
19205- int *graph);
19206+typedef unsigned long walk_stack_t(struct task_struct *task,
19207+ void *stack_start,
19208+ unsigned long *stack,
19209+ unsigned long bp,
19210+ const struct stacktrace_ops *ops,
19211+ void *data,
19212+ unsigned long *end,
19213+ int *graph);
19214
19215-extern unsigned long
19216-print_context_stack(struct thread_info *tinfo,
19217- unsigned long *stack, unsigned long bp,
19218- const struct stacktrace_ops *ops, void *data,
19219- unsigned long *end, int *graph);
19220-
19221-extern unsigned long
19222-print_context_stack_bp(struct thread_info *tinfo,
19223- unsigned long *stack, unsigned long bp,
19224- const struct stacktrace_ops *ops, void *data,
19225- unsigned long *end, int *graph);
19226+extern walk_stack_t print_context_stack;
19227+extern walk_stack_t print_context_stack_bp;
19228
19229 /* Generic stack tracer with callbacks */
19230
19231@@ -40,7 +32,7 @@ struct stacktrace_ops {
19232 void (*address)(void *data, unsigned long address, int reliable);
19233 /* On negative return stop dumping */
19234 int (*stack)(void *data, char *name);
19235- walk_stack_t walk_stack;
19236+ walk_stack_t *walk_stack;
19237 };
19238
19239 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
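
Annotation: a small C idiom makes this hunk tick. walk_stack_t changes from a pointer typedef to a function typedef, so a single typedef can both declare the external walkers (extern walk_stack_t print_context_stack;) and give the ops field its pointer type (walk_stack_t *walk_stack). In miniature:

typedef unsigned long walker_t(unsigned long *stack);	/* a function type */

extern walker_t my_walker;	/* declares: unsigned long my_walker(unsigned long *) */

struct walk_ops {
	walker_t *walk;		/* a pointer to any such function */
};
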
19240diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19241index 751bf4b..a1278b5 100644
19242--- a/arch/x86/include/asm/switch_to.h
19243+++ b/arch/x86/include/asm/switch_to.h
19244@@ -112,7 +112,7 @@ do { \
19245 "call __switch_to\n\t" \
19246 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19247 __switch_canary \
19248- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19249+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19250 "movq %%rax,%%rdi\n\t" \
19251 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19252 "jnz ret_from_fork\n\t" \
19253@@ -123,7 +123,7 @@ do { \
19254 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19255 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19256 [_tif_fork] "i" (_TIF_FORK), \
19257- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19258+ [thread_info] "m" (current_tinfo), \
19259 [current_task] "m" (current_task) \
19260 __switch_canary_iparam \
19261 : "memory", "cc" __EXTRA_CLOBBER)
19262diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19263index 547e344..6be1175 100644
19264--- a/arch/x86/include/asm/thread_info.h
19265+++ b/arch/x86/include/asm/thread_info.h
19266@@ -24,7 +24,6 @@ struct exec_domain;
19267 #include <linux/atomic.h>
19268
19269 struct thread_info {
19270- struct task_struct *task; /* main task structure */
19271 struct exec_domain *exec_domain; /* execution domain */
19272 __u32 flags; /* low level flags */
19273 __u32 status; /* thread synchronous flags */
19274@@ -33,13 +32,13 @@ struct thread_info {
19275 mm_segment_t addr_limit;
19276 struct restart_block restart_block;
19277 void __user *sysenter_return;
19278+ unsigned long lowest_stack;
19279 unsigned int sig_on_uaccess_error:1;
19280 unsigned int uaccess_err:1; /* uaccess failed */
19281 };
19282
19283-#define INIT_THREAD_INFO(tsk) \
19284+#define INIT_THREAD_INFO \
19285 { \
19286- .task = &tsk, \
19287 .exec_domain = &default_exec_domain, \
19288 .flags = 0, \
19289 .cpu = 0, \
19290@@ -50,7 +49,7 @@ struct thread_info {
19291 }, \
19292 }
19293
19294-#define init_thread_info (init_thread_union.thread_info)
19295+#define init_thread_info (init_thread_union.stack)
19296 #define init_stack (init_thread_union.stack)
19297
19298 #else /* !__ASSEMBLY__ */
19299@@ -91,6 +90,7 @@ struct thread_info {
19300 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19301 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19302 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19303+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19304
19305 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19306 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19307@@ -115,17 +115,18 @@ struct thread_info {
19308 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19309 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19310 #define _TIF_X32 (1 << TIF_X32)
19311+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19312
19313 /* work to do in syscall_trace_enter() */
19314 #define _TIF_WORK_SYSCALL_ENTRY \
19315 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19316 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19317- _TIF_NOHZ)
19318+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19319
19320 /* work to do in syscall_trace_leave() */
19321 #define _TIF_WORK_SYSCALL_EXIT \
19322 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19323- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19324+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19325
19326 /* work to do on interrupt/exception return */
19327 #define _TIF_WORK_MASK \
19328@@ -136,7 +137,7 @@ struct thread_info {
19329 /* work to do on any return to user space */
19330 #define _TIF_ALLWORK_MASK \
19331 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19332- _TIF_NOHZ)
19333+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19334
19335 /* Only used for 64 bit */
19336 #define _TIF_DO_NOTIFY_MASK \
19337@@ -151,7 +152,6 @@ struct thread_info {
19338 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19339
19340 #define STACK_WARN (THREAD_SIZE/8)
19341-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19342
19343 /*
19344 * macros/functions for gaining access to the thread information structure
19345@@ -162,26 +162,18 @@ struct thread_info {
19346
19347 DECLARE_PER_CPU(unsigned long, kernel_stack);
19348
19349+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19350+
19351 static inline struct thread_info *current_thread_info(void)
19352 {
19353- struct thread_info *ti;
19354- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19355- KERNEL_STACK_OFFSET - THREAD_SIZE);
19356- return ti;
19357+ return this_cpu_read_stable(current_tinfo);
19358 }
19359
19360 #else /* !__ASSEMBLY__ */
19361
19362 /* how to get the thread information struct from ASM */
19363 #define GET_THREAD_INFO(reg) \
19364- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19365- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19366-
19367-/*
19368- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19369- * a certain register (to be used in assembler memory operands).
19370- */
19371-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19372+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19373
19374 #endif
19375
19376@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
19377 extern void arch_task_cache_init(void);
19378 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19379 extern void arch_release_task_struct(struct task_struct *tsk);
19380+
19381+#define __HAVE_THREAD_FUNCTIONS
19382+#define task_thread_info(task) (&(task)->tinfo)
19383+#define task_stack_page(task) ((task)->stack)
19384+#define setup_thread_stack(p, org) do {} while (0)
19385+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19386+
19387 #endif
19388 #endif /* _ASM_X86_THREAD_INFO_H */
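
Annotation: structurally, thread_info loses its task back-pointer and is no longer found through the stack. A per-cpu pointer, current_tinfo, is maintained across context switches (see the switch_to.h hunk above), task_thread_info() becomes &task->tinfo, and KERNEL_STACK_OFFSET disappears; the new lowest_stack field supports PaX's STACKLEAK-style stack erasure. Stock 3.19 located thread_info at a fixed offset below the per-cpu stack top, as in this sketch of the retired scheme:

/* the retired lookup (compare the lines removed above): any leak of a
 * kernel stack address also revealed thread_info, flags and addr_limit
 * included, and a stack overflow could trample them */
static inline struct thread_info *ti_from_stack_top(unsigned long stack_top)
{
	return (struct thread_info *)(stack_top + KERNEL_STACK_OFFSET - THREAD_SIZE);
}
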
19389diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19390index 04905bf..1178cdf 100644
19391--- a/arch/x86/include/asm/tlbflush.h
19392+++ b/arch/x86/include/asm/tlbflush.h
19393@@ -17,18 +17,44 @@
19394
19395 static inline void __native_flush_tlb(void)
19396 {
19397+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19398+ u64 descriptor[2];
19399+
19400+ descriptor[0] = PCID_KERNEL;
19401+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19402+ return;
19403+ }
19404+
19405+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19406+ if (static_cpu_has(X86_FEATURE_PCID)) {
19407+ unsigned int cpu = raw_get_cpu();
19408+
19409+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19410+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19411+ raw_put_cpu_no_resched();
19412+ return;
19413+ }
19414+#endif
19415+
19416 native_write_cr3(native_read_cr3());
19417 }
19418
19419 static inline void __native_flush_tlb_global_irq_disabled(void)
19420 {
19421- unsigned long cr4;
19422+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19423+ u64 descriptor[2];
19424
19425- cr4 = native_read_cr4();
19426- /* clear PGE */
19427- native_write_cr4(cr4 & ~X86_CR4_PGE);
19428- /* write old PGE again and flush TLBs */
19429- native_write_cr4(cr4);
19430+ descriptor[0] = PCID_KERNEL;
19431+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19432+ } else {
19433+ unsigned long cr4;
19434+
19435+ cr4 = native_read_cr4();
19436+ /* clear PGE */
19437+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19438+ /* write old PGE again and flush TLBs */
19439+ native_write_cr4(cr4);
19440+ }
19441 }
19442
19443 static inline void __native_flush_tlb_global(void)
19444@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19445
19446 static inline void __native_flush_tlb_single(unsigned long addr)
19447 {
19448+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19449+ u64 descriptor[2];
19450+
19451+ descriptor[0] = PCID_KERNEL;
19452+ descriptor[1] = addr;
19453+
19454+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19455+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19456+ if (addr < TASK_SIZE_MAX)
19457+ descriptor[1] += pax_user_shadow_base;
19458+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19459+ }
19460+
19461+ descriptor[0] = PCID_USER;
19462+ descriptor[1] = addr;
19463+#endif
19464+
19465+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19466+ return;
19467+ }
19468+
19469+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19470+ if (static_cpu_has(X86_FEATURE_PCID)) {
19471+ unsigned int cpu = raw_get_cpu();
19472+
19473+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19474+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19475+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19476+ raw_put_cpu_no_resched();
19477+
19478+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19479+ addr += pax_user_shadow_base;
19480+ }
19481+#endif
19482+
19483 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19484 }
19485
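
Annotation: all three flush primitives now prefer INVPCID where available. The instruction takes a mode in a register, per the constants added in processor.h above (0 flushes one address in one PCID, 1 a whole PCID, 2 everything including globals, 3 everything non-global), plus a 16-byte in-memory descriptor whose first word is the PCID and whose second is the linear address used by mode 0. __ASM_INVPCID spells the opcode in raw bytes because 2015-era assemblers may not know the mnemonic. A wrapper with the same shape as the open-coded calls (the name is illustrative):

static inline void invpcid_sketch(unsigned long mode, unsigned long pcid,
				  unsigned long addr)
{
	u64 descriptor[2] = { pcid, addr };

	asm volatile(__ASM_INVPCID
		     : : "d" (&descriptor), "a" (mode) : "memory");
}
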
19486diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19487index 0d592e0..7430aad 100644
19488--- a/arch/x86/include/asm/uaccess.h
19489+++ b/arch/x86/include/asm/uaccess.h
19490@@ -7,6 +7,7 @@
19491 #include <linux/compiler.h>
19492 #include <linux/thread_info.h>
19493 #include <linux/string.h>
19494+#include <linux/spinlock.h>
19495 #include <asm/asm.h>
19496 #include <asm/page.h>
19497 #include <asm/smap.h>
19498@@ -29,7 +30,12 @@
19499
19500 #define get_ds() (KERNEL_DS)
19501 #define get_fs() (current_thread_info()->addr_limit)
19502+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19503+void __set_fs(mm_segment_t x);
19504+void set_fs(mm_segment_t x);
19505+#else
19506 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19507+#endif
19508
19509 #define segment_eq(a, b) ((a).seg == (b).seg)
19510
19511@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19512 * checks that the pointer is in the user space range - after calling
19513 * this function, memory access functions may still return -EFAULT.
19514 */
19515-#define access_ok(type, addr, size) \
19516- likely(!__range_not_ok(addr, size, user_addr_max()))
19517+extern int _cond_resched(void);
19518+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19519+#define access_ok(type, addr, size) \
19520+({ \
19521+ unsigned long __size = size; \
19522+ unsigned long __addr = (unsigned long)addr; \
19523+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19524+ if (__ret_ao && __size) { \
19525+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19526+ unsigned long __end_ao = __addr + __size - 1; \
19527+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19528+ while (__addr_ao <= __end_ao) { \
19529+ char __c_ao; \
19530+ __addr_ao += PAGE_SIZE; \
19531+ if (__size > PAGE_SIZE) \
19532+ _cond_resched(); \
19533+ if (__get_user(__c_ao, (char __user *)__addr)) \
19534+ break; \
19535+ if (type != VERIFY_WRITE) { \
19536+ __addr = __addr_ao; \
19537+ continue; \
19538+ } \
19539+ if (__put_user(__c_ao, (char __user *)__addr)) \
19540+ break; \
19541+ __addr = __addr_ao; \
19542+ } \
19543+ } \
19544+ } \
19545+ __ret_ao; \
19546+})
19547
19548 /*
19549 * The exception table consists of pairs of addresses relative to the
19550@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19551 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19552 __chk_user_ptr(ptr); \
19553 might_fault(); \
19554+ pax_open_userland(); \
19555 asm volatile("call __get_user_%P3" \
19556 : "=a" (__ret_gu), "=r" (__val_gu) \
19557 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19558 (x) = (__typeof__(*(ptr))) __val_gu; \
19559+ pax_close_userland(); \
19560 __ret_gu; \
19561 })
19562
19563@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19564 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19565 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19566
19567-
19568+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19569+#define __copyuser_seg "gs;"
19570+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19571+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19572+#else
19573+#define __copyuser_seg
19574+#define __COPYUSER_SET_ES
19575+#define __COPYUSER_RESTORE_ES
19576+#endif
19577
19578 #ifdef CONFIG_X86_32
19579 #define __put_user_asm_u64(x, addr, err, errret) \
19580 asm volatile(ASM_STAC "\n" \
19581- "1: movl %%eax,0(%2)\n" \
19582- "2: movl %%edx,4(%2)\n" \
19583+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19584+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19585 "3: " ASM_CLAC "\n" \
19586 ".section .fixup,\"ax\"\n" \
19587 "4: movl %3,%0\n" \
19588@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19589
19590 #define __put_user_asm_ex_u64(x, addr) \
19591 asm volatile(ASM_STAC "\n" \
19592- "1: movl %%eax,0(%1)\n" \
19593- "2: movl %%edx,4(%1)\n" \
19594+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19595+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19596 "3: " ASM_CLAC "\n" \
19597 _ASM_EXTABLE_EX(1b, 2b) \
19598 _ASM_EXTABLE_EX(2b, 3b) \
19599@@ -257,7 +301,8 @@ extern void __put_user_8(void);
19600 __typeof__(*(ptr)) __pu_val; \
19601 __chk_user_ptr(ptr); \
19602 might_fault(); \
19603- __pu_val = x; \
19604+ __pu_val = (x); \
19605+ pax_open_userland(); \
19606 switch (sizeof(*(ptr))) { \
19607 case 1: \
19608 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19609@@ -275,6 +320,7 @@ extern void __put_user_8(void);
19610 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19611 break; \
19612 } \
19613+ pax_close_userland(); \
19614 __ret_pu; \
19615 })
19616
19617@@ -355,8 +401,10 @@ do { \
19618 } while (0)
19619
19620 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19621+do { \
19622+ pax_open_userland(); \
19623 asm volatile(ASM_STAC "\n" \
19624- "1: mov"itype" %2,%"rtype"1\n" \
19625+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19626 "2: " ASM_CLAC "\n" \
19627 ".section .fixup,\"ax\"\n" \
19628 "3: mov %3,%0\n" \
19629@@ -364,8 +412,10 @@ do { \
19630 " jmp 2b\n" \
19631 ".previous\n" \
19632 _ASM_EXTABLE(1b, 3b) \
19633- : "=r" (err), ltype(x) \
19634- : "m" (__m(addr)), "i" (errret), "0" (err))
19635+ : "=r" (err), ltype (x) \
19636+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19637+ pax_close_userland(); \
19638+} while (0)
19639
19640 #define __get_user_size_ex(x, ptr, size) \
19641 do { \
19642@@ -389,7 +439,7 @@ do { \
19643 } while (0)
19644
19645 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19646- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19647+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19648 "2:\n" \
19649 _ASM_EXTABLE_EX(1b, 2b) \
19650 : ltype(x) : "m" (__m(addr)))
19651@@ -406,13 +456,24 @@ do { \
19652 int __gu_err; \
19653 unsigned long __gu_val; \
19654 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19655- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19656+ (x) = (__typeof__(*(ptr)))__gu_val; \
19657 __gu_err; \
19658 })
19659
19660 /* FIXME: this hack is definitely wrong -AK */
19661 struct __large_struct { unsigned long buf[100]; };
19662-#define __m(x) (*(struct __large_struct __user *)(x))
19663+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19664+#define ____m(x) \
19665+({ \
19666+ unsigned long ____x = (unsigned long)(x); \
19667+ if (____x < pax_user_shadow_base) \
19668+ ____x += pax_user_shadow_base; \
19669+ (typeof(x))____x; \
19670+})
19671+#else
19672+#define ____m(x) (x)
19673+#endif
19674+#define __m(x) (*(struct __large_struct __user *)____m(x))
19675
19676 /*
19677 * Tell gcc we read from memory instead of writing: this is because
19678@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
19679 * aliasing issues.
19680 */
19681 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19682+do { \
19683+ pax_open_userland(); \
19684 asm volatile(ASM_STAC "\n" \
19685- "1: mov"itype" %"rtype"1,%2\n" \
19686+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19687 "2: " ASM_CLAC "\n" \
19688 ".section .fixup,\"ax\"\n" \
19689 "3: mov %3,%0\n" \
19690@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
19691 ".previous\n" \
19692 _ASM_EXTABLE(1b, 3b) \
19693 : "=r"(err) \
19694- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19695+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19696+ pax_close_userland(); \
19697+} while (0)
19698
19699 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19700- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19701+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19702 "2:\n" \
19703 _ASM_EXTABLE_EX(1b, 2b) \
19704 : : ltype(x), "m" (__m(addr)))
19705@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
19706 */
19707 #define uaccess_try do { \
19708 current_thread_info()->uaccess_err = 0; \
19709+ pax_open_userland(); \
19710 stac(); \
19711 barrier();
19712
19713 #define uaccess_catch(err) \
19714 clac(); \
19715+ pax_close_userland(); \
19716 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19717 } while (0)
19718
19719@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
19720 * On error, the variable @x is set to zero.
19721 */
19722
19723+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19724+#define __get_user(x, ptr) get_user((x), (ptr))
19725+#else
19726 #define __get_user(x, ptr) \
19727 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19728+#endif
19729
19730 /**
19731 * __put_user: - Write a simple value into user space, with less checking.
19732@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
19733 * Returns zero on success, or -EFAULT on error.
19734 */
19735
19736+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19737+#define __put_user(x, ptr) put_user((x), (ptr))
19738+#else
19739 #define __put_user(x, ptr) \
19740 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19741+#endif
19742
19743 #define __get_user_unaligned __get_user
19744 #define __put_user_unaligned __put_user
19745@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
19746 #define get_user_ex(x, ptr) do { \
19747 unsigned long __gue_val; \
19748 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19749- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19750+ (x) = (__typeof__(*(ptr)))__gue_val; \
19751 } while (0)
19752
19753 #define put_user_try uaccess_try
19754@@ -531,7 +606,7 @@ extern __must_check long strlen_user(const char __user *str);
19755 extern __must_check long strnlen_user(const char __user *str, long n);
19756
19757 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19758-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19759+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19760
19761 extern void __cmpxchg_wrong_size(void)
19762 __compiletime_error("Bad argument size for cmpxchg");
19763@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
19764 __typeof__(ptr) __uval = (uval); \
19765 __typeof__(*(ptr)) __old = (old); \
19766 __typeof__(*(ptr)) __new = (new); \
19767+ pax_open_userland(); \
19768 switch (size) { \
19769 case 1: \
19770 { \
19771 asm volatile("\t" ASM_STAC "\n" \
19772- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19773+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19774 "2:\t" ASM_CLAC "\n" \
19775 "\t.section .fixup, \"ax\"\n" \
19776 "3:\tmov %3, %0\n" \
19777 "\tjmp 2b\n" \
19778 "\t.previous\n" \
19779 _ASM_EXTABLE(1b, 3b) \
19780- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19781+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19782 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19783 : "memory" \
19784 ); \
19785@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
19786 case 2: \
19787 { \
19788 asm volatile("\t" ASM_STAC "\n" \
19789- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19790+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19791 "2:\t" ASM_CLAC "\n" \
19792 "\t.section .fixup, \"ax\"\n" \
19793 "3:\tmov %3, %0\n" \
19794 "\tjmp 2b\n" \
19795 "\t.previous\n" \
19796 _ASM_EXTABLE(1b, 3b) \
19797- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19798+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19799 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19800 : "memory" \
19801 ); \
19802@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
19803 case 4: \
19804 { \
19805 asm volatile("\t" ASM_STAC "\n" \
19806- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19807+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19808 "2:\t" ASM_CLAC "\n" \
19809 "\t.section .fixup, \"ax\"\n" \
19810 "3:\tmov %3, %0\n" \
19811 "\tjmp 2b\n" \
19812 "\t.previous\n" \
19813 _ASM_EXTABLE(1b, 3b) \
19814- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19815+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19816 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19817 : "memory" \
19818 ); \
19819@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
19820 __cmpxchg_wrong_size(); \
19821 \
19822 asm volatile("\t" ASM_STAC "\n" \
19823- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19824+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19825 "2:\t" ASM_CLAC "\n" \
19826 "\t.section .fixup, \"ax\"\n" \
19827 "3:\tmov %3, %0\n" \
19828 "\tjmp 2b\n" \
19829 "\t.previous\n" \
19830 _ASM_EXTABLE(1b, 3b) \
19831- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19832+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19833 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19834 : "memory" \
19835 ); \
19836@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
19837 default: \
19838 __cmpxchg_wrong_size(); \
19839 } \
19840+ pax_close_userland(); \
19841 *__uval = __old; \
19842 __ret; \
19843 })
19844@@ -636,17 +713,6 @@ extern struct movsl_mask {
19845
19846 #define ARCH_HAS_NOCACHE_UACCESS 1
19847
19848-#ifdef CONFIG_X86_32
19849-# include <asm/uaccess_32.h>
19850-#else
19851-# include <asm/uaccess_64.h>
19852-#endif
19853-
19854-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19855- unsigned n);
19856-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19857- unsigned n);
19858-
19859 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19860 # define copy_user_diag __compiletime_error
19861 #else
19862@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19863 extern void copy_user_diag("copy_from_user() buffer size is too small")
19864 copy_from_user_overflow(void);
19865 extern void copy_user_diag("copy_to_user() buffer size is too small")
19866-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19867+copy_to_user_overflow(void);
19868
19869 #undef copy_user_diag
19870
19871@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19872
19873 extern void
19874 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19875-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19876+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19877 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19878
19879 #else
19880@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19881
19882 #endif
19883
19884+#ifdef CONFIG_X86_32
19885+# include <asm/uaccess_32.h>
19886+#else
19887+# include <asm/uaccess_64.h>
19888+#endif
19889+
19890 static inline unsigned long __must_check
19891 copy_from_user(void *to, const void __user *from, unsigned long n)
19892 {
19893- int sz = __compiletime_object_size(to);
19894+ size_t sz = __compiletime_object_size(to);
19895
19896 might_fault();
19897
19898@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19899 * case, and do only runtime checking for non-constant sizes.
19900 */
19901
19902- if (likely(sz < 0 || sz >= n))
19903- n = _copy_from_user(to, from, n);
19904- else if(__builtin_constant_p(n))
19905- copy_from_user_overflow();
19906- else
19907- __copy_from_user_overflow(sz, n);
19908+ if (likely(sz != (size_t)-1 && sz < n)) {
19909+ if(__builtin_constant_p(n))
19910+ copy_from_user_overflow();
19911+ else
19912+ __copy_from_user_overflow(sz, n);
19913+ } else if (access_ok(VERIFY_READ, from, n))
19914+ n = __copy_from_user(to, from, n);
19915+ else if ((long)n > 0)
19916+ memset(to, 0, n);
19917
19918 return n;
19919 }
19920@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19921 static inline unsigned long __must_check
19922 copy_to_user(void __user *to, const void *from, unsigned long n)
19923 {
19924- int sz = __compiletime_object_size(from);
19925+ size_t sz = __compiletime_object_size(from);
19926
19927 might_fault();
19928
19929 /* See the comment in copy_from_user() above. */
19930- if (likely(sz < 0 || sz >= n))
19931- n = _copy_to_user(to, from, n);
19932- else if(__builtin_constant_p(n))
19933- copy_to_user_overflow();
19934- else
19935- __copy_to_user_overflow(sz, n);
19936+ if (likely(sz != (size_t)-1 && sz < n)) {
19937+ if(__builtin_constant_p(n))
19938+ copy_to_user_overflow();
19939+ else
19940+ __copy_to_user_overflow(sz, n);
19941+ } else if (access_ok(VERIFY_WRITE, to, n))
19942+ n = __copy_to_user(to, from, n);
19943
19944 return n;
19945 }
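
The copy_from_user()/copy_to_user() rework above inverts the old object-size test: `sz` becomes a size_t, the signed `sz < 0` sentinel is replaced by `(size_t)-1` (what GCC's __builtin_object_size() returns when the size is unknown), the overflow diagnostics fire first, and the fallback path performs its own access_ok() check, zeroing the kernel buffer when the user range is invalid so no stale kernel memory leaks back to the caller. A minimal userspace sketch of the same object-size check, assuming GCC/Clang with -O2 so the builtin folds at the call site (checked_copy and everything else here are illustrative stand-ins, not kernel interfaces):

	#include <stdio.h>
	#include <string.h>

	/* Rejects copies that provably overflow the destination object. */
	static inline size_t checked_copy(void *to, const void *from, size_t n)
	{
		size_t sz = __builtin_object_size(to, 0);	/* (size_t)-1 when unknown */

		if (sz != (size_t)-1 && sz < n) {
			fprintf(stderr, "refusing %zu-byte copy into %zu-byte object\n", n, sz);
			return n;		/* report all bytes as uncopied */
		}
		memcpy(to, from, n);
		return 0;			/* 0 bytes left, like copy_from_user() */
	}

	int main(void)
	{
		char small[8], big[64] = "payload";

		checked_copy(small, big, sizeof(small));	/* fits: copied   */
		checked_copy(small, big, sizeof(big));		/* rejected early */
		return 0;
	}

The kernel version additionally distinguishes constant from non-constant lengths: a constant-size overflow hits copy_from_user_overflow(), a __compiletime_error under CONFIG_DEBUG_STRICT_USER_COPY_CHECKS, while non-constant sizes only produce the __compiletime_warning variant.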
19946diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
19947index 3c03a5d..edb68ae 100644
19948--- a/arch/x86/include/asm/uaccess_32.h
19949+++ b/arch/x86/include/asm/uaccess_32.h
19950@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
19951 * anything, so this is accurate.
19952 */
19953
19954-static __always_inline unsigned long __must_check
19955+static __always_inline __size_overflow(3) unsigned long __must_check
19956 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
19957 {
19958+ if ((long)n < 0)
19959+ return n;
19960+
19961+ check_object_size(from, n, true);
19962+
19963 if (__builtin_constant_p(n)) {
19964 unsigned long ret;
19965
19966@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
19967 __copy_to_user(void __user *to, const void *from, unsigned long n)
19968 {
19969 might_fault();
19970+
19971 return __copy_to_user_inatomic(to, from, n);
19972 }
19973
19974-static __always_inline unsigned long
19975+static __always_inline __size_overflow(3) unsigned long
19976 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
19977 {
19978+ if ((long)n < 0)
19979+ return n;
19980+
19981 /* Avoid zeroing the tail if the copy fails..
19982 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
19983 * but as the zeroing behaviour is only significant when n is not
19984@@ -137,6 +146,12 @@ static __always_inline unsigned long
19985 __copy_from_user(void *to, const void __user *from, unsigned long n)
19986 {
19987 might_fault();
19988+
19989+ if ((long)n < 0)
19990+ return n;
19991+
19992+ check_object_size(to, n, false);
19993+
19994 if (__builtin_constant_p(n)) {
19995 unsigned long ret;
19996
19997@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
19998 const void __user *from, unsigned long n)
19999 {
20000 might_fault();
20001+
20002+ if ((long)n < 0)
20003+ return n;
20004+
20005 if (__builtin_constant_p(n)) {
20006 unsigned long ret;
20007
20008@@ -181,7 +200,10 @@ static __always_inline unsigned long
20009 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20010 unsigned long n)
20011 {
20012- return __copy_from_user_ll_nocache_nozero(to, from, n);
20013+ if ((long)n < 0)
20014+ return n;
20015+
20016+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20017 }
20018
20019 #endif /* _ASM_X86_UACCESS_32_H */
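
Every 32-bit copy helper above gains the same `(long)n < 0` prologue: a length with the top bit set can realistically only come from an underflowed size calculation, so it is bounced back untouched before check_object_size() (the PaX USERCOPY bounds check on the kernel object) or any actual copying runs. A small sketch of the failure mode this guards against (plain userspace C, illustrative only):

	#include <stdio.h>

	int main(void)
	{
		unsigned long have = 16, want = 24;
		unsigned long n = have - want;	/* underflows to a huge value */

		if ((long)n < 0)		/* looks negative as a signed long */
			printf("rejected bogus length %lu\n", n);
		return 0;
	}
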
20020diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20021index 12a26b9..c36fff5 100644
20022--- a/arch/x86/include/asm/uaccess_64.h
20023+++ b/arch/x86/include/asm/uaccess_64.h
20024@@ -10,6 +10,9 @@
20025 #include <asm/alternative.h>
20026 #include <asm/cpufeature.h>
20027 #include <asm/page.h>
20028+#include <asm/pgtable.h>
20029+
20030+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20031
20032 /*
20033 * Copy To/From Userspace
20034@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20035 __must_check unsigned long
20036 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20037
20038-static __always_inline __must_check unsigned long
20039-copy_user_generic(void *to, const void *from, unsigned len)
20040+static __always_inline __must_check __size_overflow(3) unsigned long
20041+copy_user_generic(void *to, const void *from, unsigned long len)
20042 {
20043 unsigned ret;
20044
20045@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20046 }
20047
20048 __must_check unsigned long
20049-copy_in_user(void __user *to, const void __user *from, unsigned len);
20050+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20051
20052 static __always_inline __must_check
20053-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20054+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20055 {
20056- int ret = 0;
20057+ size_t sz = __compiletime_object_size(dst);
20058+ unsigned ret = 0;
20059+
20060+ if (size > INT_MAX)
20061+ return size;
20062+
20063+ check_object_size(dst, size, false);
20064+
20065+#ifdef CONFIG_PAX_MEMORY_UDEREF
20066+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20067+ return size;
20068+#endif
20069+
20070+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20071+ if(__builtin_constant_p(size))
20072+ copy_from_user_overflow();
20073+ else
20074+ __copy_from_user_overflow(sz, size);
20075+ return size;
20076+ }
20077
20078 if (!__builtin_constant_p(size))
20079- return copy_user_generic(dst, (__force void *)src, size);
20080+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20081 switch (size) {
20082- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20083+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20084 ret, "b", "b", "=q", 1);
20085 return ret;
20086- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20087+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20088 ret, "w", "w", "=r", 2);
20089 return ret;
20090- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20091+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20092 ret, "l", "k", "=r", 4);
20093 return ret;
20094- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20095+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20096 ret, "q", "", "=r", 8);
20097 return ret;
20098 case 10:
20099- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20100+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20101 ret, "q", "", "=r", 10);
20102 if (unlikely(ret))
20103 return ret;
20104 __get_user_asm(*(u16 *)(8 + (char *)dst),
20105- (u16 __user *)(8 + (char __user *)src),
20106+ (const u16 __user *)(8 + (const char __user *)src),
20107 ret, "w", "w", "=r", 2);
20108 return ret;
20109 case 16:
20110- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20111+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20112 ret, "q", "", "=r", 16);
20113 if (unlikely(ret))
20114 return ret;
20115 __get_user_asm(*(u64 *)(8 + (char *)dst),
20116- (u64 __user *)(8 + (char __user *)src),
20117+ (const u64 __user *)(8 + (const char __user *)src),
20118 ret, "q", "", "=r", 8);
20119 return ret;
20120 default:
20121- return copy_user_generic(dst, (__force void *)src, size);
20122+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20123 }
20124 }
20125
20126 static __always_inline __must_check
20127-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20128+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20129 {
20130 might_fault();
20131 return __copy_from_user_nocheck(dst, src, size);
20132 }
20133
20134 static __always_inline __must_check
20135-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20136+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20137 {
20138- int ret = 0;
20139+ size_t sz = __compiletime_object_size(src);
20140+ unsigned ret = 0;
20141+
20142+ if (size > INT_MAX)
20143+ return size;
20144+
20145+ check_object_size(src, size, true);
20146+
20147+#ifdef CONFIG_PAX_MEMORY_UDEREF
20148+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20149+ return size;
20150+#endif
20151+
20152+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20153+ if(__builtin_constant_p(size))
20154+ copy_to_user_overflow();
20155+ else
20156+ __copy_to_user_overflow(sz, size);
20157+ return size;
20158+ }
20159
20160 if (!__builtin_constant_p(size))
20161- return copy_user_generic((__force void *)dst, src, size);
20162+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20163 switch (size) {
20164- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20165+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20166 ret, "b", "b", "iq", 1);
20167 return ret;
20168- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20169+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20170 ret, "w", "w", "ir", 2);
20171 return ret;
20172- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20173+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20174 ret, "l", "k", "ir", 4);
20175 return ret;
20176- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20177+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20178 ret, "q", "", "er", 8);
20179 return ret;
20180 case 10:
20181- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20182+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20183 ret, "q", "", "er", 10);
20184 if (unlikely(ret))
20185 return ret;
20186 asm("":::"memory");
20187- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20188+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20189 ret, "w", "w", "ir", 2);
20190 return ret;
20191 case 16:
20192- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20193+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20194 ret, "q", "", "er", 16);
20195 if (unlikely(ret))
20196 return ret;
20197 asm("":::"memory");
20198- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20199+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20200 ret, "q", "", "er", 8);
20201 return ret;
20202 default:
20203- return copy_user_generic((__force void *)dst, src, size);
20204+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20205 }
20206 }
20207
20208 static __always_inline __must_check
20209-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20210+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20211 {
20212 might_fault();
20213 return __copy_to_user_nocheck(dst, src, size);
20214 }
20215
20216 static __always_inline __must_check
20217-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20218+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20219 {
20220- int ret = 0;
20221+ unsigned ret = 0;
20222
20223 might_fault();
20224+
20225+ if (size > INT_MAX)
20226+ return size;
20227+
20228+#ifdef CONFIG_PAX_MEMORY_UDEREF
20229+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20230+ return size;
20231+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20232+ return size;
20233+#endif
20234+
20235 if (!__builtin_constant_p(size))
20236- return copy_user_generic((__force void *)dst,
20237- (__force void *)src, size);
20238+ return copy_user_generic((__force_kernel void *)____m(dst),
20239+ (__force_kernel const void *)____m(src), size);
20240 switch (size) {
20241 case 1: {
20242 u8 tmp;
20243- __get_user_asm(tmp, (u8 __user *)src,
20244+ __get_user_asm(tmp, (const u8 __user *)src,
20245 ret, "b", "b", "=q", 1);
20246 if (likely(!ret))
20247 __put_user_asm(tmp, (u8 __user *)dst,
20248@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20249 }
20250 case 2: {
20251 u16 tmp;
20252- __get_user_asm(tmp, (u16 __user *)src,
20253+ __get_user_asm(tmp, (const u16 __user *)src,
20254 ret, "w", "w", "=r", 2);
20255 if (likely(!ret))
20256 __put_user_asm(tmp, (u16 __user *)dst,
20257@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20258
20259 case 4: {
20260 u32 tmp;
20261- __get_user_asm(tmp, (u32 __user *)src,
20262+ __get_user_asm(tmp, (const u32 __user *)src,
20263 ret, "l", "k", "=r", 4);
20264 if (likely(!ret))
20265 __put_user_asm(tmp, (u32 __user *)dst,
20266@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20267 }
20268 case 8: {
20269 u64 tmp;
20270- __get_user_asm(tmp, (u64 __user *)src,
20271+ __get_user_asm(tmp, (const u64 __user *)src,
20272 ret, "q", "", "=r", 8);
20273 if (likely(!ret))
20274 __put_user_asm(tmp, (u64 __user *)dst,
20275@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20276 return ret;
20277 }
20278 default:
20279- return copy_user_generic((__force void *)dst,
20280- (__force void *)src, size);
20281+ return copy_user_generic((__force_kernel void *)____m(dst),
20282+ (__force_kernel const void *)____m(src), size);
20283 }
20284 }
20285
20286-static __must_check __always_inline int
20287-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20288+static __must_check __always_inline unsigned long
20289+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20290 {
20291 return __copy_from_user_nocheck(dst, src, size);
20292 }
20293
20294-static __must_check __always_inline int
20295-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20296+static __must_check __always_inline unsigned long
20297+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20298 {
20299 return __copy_to_user_nocheck(dst, src, size);
20300 }
20301
20302-extern long __copy_user_nocache(void *dst, const void __user *src,
20303- unsigned size, int zerorest);
20304+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20305+ unsigned long size, int zerorest);
20306
20307-static inline int
20308-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20309+static inline unsigned long
20310+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20311 {
20312 might_fault();
20313+
20314+ if (size > INT_MAX)
20315+ return size;
20316+
20317+#ifdef CONFIG_PAX_MEMORY_UDEREF
20318+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20319+ return size;
20320+#endif
20321+
20322 return __copy_user_nocache(dst, src, size, 1);
20323 }
20324
20325-static inline int
20326+static inline unsigned long
20327 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20328- unsigned size)
20329+ unsigned long size)
20330 {
20331+ if (size > INT_MAX)
20332+ return size;
20333+
20334+#ifdef CONFIG_PAX_MEMORY_UDEREF
20335+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20336+ return size;
20337+#endif
20338+
20339 return __copy_user_nocache(dst, src, size, 0);
20340 }
20341
20342 unsigned long
20343-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20344+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20345
20346 #endif /* _ASM_X86_UACCESS_64_H */
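
The 64-bit counterparts above follow the same pattern with wider plumbing: return types change from int to unsigned long so a large "bytes not copied" result cannot be truncated or misread as negative, `size > INT_MAX` replaces the sign test, and under CONFIG_PAX_MEMORY_UDEREF the user range is validated with access_ok_noprefault() and the user pointer is remapped through ____m() before copy_user_generic() dereferences it. A compilable sketch of the resulting check ordering, assuming GCC at -O2 (hardened_copy, range_ok and the limit are illustrative stand-ins):

	#include <limits.h>
	#include <stdio.h>
	#include <string.h>

	static int range_ok(const void *p, size_t n)	/* stands in for access_ok_noprefault() */
	{
		(void)p; (void)n;
		return 1;
	}

	static size_t hardened_copy(void *dst, const void *src, size_t size)
	{
		size_t sz = __builtin_object_size(dst, 0);

		if (size > INT_MAX)			/* absurd length: refuse before anything else */
			return size;
		if (!range_ok(src, size))		/* source must lie within the user range */
			return size;
		if (sz != (size_t)-1 && sz < size)	/* provable overflow of a known object */
			return size;
		memcpy(dst, src, size);			/* only now touch the data */
		return 0;				/* 0 bytes left uncopied */
	}

	int main(void)
	{
		char buf[16];
		const char msg[] = "hello";

		printf("left: %zu\n", hardened_copy(buf, msg, sizeof(msg)));
		return 0;
	}
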
20347diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20348index 5b238981..77fdd78 100644
20349--- a/arch/x86/include/asm/word-at-a-time.h
20350+++ b/arch/x86/include/asm/word-at-a-time.h
20351@@ -11,7 +11,7 @@
20352 * and shift, for example.
20353 */
20354 struct word_at_a_time {
20355- const unsigned long one_bits, high_bits;
20356+ unsigned long one_bits, high_bits;
20357 };
20358
20359 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20360diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20361index f58a9c7..dc378042a 100644
20362--- a/arch/x86/include/asm/x86_init.h
20363+++ b/arch/x86/include/asm/x86_init.h
20364@@ -129,7 +129,7 @@ struct x86_init_ops {
20365 struct x86_init_timers timers;
20366 struct x86_init_iommu iommu;
20367 struct x86_init_pci pci;
20368-};
20369+} __no_const;
20370
20371 /**
20372 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20373@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20374 void (*setup_percpu_clockev)(void);
20375 void (*early_percpu_clock_init)(void);
20376 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20377-};
20378+} __no_const;
20379
20380 struct timespec;
20381
20382@@ -168,7 +168,7 @@ struct x86_platform_ops {
20383 void (*save_sched_clock_state)(void);
20384 void (*restore_sched_clock_state)(void);
20385 void (*apic_post_init)(void);
20386-};
20387+} __no_const;
20388
20389 struct pci_dev;
20390 struct msi_msg;
20391@@ -182,7 +182,7 @@ struct x86_msi_ops {
20392 void (*teardown_msi_irqs)(struct pci_dev *dev);
20393 void (*restore_msi_irqs)(struct pci_dev *dev);
20394 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20395-};
20396+} __no_const;
20397
20398 struct IO_APIC_route_entry;
20399 struct io_apic_irq_attr;
20400@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20401 unsigned int destination, int vector,
20402 struct io_apic_irq_attr *attr);
20403 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20404-};
20405+} __no_const;
20406
20407 extern struct x86_init_ops x86_init;
20408 extern struct x86_cpuinit_ops x86_cpuinit;
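
The `__no_const` annotations above interact with grsecurity's constify GCC plugin, which by default moves structures consisting purely of function pointers into read-only memory; the x86 ops tables are legitimately rewritten during boot (platform code swaps in its own hooks), so they must opt out. A plain-C illustration of the underlying split, with no plugin involved and all names illustrative:

	#include <stdio.h>

	struct ops {
		void (*init)(void);
	};

	static void generic_init(void)  { puts("generic init"); }
	static void platform_init(void) { puts("platform init"); }

	static const struct ops fixed_ops = { generic_init };	/* constified: lands in .rodata */
	static struct ops boot_ops = { generic_init };		/* "__no_const": stays writable */

	int main(void)
	{
		boot_ops.init = platform_init;	/* fine: this table is meant to be patched */
		/* fixed_ops.init = platform_init;  would not compile: assignment to const */
		boot_ops.init();
		fixed_ops.init();
		return 0;
	}
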
20409diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20410index 5eea099..ff7ef8d 100644
20411--- a/arch/x86/include/asm/xen/page.h
20412+++ b/arch/x86/include/asm/xen/page.h
20413@@ -83,7 +83,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20414 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20415 * cases needing an extended handling.
20416 */
20417-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20418+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20419 {
20420 unsigned long mfn;
20421
20422diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20423index 5fa9770..2b49d6c 100644
20424--- a/arch/x86/include/asm/xsave.h
20425+++ b/arch/x86/include/asm/xsave.h
20426@@ -229,12 +229,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20427 if (unlikely(err))
20428 return -EFAULT;
20429
20430+ pax_open_userland();
20431 __asm__ __volatile__(ASM_STAC "\n"
20432- "1:"XSAVE"\n"
20433+ "1:"
20434+ __copyuser_seg
20435+ XSAVE"\n"
20436 "2: " ASM_CLAC "\n"
20437 xstate_fault
20438 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20439 : "memory");
20440+ pax_close_userland();
20441 return err;
20442 }
20443
20444@@ -244,16 +248,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20445 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20446 {
20447 int err = 0;
20448- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20449+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20450 u32 lmask = mask;
20451 u32 hmask = mask >> 32;
20452
20453+ pax_open_userland();
20454 __asm__ __volatile__(ASM_STAC "\n"
20455- "1:"XRSTOR"\n"
20456+ "1:"
20457+ __copyuser_seg
20458+ XRSTOR"\n"
20459 "2: " ASM_CLAC "\n"
20460 xstate_fault
20461 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20462 : "memory"); /* memory required? */
20463+ pax_close_userland();
20464 return err;
20465 }
20466
20467diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20468index d993e33..8db1b18 100644
20469--- a/arch/x86/include/uapi/asm/e820.h
20470+++ b/arch/x86/include/uapi/asm/e820.h
20471@@ -58,7 +58,7 @@ struct e820map {
20472 #define ISA_START_ADDRESS 0xa0000
20473 #define ISA_END_ADDRESS 0x100000
20474
20475-#define BIOS_BEGIN 0x000a0000
20476+#define BIOS_BEGIN 0x000c0000
20477 #define BIOS_END 0x00100000
20478
20479 #define BIOS_ROM_BASE 0xffe00000
20480diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20481index 7b0a55a..ad115bf 100644
20482--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20483+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20484@@ -49,7 +49,6 @@
20485 #define EFLAGS 144
20486 #define RSP 152
20487 #define SS 160
20488-#define ARGOFFSET R11
20489 #endif /* __ASSEMBLY__ */
20490
20491 /* top of stack page */
20492diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20493index 5d4502c..a567e09 100644
20494--- a/arch/x86/kernel/Makefile
20495+++ b/arch/x86/kernel/Makefile
20496@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20497 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20498 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20499 obj-y += probe_roms.o
20500-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20501+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20502 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20503 obj-$(CONFIG_X86_64) += mcount_64.o
20504 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20505diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20506index b5ddc96..490b4e4 100644
20507--- a/arch/x86/kernel/acpi/boot.c
20508+++ b/arch/x86/kernel/acpi/boot.c
20509@@ -1351,7 +1351,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20510 * If your system is blacklisted here, but you find that acpi=force
20511 * works for you, please contact linux-acpi@vger.kernel.org
20512 */
20513-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20514+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20515 /*
20516 * Boxes that need ACPI disabled
20517 */
20518@@ -1426,7 +1426,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20519 };
20520
20521 /* second table for DMI checks that should run after early-quirks */
20522-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20523+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20524 /*
20525 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20526 * which includes some code which overrides all temperature
20527diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20528index 3136820..e2c6577 100644
20529--- a/arch/x86/kernel/acpi/sleep.c
20530+++ b/arch/x86/kernel/acpi/sleep.c
20531@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20532 #else /* CONFIG_64BIT */
20533 #ifdef CONFIG_SMP
20534 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20535+
20536+ pax_open_kernel();
20537 early_gdt_descr.address =
20538 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20539+ pax_close_kernel();
20540+
20541 initial_gs = per_cpu_offset(smp_processor_id());
20542 #endif
20543 initial_code = (unsigned long)wakeup_long64;
20544diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20545index 665c6b7..eae4d56 100644
20546--- a/arch/x86/kernel/acpi/wakeup_32.S
20547+++ b/arch/x86/kernel/acpi/wakeup_32.S
20548@@ -29,13 +29,11 @@ wakeup_pmode_return:
20549 # and restore the stack ... but you need gdt for this to work
20550 movl saved_context_esp, %esp
20551
20552- movl %cs:saved_magic, %eax
20553- cmpl $0x12345678, %eax
20554+ cmpl $0x12345678, saved_magic
20555 jne bogus_magic
20556
20557 # jump to place where we left off
20558- movl saved_eip, %eax
20559- jmp *%eax
20560+ jmp *(saved_eip)
20561
20562 bogus_magic:
20563 jmp bogus_magic
20564diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20565index 703130f..27a155d 100644
20566--- a/arch/x86/kernel/alternative.c
20567+++ b/arch/x86/kernel/alternative.c
20568@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20569 */
20570 for (a = start; a < end; a++) {
20571 instr = (u8 *)&a->instr_offset + a->instr_offset;
20572+
20573+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20574+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20575+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20576+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20577+#endif
20578+
20579 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20580 BUG_ON(a->replacementlen > a->instrlen);
20581 BUG_ON(a->instrlen > sizeof(insnbuf));
20582@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20583 add_nops(insnbuf + a->replacementlen,
20584 a->instrlen - a->replacementlen);
20585
20586+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20587+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20588+ instr = ktva_ktla(instr);
20589+#endif
20590+
20591 text_poke_early(instr, insnbuf, a->instrlen);
20592 }
20593 }
20594@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20595 for (poff = start; poff < end; poff++) {
20596 u8 *ptr = (u8 *)poff + *poff;
20597
20598+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20599+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20600+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20601+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20602+#endif
20603+
20604 if (!*poff || ptr < text || ptr >= text_end)
20605 continue;
20606 /* turn DS segment override prefix into lock prefix */
20607- if (*ptr == 0x3e)
20608+ if (*ktla_ktva(ptr) == 0x3e)
20609 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20610 }
20611 mutex_unlock(&text_mutex);
20612@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20613 for (poff = start; poff < end; poff++) {
20614 u8 *ptr = (u8 *)poff + *poff;
20615
20616+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20617+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20618+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20619+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20620+#endif
20621+
20622 if (!*poff || ptr < text || ptr >= text_end)
20623 continue;
20624 /* turn lock prefix into DS segment override prefix */
20625- if (*ptr == 0xf0)
20626+ if (*ktla_ktva(ptr) == 0xf0)
20627 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20628 }
20629 mutex_unlock(&text_mutex);
20630@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20631
20632 BUG_ON(p->len > MAX_PATCH_LEN);
20633 /* prep the buffer with the original instructions */
20634- memcpy(insnbuf, p->instr, p->len);
20635+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20636 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20637 (unsigned long)p->instr, p->len);
20638
20639@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20640 if (!uniproc_patched || num_possible_cpus() == 1)
20641 free_init_pages("SMP alternatives",
20642 (unsigned long)__smp_locks,
20643- (unsigned long)__smp_locks_end);
20644+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20645 #endif
20646
20647 apply_paravirt(__parainstructions, __parainstructions_end);
20648@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20649 * instructions. And on the local CPU you need to be protected again NMI or MCE
20650 * handlers seeing an inconsistent instruction while you patch.
20651 */
20652-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20653+void *__kprobes text_poke_early(void *addr, const void *opcode,
20654 size_t len)
20655 {
20656 unsigned long flags;
20657 local_irq_save(flags);
20658- memcpy(addr, opcode, len);
20659+
20660+ pax_open_kernel();
20661+ memcpy(ktla_ktva(addr), opcode, len);
20662 sync_core();
20663+ pax_close_kernel();
20664+
20665 local_irq_restore(flags);
20666 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20667 that causes hangs on some VIA CPUs. */
20668@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20669 */
20670 void *text_poke(void *addr, const void *opcode, size_t len)
20671 {
20672- unsigned long flags;
20673- char *vaddr;
20674+ unsigned char *vaddr = ktla_ktva(addr);
20675 struct page *pages[2];
20676- int i;
20677+ size_t i;
20678
20679 if (!core_kernel_text((unsigned long)addr)) {
20680- pages[0] = vmalloc_to_page(addr);
20681- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20682+ pages[0] = vmalloc_to_page(vaddr);
20683+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20684 } else {
20685- pages[0] = virt_to_page(addr);
20686+ pages[0] = virt_to_page(vaddr);
20687 WARN_ON(!PageReserved(pages[0]));
20688- pages[1] = virt_to_page(addr + PAGE_SIZE);
20689+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20690 }
20691 BUG_ON(!pages[0]);
20692- local_irq_save(flags);
20693- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20694- if (pages[1])
20695- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20696- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20697- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20698- clear_fixmap(FIX_TEXT_POKE0);
20699- if (pages[1])
20700- clear_fixmap(FIX_TEXT_POKE1);
20701- local_flush_tlb();
20702- sync_core();
20703- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20704- that causes hangs on some VIA CPUs. */
20705+ text_poke_early(addr, opcode, len);
20706 for (i = 0; i < len; i++)
20707- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20708- local_irq_restore(flags);
20709+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20710 return addr;
20711 }
20712
20713@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20714 if (likely(!bp_patching_in_progress))
20715 return 0;
20716
20717- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20718+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20719 return 0;
20720
20721 /* set up the specified breakpoint handler */
20722@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20723 */
20724 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20725 {
20726- unsigned char int3 = 0xcc;
20727+ const unsigned char int3 = 0xcc;
20728
20729 bp_int3_handler = handler;
20730 bp_int3_addr = (u8 *)addr + sizeof(int3);
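
The alternative.c changes above all follow one rule: with KERNEXEC on i386 the kernel text is not writable at its linked address, so every patch site must be read and written through its writable alias via ktla_ktva() inside a pax_open_kernel()/pax_close_kernel() window, which is why text_poke() collapses into a call to text_poke_early() instead of building a temporary fixmap mapping. A toy model of the two-mapping arithmetic; the delta and addresses are made up, only the helper names come from the patch:

	#include <stdint.h>
	#include <stdio.h>

	#define ALIAS_DELTA 0x10000000UL	/* hypothetical fixed offset between mappings */

	/* linked (executable) address -> writable alias */
	static uintptr_t ktla_ktva_model(uintptr_t exec_addr)  { return exec_addr + ALIAS_DELTA; }
	/* writable alias -> linked (executable) address */
	static uintptr_t ktva_ktla_model(uintptr_t alias_addr) { return alias_addr - ALIAS_DELTA; }

	int main(void)
	{
		uintptr_t patch_site = 0xc1000000UL;			/* where the CPU executes */
		uintptr_t write_here = ktla_ktva_model(patch_site);	/* where memcpy must aim  */

		printf("execute at %#lx, patch via %#lx\n",
		       (unsigned long)ktva_ktla_model(write_here),
		       (unsigned long)write_here);
		return 0;
	}
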
20731diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20732index 29b5b18..3bdfc29 100644
20733--- a/arch/x86/kernel/apic/apic.c
20734+++ b/arch/x86/kernel/apic/apic.c
20735@@ -201,7 +201,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20736 /*
20737 * Debug level, exported for io_apic.c
20738 */
20739-unsigned int apic_verbosity;
20740+int apic_verbosity;
20741
20742 int pic_mode;
20743
20744@@ -1991,7 +1991,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20745 apic_write(APIC_ESR, 0);
20746 v = apic_read(APIC_ESR);
20747 ack_APIC_irq();
20748- atomic_inc(&irq_err_count);
20749+ atomic_inc_unchecked(&irq_err_count);
20750
20751 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20752 smp_processor_id(), v);
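
The atomic_inc() to atomic_inc_unchecked() conversions here and in io_apic.c below are the flip side of PaX's REFCOUNT hardening: once every atomic_t traps on overflow to stop reference-count wraps, pure statistics such as irq_err_count and irq_mis_count must move to the _unchecked variant, where wrapping is harmless. A simplified single-threaded model of the distinction (the real kernel detects the overflow atomically in asm; the names here are stand-ins):

	#include <limits.h>
	#include <stdio.h>
	#include <stdlib.h>

	typedef struct { unsigned int counter; } atomic_model_t;

	static void inc_checked(atomic_model_t *v)	/* models the hardened atomic_inc() */
	{
		if (v->counter == UINT_MAX) {		/* about to wrap: treat as a refcount bug */
			fputs("refcount overflow detected, trapping\n", stderr);
			abort();
		}
		v->counter++;
	}

	static void inc_unchecked(atomic_model_t *v)	/* models atomic_inc_unchecked() */
	{
		v->counter++;				/* statistics may simply wrap */
	}

	int main(void)
	{
		atomic_model_t stat = { UINT_MAX };
		atomic_model_t ref  = { UINT_MAX };

		inc_unchecked(&stat);			/* wraps to 0, harmless for a counter */
		printf("stat wrapped to %u\n", stat.counter);

		inc_checked(&ref);			/* aborts: a refcount must never wrap */
		return 0;
	}
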
20753diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20754index de918c4..32eed23 100644
20755--- a/arch/x86/kernel/apic/apic_flat_64.c
20756+++ b/arch/x86/kernel/apic/apic_flat_64.c
20757@@ -154,7 +154,7 @@ static int flat_probe(void)
20758 return 1;
20759 }
20760
20761-static struct apic apic_flat = {
20762+static struct apic apic_flat __read_only = {
20763 .name = "flat",
20764 .probe = flat_probe,
20765 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20766@@ -260,7 +260,7 @@ static int physflat_probe(void)
20767 return 0;
20768 }
20769
20770-static struct apic apic_physflat = {
20771+static struct apic apic_physflat __read_only = {
20772
20773 .name = "physical flat",
20774 .probe = physflat_probe,
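
Each `struct apic` instance in this series gains `__read_only`, moving the method table into memory that is write-protected after boot so a runtime attacker can no longer swap one of its hooks; the same treatment recurs for apic_noop, apic_bigsmp, apic_default and the x2apic variants below. A minimal model of the annotation, assuming GCC section attributes; the section name is illustrative and nothing in this sketch actually write-protects it:

	#include <stdio.h>

	#define __read_only __attribute__((__section__(".data..read_only")))

	struct apic_like {
		const char *name;
		int (*probe)(void);
	};

	static int probe_example(void) { return 1; }

	/* Placed in a dedicated section; the kernel marks that section RO after init. */
	static struct apic_like apic_example __read_only = {
		.name  = "example",
		.probe = probe_example,
	};

	int main(void)
	{
		printf("%s: %d\n", apic_example.name, apic_example.probe());
		return 0;
	}
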
20775diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20776index b205cdb..d8503ff 100644
20777--- a/arch/x86/kernel/apic/apic_noop.c
20778+++ b/arch/x86/kernel/apic/apic_noop.c
20779@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20780 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20781 }
20782
20783-struct apic apic_noop = {
20784+struct apic apic_noop __read_only = {
20785 .name = "noop",
20786 .probe = noop_probe,
20787 .acpi_madt_oem_check = NULL,
20788diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20789index c4a8d63..fe893ac 100644
20790--- a/arch/x86/kernel/apic/bigsmp_32.c
20791+++ b/arch/x86/kernel/apic/bigsmp_32.c
20792@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20793 return dmi_bigsmp;
20794 }
20795
20796-static struct apic apic_bigsmp = {
20797+static struct apic apic_bigsmp __read_only = {
20798
20799 .name = "bigsmp",
20800 .probe = probe_bigsmp,
20801diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20802index 3f5f604..309c0e6 100644
20803--- a/arch/x86/kernel/apic/io_apic.c
20804+++ b/arch/x86/kernel/apic/io_apic.c
20805@@ -1859,7 +1859,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20806 return ret;
20807 }
20808
20809-atomic_t irq_mis_count;
20810+atomic_unchecked_t irq_mis_count;
20811
20812 #ifdef CONFIG_GENERIC_PENDING_IRQ
20813 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20814@@ -2000,7 +2000,7 @@ static void ack_ioapic_level(struct irq_data *data)
20815 * at the cpu.
20816 */
20817 if (!(v & (1 << (i & 0x1f)))) {
20818- atomic_inc(&irq_mis_count);
20819+ atomic_inc_unchecked(&irq_mis_count);
20820
20821 eoi_ioapic_irq(irq, cfg);
20822 }
20823diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20824index bda4886..f9c7195 100644
20825--- a/arch/x86/kernel/apic/probe_32.c
20826+++ b/arch/x86/kernel/apic/probe_32.c
20827@@ -72,7 +72,7 @@ static int probe_default(void)
20828 return 1;
20829 }
20830
20831-static struct apic apic_default = {
20832+static struct apic apic_default __read_only = {
20833
20834 .name = "default",
20835 .probe = probe_default,
20836diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
20837index 6cedd79..023ff8e 100644
20838--- a/arch/x86/kernel/apic/vector.c
20839+++ b/arch/x86/kernel/apic/vector.c
20840@@ -21,7 +21,7 @@
20841
20842 static DEFINE_RAW_SPINLOCK(vector_lock);
20843
20844-void lock_vector_lock(void)
20845+void lock_vector_lock(void) __acquires(vector_lock)
20846 {
20847 /* Used to the online set of cpus does not change
20848 * during assign_irq_vector.
20849@@ -29,7 +29,7 @@ void lock_vector_lock(void)
20850 raw_spin_lock(&vector_lock);
20851 }
20852
20853-void unlock_vector_lock(void)
20854+void unlock_vector_lock(void) __releases(vector_lock)
20855 {
20856 raw_spin_unlock(&vector_lock);
20857 }
20858diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20859index e658f21..b695a1a 100644
20860--- a/arch/x86/kernel/apic/x2apic_cluster.c
20861+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20862@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20863 return notifier_from_errno(err);
20864 }
20865
20866-static struct notifier_block __refdata x2apic_cpu_notifier = {
20867+static struct notifier_block x2apic_cpu_notifier = {
20868 .notifier_call = update_clusterinfo,
20869 };
20870
20871@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20872 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20873 }
20874
20875-static struct apic apic_x2apic_cluster = {
20876+static struct apic apic_x2apic_cluster __read_only = {
20877
20878 .name = "cluster x2apic",
20879 .probe = x2apic_cluster_probe,
20880diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20881index 6fae733..5ca17af 100644
20882--- a/arch/x86/kernel/apic/x2apic_phys.c
20883+++ b/arch/x86/kernel/apic/x2apic_phys.c
20884@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
20885 return apic == &apic_x2apic_phys;
20886 }
20887
20888-static struct apic apic_x2apic_phys = {
20889+static struct apic apic_x2apic_phys __read_only = {
20890
20891 .name = "physical x2apic",
20892 .probe = x2apic_phys_probe,
20893diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20894index 8e9dcfd..c61b3e4 100644
20895--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20896+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20897@@ -348,7 +348,7 @@ static int uv_probe(void)
20898 return apic == &apic_x2apic_uv_x;
20899 }
20900
20901-static struct apic __refdata apic_x2apic_uv_x = {
20902+static struct apic apic_x2apic_uv_x __read_only = {
20903
20904 .name = "UV large system",
20905 .probe = uv_probe,
20906diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20907index 927ec92..0dc3bd4 100644
20908--- a/arch/x86/kernel/apm_32.c
20909+++ b/arch/x86/kernel/apm_32.c
20910@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
20911 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20912 * even though they are called in protected mode.
20913 */
20914-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20915+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20916 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20917
20918 static const char driver_version[] = "1.16ac"; /* no spaces */
20919@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
20920 BUG_ON(cpu != 0);
20921 gdt = get_cpu_gdt_table(cpu);
20922 save_desc_40 = gdt[0x40 / 8];
20923+
20924+ pax_open_kernel();
20925 gdt[0x40 / 8] = bad_bios_desc;
20926+ pax_close_kernel();
20927
20928 apm_irq_save(flags);
20929 APM_DO_SAVE_SEGS;
20930@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
20931 &call->esi);
20932 APM_DO_RESTORE_SEGS;
20933 apm_irq_restore(flags);
20934+
20935+ pax_open_kernel();
20936 gdt[0x40 / 8] = save_desc_40;
20937+ pax_close_kernel();
20938+
20939 put_cpu();
20940
20941 return call->eax & 0xff;
20942@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
20943 BUG_ON(cpu != 0);
20944 gdt = get_cpu_gdt_table(cpu);
20945 save_desc_40 = gdt[0x40 / 8];
20946+
20947+ pax_open_kernel();
20948 gdt[0x40 / 8] = bad_bios_desc;
20949+ pax_close_kernel();
20950
20951 apm_irq_save(flags);
20952 APM_DO_SAVE_SEGS;
20953@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
20954 &call->eax);
20955 APM_DO_RESTORE_SEGS;
20956 apm_irq_restore(flags);
20957+
20958+ pax_open_kernel();
20959 gdt[0x40 / 8] = save_desc_40;
20960+ pax_close_kernel();
20961+
20962 put_cpu();
20963 return error;
20964 }
20965@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
20966 * code to that CPU.
20967 */
20968 gdt = get_cpu_gdt_table(0);
20969+
20970+ pax_open_kernel();
20971 set_desc_base(&gdt[APM_CS >> 3],
20972 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
20973 set_desc_base(&gdt[APM_CS_16 >> 3],
20974 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
20975 set_desc_base(&gdt[APM_DS >> 3],
20976 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
20977+ pax_close_kernel();
20978
20979 proc_create("apm", 0, NULL, &apm_file_ops);
20980
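
apm_32.c shows the write-protection discipline in action: the per-CPU GDT is read-only under KERNEXEC, so the transient bad_bios_desc swap around a BIOS call must be bracketed with pax_open_kernel()/pax_close_kernel(), and the descriptor itself becomes const with its accessed bit preset (0x4093 instead of 0x4092), presumably so the CPU never needs to write the accessed bit back into a read-only table. A rough userspace analogue using mprotect() in place of the kernel's write-protection toggle (everything below is illustrative and assumes 4 KiB pages):

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/mman.h>

	#define PAGE 4096

	static uint64_t *gdt_like;

	static void open_kernel(void)  { mprotect(gdt_like, PAGE, PROT_READ | PROT_WRITE); }
	static void close_kernel(void) { mprotect(gdt_like, PAGE, PROT_READ); }

	int main(void)
	{
		gdt_like = mmap(NULL, PAGE, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (gdt_like == MAP_FAILED)
			return 1;

		uint64_t save_desc_40 = gdt_like[8];	/* mirrors save_desc_40 in the patch */
		close_kernel();				/* the table is normally read-only   */

		open_kernel();
		gdt_like[8] = 0xdeadbeefULL;		/* transient descriptor swap */
		close_kernel();

		open_kernel();
		gdt_like[8] = save_desc_40;		/* restore, as after the BIOS call */
		close_kernel();

		puts("descriptor swapped and restored under transient write windows");
		return 0;
	}
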
20981diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
20982index 9f6b934..cf5ffb3 100644
20983--- a/arch/x86/kernel/asm-offsets.c
20984+++ b/arch/x86/kernel/asm-offsets.c
20985@@ -32,6 +32,8 @@ void common(void) {
20986 OFFSET(TI_flags, thread_info, flags);
20987 OFFSET(TI_status, thread_info, status);
20988 OFFSET(TI_addr_limit, thread_info, addr_limit);
20989+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
20990+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
20991
20992 BLANK();
20993 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
20994@@ -52,8 +54,26 @@ void common(void) {
20995 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
20996 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
20997 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
20998+
20999+#ifdef CONFIG_PAX_KERNEXEC
21000+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21001 #endif
21002
21003+#ifdef CONFIG_PAX_MEMORY_UDEREF
21004+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21005+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21006+#ifdef CONFIG_X86_64
21007+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21008+#endif
21009+#endif
21010+
21011+#endif
21012+
21013+ BLANK();
21014+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21015+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21016+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21017+
21018 #ifdef CONFIG_XEN
21019 BLANK();
21020 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21021diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21022index fdcbb4d..036dd93 100644
21023--- a/arch/x86/kernel/asm-offsets_64.c
21024+++ b/arch/x86/kernel/asm-offsets_64.c
21025@@ -80,6 +80,7 @@ int main(void)
21026 BLANK();
21027 #undef ENTRY
21028
21029+ DEFINE(TSS_size, sizeof(struct tss_struct));
21030 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21031 BLANK();
21032
21033diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21034index 80091ae..0c5184f 100644
21035--- a/arch/x86/kernel/cpu/Makefile
21036+++ b/arch/x86/kernel/cpu/Makefile
21037@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21038 CFLAGS_REMOVE_perf_event.o = -pg
21039 endif
21040
21041-# Make sure load_percpu_segment has no stackprotector
21042-nostackp := $(call cc-option, -fno-stack-protector)
21043-CFLAGS_common.o := $(nostackp)
21044-
21045 obj-y := intel_cacheinfo.o scattered.o topology.o
21046 obj-y += common.o
21047 obj-y += rdrand.o
21048diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21049index 15c5df9..d9a604a 100644
21050--- a/arch/x86/kernel/cpu/amd.c
21051+++ b/arch/x86/kernel/cpu/amd.c
21052@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21053 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21054 {
21055 /* AMD errata T13 (order #21922) */
21056- if ((c->x86 == 6)) {
21057+ if (c->x86 == 6) {
21058 /* Duron Rev A0 */
21059 if (c->x86_model == 3 && c->x86_mask == 0)
21060 size = 64;
21061diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21062index c604965..0b0e28a 100644
21063--- a/arch/x86/kernel/cpu/common.c
21064+++ b/arch/x86/kernel/cpu/common.c
21065@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21066
21067 static const struct cpu_dev *this_cpu = &default_cpu;
21068
21069-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21070-#ifdef CONFIG_X86_64
21071- /*
21072- * We need valid kernel segments for data and code in long mode too
21073- * IRET will check the segment types kkeil 2000/10/28
21074- * Also sysret mandates a special GDT layout
21075- *
21076- * TLS descriptors are currently at a different place compared to i386.
21077- * Hopefully nobody expects them at a fixed place (Wine?)
21078- */
21079- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21080- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21081- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21082- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21083- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21084- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21085-#else
21086- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21087- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21088- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21089- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21090- /*
21091- * Segments used for calling PnP BIOS have byte granularity.
21092- * They code segments and data segments have fixed 64k limits,
21093- * the transfer segment sizes are set at run time.
21094- */
21095- /* 32-bit code */
21096- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21097- /* 16-bit code */
21098- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21099- /* 16-bit data */
21100- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21101- /* 16-bit data */
21102- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21103- /* 16-bit data */
21104- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21105- /*
21106- * The APM segments have byte granularity and their bases
21107- * are set at run time. All have 64k limits.
21108- */
21109- /* 32-bit code */
21110- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21111- /* 16-bit code */
21112- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21113- /* data */
21114- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21115-
21116- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21117- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21118- GDT_STACK_CANARY_INIT
21119-#endif
21120-} };
21121-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21122-
21123 static int __init x86_xsave_setup(char *s)
21124 {
21125 if (strlen(s))
21126@@ -305,6 +251,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21127 }
21128 }
21129
21130+#ifdef CONFIG_X86_64
21131+static __init int setup_disable_pcid(char *arg)
21132+{
21133+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21134+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21135+
21136+#ifdef CONFIG_PAX_MEMORY_UDEREF
21137+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21138+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21139+#endif
21140+
21141+ return 1;
21142+}
21143+__setup("nopcid", setup_disable_pcid);
21144+
21145+static void setup_pcid(struct cpuinfo_x86 *c)
21146+{
21147+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21148+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21149+
21150+#ifdef CONFIG_PAX_MEMORY_UDEREF
21151+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21152+ pax_open_kernel();
21153+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21154+ pax_close_kernel();
21155+ printk("PAX: slow and weak UDEREF enabled\n");
21156+ } else
21157+ printk("PAX: UDEREF disabled\n");
21158+#endif
21159+
21160+ return;
21161+ }
21162+
21163+ printk("PAX: PCID detected\n");
21164+ set_in_cr4(X86_CR4_PCIDE);
21165+
21166+#ifdef CONFIG_PAX_MEMORY_UDEREF
21167+ pax_open_kernel();
21168+ clone_pgd_mask = ~(pgdval_t)0UL;
21169+ pax_close_kernel();
21170+ if (pax_user_shadow_base)
21171+ printk("PAX: weak UDEREF enabled\n");
21172+ else {
21173+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21174+ printk("PAX: strong UDEREF enabled\n");
21175+ }
21176+#endif
21177+
21178+ if (cpu_has(c, X86_FEATURE_INVPCID))
21179+ printk("PAX: INVPCID detected\n");
21180+}
21181+#endif
21182+
21183 /*
21184 * Some CPU features depend on higher CPUID levels, which may not always
21185 * be available due to CPUID level capping or broken virtualization
21186@@ -405,7 +404,7 @@ void switch_to_new_gdt(int cpu)
21187 {
21188 struct desc_ptr gdt_descr;
21189
21190- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21191+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21192 gdt_descr.size = GDT_SIZE - 1;
21193 load_gdt(&gdt_descr);
21194 /* Reload the per-cpu base */
21195@@ -895,6 +894,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21196 setup_smep(c);
21197 setup_smap(c);
21198
21199+#ifdef CONFIG_X86_64
21200+ setup_pcid(c);
21201+#endif
21202+
21203 /*
21204 * The vendor-specific functions might have changed features.
21205 * Now we do "generic changes."
21206@@ -903,6 +906,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21207 /* Filter out anything that depends on CPUID levels we don't have */
21208 filter_cpuid_features(c, true);
21209
21210+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
21211+ setup_clear_cpu_cap(X86_FEATURE_SEP);
21212+#endif
21213+
21214 /* If the model name is still unset, do table lookup. */
21215 if (!c->x86_model_id[0]) {
21216 const char *p;
21217@@ -977,7 +984,7 @@ static void syscall32_cpu_init(void)
21218 void enable_sep_cpu(void)
21219 {
21220 int cpu = get_cpu();
21221- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21222+ struct tss_struct *tss = init_tss + cpu;
21223
21224 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21225 put_cpu();
21226@@ -1115,14 +1122,16 @@ static __init int setup_disablecpuid(char *arg)
21227 }
21228 __setup("clearcpuid=", setup_disablecpuid);
21229
21230+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21231+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21232+
21233 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21234- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21235+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21236 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21237
21238 #ifdef CONFIG_X86_64
21239-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21240-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21241- (unsigned long) debug_idt_table };
21242+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21243+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21244
21245 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21246 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21247@@ -1299,7 +1308,7 @@ void cpu_init(void)
21248 */
21249 load_ucode_ap();
21250
21251- t = &per_cpu(init_tss, cpu);
21252+ t = init_tss + cpu;
21253 oist = &per_cpu(orig_ist, cpu);
21254
21255 #ifdef CONFIG_NUMA
21256@@ -1331,7 +1340,6 @@ void cpu_init(void)
21257 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21258 barrier();
21259
21260- x86_configure_nx();
21261 enable_x2apic();
21262
21263 /*
21264@@ -1383,7 +1391,7 @@ void cpu_init(void)
21265 {
21266 int cpu = smp_processor_id();
21267 struct task_struct *curr = current;
21268- struct tss_struct *t = &per_cpu(init_tss, cpu);
21269+ struct tss_struct *t = init_tss + cpu;
21270 struct thread_struct *thread = &curr->thread;
21271
21272 wait_for_master_cpu(cpu);
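
The new setup_pcid() above selects the UDEREF flavour at boot: with PCID available, kernel and user address spaces can be kept apart efficiently ("strong UDEREF"); without it, user space is reached through a shadow mapping at pax_user_shadow_base ("slow and weak"), and `nopcid` on the command line forces that fallback. A sketch of the decision ladder only; the flags are stand-ins, and the real code also handles INVPCID and enabling CR4.PCIDE:

	#include <stdio.h>

	int main(void)
	{
		int cpu_has_pcid = 1;	/* pretend CPUID advertised PCID          */
		int nopcid = 0;		/* "nopcid" on the command line clears it */

		if (nopcid)
			cpu_has_pcid = 0;

		if (!cpu_has_pcid) {
			puts("PAX: slow and weak UDEREF enabled");	/* shadow mapping */
		} else {
			puts("PAX: PCID detected");
			puts("PAX: strong UDEREF enabled");
		}
		return 0;
	}
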
21273diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21274index c703507..28535e3 100644
21275--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21276+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21277@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
21278 };
21279
21280 #ifdef CONFIG_AMD_NB
21281+static struct attribute *default_attrs_amd_nb[] = {
21282+ &type.attr,
21283+ &level.attr,
21284+ &coherency_line_size.attr,
21285+ &physical_line_partition.attr,
21286+ &ways_of_associativity.attr,
21287+ &number_of_sets.attr,
21288+ &size.attr,
21289+ &shared_cpu_map.attr,
21290+ &shared_cpu_list.attr,
21291+ NULL,
21292+ NULL,
21293+ NULL,
21294+ NULL
21295+};
21296+
21297 static struct attribute **amd_l3_attrs(void)
21298 {
21299 static struct attribute **attrs;
21300@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
21301
21302 n = ARRAY_SIZE(default_attrs);
21303
21304- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21305- n += 2;
21306-
21307- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21308- n += 1;
21309-
21310- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21311- if (attrs == NULL)
21312- return attrs = default_attrs;
21313-
21314- for (n = 0; default_attrs[n]; n++)
21315- attrs[n] = default_attrs[n];
21316+ attrs = default_attrs_amd_nb;
21317
21318 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21319 attrs[n++] = &cache_disable_0.attr;
21320@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
21321 .default_attrs = default_attrs,
21322 };
21323
21324+#ifdef CONFIG_AMD_NB
21325+static struct kobj_type ktype_cache_amd_nb = {
21326+ .sysfs_ops = &sysfs_ops,
21327+ .default_attrs = default_attrs_amd_nb,
21328+};
21329+#endif
21330+
21331 static struct kobj_type ktype_percpu_entry = {
21332 .sysfs_ops = &sysfs_ops,
21333 };
21334@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
21335 return retval;
21336 }
21337
21338+#ifdef CONFIG_AMD_NB
21339+ amd_l3_attrs();
21340+#endif
21341+
21342 for (i = 0; i < num_cache_leaves; i++) {
21343+ struct kobj_type *ktype;
21344+
21345 this_object = INDEX_KOBJECT_PTR(cpu, i);
21346 this_object->cpu = cpu;
21347 this_object->index = i;
21348
21349 this_leaf = CPUID4_INFO_IDX(cpu, i);
21350
21351- ktype_cache.default_attrs = default_attrs;
21352+ ktype = &ktype_cache;
21353 #ifdef CONFIG_AMD_NB
21354 if (this_leaf->base.nb)
21355- ktype_cache.default_attrs = amd_l3_attrs();
21356+ ktype = &ktype_cache_amd_nb;
21357 #endif
21358 retval = kobject_init_and_add(&(this_object->kobj),
21359- &ktype_cache,
21360+ ktype,
21361 per_cpu(ici_cache_kobject, cpu),
21362 "index%1lu", i);
21363 if (unlikely(retval)) {
21364diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21365index d2c6116..62fd7aa 100644
21366--- a/arch/x86/kernel/cpu/mcheck/mce.c
21367+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21368@@ -45,6 +45,7 @@
21369 #include <asm/processor.h>
21370 #include <asm/mce.h>
21371 #include <asm/msr.h>
21372+#include <asm/local.h>
21373
21374 #include "mce-internal.h"
21375
21376@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
21377 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21378 m->cs, m->ip);
21379
21380- if (m->cs == __KERNEL_CS)
21381+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21382 print_symbol("{%s}", m->ip);
21383 pr_cont("\n");
21384 }
21385@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
21386
21387 #define PANIC_TIMEOUT 5 /* 5 seconds */
21388
21389-static atomic_t mce_panicked;
21390+static atomic_unchecked_t mce_panicked;
21391
21392 static int fake_panic;
21393-static atomic_t mce_fake_panicked;
21394+static atomic_unchecked_t mce_fake_panicked;
21395
21396 /* Panic in progress. Enable interrupts and wait for final IPI */
21397 static void wait_for_panic(void)
21398@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21399 /*
21400 * Make sure only one CPU runs in machine check panic
21401 */
21402- if (atomic_inc_return(&mce_panicked) > 1)
21403+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21404 wait_for_panic();
21405 barrier();
21406
21407@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21408 console_verbose();
21409 } else {
21410 /* Don't log too much for fake panic */
21411- if (atomic_inc_return(&mce_fake_panicked) > 1)
21412+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21413 return;
21414 }
21415 /* First print corrected ones that are still unlogged */
21416@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21417 if (!fake_panic) {
21418 if (panic_timeout == 0)
21419 panic_timeout = mca_cfg.panic_timeout;
21420- panic(msg);
21421+ panic("%s", msg);
21422 } else
21423 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21424 }
21425@@ -744,7 +745,7 @@ static int mce_timed_out(u64 *t)
21426 * might have been modified by someone else.
21427 */
21428 rmb();
21429- if (atomic_read(&mce_panicked))
21430+ if (atomic_read_unchecked(&mce_panicked))
21431 wait_for_panic();
21432 if (!mca_cfg.monarch_timeout)
21433 goto out;
21434@@ -1722,7 +1723,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21435 }
21436
21437 /* Call the installed machine check handler for this CPU setup. */
21438-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21439+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21440 unexpected_machine_check;
21441
21442 /*
21443@@ -1745,7 +1746,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21444 return;
21445 }
21446
21447+ pax_open_kernel();
21448 machine_check_vector = do_machine_check;
21449+ pax_close_kernel();
21450
21451 __mcheck_cpu_init_generic();
21452 __mcheck_cpu_init_vendor(c);
21453@@ -1759,7 +1762,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21454 */
21455
21456 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21457-static int mce_chrdev_open_count; /* #times opened */
21458+static local_t mce_chrdev_open_count; /* #times opened */
21459 static int mce_chrdev_open_exclu; /* already open exclusive? */
21460
21461 static int mce_chrdev_open(struct inode *inode, struct file *file)
21462@@ -1767,7 +1770,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21463 spin_lock(&mce_chrdev_state_lock);
21464
21465 if (mce_chrdev_open_exclu ||
21466- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21467+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21468 spin_unlock(&mce_chrdev_state_lock);
21469
21470 return -EBUSY;
21471@@ -1775,7 +1778,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21472
21473 if (file->f_flags & O_EXCL)
21474 mce_chrdev_open_exclu = 1;
21475- mce_chrdev_open_count++;
21476+ local_inc(&mce_chrdev_open_count);
21477
21478 spin_unlock(&mce_chrdev_state_lock);
21479
21480@@ -1786,7 +1789,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21481 {
21482 spin_lock(&mce_chrdev_state_lock);
21483
21484- mce_chrdev_open_count--;
21485+ local_dec(&mce_chrdev_open_count);
21486 mce_chrdev_open_exclu = 0;
21487
21488 spin_unlock(&mce_chrdev_state_lock);
21489@@ -2461,7 +2464,7 @@ static __init void mce_init_banks(void)
21490
21491 for (i = 0; i < mca_cfg.banks; i++) {
21492 struct mce_bank *b = &mce_banks[i];
21493- struct device_attribute *a = &b->attr;
21494+ device_attribute_no_const *a = &b->attr;
21495
21496 sysfs_attr_init(&a->attr);
21497 a->attr.name = b->attrname;
21498@@ -2568,7 +2571,7 @@ struct dentry *mce_get_debugfs_dir(void)
21499 static void mce_reset(void)
21500 {
21501 cpu_missing = 0;
21502- atomic_set(&mce_fake_panicked, 0);
21503+ atomic_set_unchecked(&mce_fake_panicked, 0);
21504 atomic_set(&mce_executing, 0);
21505 atomic_set(&mce_callin, 0);
21506 atomic_set(&global_nwo, 0);
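Note on the mce.c hunks above: under PaX's REFCOUNT hardening, atomic_t operations trap on overflow so that a reference-count wrap cannot be turned into a use-after-free. Counters that are not refcounts and may legitimately keep counting, such as the panic nesting counters here, are converted to the *_unchecked variants to opt out; the char-device open counter similarly becomes a local_t, which is cheap and is only touched under mce_chrdev_state_lock anyway. A minimal single-threaded userspace sketch of the checked/unchecked distinction follows; the kernel's real versions use locked instructions and a trapping overflow check, and all names below are illustrative.

/*
 * Sketch (not the kernel implementation) of atomic_t vs.
 * atomic_unchecked_t under PAX_REFCOUNT: the checked variant refuses
 * to overflow, the unchecked variant is allowed to keep counting.
 * Single-threaded stand-ins; the kernel uses locked instructions.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* checked: abort on signed overflow, mimicking the PaX trap */
static int atomic_inc_return(atomic_t *v)
{
	if (v->counter == INT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	return ++v->counter;
}

/* unchecked: wrapping/growth is acceptable for this counter */
static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	return ++v->counter;
}

int main(void)
{
	atomic_unchecked_t panicked = { 0 };
	atomic_t refs = { INT_MAX };          /* refcount at the brink */

	/* first caller "wins"; later callers would wait_for_panic() */
	if (atomic_inc_return_unchecked(&panicked) > 1)
		puts("panic already in progress");
	else
		puts("first to panic");

	atomic_inc_return(&refs);             /* traps: overflow caught */
	return 0;
}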
21507diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21508index a304298..49b6d06 100644
21509--- a/arch/x86/kernel/cpu/mcheck/p5.c
21510+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21511@@ -10,6 +10,7 @@
21512 #include <asm/processor.h>
21513 #include <asm/mce.h>
21514 #include <asm/msr.h>
21515+#include <asm/pgtable.h>
21516
21517 /* By default disabled */
21518 int mce_p5_enabled __read_mostly;
21519@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21520 if (!cpu_has(c, X86_FEATURE_MCE))
21521 return;
21522
21523+ pax_open_kernel();
21524 machine_check_vector = pentium_machine_check;
21525+ pax_close_kernel();
21526 /* Make sure the vector pointer is visible before we enable MCEs: */
21527 wmb();
21528
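Note: the pax_open_kernel()/pax_close_kernel() pairs here and in mce.c/winchip.c exist because KERNEXEC moves machine_check_vector into read-only memory (the __read_only annotation in the mce.c hunk). The helpers briefly lift kernel write protection for the one-time pointer installation. Below is a rough userspace analogue with page protections, for illustration only; the actual kernel helpers toggle CR0.WP (or switch code segments) rather than calling mprotect.

/*
 * Userspace analogue of the pax_open_kernel()/pax_close_kernel()
 * pattern: a function pointer lives in a read-only page and is made
 * briefly writable for a one-time installation.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void default_handler(void) { puts("unexpected machine check"); }
static void real_handler(void)    { puts("do_machine_check"); }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	/* page-aligned slot standing in for a __read_only pointer */
	void (**vector)(void) = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (vector == MAP_FAILED)
		return 1;

	*vector = default_handler;
	mprotect(vector, pagesz, PROT_READ);              /* locked down */

	mprotect(vector, pagesz, PROT_READ | PROT_WRITE); /* "open" */
	*vector = real_handler;                           /* install once */
	mprotect(vector, pagesz, PROT_READ);              /* "close" */

	(*vector)();
	return 0;
}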
21529diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21530index 7dc5564..1273569 100644
21531--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21532+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21533@@ -9,6 +9,7 @@
21534 #include <asm/processor.h>
21535 #include <asm/mce.h>
21536 #include <asm/msr.h>
21537+#include <asm/pgtable.h>
21538
21539 /* Machine check handler for WinChip C6: */
21540 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21541@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21542 {
21543 u32 lo, hi;
21544
21545+ pax_open_kernel();
21546 machine_check_vector = winchip_machine_check;
21547+ pax_close_kernel();
21548 /* Make sure the vector pointer is visible before we enable MCEs: */
21549 wmb();
21550
21551diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21552index 36a8361..e7058c2 100644
21553--- a/arch/x86/kernel/cpu/microcode/core.c
21554+++ b/arch/x86/kernel/cpu/microcode/core.c
21555@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21556 return NOTIFY_OK;
21557 }
21558
21559-static struct notifier_block __refdata mc_cpu_notifier = {
21560+static struct notifier_block mc_cpu_notifier = {
21561 .notifier_call = mc_cpu_callback,
21562 };
21563
21564diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21565index c6826d1..8dc677e 100644
21566--- a/arch/x86/kernel/cpu/microcode/intel.c
21567+++ b/arch/x86/kernel/cpu/microcode/intel.c
21568@@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
21569 struct microcode_header_intel mc_header;
21570 unsigned int mc_size;
21571
21572+ if (leftover < sizeof(mc_header)) {
21573+ pr_err("error! Truncated header in microcode data file\n");
21574+ break;
21575+ }
21576+
21577 if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
21578 break;
21579
21580@@ -293,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21581
21582 static int get_ucode_user(void *to, const void *from, size_t n)
21583 {
21584- return copy_from_user(to, from, n);
21585+ return copy_from_user(to, (const void __force_user *)from, n);
21586 }
21587
21588 static enum ucode_state
21589 request_microcode_user(int cpu, const void __user *buf, size_t size)
21590 {
21591- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21592+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21593 }
21594
21595 static void microcode_fini_cpu(int cpu)
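Note: the generic_load_microcode() hunk adds the classic length check for parsers of untrusted blobs: never read a full header out of the input unless the remaining byte count covers it. A self-contained sketch of the pattern follows; the record layout and names are made up for the example.

/*
 * Bounds-check pattern from the hunk above: verify the remaining
 * bytes cover a complete header before reading it, and that the
 * declared record size fits in what is left.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec_header {
	uint32_t type;
	uint32_t totalsize;     /* size of header + payload */
};

static int parse_stream(const uint8_t *p, size_t leftover)
{
	while (leftover) {
		struct rec_header h;

		if (leftover < sizeof(h)) {
			fprintf(stderr, "truncated header\n");
			return -1;              /* reject, never overread */
		}
		memcpy(&h, p, sizeof(h));

		if (h.totalsize < sizeof(h) || h.totalsize > leftover) {
			fprintf(stderr, "bad record size\n");
			return -1;
		}
		printf("record type %u, %u bytes\n", h.type, h.totalsize);
		p += h.totalsize;
		leftover -= h.totalsize;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[16] = { 0 };
	struct rec_header h = { .type = 1, .totalsize = 16 };

	memcpy(buf, &h, sizeof(h));
	return parse_stream(buf, sizeof(buf));
}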
21596diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
21597index ec9df6f..420eb93 100644
21598--- a/arch/x86/kernel/cpu/microcode/intel_early.c
21599+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
21600@@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start,
21601 unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
21602 int i;
21603
21604- while (leftover) {
21605+ while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
21606+
21607+ if (leftover < sizeof(mc_header))
21608+ break;
21609+
21610 mc_header = (struct microcode_header_intel *)ucode_ptr;
21611
21612 mc_size = get_totalsize(mc_header);
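Note: the intel_early.c hunk bounds the same loop on both sides: the input must still hold a complete header, and the destination array must still have room (mc_saved_count < ARRAY_SIZE(mc_saved_tmp)). A compact illustration of the dual guard:

/* Dual guard: stop when either the input or the destination runs out. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	const int input[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
	int saved[4];
	unsigned int count = 0, i = 0;
	unsigned int leftover = ARRAY_SIZE(input);

	while (leftover && count < ARRAY_SIZE(saved)) {
		saved[count++] = input[i++];
		leftover--;
	}
	printf("saved %u entries\n", count);    /* 4, not 10 */
	return 0;
}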
21613diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21614index ea5f363..cb0e905 100644
21615--- a/arch/x86/kernel/cpu/mtrr/main.c
21616+++ b/arch/x86/kernel/cpu/mtrr/main.c
21617@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21618 u64 size_or_mask, size_and_mask;
21619 static bool mtrr_aps_delayed_init;
21620
21621-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21622+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21623
21624 const struct mtrr_ops *mtrr_if;
21625
21626diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21627index df5e41f..816c719 100644
21628--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21629+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21630@@ -25,7 +25,7 @@ struct mtrr_ops {
21631 int (*validate_add_page)(unsigned long base, unsigned long size,
21632 unsigned int type);
21633 int (*have_wrcomb)(void);
21634-};
21635+} __do_const;
21636
21637 extern int generic_get_free_region(unsigned long base, unsigned long size,
21638 int replace_reg);
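Note: __do_const on struct mtrr_ops (and __read_only on the mtrr_ops[] table) comes from grsecurity's constification of function-pointer structures: instances end up in read-only memory, so a kernel write primitive cannot redirect the callbacks. Plain C shows the same effect with an ordinary const table; in the patch the attribute lets a GCC plugin apply this automatically.

/*
 * Constification idea in plain C: a const table of function pointers
 * lands in .rodata, so the callbacks cannot be overwritten at runtime.
 */
#include <stdio.h>

struct ops {
	int (*validate)(unsigned long base, unsigned long size);
	int (*have_wrcomb)(void);
};

static int generic_validate(unsigned long base, unsigned long size)
{
	return (base % 4096 == 0) && size != 0;   /* stand-in check */
}

static int generic_have_wrcomb(void) { return 1; }

/* const => placed in read-only data, like a __do_const struct */
static const struct ops generic_ops = {
	.validate    = generic_validate,
	.have_wrcomb = generic_have_wrcomb,
};

int main(void)
{
	printf("valid=%d wrcomb=%d\n",
	       generic_ops.validate(0x1000, 0x1000),
	       generic_ops.have_wrcomb());
	return 0;
}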
21639diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21640index 143e5f5..5825081 100644
21641--- a/arch/x86/kernel/cpu/perf_event.c
21642+++ b/arch/x86/kernel/cpu/perf_event.c
21643@@ -1374,7 +1374,7 @@ static void __init pmu_check_apic(void)
21644
21645 }
21646
21647-static struct attribute_group x86_pmu_format_group = {
21648+static attribute_group_no_const x86_pmu_format_group = {
21649 .name = "format",
21650 .attrs = NULL,
21651 };
21652@@ -1473,7 +1473,7 @@ static struct attribute *events_attr[] = {
21653 NULL,
21654 };
21655
21656-static struct attribute_group x86_pmu_events_group = {
21657+static attribute_group_no_const x86_pmu_events_group = {
21658 .name = "events",
21659 .attrs = events_attr,
21660 };
21661@@ -1997,7 +1997,7 @@ static unsigned long get_segment_base(unsigned int segment)
21662 if (idx > GDT_ENTRIES)
21663 return 0;
21664
21665- desc = raw_cpu_ptr(gdt_page.gdt);
21666+ desc = get_cpu_gdt_table(smp_processor_id());
21667 }
21668
21669 return get_desc_base(desc + idx);
21670@@ -2087,7 +2087,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21671 break;
21672
21673 perf_callchain_store(entry, frame.return_address);
21674- fp = frame.next_frame;
21675+ fp = (const void __force_user *)frame.next_frame;
21676 }
21677 }
21678
21679diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21680index 97242a9..cf9c30e 100644
21681--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21682+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21683@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21684 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21685 {
21686 struct attribute **attrs;
21687- struct attribute_group *attr_group;
21688+ attribute_group_no_const *attr_group;
21689 int i = 0, j;
21690
21691 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21692diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21693index 498b6d9..4126515 100644
21694--- a/arch/x86/kernel/cpu/perf_event_intel.c
21695+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21696@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21697 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21698
21699 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21700- u64 capabilities;
21701+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21702
21703- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21704- x86_pmu.intel_cap.capabilities = capabilities;
21705+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21706+ x86_pmu.intel_cap.capabilities = capabilities;
21707 }
21708
21709 intel_ds_init();
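Note: the perf_event_intel.c change swaps an unconditional rdmsrl() for rdmsrl_safe(), which survives the fault a missing MSR raises (for instance under a hypervisor that does not expose MSR_IA32_PERF_CAPABILITIES) and lets the caller fall back to the previously saved value. The control flow, reduced to plain C:

/* Fallible read that restores the caller's default on failure. */
#include <stdio.h>

static int read_cap_safe(unsigned long *out)
{
	(void)out;
	return -1;      /* pretend the MSR read faulted */
}

int main(void)
{
	unsigned long live = 42;        /* current capabilities */
	unsigned long saved = live;     /* snapshot before the read */

	if (read_cap_safe(&live))       /* faulting read fails... */
		live = saved;           /* ...so restore the snapshot */

	printf("capabilities=%lu\n", live);   /* still 42 */
	return 0;
}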
21710diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21711index c4bb8b8..9f7384d 100644
21712--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21713+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21714@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21715 NULL,
21716 };
21717
21718-static struct attribute_group rapl_pmu_events_group = {
21719+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21720 .name = "events",
21721 .attrs = NULL, /* patched at runtime */
21722 };
21723diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21724index c635b8b..b78835e 100644
21725--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21726+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21727@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21728 static int __init uncore_type_init(struct intel_uncore_type *type)
21729 {
21730 struct intel_uncore_pmu *pmus;
21731- struct attribute_group *attr_group;
21732+ attribute_group_no_const *attr_group;
21733 struct attribute **attrs;
21734 int i, j;
21735
21736diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21737index 6c8c1e7..515b98a 100644
21738--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21739+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21740@@ -114,7 +114,7 @@ struct intel_uncore_box {
21741 struct uncore_event_desc {
21742 struct kobj_attribute attr;
21743 const char *config;
21744-};
21745+} __do_const;
21746
21747 ssize_t uncore_event_show(struct kobject *kobj,
21748 struct kobj_attribute *attr, char *buf);
21749diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21750index 83741a7..bd3507d 100644
21751--- a/arch/x86/kernel/cpuid.c
21752+++ b/arch/x86/kernel/cpuid.c
21753@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21754 return notifier_from_errno(err);
21755 }
21756
21757-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21758+static struct notifier_block cpuid_class_cpu_notifier =
21759 {
21760 .notifier_call = cpuid_class_cpu_callback,
21761 };
21762diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21763index aceb2f9..c76d3e3 100644
21764--- a/arch/x86/kernel/crash.c
21765+++ b/arch/x86/kernel/crash.c
21766@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21767 #ifdef CONFIG_X86_32
21768 struct pt_regs fixed_regs;
21769
21770- if (!user_mode_vm(regs)) {
21771+ if (!user_mode(regs)) {
21772 crash_fixup_ss_esp(&fixed_regs, regs);
21773 regs = &fixed_regs;
21774 }
21775diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21776index afa64ad..dce67dd 100644
21777--- a/arch/x86/kernel/crash_dump_64.c
21778+++ b/arch/x86/kernel/crash_dump_64.c
21779@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21780 return -ENOMEM;
21781
21782 if (userbuf) {
21783- if (copy_to_user(buf, vaddr + offset, csize)) {
21784+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21785 iounmap(vaddr);
21786 return -EFAULT;
21787 }
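Note: the __force_user/__force_kernel casts sprinkled through these files are sparse address-space annotations, not runtime operations. grsecurity's checker configuration treats user and kernel pointers as distinct address spaces, so handing a kernel buffer to a user-pointer API (as copy_oldmem_page() does above) needs an explicit __force cast. The sketch below wires up such annotations following mainline's compiler.h pattern; the *_sim function is invented for the example.

/*
 * Under sparse (__CHECKER__), __user pointers carry a distinct
 * address space and cannot be mixed with kernel pointers without an
 * explicit __force cast; ordinary compilers see empty macros.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* stand-in for copy_to_user(): dst is a user-space pointer */
static int copy_to_user_sim(void __user *dst, const void *src, size_t n)
{
	memcpy((void __force *)dst, src, n);   /* deliberate cast */
	return 0;
}

int main(void)
{
	char buf[8];

	/* a kernel buffer handed through a user-pointer API needs an
	 * explicit cast, exactly like the hunk above */
	copy_to_user_sim((char __force __user *)buf, "oldmem", 7);
	printf("%s\n", buf);
	return 0;
}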
21788diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21789index f6dfd93..892ade4 100644
21790--- a/arch/x86/kernel/doublefault.c
21791+++ b/arch/x86/kernel/doublefault.c
21792@@ -12,7 +12,7 @@
21793
21794 #define DOUBLEFAULT_STACKSIZE (1024)
21795 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21796-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21797+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21798
21799 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21800
21801@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21802 unsigned long gdt, tss;
21803
21804 native_store_gdt(&gdt_desc);
21805- gdt = gdt_desc.address;
21806+ gdt = (unsigned long)gdt_desc.address;
21807
21808 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21809
21810@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21811 /* 0x2 bit is always set */
21812 .flags = X86_EFLAGS_SF | 0x2,
21813 .sp = STACK_START,
21814- .es = __USER_DS,
21815+ .es = __KERNEL_DS,
21816 .cs = __KERNEL_CS,
21817 .ss = __KERNEL_DS,
21818- .ds = __USER_DS,
21819+ .ds = __KERNEL_DS,
21820 .fs = __KERNEL_PERCPU,
21821
21822 .__cr3 = __pa_nodebug(swapper_pg_dir),
21823diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21824index b74ebc7..2c95874 100644
21825--- a/arch/x86/kernel/dumpstack.c
21826+++ b/arch/x86/kernel/dumpstack.c
21827@@ -2,6 +2,9 @@
21828 * Copyright (C) 1991, 1992 Linus Torvalds
21829 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21830 */
21831+#ifdef CONFIG_GRKERNSEC_HIDESYM
21832+#define __INCLUDED_BY_HIDESYM 1
21833+#endif
21834 #include <linux/kallsyms.h>
21835 #include <linux/kprobes.h>
21836 #include <linux/uaccess.h>
21837@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
21838
21839 void printk_address(unsigned long address)
21840 {
21841- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
21842+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
21843 }
21844
21845 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
21846 static void
21847 print_ftrace_graph_addr(unsigned long addr, void *data,
21848 const struct stacktrace_ops *ops,
21849- struct thread_info *tinfo, int *graph)
21850+ struct task_struct *task, int *graph)
21851 {
21852- struct task_struct *task;
21853 unsigned long ret_addr;
21854 int index;
21855
21856 if (addr != (unsigned long)return_to_handler)
21857 return;
21858
21859- task = tinfo->task;
21860 index = task->curr_ret_stack;
21861
21862 if (!task->ret_stack || index < *graph)
21863@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21864 static inline void
21865 print_ftrace_graph_addr(unsigned long addr, void *data,
21866 const struct stacktrace_ops *ops,
21867- struct thread_info *tinfo, int *graph)
21868+ struct task_struct *task, int *graph)
21869 { }
21870 #endif
21871
21872@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21873 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21874 */
21875
21876-static inline int valid_stack_ptr(struct thread_info *tinfo,
21877- void *p, unsigned int size, void *end)
21878+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21879 {
21880- void *t = tinfo;
21881 if (end) {
21882 if (p < end && p >= (end-THREAD_SIZE))
21883 return 1;
21884@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21885 }
21886
21887 unsigned long
21888-print_context_stack(struct thread_info *tinfo,
21889+print_context_stack(struct task_struct *task, void *stack_start,
21890 unsigned long *stack, unsigned long bp,
21891 const struct stacktrace_ops *ops, void *data,
21892 unsigned long *end, int *graph)
21893 {
21894 struct stack_frame *frame = (struct stack_frame *)bp;
21895
21896- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21897+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21898 unsigned long addr;
21899
21900 addr = *stack;
21901@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21902 } else {
21903 ops->address(data, addr, 0);
21904 }
21905- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21906+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21907 }
21908 stack++;
21909 }
21910@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21911 EXPORT_SYMBOL_GPL(print_context_stack);
21912
21913 unsigned long
21914-print_context_stack_bp(struct thread_info *tinfo,
21915+print_context_stack_bp(struct task_struct *task, void *stack_start,
21916 unsigned long *stack, unsigned long bp,
21917 const struct stacktrace_ops *ops, void *data,
21918 unsigned long *end, int *graph)
21919@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21920 struct stack_frame *frame = (struct stack_frame *)bp;
21921 unsigned long *ret_addr = &frame->return_address;
21922
21923- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21924+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21925 unsigned long addr = *ret_addr;
21926
21927 if (!__kernel_text_address(addr))
21928@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21929 ops->address(data, addr, 1);
21930 frame = frame->next_frame;
21931 ret_addr = &frame->return_address;
21932- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21933+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21934 }
21935
21936 return (unsigned long)frame;
21937@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
21938 static void print_trace_address(void *data, unsigned long addr, int reliable)
21939 {
21940 touch_nmi_watchdog();
21941- printk(data);
21942+ printk("%s", (char *)data);
21943 printk_stack_address(addr, reliable);
21944 }
21945
21946@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
21947 EXPORT_SYMBOL_GPL(oops_begin);
21948 NOKPROBE_SYMBOL(oops_begin);
21949
21950+extern void gr_handle_kernel_exploit(void);
21951+
21952 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21953 {
21954 if (regs && kexec_should_crash(current))
21955@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21956 panic("Fatal exception in interrupt");
21957 if (panic_on_oops)
21958 panic("Fatal exception");
21959- do_exit(signr);
21960+
21961+ gr_handle_kernel_exploit();
21962+
21963+ do_group_exit(signr);
21964 }
21965 NOKPROBE_SYMBOL(oops_end);
21966
21967@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
21968 print_modules();
21969 show_regs(regs);
21970 #ifdef CONFIG_X86_32
21971- if (user_mode_vm(regs)) {
21972+ if (user_mode(regs)) {
21973 sp = regs->sp;
21974 ss = regs->ss & 0xffff;
21975 } else {
21976@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
21977 unsigned long flags = oops_begin();
21978 int sig = SIGSEGV;
21979
21980- if (!user_mode_vm(regs))
21981+ if (!user_mode(regs))
21982 report_bug(regs->ip, regs);
21983
21984 if (__die(str, regs, err))
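Note: the dumpstack.c refactor threads a task_struct plus an explicit stack base through the stack walkers instead of a thread_info. Under grsecurity the thread_info cannot be assumed to sit at the base of the stack, so valid_stack_ptr() now receives the base directly; the printk(data) to printk("%s", ...) change in print_trace_address() also closes a format-string hazard. A userspace rendering of the bounds test, with THREAD_SIZE assumed to be 8 KB:

/*
 * Shape of valid_stack_ptr() after the refactor: the walker is handed
 * the stack base directly and checks the candidate pointer fits
 * entirely inside [base, base + THREAD_SIZE).
 */
#include <stdio.h>

#define THREAD_SIZE 8192

static int valid_stack_ptr(const void *t, const void *p,
			   unsigned int size, const void *end)
{
	const char *base = t, *q = p;

	if (end) {
		const char *e = end;
		return q < e && q >= e - THREAD_SIZE;
	}
	return q >= base && q <= base + THREAD_SIZE - size;
}

int main(void)
{
	static unsigned char stack[THREAD_SIZE];

	printf("%d\n", valid_stack_ptr(stack, stack + 16, 8, NULL));          /* 1 */
	printf("%d\n", valid_stack_ptr(stack, stack + THREAD_SIZE, 8, NULL)); /* 0 */
	return 0;
}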
21985diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
21986index 5abd4cd..c65733b 100644
21987--- a/arch/x86/kernel/dumpstack_32.c
21988+++ b/arch/x86/kernel/dumpstack_32.c
21989@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21990 bp = stack_frame(task, regs);
21991
21992 for (;;) {
21993- struct thread_info *context;
21994+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
21995 void *end_stack;
21996
21997 end_stack = is_hardirq_stack(stack, cpu);
21998 if (!end_stack)
21999 end_stack = is_softirq_stack(stack, cpu);
22000
22001- context = task_thread_info(task);
22002- bp = ops->walk_stack(context, stack, bp, ops, data,
22003+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22004 end_stack, &graph);
22005
22006 /* Stop if not on irq stack */
22007@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22008 int i;
22009
22010 show_regs_print_info(KERN_EMERG);
22011- __show_regs(regs, !user_mode_vm(regs));
22012+ __show_regs(regs, !user_mode(regs));
22013
22014 /*
22015 * When in-kernel, we also print out the stack and code at the
22016 * time of the fault..
22017 */
22018- if (!user_mode_vm(regs)) {
22019+ if (!user_mode(regs)) {
22020 unsigned int code_prologue = code_bytes * 43 / 64;
22021 unsigned int code_len = code_bytes;
22022 unsigned char c;
22023 u8 *ip;
22024+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22025
22026 pr_emerg("Stack:\n");
22027 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22028
22029 pr_emerg("Code:");
22030
22031- ip = (u8 *)regs->ip - code_prologue;
22032+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22033 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22034 /* try starting at IP */
22035- ip = (u8 *)regs->ip;
22036+ ip = (u8 *)regs->ip + cs_base;
22037 code_len = code_len - code_prologue + 1;
22038 }
22039 for (i = 0; i < code_len; i++, ip++) {
22040@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22041 pr_cont(" Bad EIP value.");
22042 break;
22043 }
22044- if (ip == (u8 *)regs->ip)
22045+ if (ip == (u8 *)regs->ip + cs_base)
22046 pr_cont(" <%02x>", c);
22047 else
22048 pr_cont(" %02x", c);
22049@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22050 {
22051 unsigned short ud2;
22052
22053+ ip = ktla_ktva(ip);
22054 if (ip < PAGE_OFFSET)
22055 return 0;
22056 if (probe_kernel_address((unsigned short *)ip, ud2))
22057@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22058
22059 return ud2 == 0x0b0f;
22060 }
22061+
22062+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22063+void pax_check_alloca(unsigned long size)
22064+{
22065+ unsigned long sp = (unsigned long)&sp, stack_left;
22066+
22067+ /* all kernel stacks are of the same size */
22068+ stack_left = sp & (THREAD_SIZE - 1);
22069+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22070+}
22071+EXPORT_SYMBOL(pax_check_alloca);
22072+#endif
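Note: pax_check_alloca() relies on kernel stacks being power-of-two sized and size-aligned, so sp & (THREAD_SIZE - 1) is exactly the space left below the stack pointer, and any variable-size allocation must leave a 256-byte guard. The arithmetic, as a userspace sketch under those alignment assumptions:

/*
 * Stack-space check from the hunk above: the low bits of an aligned
 * stack pointer are the bytes remaining, and a 256-byte guard band
 * must survive the allocation.
 */
#include <assert.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL

static void check_alloca(unsigned long sp, unsigned long size)
{
	unsigned long stack_left = sp & (THREAD_SIZE - 1);

	/* trap instead of silently running off the stack */
	assert(stack_left >= 256 && size < stack_left - 256);
}

int main(void)
{
	unsigned long sp = 0x200000UL + 4096;  /* 4096 bytes left on an
	                                          aligned 8 KB stack */

	check_alloca(sp, 1024);                /* fits: passes */
	printf("1024-byte alloca accepted\n");
	/* check_alloca(sp, 4000); would trip: 4000 >= 4096 - 256 */
	return 0;
}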
22073diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22074index ff86f19..73eabf4 100644
22075--- a/arch/x86/kernel/dumpstack_64.c
22076+++ b/arch/x86/kernel/dumpstack_64.c
22077@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22078 const struct stacktrace_ops *ops, void *data)
22079 {
22080 const unsigned cpu = get_cpu();
22081- struct thread_info *tinfo;
22082 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22083 unsigned long dummy;
22084 unsigned used = 0;
22085 int graph = 0;
22086 int done = 0;
22087+ void *stack_start;
22088
22089 if (!task)
22090 task = current;
22091@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22092 * current stack address. If the stacks consist of nested
22093 * exceptions
22094 */
22095- tinfo = task_thread_info(task);
22096 while (!done) {
22097 unsigned long *stack_end;
22098 enum stack_type stype;
22099@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22100 if (ops->stack(data, id) < 0)
22101 break;
22102
22103- bp = ops->walk_stack(tinfo, stack, bp, ops,
22104+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22105 data, stack_end, &graph);
22106 ops->stack(data, "<EOE>");
22107 /*
22108@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22109 * second-to-last pointer (index -2 to end) in the
22110 * exception stack:
22111 */
22112+ if ((u16)stack_end[-1] != __KERNEL_DS)
22113+ goto out;
22114 stack = (unsigned long *) stack_end[-2];
22115 done = 0;
22116 break;
22117@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22118
22119 if (ops->stack(data, "IRQ") < 0)
22120 break;
22121- bp = ops->walk_stack(tinfo, stack, bp,
22122+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22123 ops, data, stack_end, &graph);
22124 /*
22125 * We link to the next stack (which would be
22126@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22127 /*
22128 * This handles the process stack:
22129 */
22130- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22131+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22132+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22133+out:
22134 put_cpu();
22135 }
22136 EXPORT_SYMBOL(dump_trace);
22137@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22138 {
22139 unsigned short ud2;
22140
22141- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22142+ if (probe_kernel_address((unsigned short *)ip, ud2))
22143 return 0;
22144
22145 return ud2 == 0x0b0f;
22146 }
22147+
22148+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22149+void pax_check_alloca(unsigned long size)
22150+{
22151+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22152+ unsigned cpu, used;
22153+ char *id;
22154+
22155+ /* check the process stack first */
22156+ stack_start = (unsigned long)task_stack_page(current);
22157+ stack_end = stack_start + THREAD_SIZE;
22158+ if (likely(stack_start <= sp && sp < stack_end)) {
22159+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22160+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22161+ return;
22162+ }
22163+
22164+ cpu = get_cpu();
22165+
22166+ /* check the irq stacks */
22167+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22168+ stack_start = stack_end - IRQ_STACK_SIZE;
22169+ if (stack_start <= sp && sp < stack_end) {
22170+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22171+ put_cpu();
22172+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22173+ return;
22174+ }
22175+
22176+ /* check the exception stacks */
22177+ used = 0;
22178+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22179+ stack_start = stack_end - EXCEPTION_STKSZ;
22180+ if (stack_end && stack_start <= sp && sp < stack_end) {
22181+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22182+ put_cpu();
22183+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22184+ return;
22185+ }
22186+
22187+ put_cpu();
22188+
22189+ /* unknown stack */
22190+ BUG();
22191+}
22192+EXPORT_SYMBOL(pax_check_alloca);
22193+#endif
22194diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22195index dd2f07a..845dc05 100644
22196--- a/arch/x86/kernel/e820.c
22197+++ b/arch/x86/kernel/e820.c
22198@@ -802,8 +802,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22199
22200 static void early_panic(char *msg)
22201 {
22202- early_printk(msg);
22203- panic(msg);
22204+ early_printk("%s", msg);
22205+ panic("%s", msg);
22206 }
22207
22208 static int userdef __initdata;
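Note: early_panic() previously forwarded msg as the format string itself. Even where msg happens to be a constant string today, passing text as a format lets any '%' directive in it drive the formatter; panic("%s", msg) prints it verbatim instead. Demonstration:

/* Format-string hazard behind the panic("%s", msg) change. */
#include <stdio.h>

int main(void)
{
	const char *msg = "bad e820 map: %s %n";   /* hostile content */

	/* printf(msg);        WRONG: '%' in msg drives the formatter */
	printf("%s\n", msg);   /* RIGHT: msg is printed verbatim */
	return 0;
}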
22209diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22210index 01d1c18..8073693 100644
22211--- a/arch/x86/kernel/early_printk.c
22212+++ b/arch/x86/kernel/early_printk.c
22213@@ -7,6 +7,7 @@
22214 #include <linux/pci_regs.h>
22215 #include <linux/pci_ids.h>
22216 #include <linux/errno.h>
22217+#include <linux/sched.h>
22218 #include <asm/io.h>
22219 #include <asm/processor.h>
22220 #include <asm/fcntl.h>
22221diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22222index 000d419..8f66802 100644
22223--- a/arch/x86/kernel/entry_32.S
22224+++ b/arch/x86/kernel/entry_32.S
22225@@ -177,13 +177,154 @@
22226 /*CFI_REL_OFFSET gs, PT_GS*/
22227 .endm
22228 .macro SET_KERNEL_GS reg
22229+
22230+#ifdef CONFIG_CC_STACKPROTECTOR
22231 movl $(__KERNEL_STACK_CANARY), \reg
22232+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22233+ movl $(__USER_DS), \reg
22234+#else
22235+ xorl \reg, \reg
22236+#endif
22237+
22238 movl \reg, %gs
22239 .endm
22240
22241 #endif /* CONFIG_X86_32_LAZY_GS */
22242
22243-.macro SAVE_ALL
22244+.macro pax_enter_kernel
22245+#ifdef CONFIG_PAX_KERNEXEC
22246+ call pax_enter_kernel
22247+#endif
22248+.endm
22249+
22250+.macro pax_exit_kernel
22251+#ifdef CONFIG_PAX_KERNEXEC
22252+ call pax_exit_kernel
22253+#endif
22254+.endm
22255+
22256+#ifdef CONFIG_PAX_KERNEXEC
22257+ENTRY(pax_enter_kernel)
22258+#ifdef CONFIG_PARAVIRT
22259+ pushl %eax
22260+ pushl %ecx
22261+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22262+ mov %eax, %esi
22263+#else
22264+ mov %cr0, %esi
22265+#endif
22266+ bts $16, %esi
22267+ jnc 1f
22268+ mov %cs, %esi
22269+ cmp $__KERNEL_CS, %esi
22270+ jz 3f
22271+ ljmp $__KERNEL_CS, $3f
22272+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22273+2:
22274+#ifdef CONFIG_PARAVIRT
22275+ mov %esi, %eax
22276+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22277+#else
22278+ mov %esi, %cr0
22279+#endif
22280+3:
22281+#ifdef CONFIG_PARAVIRT
22282+ popl %ecx
22283+ popl %eax
22284+#endif
22285+ ret
22286+ENDPROC(pax_enter_kernel)
22287+
22288+ENTRY(pax_exit_kernel)
22289+#ifdef CONFIG_PARAVIRT
22290+ pushl %eax
22291+ pushl %ecx
22292+#endif
22293+ mov %cs, %esi
22294+ cmp $__KERNEXEC_KERNEL_CS, %esi
22295+ jnz 2f
22296+#ifdef CONFIG_PARAVIRT
22297+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22298+ mov %eax, %esi
22299+#else
22300+ mov %cr0, %esi
22301+#endif
22302+ btr $16, %esi
22303+ ljmp $__KERNEL_CS, $1f
22304+1:
22305+#ifdef CONFIG_PARAVIRT
22306+ mov %esi, %eax
22307+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22308+#else
22309+ mov %esi, %cr0
22310+#endif
22311+2:
22312+#ifdef CONFIG_PARAVIRT
22313+ popl %ecx
22314+ popl %eax
22315+#endif
22316+ ret
22317+ENDPROC(pax_exit_kernel)
22318+#endif
22319+
22320+ .macro pax_erase_kstack
22321+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22322+ call pax_erase_kstack
22323+#endif
22324+ .endm
22325+
22326+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22327+/*
22328+ * ebp: thread_info
22329+ */
22330+ENTRY(pax_erase_kstack)
22331+ pushl %edi
22332+ pushl %ecx
22333+ pushl %eax
22334+
22335+ mov TI_lowest_stack(%ebp), %edi
22336+ mov $-0xBEEF, %eax
22337+ std
22338+
22339+1: mov %edi, %ecx
22340+ and $THREAD_SIZE_asm - 1, %ecx
22341+ shr $2, %ecx
22342+ repne scasl
22343+ jecxz 2f
22344+
22345+ cmp $2*16, %ecx
22346+ jc 2f
22347+
22348+ mov $2*16, %ecx
22349+ repe scasl
22350+ jecxz 2f
22351+ jne 1b
22352+
22353+2: cld
22354+ or $2*4, %edi
22355+ mov %esp, %ecx
22356+ sub %edi, %ecx
22357+
22358+ cmp $THREAD_SIZE_asm, %ecx
22359+ jb 3f
22360+ ud2
22361+3:
22362+
22363+ shr $2, %ecx
22364+ rep stosl
22365+
22366+ mov TI_task_thread_sp0(%ebp), %edi
22367+ sub $128, %edi
22368+ mov %edi, TI_lowest_stack(%ebp)
22369+
22370+ popl %eax
22371+ popl %ecx
22372+ popl %edi
22373+ ret
22374+ENDPROC(pax_erase_kstack)
22375+#endif
22376+
22377+.macro __SAVE_ALL _DS
22378 cld
22379 PUSH_GS
22380 pushl_cfi %fs
22381@@ -206,7 +347,7 @@
22382 CFI_REL_OFFSET ecx, 0
22383 pushl_cfi %ebx
22384 CFI_REL_OFFSET ebx, 0
22385- movl $(__USER_DS), %edx
22386+ movl $\_DS, %edx
22387 movl %edx, %ds
22388 movl %edx, %es
22389 movl $(__KERNEL_PERCPU), %edx
22390@@ -214,6 +355,15 @@
22391 SET_KERNEL_GS %edx
22392 .endm
22393
22394+.macro SAVE_ALL
22395+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22396+ __SAVE_ALL __KERNEL_DS
22397+ pax_enter_kernel
22398+#else
22399+ __SAVE_ALL __USER_DS
22400+#endif
22401+.endm
22402+
22403 .macro RESTORE_INT_REGS
22404 popl_cfi %ebx
22405 CFI_RESTORE ebx
22406@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22407 popfl_cfi
22408 jmp syscall_exit
22409 CFI_ENDPROC
22410-END(ret_from_fork)
22411+ENDPROC(ret_from_fork)
22412
22413 ENTRY(ret_from_kernel_thread)
22414 CFI_STARTPROC
22415@@ -340,7 +490,15 @@ ret_from_intr:
22416 andl $SEGMENT_RPL_MASK, %eax
22417 #endif
22418 cmpl $USER_RPL, %eax
22419+
22420+#ifdef CONFIG_PAX_KERNEXEC
22421+ jae resume_userspace
22422+
22423+ pax_exit_kernel
22424+ jmp resume_kernel
22425+#else
22426 jb resume_kernel # not returning to v8086 or userspace
22427+#endif
22428
22429 ENTRY(resume_userspace)
22430 LOCKDEP_SYS_EXIT
22431@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22432 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22433 # int/exception return?
22434 jne work_pending
22435- jmp restore_all
22436-END(ret_from_exception)
22437+ jmp restore_all_pax
22438+ENDPROC(ret_from_exception)
22439
22440 #ifdef CONFIG_PREEMPT
22441 ENTRY(resume_kernel)
22442@@ -365,7 +523,7 @@ need_resched:
22443 jz restore_all
22444 call preempt_schedule_irq
22445 jmp need_resched
22446-END(resume_kernel)
22447+ENDPROC(resume_kernel)
22448 #endif
22449 CFI_ENDPROC
22450
22451@@ -395,30 +553,45 @@ sysenter_past_esp:
22452 /*CFI_REL_OFFSET cs, 0*/
22453 /*
22454 * Push current_thread_info()->sysenter_return to the stack.
22455- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22456- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22457 */
22458- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22459+ pushl_cfi $0
22460 CFI_REL_OFFSET eip, 0
22461
22462 pushl_cfi %eax
22463 SAVE_ALL
22464+ GET_THREAD_INFO(%ebp)
22465+ movl TI_sysenter_return(%ebp),%ebp
22466+ movl %ebp,PT_EIP(%esp)
22467 ENABLE_INTERRUPTS(CLBR_NONE)
22468
22469 /*
22470 * Load the potential sixth argument from user stack.
22471 * Careful about security.
22472 */
22473+ movl PT_OLDESP(%esp),%ebp
22474+
22475+#ifdef CONFIG_PAX_MEMORY_UDEREF
22476+ mov PT_OLDSS(%esp),%ds
22477+1: movl %ds:(%ebp),%ebp
22478+ push %ss
22479+ pop %ds
22480+#else
22481 cmpl $__PAGE_OFFSET-3,%ebp
22482 jae syscall_fault
22483 ASM_STAC
22484 1: movl (%ebp),%ebp
22485 ASM_CLAC
22486+#endif
22487+
22488 movl %ebp,PT_EBP(%esp)
22489 _ASM_EXTABLE(1b,syscall_fault)
22490
22491 GET_THREAD_INFO(%ebp)
22492
22493+#ifdef CONFIG_PAX_RANDKSTACK
22494+ pax_erase_kstack
22495+#endif
22496+
22497 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22498 jnz sysenter_audit
22499 sysenter_do_call:
22500@@ -434,12 +607,24 @@ sysenter_after_call:
22501 testl $_TIF_ALLWORK_MASK, %ecx
22502 jne sysexit_audit
22503 sysenter_exit:
22504+
22505+#ifdef CONFIG_PAX_RANDKSTACK
22506+ pushl_cfi %eax
22507+ movl %esp, %eax
22508+ call pax_randomize_kstack
22509+ popl_cfi %eax
22510+#endif
22511+
22512+ pax_erase_kstack
22513+
22514 /* if something modifies registers it must also disable sysexit */
22515 movl PT_EIP(%esp), %edx
22516 movl PT_OLDESP(%esp), %ecx
22517 xorl %ebp,%ebp
22518 TRACE_IRQS_ON
22519 1: mov PT_FS(%esp), %fs
22520+2: mov PT_DS(%esp), %ds
22521+3: mov PT_ES(%esp), %es
22522 PTGS_TO_GS
22523 ENABLE_INTERRUPTS_SYSEXIT
22524
22525@@ -453,6 +638,9 @@ sysenter_audit:
22526 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22527 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22528 call __audit_syscall_entry
22529+
22530+ pax_erase_kstack
22531+
22532 popl_cfi %ecx /* get that remapped edx off the stack */
22533 popl_cfi %ecx /* get that remapped esi off the stack */
22534 movl PT_EAX(%esp),%eax /* reload syscall number */
22535@@ -479,10 +667,16 @@ sysexit_audit:
22536
22537 CFI_ENDPROC
22538 .pushsection .fixup,"ax"
22539-2: movl $0,PT_FS(%esp)
22540+4: movl $0,PT_FS(%esp)
22541+ jmp 1b
22542+5: movl $0,PT_DS(%esp)
22543+ jmp 1b
22544+6: movl $0,PT_ES(%esp)
22545 jmp 1b
22546 .popsection
22547- _ASM_EXTABLE(1b,2b)
22548+ _ASM_EXTABLE(1b,4b)
22549+ _ASM_EXTABLE(2b,5b)
22550+ _ASM_EXTABLE(3b,6b)
22551 PTGS_TO_GS_EX
22552 ENDPROC(ia32_sysenter_target)
22553
22554@@ -493,6 +687,11 @@ ENTRY(system_call)
22555 pushl_cfi %eax # save orig_eax
22556 SAVE_ALL
22557 GET_THREAD_INFO(%ebp)
22558+
22559+#ifdef CONFIG_PAX_RANDKSTACK
22560+ pax_erase_kstack
22561+#endif
22562+
22563 # system call tracing in operation / emulation
22564 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22565 jnz syscall_trace_entry
22566@@ -512,6 +711,15 @@ syscall_exit:
22567 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22568 jne syscall_exit_work
22569
22570+restore_all_pax:
22571+
22572+#ifdef CONFIG_PAX_RANDKSTACK
22573+ movl %esp, %eax
22574+ call pax_randomize_kstack
22575+#endif
22576+
22577+ pax_erase_kstack
22578+
22579 restore_all:
22580 TRACE_IRQS_IRET
22581 restore_all_notrace:
22582@@ -566,14 +774,34 @@ ldt_ss:
22583 * compensating for the offset by changing to the ESPFIX segment with
22584 * a base address that matches for the difference.
22585 */
22586-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22587+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22588 mov %esp, %edx /* load kernel esp */
22589 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22590 mov %dx, %ax /* eax: new kernel esp */
22591 sub %eax, %edx /* offset (low word is 0) */
22592+#ifdef CONFIG_SMP
22593+ movl PER_CPU_VAR(cpu_number), %ebx
22594+ shll $PAGE_SHIFT_asm, %ebx
22595+ addl $cpu_gdt_table, %ebx
22596+#else
22597+ movl $cpu_gdt_table, %ebx
22598+#endif
22599 shr $16, %edx
22600- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22601- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22602+
22603+#ifdef CONFIG_PAX_KERNEXEC
22604+ mov %cr0, %esi
22605+ btr $16, %esi
22606+ mov %esi, %cr0
22607+#endif
22608+
22609+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22610+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22611+
22612+#ifdef CONFIG_PAX_KERNEXEC
22613+ bts $16, %esi
22614+ mov %esi, %cr0
22615+#endif
22616+
22617 pushl_cfi $__ESPFIX_SS
22618 pushl_cfi %eax /* new kernel esp */
22619 /* Disable interrupts, but do not irqtrace this section: we
22620@@ -603,20 +831,18 @@ work_resched:
22621 movl TI_flags(%ebp), %ecx
22622 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22623 # than syscall tracing?
22624- jz restore_all
22625+ jz restore_all_pax
22626 testb $_TIF_NEED_RESCHED, %cl
22627 jnz work_resched
22628
22629 work_notifysig: # deal with pending signals and
22630 # notify-resume requests
22631+ movl %esp, %eax
22632 #ifdef CONFIG_VM86
22633 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22634- movl %esp, %eax
22635 jne work_notifysig_v86 # returning to kernel-space or
22636 # vm86-space
22637 1:
22638-#else
22639- movl %esp, %eax
22640 #endif
22641 TRACE_IRQS_ON
22642 ENABLE_INTERRUPTS(CLBR_NONE)
22643@@ -637,7 +863,7 @@ work_notifysig_v86:
22644 movl %eax, %esp
22645 jmp 1b
22646 #endif
22647-END(work_pending)
22648+ENDPROC(work_pending)
22649
22650 # perform syscall exit tracing
22651 ALIGN
22652@@ -645,11 +871,14 @@ syscall_trace_entry:
22653 movl $-ENOSYS,PT_EAX(%esp)
22654 movl %esp, %eax
22655 call syscall_trace_enter
22656+
22657+ pax_erase_kstack
22658+
22659 /* What it returned is what we'll actually use. */
22660 cmpl $(NR_syscalls), %eax
22661 jnae syscall_call
22662 jmp syscall_exit
22663-END(syscall_trace_entry)
22664+ENDPROC(syscall_trace_entry)
22665
22666 # perform syscall exit tracing
22667 ALIGN
22668@@ -662,26 +891,30 @@ syscall_exit_work:
22669 movl %esp, %eax
22670 call syscall_trace_leave
22671 jmp resume_userspace
22672-END(syscall_exit_work)
22673+ENDPROC(syscall_exit_work)
22674 CFI_ENDPROC
22675
22676 RING0_INT_FRAME # can't unwind into user space anyway
22677 syscall_fault:
22678+#ifdef CONFIG_PAX_MEMORY_UDEREF
22679+ push %ss
22680+ pop %ds
22681+#endif
22682 ASM_CLAC
22683 GET_THREAD_INFO(%ebp)
22684 movl $-EFAULT,PT_EAX(%esp)
22685 jmp resume_userspace
22686-END(syscall_fault)
22687+ENDPROC(syscall_fault)
22688
22689 syscall_badsys:
22690 movl $-ENOSYS,%eax
22691 jmp syscall_after_call
22692-END(syscall_badsys)
22693+ENDPROC(syscall_badsys)
22694
22695 sysenter_badsys:
22696 movl $-ENOSYS,%eax
22697 jmp sysenter_after_call
22698-END(sysenter_badsys)
22699+ENDPROC(sysenter_badsys)
22700 CFI_ENDPROC
22701
22702 .macro FIXUP_ESPFIX_STACK
22703@@ -694,8 +927,15 @@ END(sysenter_badsys)
22704 */
22705 #ifdef CONFIG_X86_ESPFIX32
22706 /* fixup the stack */
22707- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22708- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22709+#ifdef CONFIG_SMP
22710+ movl PER_CPU_VAR(cpu_number), %ebx
22711+ shll $PAGE_SHIFT_asm, %ebx
22712+ addl $cpu_gdt_table, %ebx
22713+#else
22714+ movl $cpu_gdt_table, %ebx
22715+#endif
22716+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22717+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22718 shl $16, %eax
22719 addl %esp, %eax /* the adjusted stack pointer */
22720 pushl_cfi $__KERNEL_DS
22721@@ -751,7 +991,7 @@ vector=vector+1
22722 .endr
22723 2: jmp common_interrupt
22724 .endr
22725-END(irq_entries_start)
22726+ENDPROC(irq_entries_start)
22727
22728 .previous
22729 END(interrupt)
22730@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22731 pushl_cfi $do_coprocessor_error
22732 jmp error_code
22733 CFI_ENDPROC
22734-END(coprocessor_error)
22735+ENDPROC(coprocessor_error)
22736
22737 ENTRY(simd_coprocessor_error)
22738 RING0_INT_FRAME
22739@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22740 .section .altinstructions,"a"
22741 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22742 .previous
22743-.section .altinstr_replacement,"ax"
22744+.section .altinstr_replacement,"a"
22745 663: pushl $do_simd_coprocessor_error
22746 664:
22747 .previous
22748@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22749 #endif
22750 jmp error_code
22751 CFI_ENDPROC
22752-END(simd_coprocessor_error)
22753+ENDPROC(simd_coprocessor_error)
22754
22755 ENTRY(device_not_available)
22756 RING0_INT_FRAME
22757@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22758 pushl_cfi $do_device_not_available
22759 jmp error_code
22760 CFI_ENDPROC
22761-END(device_not_available)
22762+ENDPROC(device_not_available)
22763
22764 #ifdef CONFIG_PARAVIRT
22765 ENTRY(native_iret)
22766 iret
22767 _ASM_EXTABLE(native_iret, iret_exc)
22768-END(native_iret)
22769+ENDPROC(native_iret)
22770
22771 ENTRY(native_irq_enable_sysexit)
22772 sti
22773 sysexit
22774-END(native_irq_enable_sysexit)
22775+ENDPROC(native_irq_enable_sysexit)
22776 #endif
22777
22778 ENTRY(overflow)
22779@@ -860,7 +1100,7 @@ ENTRY(overflow)
22780 pushl_cfi $do_overflow
22781 jmp error_code
22782 CFI_ENDPROC
22783-END(overflow)
22784+ENDPROC(overflow)
22785
22786 ENTRY(bounds)
22787 RING0_INT_FRAME
22788@@ -869,7 +1109,7 @@ ENTRY(bounds)
22789 pushl_cfi $do_bounds
22790 jmp error_code
22791 CFI_ENDPROC
22792-END(bounds)
22793+ENDPROC(bounds)
22794
22795 ENTRY(invalid_op)
22796 RING0_INT_FRAME
22797@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22798 pushl_cfi $do_invalid_op
22799 jmp error_code
22800 CFI_ENDPROC
22801-END(invalid_op)
22802+ENDPROC(invalid_op)
22803
22804 ENTRY(coprocessor_segment_overrun)
22805 RING0_INT_FRAME
22806@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22807 pushl_cfi $do_coprocessor_segment_overrun
22808 jmp error_code
22809 CFI_ENDPROC
22810-END(coprocessor_segment_overrun)
22811+ENDPROC(coprocessor_segment_overrun)
22812
22813 ENTRY(invalid_TSS)
22814 RING0_EC_FRAME
22815@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22816 pushl_cfi $do_invalid_TSS
22817 jmp error_code
22818 CFI_ENDPROC
22819-END(invalid_TSS)
22820+ENDPROC(invalid_TSS)
22821
22822 ENTRY(segment_not_present)
22823 RING0_EC_FRAME
22824@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22825 pushl_cfi $do_segment_not_present
22826 jmp error_code
22827 CFI_ENDPROC
22828-END(segment_not_present)
22829+ENDPROC(segment_not_present)
22830
22831 ENTRY(stack_segment)
22832 RING0_EC_FRAME
22833@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
22834 pushl_cfi $do_stack_segment
22835 jmp error_code
22836 CFI_ENDPROC
22837-END(stack_segment)
22838+ENDPROC(stack_segment)
22839
22840 ENTRY(alignment_check)
22841 RING0_EC_FRAME
22842@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
22843 pushl_cfi $do_alignment_check
22844 jmp error_code
22845 CFI_ENDPROC
22846-END(alignment_check)
22847+ENDPROC(alignment_check)
22848
22849 ENTRY(divide_error)
22850 RING0_INT_FRAME
22851@@ -928,7 +1168,7 @@ ENTRY(divide_error)
22852 pushl_cfi $do_divide_error
22853 jmp error_code
22854 CFI_ENDPROC
22855-END(divide_error)
22856+ENDPROC(divide_error)
22857
22858 #ifdef CONFIG_X86_MCE
22859 ENTRY(machine_check)
22860@@ -938,7 +1178,7 @@ ENTRY(machine_check)
22861 pushl_cfi machine_check_vector
22862 jmp error_code
22863 CFI_ENDPROC
22864-END(machine_check)
22865+ENDPROC(machine_check)
22866 #endif
22867
22868 ENTRY(spurious_interrupt_bug)
22869@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
22870 pushl_cfi $do_spurious_interrupt_bug
22871 jmp error_code
22872 CFI_ENDPROC
22873-END(spurious_interrupt_bug)
22874+ENDPROC(spurious_interrupt_bug)
22875
22876 #ifdef CONFIG_XEN
22877 /* Xen doesn't set %esp to be precisely what the normal sysenter
22878@@ -1054,7 +1294,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22879
22880 ENTRY(mcount)
22881 ret
22882-END(mcount)
22883+ENDPROC(mcount)
22884
22885 ENTRY(ftrace_caller)
22886 pushl %eax
22887@@ -1084,7 +1324,7 @@ ftrace_graph_call:
22888 .globl ftrace_stub
22889 ftrace_stub:
22890 ret
22891-END(ftrace_caller)
22892+ENDPROC(ftrace_caller)
22893
22894 ENTRY(ftrace_regs_caller)
22895 pushf /* push flags before compare (in cs location) */
22896@@ -1182,7 +1422,7 @@ trace:
22897 popl %ecx
22898 popl %eax
22899 jmp ftrace_stub
22900-END(mcount)
22901+ENDPROC(mcount)
22902 #endif /* CONFIG_DYNAMIC_FTRACE */
22903 #endif /* CONFIG_FUNCTION_TRACER */
22904
22905@@ -1200,7 +1440,7 @@ ENTRY(ftrace_graph_caller)
22906 popl %ecx
22907 popl %eax
22908 ret
22909-END(ftrace_graph_caller)
22910+ENDPROC(ftrace_graph_caller)
22911
22912 .globl return_to_handler
22913 return_to_handler:
22914@@ -1261,15 +1501,18 @@ error_code:
22915 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22916 REG_TO_PTGS %ecx
22917 SET_KERNEL_GS %ecx
22918- movl $(__USER_DS), %ecx
22919+ movl $(__KERNEL_DS), %ecx
22920 movl %ecx, %ds
22921 movl %ecx, %es
22922+
22923+ pax_enter_kernel
22924+
22925 TRACE_IRQS_OFF
22926 movl %esp,%eax # pt_regs pointer
22927 call *%edi
22928 jmp ret_from_exception
22929 CFI_ENDPROC
22930-END(page_fault)
22931+ENDPROC(page_fault)
22932
22933 /*
22934 * Debug traps and NMI can happen at the one SYSENTER instruction
22935@@ -1312,7 +1555,7 @@ debug_stack_correct:
22936 call do_debug
22937 jmp ret_from_exception
22938 CFI_ENDPROC
22939-END(debug)
22940+ENDPROC(debug)
22941
22942 /*
22943 * NMI is doubly nasty. It can happen _while_ we're handling
22944@@ -1352,6 +1595,9 @@ nmi_stack_correct:
22945 xorl %edx,%edx # zero error code
22946 movl %esp,%eax # pt_regs pointer
22947 call do_nmi
22948+
22949+ pax_exit_kernel
22950+
22951 jmp restore_all_notrace
22952 CFI_ENDPROC
22953
22954@@ -1389,13 +1635,16 @@ nmi_espfix_stack:
22955 FIXUP_ESPFIX_STACK # %eax == %esp
22956 xorl %edx,%edx # zero error code
22957 call do_nmi
22958+
22959+ pax_exit_kernel
22960+
22961 RESTORE_REGS
22962 lss 12+4(%esp), %esp # back to espfix stack
22963 CFI_ADJUST_CFA_OFFSET -24
22964 jmp irq_return
22965 #endif
22966 CFI_ENDPROC
22967-END(nmi)
22968+ENDPROC(nmi)
22969
22970 ENTRY(int3)
22971 RING0_INT_FRAME
22972@@ -1408,14 +1657,14 @@ ENTRY(int3)
22973 call do_int3
22974 jmp ret_from_exception
22975 CFI_ENDPROC
22976-END(int3)
22977+ENDPROC(int3)
22978
22979 ENTRY(general_protection)
22980 RING0_EC_FRAME
22981 pushl_cfi $do_general_protection
22982 jmp error_code
22983 CFI_ENDPROC
22984-END(general_protection)
22985+ENDPROC(general_protection)
22986
22987 #ifdef CONFIG_KVM_GUEST
22988 ENTRY(async_page_fault)
22989@@ -1424,6 +1673,6 @@ ENTRY(async_page_fault)
22990 pushl_cfi $do_async_page_fault
22991 jmp error_code
22992 CFI_ENDPROC
22993-END(async_page_fault)
22994+ENDPROC(async_page_fault)
22995 #endif
22996
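Note: the entry_32.S additions group into KERNEXEC segment/CR0 transitions (pax_enter_kernel/pax_exit_kernel), __SAVE_ALL loading __KERNEL_DS when UDEREF or KERNEXEC is active, per-CPU-GDT aware ESPFIX fixups, END to ENDPROC conversions for proper symbol typing, and the STACKLEAK eraser. pax_erase_kstack scans down from the lowest stack pointer seen during the syscall until it finds its fill pattern, then repoisons everything up to the current stack pointer so stale stack contents cannot leak later. A simplified C rendering of that scan-and-fill (the assembly above additionally requires a 16-word run of poison before stopping; this sketch stops at the first match):

/* Simplified scan-and-fill behind pax_erase_kstack. */
#include <stdio.h>

#define POISON      ((unsigned long)-0xBEEF)   /* the asm's $-0xBEEF fill */
#define STACK_LONGS 1024

static unsigned long stack[STACK_LONGS];

/* scan down from the lowest tracked sp to where the poison run
 * begins, then repoison everything up to the current sp */
static void erase_kstack(unsigned long *lowest, unsigned long *sp)
{
	unsigned long *p = lowest;

	while (p > stack && *p != POISON)
		p--;
	for (; p < sp; p++)
		*p = POISON;
}

int main(void)
{
	int i;

	for (i = 0; i < STACK_LONGS; i++)
		stack[i] = POISON;
	for (i = 100; i < 900; i++)
		stack[i] = 0x41414141UL;        /* "used" stack contents */

	/* the kernel tracks TI_lowest_stack; assume index 100 here */
	erase_kstack(&stack[100], &stack[STACK_LONGS - 1]);
	printf("stack[500] = %#lx\n", stack[500]);   /* poison again */
	return 0;
}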
22997diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
22998index 9ebaf63..c786610 100644
22999--- a/arch/x86/kernel/entry_64.S
23000+++ b/arch/x86/kernel/entry_64.S
23001@@ -59,6 +59,8 @@
23002 #include <asm/smap.h>
23003 #include <asm/pgtable_types.h>
23004 #include <linux/err.h>
23005+#include <asm/pgtable.h>
23006+#include <asm/alternative-asm.h>
23007
23008 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23009 #include <linux/elf-em.h>
23010@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23011 ENDPROC(native_usergs_sysret64)
23012 #endif /* CONFIG_PARAVIRT */
23013
23014+ .macro ljmpq sel, off
23015+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23016+ .byte 0x48; ljmp *1234f(%rip)
23017+ .pushsection .rodata
23018+ .align 16
23019+ 1234: .quad \off; .word \sel
23020+ .popsection
23021+#else
23022+ pushq $\sel
23023+ pushq $\off
23024+ lretq
23025+#endif
23026+ .endm
23027+
23028+ .macro pax_enter_kernel
23029+ pax_set_fptr_mask
23030+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23031+ call pax_enter_kernel
23032+#endif
23033+ .endm
23034+
23035+ .macro pax_exit_kernel
23036+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23037+ call pax_exit_kernel
23038+#endif
23039+
23040+ .endm
23041+
23042+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23043+ENTRY(pax_enter_kernel)
23044+ pushq %rdi
23045+
23046+#ifdef CONFIG_PARAVIRT
23047+ PV_SAVE_REGS(CLBR_RDI)
23048+#endif
23049+
23050+#ifdef CONFIG_PAX_KERNEXEC
23051+ GET_CR0_INTO_RDI
23052+ bts $16,%rdi
23053+ jnc 3f
23054+ mov %cs,%edi
23055+ cmp $__KERNEL_CS,%edi
23056+ jnz 2f
23057+1:
23058+#endif
23059+
23060+#ifdef CONFIG_PAX_MEMORY_UDEREF
23061+ 661: jmp 111f
23062+ .pushsection .altinstr_replacement, "a"
23063+ 662: ASM_NOP2
23064+ .popsection
23065+ .pushsection .altinstructions, "a"
23066+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23067+ .popsection
23068+ GET_CR3_INTO_RDI
23069+ cmp $0,%dil
23070+ jnz 112f
23071+ mov $__KERNEL_DS,%edi
23072+ mov %edi,%ss
23073+ jmp 111f
23074+112: cmp $1,%dil
23075+ jz 113f
23076+ ud2
23077+113: sub $4097,%rdi
23078+ bts $63,%rdi
23079+ SET_RDI_INTO_CR3
23080+ mov $__UDEREF_KERNEL_DS,%edi
23081+ mov %edi,%ss
23082+111:
23083+#endif
23084+
23085+#ifdef CONFIG_PARAVIRT
23086+ PV_RESTORE_REGS(CLBR_RDI)
23087+#endif
23088+
23089+ popq %rdi
23090+ pax_force_retaddr
23091+ retq
23092+
23093+#ifdef CONFIG_PAX_KERNEXEC
23094+2: ljmpq __KERNEL_CS,1b
23095+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23096+4: SET_RDI_INTO_CR0
23097+ jmp 1b
23098+#endif
23099+ENDPROC(pax_enter_kernel)
23100+
23101+ENTRY(pax_exit_kernel)
23102+ pushq %rdi
23103+
23104+#ifdef CONFIG_PARAVIRT
23105+ PV_SAVE_REGS(CLBR_RDI)
23106+#endif
23107+
23108+#ifdef CONFIG_PAX_KERNEXEC
23109+ mov %cs,%rdi
23110+ cmp $__KERNEXEC_KERNEL_CS,%edi
23111+ jz 2f
23112+ GET_CR0_INTO_RDI
23113+ bts $16,%rdi
23114+ jnc 4f
23115+1:
23116+#endif
23117+
23118+#ifdef CONFIG_PAX_MEMORY_UDEREF
23119+ 661: jmp 111f
23120+ .pushsection .altinstr_replacement, "a"
23121+ 662: ASM_NOP2
23122+ .popsection
23123+ .pushsection .altinstructions, "a"
23124+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23125+ .popsection
23126+ mov %ss,%edi
23127+ cmp $__UDEREF_KERNEL_DS,%edi
23128+ jnz 111f
23129+ GET_CR3_INTO_RDI
23130+ cmp $0,%dil
23131+ jz 112f
23132+ ud2
23133+112: add $4097,%rdi
23134+ bts $63,%rdi
23135+ SET_RDI_INTO_CR3
23136+ mov $__KERNEL_DS,%edi
23137+ mov %edi,%ss
23138+111:
23139+#endif
23140+
23141+#ifdef CONFIG_PARAVIRT
23142+ PV_RESTORE_REGS(CLBR_RDI);
23143+#endif
23144+
23145+ popq %rdi
23146+ pax_force_retaddr
23147+ retq
23148+
23149+#ifdef CONFIG_PAX_KERNEXEC
23150+2: GET_CR0_INTO_RDI
23151+ btr $16,%rdi
23152+ jnc 4f
23153+ ljmpq __KERNEL_CS,3f
23154+3: SET_RDI_INTO_CR0
23155+ jmp 1b
23156+4: ud2
23157+ jmp 4b
23158+#endif
23159+ENDPROC(pax_exit_kernel)
23160+#endif
23161+
23162+ .macro pax_enter_kernel_user
23163+ pax_set_fptr_mask
23164+#ifdef CONFIG_PAX_MEMORY_UDEREF
23165+ call pax_enter_kernel_user
23166+#endif
23167+ .endm
23168+
23169+ .macro pax_exit_kernel_user
23170+#ifdef CONFIG_PAX_MEMORY_UDEREF
23171+ call pax_exit_kernel_user
23172+#endif
23173+#ifdef CONFIG_PAX_RANDKSTACK
23174+ pushq %rax
23175+ pushq %r11
23176+ call pax_randomize_kstack
23177+ popq %r11
23178+ popq %rax
23179+#endif
23180+ .endm
23181+
23182+#ifdef CONFIG_PAX_MEMORY_UDEREF
23183+ENTRY(pax_enter_kernel_user)
23184+ pushq %rdi
23185+ pushq %rbx
23186+
23187+#ifdef CONFIG_PARAVIRT
23188+ PV_SAVE_REGS(CLBR_RDI)
23189+#endif
23190+
23191+ 661: jmp 111f
23192+ .pushsection .altinstr_replacement, "a"
23193+ 662: ASM_NOP2
23194+ .popsection
23195+ .pushsection .altinstructions, "a"
23196+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23197+ .popsection
23198+ GET_CR3_INTO_RDI
23199+ cmp $1,%dil
23200+ jnz 4f
23201+ sub $4097,%rdi
23202+ bts $63,%rdi
23203+ SET_RDI_INTO_CR3
23204+ jmp 3f
23205+111:
23206+
23207+ GET_CR3_INTO_RDI
23208+ mov %rdi,%rbx
23209+ add $__START_KERNEL_map,%rbx
23210+ sub phys_base(%rip),%rbx
23211+
23212+#ifdef CONFIG_PARAVIRT
23213+ cmpl $0, pv_info+PARAVIRT_enabled
23214+ jz 1f
23215+ pushq %rdi
23216+ i = 0
23217+ .rept USER_PGD_PTRS
23218+ mov i*8(%rbx),%rsi
23219+ mov $0,%sil
23220+ lea i*8(%rbx),%rdi
23221+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23222+ i = i + 1
23223+ .endr
23224+ popq %rdi
23225+ jmp 2f
23226+1:
23227+#endif
23228+
23229+ i = 0
23230+ .rept USER_PGD_PTRS
23231+ movb $0,i*8(%rbx)
23232+ i = i + 1
23233+ .endr
23234+
23235+2: SET_RDI_INTO_CR3
23236+
23237+#ifdef CONFIG_PAX_KERNEXEC
23238+ GET_CR0_INTO_RDI
23239+ bts $16,%rdi
23240+ SET_RDI_INTO_CR0
23241+#endif
23242+
23243+3:
23244+
23245+#ifdef CONFIG_PARAVIRT
23246+ PV_RESTORE_REGS(CLBR_RDI)
23247+#endif
23248+
23249+ popq %rbx
23250+ popq %rdi
23251+ pax_force_retaddr
23252+ retq
23253+4: ud2
23254+ENDPROC(pax_enter_kernel_user)
23255+
23256+ENTRY(pax_exit_kernel_user)
23257+ pushq %rdi
23258+ pushq %rbx
23259+
23260+#ifdef CONFIG_PARAVIRT
23261+ PV_SAVE_REGS(CLBR_RDI)
23262+#endif
23263+
23264+ GET_CR3_INTO_RDI
23265+ 661: jmp 1f
23266+ .pushsection .altinstr_replacement, "a"
23267+ 662: ASM_NOP2
23268+ .popsection
23269+ .pushsection .altinstructions, "a"
23270+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23271+ .popsection
23272+ cmp $0,%dil
23273+ jnz 3f
23274+ add $4097,%rdi
23275+ bts $63,%rdi
23276+ SET_RDI_INTO_CR3
23277+ jmp 2f
23278+1:
23279+
23280+ mov %rdi,%rbx
23281+
23282+#ifdef CONFIG_PAX_KERNEXEC
23283+ GET_CR0_INTO_RDI
23284+ btr $16,%rdi
23285+ jnc 3f
23286+ SET_RDI_INTO_CR0
23287+#endif
23288+
23289+ add $__START_KERNEL_map,%rbx
23290+ sub phys_base(%rip),%rbx
23291+
23292+#ifdef CONFIG_PARAVIRT
23293+ cmpl $0, pv_info+PARAVIRT_enabled
23294+ jz 1f
23295+ i = 0
23296+ .rept USER_PGD_PTRS
23297+ mov i*8(%rbx),%rsi
23298+ mov $0x67,%sil
23299+ lea i*8(%rbx),%rdi
23300+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23301+ i = i + 1
23302+ .endr
23303+ jmp 2f
23304+1:
23305+#endif
23306+
23307+ i = 0
23308+ .rept USER_PGD_PTRS
23309+ movb $0x67,i*8(%rbx)
23310+ i = i + 1
23311+ .endr
23312+2:
23313+
23314+#ifdef CONFIG_PARAVIRT
23315+ PV_RESTORE_REGS(CLBR_RDI)
23316+#endif
23317+
23318+ popq %rbx
23319+ popq %rdi
23320+ pax_force_retaddr
23321+ retq
23322+3: ud2
23323+ENDPROC(pax_exit_kernel_user)
23324+#endif
23325+
23326+ .macro pax_enter_kernel_nmi
23327+ pax_set_fptr_mask
23328+
23329+#ifdef CONFIG_PAX_KERNEXEC
23330+ GET_CR0_INTO_RDI
23331+ bts $16,%rdi
23332+ jc 110f
23333+ SET_RDI_INTO_CR0
23334+ or $2,%ebx
23335+110:
23336+#endif
23337+
23338+#ifdef CONFIG_PAX_MEMORY_UDEREF
23339+ 661: jmp 111f
23340+ .pushsection .altinstr_replacement, "a"
23341+ 662: ASM_NOP2
23342+ .popsection
23343+ .pushsection .altinstructions, "a"
23344+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23345+ .popsection
23346+ GET_CR3_INTO_RDI
23347+ cmp $0,%dil
23348+ jz 111f
23349+ sub $4097,%rdi
23350+ or $4,%ebx
23351+ bts $63,%rdi
23352+ SET_RDI_INTO_CR3
23353+ mov $__UDEREF_KERNEL_DS,%edi
23354+ mov %edi,%ss
23355+111:
23356+#endif
23357+ .endm
23358+
23359+ .macro pax_exit_kernel_nmi
23360+#ifdef CONFIG_PAX_KERNEXEC
23361+ btr $1,%ebx
23362+ jnc 110f
23363+ GET_CR0_INTO_RDI
23364+ btr $16,%rdi
23365+ SET_RDI_INTO_CR0
23366+110:
23367+#endif
23368+
23369+#ifdef CONFIG_PAX_MEMORY_UDEREF
23370+ btr $2,%ebx
23371+ jnc 111f
23372+ GET_CR3_INTO_RDI
23373+ add $4097,%rdi
23374+ bts $63,%rdi
23375+ SET_RDI_INTO_CR3
23376+ mov $__KERNEL_DS,%edi
23377+ mov %edi,%ss
23378+111:
23379+#endif
23380+ .endm
23381+
23382+ .macro pax_erase_kstack
23383+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23384+ call pax_erase_kstack
23385+#endif
23386+ .endm
23387+
23388+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23389+ENTRY(pax_erase_kstack)
23390+ pushq %rdi
23391+ pushq %rcx
23392+ pushq %rax
23393+ pushq %r11
23394+
23395+ GET_THREAD_INFO(%r11)
23396+ mov TI_lowest_stack(%r11), %rdi
23397+ mov $-0xBEEF, %rax
23398+ std
23399+
23400+1: mov %edi, %ecx
23401+ and $THREAD_SIZE_asm - 1, %ecx
23402+ shr $3, %ecx
23403+ repne scasq
23404+ jecxz 2f
23405+
23406+ cmp $2*8, %ecx
23407+ jc 2f
23408+
23409+ mov $2*8, %ecx
23410+ repe scasq
23411+ jecxz 2f
23412+ jne 1b
23413+
23414+2: cld
23415+ or $2*8, %rdi
23416+ mov %esp, %ecx
23417+ sub %edi, %ecx
23418+
23419+ cmp $THREAD_SIZE_asm, %rcx
23420+ jb 3f
23421+ ud2
23422+3:
23423+
23424+ shr $3, %ecx
23425+ rep stosq
23426+
23427+ mov TI_task_thread_sp0(%r11), %rdi
23428+ sub $256, %rdi
23429+ mov %rdi, TI_lowest_stack(%r11)
23430+
23431+ popq %r11
23432+ popq %rax
23433+ popq %rcx
23434+ popq %rdi
23435+ pax_force_retaddr
23436+ ret
23437+ENDPROC(pax_erase_kstack)
23438+#endif
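pax_erase_kstack above is the STACKLEAK sanitizer: scan down from the recorded lowest stack pointer until two adjacent poison qwords mark untouched stack, then refill the used region. A rough C sketch (the poison constant matches the mov $-0xBEEF above; the pointer arguments stand in for TI_lowest_stack and the current %rsp):

#include <stdint.h>

#define STACKLEAK_POISON 0xffffffffffff4111UL   /* (uint64_t)-0xBEEF */

static void erase_kstack(uint64_t *lowest, uint64_t *sp, uint64_t *stack_base)
{
    uint64_t *p = lowest;

    /* walk down until two consecutive poison qwords (the repne/repe
     * scasq pair above) mark the frontier of the used stack */
    while (p > stack_base + 1 &&
           !(p[0] == STACKLEAK_POISON && p[-1] == STACKLEAK_POISON))
        p--;

    while (p < sp)                  /* the rep stosq: repoison it all */
        *p++ = STACKLEAK_POISON;
}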
23439
23440 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23441 #ifdef CONFIG_TRACE_IRQFLAGS
23442@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23443 .endm
23444
23445 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23446- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23447+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23448 jnc 1f
23449 TRACE_IRQS_ON_DEBUG
23450 1:
23451@@ -155,27 +582,6 @@ ENDPROC(native_usergs_sysret64)
23452 movq \tmp,R11+\offset(%rsp)
23453 .endm
23454
23455- .macro FAKE_STACK_FRAME child_rip
23456- /* push in order ss, rsp, eflags, cs, rip */
23457- xorl %eax, %eax
23458- pushq_cfi $__KERNEL_DS /* ss */
23459- /*CFI_REL_OFFSET ss,0*/
23460- pushq_cfi %rax /* rsp */
23461- CFI_REL_OFFSET rsp,0
23462- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23463- /*CFI_REL_OFFSET rflags,0*/
23464- pushq_cfi $__KERNEL_CS /* cs */
23465- /*CFI_REL_OFFSET cs,0*/
23466- pushq_cfi \child_rip /* rip */
23467- CFI_REL_OFFSET rip,0
23468- pushq_cfi %rax /* orig rax */
23469- .endm
23470-
23471- .macro UNFAKE_STACK_FRAME
23472- addq $8*6, %rsp
23473- CFI_ADJUST_CFA_OFFSET -(6*8)
23474- .endm
23475-
23476 /*
23477 * initial frame state for interrupts (and exceptions without error code)
23478 */
23479@@ -241,25 +647,26 @@ ENDPROC(native_usergs_sysret64)
23480 /* save partial stack frame */
23481 .macro SAVE_ARGS_IRQ
23482 cld
23483- /* start from rbp in pt_regs and jump over */
23484- movq_cfi rdi, (RDI-RBP)
23485- movq_cfi rsi, (RSI-RBP)
23486- movq_cfi rdx, (RDX-RBP)
23487- movq_cfi rcx, (RCX-RBP)
23488- movq_cfi rax, (RAX-RBP)
23489- movq_cfi r8, (R8-RBP)
23490- movq_cfi r9, (R9-RBP)
23491- movq_cfi r10, (R10-RBP)
23492- movq_cfi r11, (R11-RBP)
23493+ /* start from r15 in pt_regs and jump over */
23494+ movq_cfi rdi, RDI
23495+ movq_cfi rsi, RSI
23496+ movq_cfi rdx, RDX
23497+ movq_cfi rcx, RCX
23498+ movq_cfi rax, RAX
23499+ movq_cfi r8, R8
23500+ movq_cfi r9, R9
23501+ movq_cfi r10, R10
23502+ movq_cfi r11, R11
23503+ movq_cfi r12, R12
23504
23505 /* Save rbp so that we can unwind from get_irq_regs() */
23506- movq_cfi rbp, 0
23507+ movq_cfi rbp, RBP
23508
23509 /* Save previous stack value */
23510 movq %rsp, %rsi
23511
23512- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23513- testl $3, CS-RBP(%rsi)
23514+ movq %rsp,%rdi /* arg1 for handler */
23515+ testb $3, CS(%rsi)
23516 je 1f
23517 SWAPGS
23518 /*
23519@@ -279,6 +686,18 @@ ENDPROC(native_usergs_sysret64)
23520 0x06 /* DW_OP_deref */, \
23521 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23522 0x22 /* DW_OP_plus */
23523+
23524+#ifdef CONFIG_PAX_MEMORY_UDEREF
23525+ testb $3, CS(%rdi)
23526+ jnz 1f
23527+ pax_enter_kernel
23528+ jmp 2f
23529+1: pax_enter_kernel_user
23530+2:
23531+#else
23532+ pax_enter_kernel
23533+#endif
23534+
23535 /* We entered an interrupt context - irqs are off: */
23536 TRACE_IRQS_OFF
23537 .endm
23538@@ -308,9 +727,52 @@ ENTRY(save_paranoid)
23539 js 1f /* negative -> in kernel */
23540 SWAPGS
23541 xorl %ebx,%ebx
23542-1: ret
23543+1:
23544+#ifdef CONFIG_PAX_MEMORY_UDEREF
23545+ testb $3, CS+8(%rsp)
23546+ jnz 1f
23547+ pax_enter_kernel
23548+ jmp 2f
23549+1: pax_enter_kernel_user
23550+2:
23551+#else
23552+ pax_enter_kernel
23553+#endif
23554+ pax_force_retaddr
23555+ ret
23556 CFI_ENDPROC
23557-END(save_paranoid)
23558+ENDPROC(save_paranoid)
23559+
23560+ENTRY(save_paranoid_nmi)
23561+ XCPT_FRAME 1 RDI+8
23562+ cld
23563+ movq_cfi rdi, RDI+8
23564+ movq_cfi rsi, RSI+8
23565+ movq_cfi rdx, RDX+8
23566+ movq_cfi rcx, RCX+8
23567+ movq_cfi rax, RAX+8
23568+ movq_cfi r8, R8+8
23569+ movq_cfi r9, R9+8
23570+ movq_cfi r10, R10+8
23571+ movq_cfi r11, R11+8
23572+ movq_cfi rbx, RBX+8
23573+ movq_cfi rbp, RBP+8
23574+ movq_cfi r12, R12+8
23575+ movq_cfi r13, R13+8
23576+ movq_cfi r14, R14+8
23577+ movq_cfi r15, R15+8
23578+ movl $1,%ebx
23579+ movl $MSR_GS_BASE,%ecx
23580+ rdmsr
23581+ testl %edx,%edx
23582+ js 1f /* negative -> in kernel */
23583+ SWAPGS
23584+ xorl %ebx,%ebx
23585+1: pax_enter_kernel_nmi
23586+ pax_force_retaddr
23587+ ret
23588+ CFI_ENDPROC
23589+ENDPROC(save_paranoid_nmi)
23590
23591 /*
23592 * A newly forked process directly context switches into this address.
23593@@ -331,25 +793,26 @@ ENTRY(ret_from_fork)
23594
23595 RESTORE_REST
23596
23597- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23598+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23599 jz 1f
23600
23601- testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
23602- jnz int_ret_from_sys_call
23603-
23604- RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
23605- jmp ret_from_sys_call # go to the SYSRET fastpath
23606+ /*
23607+ * By the time we get here, we have no idea whether our pt_regs,
23608+ * ti flags, and ti status came from the 64-bit SYSCALL fast path,
23609+ * the slow path, or one of the ia32entry paths.
23610+ * Use int_ret_from_sys_call to return, since it can safely handle
23611+ * all of the above.
23612+ */
23613+ jmp int_ret_from_sys_call
23614
23615 1:
23616- subq $REST_SKIP, %rsp # leave space for volatiles
23617- CFI_ADJUST_CFA_OFFSET REST_SKIP
23618 movq %rbp, %rdi
23619 call *%rbx
23620 movl $0, RAX(%rsp)
23621 RESTORE_REST
23622 jmp int_ret_from_sys_call
23623 CFI_ENDPROC
23624-END(ret_from_fork)
23625+ENDPROC(ret_from_fork)
23626
23627 /*
23628 * System call entry. Up to 6 arguments in registers are supported.
23629@@ -386,7 +849,7 @@ END(ret_from_fork)
23630 ENTRY(system_call)
23631 CFI_STARTPROC simple
23632 CFI_SIGNAL_FRAME
23633- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23634+ CFI_DEF_CFA rsp,0
23635 CFI_REGISTER rip,rcx
23636 /*CFI_REGISTER rflags,r11*/
23637 SWAPGS_UNSAFE_STACK
23638@@ -399,16 +862,23 @@ GLOBAL(system_call_after_swapgs)
23639
23640 movq %rsp,PER_CPU_VAR(old_rsp)
23641 movq PER_CPU_VAR(kernel_stack),%rsp
23642+ SAVE_ARGS 8*6, 0, rax_enosys=1
23643+ pax_enter_kernel_user
23644+
23645+#ifdef CONFIG_PAX_RANDKSTACK
23646+ pax_erase_kstack
23647+#endif
23648+
23649 /*
23650 * No need to follow this irqs off/on section - it's straight
23651 * and short:
23652 */
23653 ENABLE_INTERRUPTS(CLBR_NONE)
23654- SAVE_ARGS 8, 0, rax_enosys=1
23655 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23656 movq %rcx,RIP-ARGOFFSET(%rsp)
23657 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23658- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23659+ GET_THREAD_INFO(%rcx)
23660+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23661 jnz tracesys
23662 system_call_fastpath:
23663 #if __SYSCALL_MASK == ~0
23664@@ -432,10 +902,13 @@ sysret_check:
23665 LOCKDEP_SYS_EXIT
23666 DISABLE_INTERRUPTS(CLBR_NONE)
23667 TRACE_IRQS_OFF
23668- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23669+ GET_THREAD_INFO(%rcx)
23670+ movl TI_flags(%rcx),%edx
23671 andl %edi,%edx
23672 jnz sysret_careful
23673 CFI_REMEMBER_STATE
23674+ pax_exit_kernel_user
23675+ pax_erase_kstack
23676 /*
23677 * sysretq will re-enable interrupts:
23678 */
23679@@ -494,12 +967,15 @@ sysret_audit:
23680
23681 /* Do syscall tracing */
23682 tracesys:
23683- leaq -REST_SKIP(%rsp), %rdi
23684+ movq %rsp, %rdi
23685 movq $AUDIT_ARCH_X86_64, %rsi
23686 call syscall_trace_enter_phase1
23687 test %rax, %rax
23688 jnz tracesys_phase2 /* if needed, run the slow path */
23689- LOAD_ARGS 0 /* else restore clobbered regs */
23690+
23691+ pax_erase_kstack
23692+
23693+ LOAD_ARGS /* else restore clobbered regs */
23694 jmp system_call_fastpath /* and return to the fast path */
23695
23696 tracesys_phase2:
23697@@ -510,12 +986,14 @@ tracesys_phase2:
23698 movq %rax,%rdx
23699 call syscall_trace_enter_phase2
23700
23701+ pax_erase_kstack
23702+
23703 /*
23704 * Reload arg registers from stack in case ptrace changed them.
23705 * We don't reload %rax because syscall_trace_enter_phase2() returned
23706 * the value it wants us to use in the table lookup.
23707 */
23708- LOAD_ARGS ARGOFFSET, 1
23709+ LOAD_ARGS 1
23710 RESTORE_REST
23711 #if __SYSCALL_MASK == ~0
23712 cmpq $__NR_syscall_max,%rax
23713@@ -545,7 +1023,9 @@ GLOBAL(int_with_check)
23714 andl %edi,%edx
23715 jnz int_careful
23716 andl $~TS_COMPAT,TI_status(%rcx)
23717- jmp retint_swapgs
23718+ pax_exit_kernel_user
23719+ pax_erase_kstack
23720+ jmp retint_swapgs_pax
23721
23722 /* Either reschedule or signal or syscall exit tracking needed. */
23723 /* First do a reschedule test. */
23724@@ -591,7 +1071,7 @@ int_restore_rest:
23725 TRACE_IRQS_OFF
23726 jmp int_with_check
23727 CFI_ENDPROC
23728-END(system_call)
23729+ENDPROC(system_call)
23730
23731 .macro FORK_LIKE func
23732 ENTRY(stub_\func)
23733@@ -604,9 +1084,10 @@ ENTRY(stub_\func)
23734 DEFAULT_FRAME 0 8 /* offset 8: return address */
23735 call sys_\func
23736 RESTORE_TOP_OF_STACK %r11, 8
23737- ret $REST_SKIP /* pop extended registers */
23738+ pax_force_retaddr
23739+ ret
23740 CFI_ENDPROC
23741-END(stub_\func)
23742+ENDPROC(stub_\func)
23743 .endm
23744
23745 .macro FIXED_FRAME label,func
23746@@ -616,9 +1097,10 @@ ENTRY(\label)
23747 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23748 call \func
23749 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23750+ pax_force_retaddr
23751 ret
23752 CFI_ENDPROC
23753-END(\label)
23754+ENDPROC(\label)
23755 .endm
23756
23757 FORK_LIKE clone
23758@@ -626,19 +1108,6 @@ END(\label)
23759 FORK_LIKE vfork
23760 FIXED_FRAME stub_iopl, sys_iopl
23761
23762-ENTRY(ptregscall_common)
23763- DEFAULT_FRAME 1 8 /* offset 8: return address */
23764- RESTORE_TOP_OF_STACK %r11, 8
23765- movq_cfi_restore R15+8, r15
23766- movq_cfi_restore R14+8, r14
23767- movq_cfi_restore R13+8, r13
23768- movq_cfi_restore R12+8, r12
23769- movq_cfi_restore RBP+8, rbp
23770- movq_cfi_restore RBX+8, rbx
23771- ret $REST_SKIP /* pop extended registers */
23772- CFI_ENDPROC
23773-END(ptregscall_common)
23774-
23775 ENTRY(stub_execve)
23776 CFI_STARTPROC
23777 addq $8, %rsp
23778@@ -650,7 +1119,7 @@ ENTRY(stub_execve)
23779 RESTORE_REST
23780 jmp int_ret_from_sys_call
23781 CFI_ENDPROC
23782-END(stub_execve)
23783+ENDPROC(stub_execve)
23784
23785 ENTRY(stub_execveat)
23786 CFI_STARTPROC
23787@@ -664,7 +1133,7 @@ ENTRY(stub_execveat)
23788 RESTORE_REST
23789 jmp int_ret_from_sys_call
23790 CFI_ENDPROC
23791-END(stub_execveat)
23792+ENDPROC(stub_execveat)
23793
23794 /*
23795 * sigreturn is special because it needs to restore all registers on return.
23796@@ -681,7 +1150,7 @@ ENTRY(stub_rt_sigreturn)
23797 RESTORE_REST
23798 jmp int_ret_from_sys_call
23799 CFI_ENDPROC
23800-END(stub_rt_sigreturn)
23801+ENDPROC(stub_rt_sigreturn)
23802
23803 #ifdef CONFIG_X86_X32_ABI
23804 ENTRY(stub_x32_rt_sigreturn)
23805@@ -695,7 +1164,7 @@ ENTRY(stub_x32_rt_sigreturn)
23806 RESTORE_REST
23807 jmp int_ret_from_sys_call
23808 CFI_ENDPROC
23809-END(stub_x32_rt_sigreturn)
23810+ENDPROC(stub_x32_rt_sigreturn)
23811
23812 ENTRY(stub_x32_execve)
23813 CFI_STARTPROC
23814@@ -760,7 +1229,7 @@ vector=vector+1
23815 2: jmp common_interrupt
23816 .endr
23817 CFI_ENDPROC
23818-END(irq_entries_start)
23819+ENDPROC(irq_entries_start)
23820
23821 .previous
23822 END(interrupt)
23823@@ -777,8 +1246,8 @@ END(interrupt)
23824 /* 0(%rsp): ~(interrupt number) */
23825 .macro interrupt func
23826 /* reserve pt_regs for scratch regs and rbp */
23827- subq $ORIG_RAX-RBP, %rsp
23828- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23829+ subq $ORIG_RAX, %rsp
23830+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23831 SAVE_ARGS_IRQ
23832 call \func
23833 .endm
23834@@ -801,14 +1270,14 @@ ret_from_intr:
23835
23836 /* Restore saved previous stack */
23837 popq %rsi
23838- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23839- leaq ARGOFFSET-RBP(%rsi), %rsp
23840+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23841+ movq %rsi, %rsp
23842 CFI_DEF_CFA_REGISTER rsp
23843- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23844+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23845
23846 exit_intr:
23847 GET_THREAD_INFO(%rcx)
23848- testl $3,CS-ARGOFFSET(%rsp)
23849+ testb $3,CS-ARGOFFSET(%rsp)
23850 je retint_kernel
23851
23852 /* Interrupt came from user space */
23853@@ -830,12 +1299,35 @@ retint_swapgs: /* return to user-space */
23854 * The iretq could re-enable interrupts:
23855 */
23856 DISABLE_INTERRUPTS(CLBR_ANY)
23857+ pax_exit_kernel_user
23858+retint_swapgs_pax:
23859 TRACE_IRQS_IRETQ
23860 SWAPGS
23861 jmp restore_args
23862
23863 retint_restore_args: /* return to kernel space */
23864 DISABLE_INTERRUPTS(CLBR_ANY)
23865+ pax_exit_kernel
23866+
23867+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
23868+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
23869+ * namely calling EFI runtime services with a phys mapping. We
23870+ * start off with NOPs and patch in the real instrumentation
23871+ * (BTS/OR) before starting any userland process, even before starting
23872+ * up the APs.
23873+ */
23874+ .pushsection .altinstr_replacement, "a"
23875+ 601: pax_force_retaddr (RIP-ARGOFFSET)
23876+ 602:
23877+ .popsection
23878+ 603: .fill 602b-601b, 1, 0x90
23879+ .pushsection .altinstructions, "a"
23880+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
23881+ .popsection
23882+#else
23883+ pax_force_retaddr (RIP-ARGOFFSET)
23884+#endif
23885+
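The 601..603 trick above, like the 661/662 pairs earlier, relies on the x86 alternatives mechanism. Roughly, each altinstruction_entry emits one record of the following shape (field layout per the 3.19-era struct alt_instr; treat this as a sketch), and apply_alternatives() copies the replacement over the original at boot when the gating feature is present. Here the feature is X86_FEATURE_ALWAYS, so the NOP fill is unconditionally replaced once alternatives run:

#include <stdint.h>

struct alt_instr {
    int32_t  instr_offset;      /* original instruction, relative */
    int32_t  repl_offset;       /* replacement instruction, relative */
    uint16_t cpuid;             /* feature bit gating the patch */
    uint8_t  instrlen;          /* length of the original */
    uint8_t  replacementlen;    /* length of the replacement */
};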
23886 /*
23887 * The iretq could re-enable interrupts:
23888 */
23889@@ -873,15 +1365,15 @@ native_irq_return_ldt:
23890 SWAPGS
23891 movq PER_CPU_VAR(espfix_waddr),%rdi
23892 movq %rax,(0*8)(%rdi) /* RAX */
23893- movq (2*8)(%rsp),%rax /* RIP */
23894+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
23895 movq %rax,(1*8)(%rdi)
23896- movq (3*8)(%rsp),%rax /* CS */
23897+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
23898 movq %rax,(2*8)(%rdi)
23899- movq (4*8)(%rsp),%rax /* RFLAGS */
23900+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
23901 movq %rax,(3*8)(%rdi)
23902- movq (6*8)(%rsp),%rax /* SS */
23903+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
23904 movq %rax,(5*8)(%rdi)
23905- movq (5*8)(%rsp),%rax /* RSP */
23906+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
23907 movq %rax,(4*8)(%rdi)
23908 andl $0xffff0000,%eax
23909 popq_cfi %rdi
23910@@ -935,7 +1427,7 @@ ENTRY(retint_kernel)
23911 jmp exit_intr
23912 #endif
23913 CFI_ENDPROC
23914-END(common_interrupt)
23915+ENDPROC(common_interrupt)
23916
23917 /*
23918 * APIC interrupts.
23919@@ -949,7 +1441,7 @@ ENTRY(\sym)
23920 interrupt \do_sym
23921 jmp ret_from_intr
23922 CFI_ENDPROC
23923-END(\sym)
23924+ENDPROC(\sym)
23925 .endm
23926
23927 #ifdef CONFIG_TRACING
23928@@ -1022,7 +1514,7 @@ apicinterrupt IRQ_WORK_VECTOR \
23929 /*
23930 * Exception entry points.
23931 */
23932-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23933+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23934
23935 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
23936 ENTRY(\sym)
23937@@ -1073,6 +1565,12 @@ ENTRY(\sym)
23938 .endif
23939
23940 .if \shift_ist != -1
23941+#ifdef CONFIG_SMP
23942+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
23943+ lea init_tss(%r13), %r13
23944+#else
23945+ lea init_tss(%rip), %r13
23946+#endif
23947 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
23948 .endif
23949
23950@@ -1089,7 +1587,7 @@ ENTRY(\sym)
23951 .endif
23952
23953 CFI_ENDPROC
23954-END(\sym)
23955+ENDPROC(\sym)
23956 .endm
23957
23958 #ifdef CONFIG_TRACING
23959@@ -1130,9 +1628,10 @@ gs_change:
23960 2: mfence /* workaround */
23961 SWAPGS
23962 popfq_cfi
23963+ pax_force_retaddr
23964 ret
23965 CFI_ENDPROC
23966-END(native_load_gs_index)
23967+ENDPROC(native_load_gs_index)
23968
23969 _ASM_EXTABLE(gs_change,bad_gs)
23970 .section .fixup,"ax"
23971@@ -1160,9 +1659,10 @@ ENTRY(do_softirq_own_stack)
23972 CFI_DEF_CFA_REGISTER rsp
23973 CFI_ADJUST_CFA_OFFSET -8
23974 decl PER_CPU_VAR(irq_count)
23975+ pax_force_retaddr
23976 ret
23977 CFI_ENDPROC
23978-END(do_softirq_own_stack)
23979+ENDPROC(do_softirq_own_stack)
23980
23981 #ifdef CONFIG_XEN
23982 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
23983@@ -1200,7 +1700,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
23984 decl PER_CPU_VAR(irq_count)
23985 jmp error_exit
23986 CFI_ENDPROC
23987-END(xen_do_hypervisor_callback)
23988+ENDPROC(xen_do_hypervisor_callback)
23989
23990 /*
23991 * Hypervisor uses this for application faults while it executes.
23992@@ -1259,7 +1759,7 @@ ENTRY(xen_failsafe_callback)
23993 SAVE_ALL
23994 jmp error_exit
23995 CFI_ENDPROC
23996-END(xen_failsafe_callback)
23997+ENDPROC(xen_failsafe_callback)
23998
23999 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24000 xen_hvm_callback_vector xen_evtchn_do_upcall
24001@@ -1306,18 +1806,33 @@ ENTRY(paranoid_exit)
24002 DEFAULT_FRAME
24003 DISABLE_INTERRUPTS(CLBR_NONE)
24004 TRACE_IRQS_OFF_DEBUG
24005- testl %ebx,%ebx /* swapgs needed? */
24006+ testl $1,%ebx /* swapgs needed? */
24007 jnz paranoid_restore
24008- testl $3,CS(%rsp)
24009+ testb $3,CS(%rsp)
24010 jnz paranoid_userspace
24011+#ifdef CONFIG_PAX_MEMORY_UDEREF
24012+ pax_exit_kernel
24013+ TRACE_IRQS_IRETQ 0
24014+ SWAPGS_UNSAFE_STACK
24015+ RESTORE_ALL 8
24016+ pax_force_retaddr_bts
24017+ jmp irq_return
24018+#endif
24019 paranoid_swapgs:
24020+#ifdef CONFIG_PAX_MEMORY_UDEREF
24021+ pax_exit_kernel_user
24022+#else
24023+ pax_exit_kernel
24024+#endif
24025 TRACE_IRQS_IRETQ 0
24026 SWAPGS_UNSAFE_STACK
24027 RESTORE_ALL 8
24028 jmp irq_return
24029 paranoid_restore:
24030+ pax_exit_kernel
24031 TRACE_IRQS_IRETQ_DEBUG 0
24032 RESTORE_ALL 8
24033+ pax_force_retaddr_bts
24034 jmp irq_return
24035 paranoid_userspace:
24036 GET_THREAD_INFO(%rcx)
24037@@ -1346,7 +1861,7 @@ paranoid_schedule:
24038 TRACE_IRQS_OFF
24039 jmp paranoid_userspace
24040 CFI_ENDPROC
24041-END(paranoid_exit)
24042+ENDPROC(paranoid_exit)
24043
24044 /*
24045 * Exception entry point. This expects an error code/orig_rax on the stack.
24046@@ -1373,12 +1888,23 @@ ENTRY(error_entry)
24047 movq %r14, R14+8(%rsp)
24048 movq %r15, R15+8(%rsp)
24049 xorl %ebx,%ebx
24050- testl $3,CS+8(%rsp)
24051+ testb $3,CS+8(%rsp)
24052 je error_kernelspace
24053 error_swapgs:
24054 SWAPGS
24055 error_sti:
24056+#ifdef CONFIG_PAX_MEMORY_UDEREF
24057+ testb $3, CS+8(%rsp)
24058+ jnz 1f
24059+ pax_enter_kernel
24060+ jmp 2f
24061+1: pax_enter_kernel_user
24062+2:
24063+#else
24064+ pax_enter_kernel
24065+#endif
24066 TRACE_IRQS_OFF
24067+ pax_force_retaddr
24068 ret
24069
24070 /*
24071@@ -1413,7 +1939,7 @@ error_bad_iret:
24072 decl %ebx /* Return to usergs */
24073 jmp error_sti
24074 CFI_ENDPROC
24075-END(error_entry)
24076+ENDPROC(error_entry)
24077
24078
24079 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24080@@ -1424,7 +1950,7 @@ ENTRY(error_exit)
24081 DISABLE_INTERRUPTS(CLBR_NONE)
24082 TRACE_IRQS_OFF
24083 GET_THREAD_INFO(%rcx)
24084- testl %eax,%eax
24085+ testl $1,%eax
24086 jne retint_kernel
24087 LOCKDEP_SYS_EXIT_IRQ
24088 movl TI_flags(%rcx),%edx
24089@@ -1433,7 +1959,7 @@ ENTRY(error_exit)
24090 jnz retint_careful
24091 jmp retint_swapgs
24092 CFI_ENDPROC
24093-END(error_exit)
24094+ENDPROC(error_exit)
24095
24096 /*
24097 * Test if a given stack is an NMI stack or not.
24098@@ -1491,9 +2017,11 @@ ENTRY(nmi)
24099 * If %cs was not the kernel segment, then the NMI triggered in user
24100 * space, which means it is definitely not nested.
24101 */
24102+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24103+ je 1f
24104 cmpl $__KERNEL_CS, 16(%rsp)
24105 jne first_nmi
24106-
24107+1:
24108 /*
24109 * Check the special variable on the stack to see if NMIs are
24110 * executing.
24111@@ -1527,8 +2055,7 @@ nested_nmi:
24112
24113 1:
24114 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24115- leaq -1*8(%rsp), %rdx
24116- movq %rdx, %rsp
24117+ subq $8, %rsp
24118 CFI_ADJUST_CFA_OFFSET 1*8
24119 leaq -10*8(%rsp), %rdx
24120 pushq_cfi $__KERNEL_DS
24121@@ -1546,6 +2073,7 @@ nested_nmi_out:
24122 CFI_RESTORE rdx
24123
24124 /* No need to check faults here */
24125+# pax_force_retaddr_bts
24126 INTERRUPT_RETURN
24127
24128 CFI_RESTORE_STATE
24129@@ -1642,13 +2170,13 @@ end_repeat_nmi:
24130 subq $ORIG_RAX-R15, %rsp
24131 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24132 /*
24133- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24134+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24135 * as we should not be calling schedule in NMI context.
24136 * Even with normal interrupts enabled. An NMI should not be
24137 * setting NEED_RESCHED or anything that normal interrupts and
24138 * exceptions might do.
24139 */
24140- call save_paranoid
24141+ call save_paranoid_nmi
24142 DEFAULT_FRAME 0
24143
24144 /*
24145@@ -1658,9 +2186,9 @@ end_repeat_nmi:
24146 * NMI itself takes a page fault, the page fault that was preempted
24147 * will read the information from the NMI page fault and not the
24148 * origin fault. Save it off and restore it if it changes.
24149- * Use the r12 callee-saved register.
24150+ * Use the r13 callee-saved register.
24151 */
24152- movq %cr2, %r12
24153+ movq %cr2, %r13
24154
24155 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24156 movq %rsp,%rdi
24157@@ -1669,29 +2197,34 @@ end_repeat_nmi:
24158
24159 /* Did the NMI take a page fault? Restore cr2 if it did */
24160 movq %cr2, %rcx
24161- cmpq %rcx, %r12
24162+ cmpq %rcx, %r13
24163 je 1f
24164- movq %r12, %cr2
24165+ movq %r13, %cr2
24166 1:
24167
24168- testl %ebx,%ebx /* swapgs needed? */
24169+ testl $1,%ebx /* swapgs needed? */
24170 jnz nmi_restore
24171 nmi_swapgs:
24172 SWAPGS_UNSAFE_STACK
24173 nmi_restore:
24174+ pax_exit_kernel_nmi
24175 /* Pop the extra iret frame at once */
24176 RESTORE_ALL 6*8
24177+ testb $3, 8(%rsp)
24178+ jnz 1f
24179+ pax_force_retaddr_bts
24180+1:
24181
24182 /* Clear the NMI executing stack variable */
24183 movq $0, 5*8(%rsp)
24184 jmp irq_return
24185 CFI_ENDPROC
24186-END(nmi)
24187+ENDPROC(nmi)
24188
24189 ENTRY(ignore_sysret)
24190 CFI_STARTPROC
24191 mov $-ENOSYS,%eax
24192 sysret
24193 CFI_ENDPROC
24194-END(ignore_sysret)
24195+ENDPROC(ignore_sysret)
24196
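One detail worth restating from the NMI path above: do_nmi may itself take a page fault and clobber CR2 while an interrupted page-fault handler still needs it, hence the save to a callee-saved register (%r13 in this patch) and the conditional restore. In C terms, hedged (do_nmi_body() is a stand-in name):

struct pt_regs;
extern void do_nmi_body(struct pt_regs *regs);

static inline unsigned long read_cr2(void)
{
    unsigned long v;
    __asm__ volatile("mov %%cr2, %0" : "=r"(v));
    return v;
}

static inline void write_cr2(unsigned long v)
{
    __asm__ volatile("mov %0, %%cr2" : : "r"(v));
}

static void nmi_preserve_cr2(struct pt_regs *regs)
{
    unsigned long saved_cr2 = read_cr2();

    do_nmi_body(regs);                 /* may fault and overwrite CR2 */

    if (read_cr2() != saved_cr2)       /* restore only if clobbered */
        write_cr2(saved_cr2);
}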
24197diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24198index f5d0730..5bce89c 100644
24199--- a/arch/x86/kernel/espfix_64.c
24200+++ b/arch/x86/kernel/espfix_64.c
24201@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24202 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24203 static void *espfix_pages[ESPFIX_MAX_PAGES];
24204
24205-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24206- __aligned(PAGE_SIZE);
24207+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24208
24209 static unsigned int page_random, slot_random;
24210
24211@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24212 void __init init_espfix_bsp(void)
24213 {
24214 pgd_t *pgd_p;
24215+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24216
24217 /* Install the espfix pud into the kernel page directory */
24218- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24219+ pgd_p = &init_level4_pgt[index];
24220 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24221
24222+#ifdef CONFIG_PAX_PER_CPU_PGD
24223+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24224+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24225+#endif
24226+
24227 /* Randomize the locations */
24228 init_espfix_random();
24229
24230@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24231 set_pte(&pte_p[n*PTE_STRIDE], pte);
24232
24233 /* Job is done for this CPU and any CPU which shares this page */
24234- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24235+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24236
24237 unlock_done:
24238 mutex_unlock(&espfix_init_mutex);
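The clone_pgd_range() calls added above exist because PAX_PER_CPU_PGD gives every CPU its own kernel and user PGD: an espfix entry installed only in swapper_pg_dir would be invisible to them. The helper itself is essentially a memcpy of top-level slots (sketch):

#include <string.h>

typedef struct { unsigned long pgd; } pgd_t;

static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
{
    memcpy(dst, src, count * sizeof(pgd_t));
}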
24239diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24240index 8b7b0a5..2395f29 100644
24241--- a/arch/x86/kernel/ftrace.c
24242+++ b/arch/x86/kernel/ftrace.c
24243@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24244 * kernel identity mapping to modify code.
24245 */
24246 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24247- ip = (unsigned long)__va(__pa_symbol(ip));
24248+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24249
24250 return ip;
24251 }
24252@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24253 {
24254 unsigned char replaced[MCOUNT_INSN_SIZE];
24255
24256+ ip = ktla_ktva(ip);
24257+
24258 /*
24259 * Note: Due to modules and __init, code can
24260 * disappear and change, we need to protect against faulting
24261@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24262 unsigned char old[MCOUNT_INSN_SIZE];
24263 int ret;
24264
24265- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24266+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24267
24268 ftrace_update_func = ip;
24269 /* Make sure the breakpoints see the ftrace_update_func update */
24270@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24271 unsigned char replaced[MCOUNT_INSN_SIZE];
24272 unsigned char brk = BREAKPOINT_INSTRUCTION;
24273
24274- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24275+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24276 return -EFAULT;
24277
24278 /* Make sure it is what we expect it to be */
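The ktla_ktva() wrapper threaded through ftrace above translates a kernel-text linear address into the address of the same bytes in the mapping that may actually be read or patched; under CONFIG_PAX_KERNEXEC the two differ by a constant. A placeholder sketch only (KTLA_KTVA_SHIFT is illustrative, not the real macro; without KERNEXEC the conversion is the identity, which a shift of 0 models):

#define KTLA_KTVA_SHIFT 0UL   /* placeholder; nonzero under KERNEXEC */

static inline unsigned long ktla_ktva(unsigned long addr)
{
    return addr + KTLA_KTVA_SHIFT;
}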
24279diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24280index eda1a86..8f6df48 100644
24281--- a/arch/x86/kernel/head64.c
24282+++ b/arch/x86/kernel/head64.c
24283@@ -67,12 +67,12 @@ again:
24284 pgd = *pgd_p;
24285
24286 /*
24287- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24288- * critical -- __PAGE_OFFSET would point us back into the dynamic
24289+ * The use of __early_va rather than __va here is critical:
24290+ * __va would point us back into the dynamic
24291 * range and we might end up looping forever...
24292 */
24293 if (pgd)
24294- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24295+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24296 else {
24297 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24298 reset_early_page_tables();
24299@@ -82,13 +82,13 @@ again:
24300 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24301 for (i = 0; i < PTRS_PER_PUD; i++)
24302 pud_p[i] = 0;
24303- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24304+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24305 }
24306 pud_p += pud_index(address);
24307 pud = *pud_p;
24308
24309 if (pud)
24310- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24311+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24312 else {
24313 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24314 reset_early_page_tables();
24315@@ -98,7 +98,7 @@ again:
24316 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24317 for (i = 0; i < PTRS_PER_PMD; i++)
24318 pmd_p[i] = 0;
24319- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24320+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24321 }
24322 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24323 pmd_p[pmd_index(address)] = pmd;
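__early_va() as used above can be reconstructed from the expressions it replaces: the only phys-to-virt window guaranteed to be mapped this early is the kernel-image one, so the helper is presumably

#define __early_va(x) \
    ((void *)((unsigned long)(x) + __START_KERNEL_map - phys_base))

which is exactly the open-coded "+ __START_KERNEL_map - phys_base" arithmetic the hunk removes, whereas __va() would add PAGE_OFFSET and land in the not-yet-mapped dynamic range.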
24324@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24325 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24326 early_printk("Kernel alive\n");
24327
24328- clear_page(init_level4_pgt);
24329 /* set init_level4_pgt kernel high mapping*/
24330 init_level4_pgt[511] = early_level4_pgt[511];
24331
24332diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24333index f36bd42..0ab4474 100644
24334--- a/arch/x86/kernel/head_32.S
24335+++ b/arch/x86/kernel/head_32.S
24336@@ -26,6 +26,12 @@
24337 /* Physical address */
24338 #define pa(X) ((X) - __PAGE_OFFSET)
24339
24340+#ifdef CONFIG_PAX_KERNEXEC
24341+#define ta(X) (X)
24342+#else
24343+#define ta(X) ((X) - __PAGE_OFFSET)
24344+#endif
24345+
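Worked example, assuming the usual 3G/1G i386 split (__PAGE_OFFSET = 0xC0000000): pa(0xC1000000) = 0xC1000000 - 0xC0000000 = 0x01000000, so a symbol at virtual 3 GiB + 16 MiB lives at physical 16 MiB. The new ta() differs only under CONFIG_PAX_KERNEXEC, where the text segment is arranged so that jump-table targets need no adjustment:

#define __PAGE_OFFSET 0xC0000000UL
#define pa(X) ((X) - __PAGE_OFFSET)   /* pa(0xC1000000UL) == 0x01000000UL */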
24346 /*
24347 * References to members of the new_cpu_data structure.
24348 */
24349@@ -55,11 +61,7 @@
24350 * and smaller than max_low_pfn, otherwise it will waste some page table entries
24351 */
24352
24353-#if PTRS_PER_PMD > 1
24354-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24355-#else
24356-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24357-#endif
24358+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24359
24360 /* Number of possible pages in the lowmem region */
24361 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24362@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24363 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24364
24365 /*
24366+ * Real beginning of normal "text" segment
24367+ */
24368+ENTRY(stext)
24369+ENTRY(_stext)
24370+
24371+/*
24372 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24373 * %esi points to the real-mode code as a 32-bit pointer.
24374 * CS and DS must be 4 GB flat segments, but we don't depend on
24375@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24376 * can.
24377 */
24378 __HEAD
24379+
24380+#ifdef CONFIG_PAX_KERNEXEC
24381+ jmp startup_32
24382+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24383+.fill PAGE_SIZE-5,1,0xcc
24384+#endif
24385+
24386 ENTRY(startup_32)
24387 movl pa(stack_start),%ecx
24388
24389@@ -106,6 +121,59 @@ ENTRY(startup_32)
24390 2:
24391 leal -__PAGE_OFFSET(%ecx),%esp
24392
24393+#ifdef CONFIG_SMP
24394+ movl $pa(cpu_gdt_table),%edi
24395+ movl $__per_cpu_load,%eax
24396+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24397+ rorl $16,%eax
24398+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24399+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24400+ movl $__per_cpu_end - 1,%eax
24401+ subl $__per_cpu_start,%eax
24402+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24403+#endif
24404+
24405+#ifdef CONFIG_PAX_MEMORY_UDEREF
24406+ movl $NR_CPUS,%ecx
24407+ movl $pa(cpu_gdt_table),%edi
24408+1:
24409+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24410+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24411+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24412+ addl $PAGE_SIZE_asm,%edi
24413+ loop 1b
24414+#endif
24415+
24416+#ifdef CONFIG_PAX_KERNEXEC
24417+ movl $pa(boot_gdt),%edi
24418+ movl $__LOAD_PHYSICAL_ADDR,%eax
24419+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24420+ rorl $16,%eax
24421+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24422+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24423+ rorl $16,%eax
24424+
24425+ ljmp $(__BOOT_CS),$1f
24426+1:
24427+
24428+ movl $NR_CPUS,%ecx
24429+ movl $pa(cpu_gdt_table),%edi
24430+ addl $__PAGE_OFFSET,%eax
24431+1:
24432+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24433+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24434+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24435+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24436+ rorl $16,%eax
24437+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24438+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24439+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24440+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24441+ rorl $16,%eax
24442+ addl $PAGE_SIZE_asm,%edi
24443+ loop 1b
24444+#endif
24445+
24446 /*
24447 * Clear BSS first so that there are no surprises...
24448 */
24449@@ -201,8 +269,11 @@ ENTRY(startup_32)
24450 movl %eax, pa(max_pfn_mapped)
24451
24452 /* Do early initialization of the fixmap area */
24453- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24454- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24455+#ifdef CONFIG_COMPAT_VDSO
24456+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24457+#else
24458+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24459+#endif
24460 #else /* Not PAE */
24461
24462 page_pde_offset = (__PAGE_OFFSET >> 20);
24463@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24464 movl %eax, pa(max_pfn_mapped)
24465
24466 /* Do early initialization of the fixmap area */
24467- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24468- movl %eax,pa(initial_page_table+0xffc)
24469+#ifdef CONFIG_COMPAT_VDSO
24470+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24471+#else
24472+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24473+#endif
24474 #endif
24475
24476 #ifdef CONFIG_PARAVIRT
24477@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24478 cmpl $num_subarch_entries, %eax
24479 jae bad_subarch
24480
24481- movl pa(subarch_entries)(,%eax,4), %eax
24482- subl $__PAGE_OFFSET, %eax
24483- jmp *%eax
24484+ jmp *pa(subarch_entries)(,%eax,4)
24485
24486 bad_subarch:
24487 WEAK(lguest_entry)
24488@@ -261,10 +333,10 @@ WEAK(xen_entry)
24489 __INITDATA
24490
24491 subarch_entries:
24492- .long default_entry /* normal x86/PC */
24493- .long lguest_entry /* lguest hypervisor */
24494- .long xen_entry /* Xen hypervisor */
24495- .long default_entry /* Moorestown MID */
24496+ .long ta(default_entry) /* normal x86/PC */
24497+ .long ta(lguest_entry) /* lguest hypervisor */
24498+ .long ta(xen_entry) /* Xen hypervisor */
24499+ .long ta(default_entry) /* Moorestown MID */
24500 num_subarch_entries = (. - subarch_entries) / 4
24501 .previous
24502 #else
24503@@ -354,6 +426,7 @@ default_entry:
24504 movl pa(mmu_cr4_features),%eax
24505 movl %eax,%cr4
24506
24507+#ifdef CONFIG_X86_PAE
24508 testb $X86_CR4_PAE, %al # check if PAE is enabled
24509 jz enable_paging
24510
24511@@ -382,6 +455,9 @@ default_entry:
24512 /* Make changes effective */
24513 wrmsr
24514
24515+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24516+#endif
24517+
24518 enable_paging:
24519
24520 /*
24521@@ -449,14 +525,20 @@ is486:
24522 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24523 movl %eax,%ss # after changing gdt.
24524
24525- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24526+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24527 movl %eax,%ds
24528 movl %eax,%es
24529
24530 movl $(__KERNEL_PERCPU), %eax
24531 movl %eax,%fs # set this cpu's percpu
24532
24533+#ifdef CONFIG_CC_STACKPROTECTOR
24534 movl $(__KERNEL_STACK_CANARY),%eax
24535+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24536+ movl $(__USER_DS),%eax
24537+#else
24538+ xorl %eax,%eax
24539+#endif
24540 movl %eax,%gs
24541
24542 xorl %eax,%eax # Clear LDT
24543@@ -512,8 +594,11 @@ setup_once:
24544 * relocation. Manually set base address in stack canary
24545 * segment descriptor.
24546 */
24547- movl $gdt_page,%eax
24548+ movl $cpu_gdt_table,%eax
24549 movl $stack_canary,%ecx
24550+#ifdef CONFIG_SMP
24551+ addl $__per_cpu_load,%ecx
24552+#endif
24553 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24554 shrl $16, %ecx
24555 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24556@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24557 cmpl $2,(%esp) # X86_TRAP_NMI
24558 je is_nmi # Ignore NMI
24559
24560- cmpl $2,%ss:early_recursion_flag
24561+ cmpl $1,%ss:early_recursion_flag
24562 je hlt_loop
24563 incl %ss:early_recursion_flag
24564
24565@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24566 pushl (20+6*4)(%esp) /* trapno */
24567 pushl $fault_msg
24568 call printk
24569-#endif
24570 call dump_stack
24571+#endif
24572 hlt_loop:
24573 hlt
24574 jmp hlt_loop
24575@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24576 /* This is the default interrupt "handler" :-) */
24577 ALIGN
24578 ignore_int:
24579- cld
24580 #ifdef CONFIG_PRINTK
24581+ cmpl $2,%ss:early_recursion_flag
24582+ je hlt_loop
24583+ incl %ss:early_recursion_flag
24584+ cld
24585 pushl %eax
24586 pushl %ecx
24587 pushl %edx
24588@@ -617,9 +705,6 @@ ignore_int:
24589 movl $(__KERNEL_DS),%eax
24590 movl %eax,%ds
24591 movl %eax,%es
24592- cmpl $2,early_recursion_flag
24593- je hlt_loop
24594- incl early_recursion_flag
24595 pushl 16(%esp)
24596 pushl 24(%esp)
24597 pushl 32(%esp)
24598@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24599 /*
24600 * BSS section
24601 */
24602-__PAGE_ALIGNED_BSS
24603- .align PAGE_SIZE
24604 #ifdef CONFIG_X86_PAE
24605+.section .initial_pg_pmd,"a",@progbits
24606 initial_pg_pmd:
24607 .fill 1024*KPMDS,4,0
24608 #else
24609+.section .initial_page_table,"a",@progbits
24610 ENTRY(initial_page_table)
24611 .fill 1024,4,0
24612 #endif
24613+.section .initial_pg_fixmap,"a",@progbits
24614 initial_pg_fixmap:
24615 .fill 1024,4,0
24616+.section .empty_zero_page,"a",@progbits
24617 ENTRY(empty_zero_page)
24618 .fill 4096,1,0
24619+.section .swapper_pg_dir,"a",@progbits
24620 ENTRY(swapper_pg_dir)
24621+#ifdef CONFIG_X86_PAE
24622+ .fill 4,8,0
24623+#else
24624 .fill 1024,4,0
24625+#endif
24626
24627 /*
24628 * This starts the data section.
24629 */
24630 #ifdef CONFIG_X86_PAE
24631-__PAGE_ALIGNED_DATA
24632- /* Page-aligned for the benefit of paravirt? */
24633- .align PAGE_SIZE
24634+.section .initial_page_table,"a",@progbits
24635 ENTRY(initial_page_table)
24636 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24637 # if KPMDS == 3
24638@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24639 # error "Kernel PMDs should be 1, 2 or 3"
24640 # endif
24641 .align PAGE_SIZE /* needs to be page-sized too */
24642+
24643+#ifdef CONFIG_PAX_PER_CPU_PGD
24644+ENTRY(cpu_pgd)
24645+ .rept 2*NR_CPUS
24646+ .fill 4,8,0
24647+ .endr
24648+#endif
24649+
24650 #endif
24651
24652 .data
24653 .balign 4
24654 ENTRY(stack_start)
24655- .long init_thread_union+THREAD_SIZE
24656+ .long init_thread_union+THREAD_SIZE-8
24657
24658 __INITRODATA
24659 int_msg:
24660@@ -727,7 +825,7 @@ fault_msg:
24661 * segment size, and 32-bit linear address value:
24662 */
24663
24664- .data
24665+.section .rodata,"a",@progbits
24666 .globl boot_gdt_descr
24667 .globl idt_descr
24668
24669@@ -736,7 +834,7 @@ fault_msg:
24670 .word 0 # 32 bit align gdt_desc.address
24671 boot_gdt_descr:
24672 .word __BOOT_DS+7
24673- .long boot_gdt - __PAGE_OFFSET
24674+ .long pa(boot_gdt)
24675
24676 .word 0 # 32-bit align idt_desc.address
24677 idt_descr:
24678@@ -747,7 +845,7 @@ idt_descr:
24679 .word 0 # 32 bit align gdt_desc.address
24680 ENTRY(early_gdt_descr)
24681 .word GDT_ENTRIES*8-1
24682- .long gdt_page /* Overwritten for secondary CPUs */
24683+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24684
24685 /*
24686 * The boot_gdt must mirror the equivalent in setup.S and is
24687@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24688 .align L1_CACHE_BYTES
24689 ENTRY(boot_gdt)
24690 .fill GDT_ENTRY_BOOT_CS,8,0
24691- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24692- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24693+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24694+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24695+
24696+ .align PAGE_SIZE_asm
24697+ENTRY(cpu_gdt_table)
24698+ .rept NR_CPUS
24699+ .quad 0x0000000000000000 /* NULL descriptor */
24700+ .quad 0x0000000000000000 /* 0x0b reserved */
24701+ .quad 0x0000000000000000 /* 0x13 reserved */
24702+ .quad 0x0000000000000000 /* 0x1b reserved */
24703+
24704+#ifdef CONFIG_PAX_KERNEXEC
24705+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24706+#else
24707+ .quad 0x0000000000000000 /* 0x20 unused */
24708+#endif
24709+
24710+ .quad 0x0000000000000000 /* 0x28 unused */
24711+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24712+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24713+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24714+ .quad 0x0000000000000000 /* 0x4b reserved */
24715+ .quad 0x0000000000000000 /* 0x53 reserved */
24716+ .quad 0x0000000000000000 /* 0x5b reserved */
24717+
24718+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24719+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24720+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24721+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24722+
24723+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24724+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24725+
24726+ /*
24727+ * Segments used for calling PnP BIOS have byte granularity.
24728+ * The code segments and data segments have fixed 64k limits,
24729+ * the transfer segment sizes are set at run time.
24730+ */
24731+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24732+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24733+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24734+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24735+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24736+
24737+ /*
24738+ * The APM segments have byte granularity and their bases
24739+ * are set at run time. All have 64k limits.
24740+ */
24741+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24742+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24743+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24744+
24745+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24746+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24747+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24748+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24749+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24750+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24751+
24752+ /* Be sure this is zeroed to avoid false validations in Xen */
24753+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24754+ .endr
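The .quad values in cpu_gdt_table pack base, limit and flags into one 64-bit descriptor; note the patch also flips 0x...9a to 0x...9b, setting the accessed bit, presumably so the CPU never has to write to a GDT that now lives in read-only memory. A small user-space decoder to check the kernel-CS quad:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t d = 0x00cf9b000000ffffULL;                 /* kernel 4GB code */
    uint32_t limit = (uint32_t)(d & 0xffff) | (uint32_t)((d >> 32) & 0xf0000);
    uint32_t base  = (uint32_t)((d >> 16) & 0xffffff) |
                     (uint32_t)(((d >> 56) & 0xff) << 24);
    unsigned access = (unsigned)((d >> 40) & 0xff);     /* 0x9b */
    int g = (int)((d >> 55) & 1);                       /* 4 KiB granularity */

    /* 0x9b = present | ring 0 | code segment, execute/read, accessed */
    printf("base=%#x limit=%#x%s access=%#x\n",
           base, limit, g ? " (4K pages => 4 GiB)" : "", access);
    return 0;
}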
24755diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24756index a468c0a..8b5a879 100644
24757--- a/arch/x86/kernel/head_64.S
24758+++ b/arch/x86/kernel/head_64.S
24759@@ -20,6 +20,8 @@
24760 #include <asm/processor-flags.h>
24761 #include <asm/percpu.h>
24762 #include <asm/nops.h>
24763+#include <asm/cpufeature.h>
24764+#include <asm/alternative-asm.h>
24765
24766 #ifdef CONFIG_PARAVIRT
24767 #include <asm/asm-offsets.h>
24768@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24769 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24770 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24771 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24772+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24773+L3_VMALLOC_START = pud_index(VMALLOC_START)
24774+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24775+L3_VMALLOC_END = pud_index(VMALLOC_END)
24776+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24777+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24778
24779 .text
24780 __HEAD
24781@@ -89,11 +97,24 @@ startup_64:
24782 * Fixup the physical addresses in the page table
24783 */
24784 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24785+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24786+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24787+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24788+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24789+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24790
24791- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24792- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24793+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24794+#ifndef CONFIG_XEN
24795+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24796+#endif
24797+
24798+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24799+
24800+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24801+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24802
24803 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24804+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24805
24806 /*
24807 * Set up the identity mapping for the switchover. These
24808@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
24809 * after the boot processor executes this code.
24810 */
24811
24812+ orq $-1, %rbp
24813 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24814 1:
24815
24816- /* Enable PAE mode and PGE */
24817- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24818+ /* Enable PAE mode and PSE/PGE */
24819+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24820 movq %rcx, %cr4
24821
24822 /* Setup early boot stage 4 level pagetables. */
24823@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
24824 movl $MSR_EFER, %ecx
24825 rdmsr
24826 btsl $_EFER_SCE, %eax /* Enable System Call */
24827- btl $20,%edi /* No Execute supported? */
24828+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24829 jnc 1f
24830 btsl $_EFER_NX, %eax
24831+ cmpq $-1, %rbp
24832+ je 1f
24833 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24834+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24835+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24836+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24837+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24838+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24839+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24840+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24841 1: wrmsr /* Make changes effective */
24842
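The btl $(X86_FEATURE_NX & 31) above tests CPUID 0x80000001 EDX bit 20, the same bit the old hard-coded btl $20 tested, now spelled symbolically (X86_FEATURE_NX is 1*32+20, so the "& 31" yields 20). Enabling NX itself is an EFER MSR write; a minimal ring-0 sketch of that step:

#include <stdint.h>

#define MSR_EFER 0xc0000080u
#define EFER_NX  (1u << 11)

static inline void enable_nx(void)
{
    uint32_t lo, hi;
    __asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(MSR_EFER));
    lo |= EFER_NX;
    __asm__ volatile("wrmsr" : : "a"(lo), "d"(hi), "c"(MSR_EFER));
}

The extra btsq lines then set bit 63 (_PAGE_BIT_NX) directly in the top-level entries covering the non-text regions, so those mappings are never executable.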
24843 /* Setup cr0 */
24844@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
24845 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24846 * address given in m16:64.
24847 */
24848+ pax_set_fptr_mask
24849 movq initial_code(%rip),%rax
24850 pushq $0 # fake return address to stop unwinder
24851 pushq $__KERNEL_CS # set correct cs
24852@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
24853 .quad INIT_PER_CPU_VAR(irq_stack_union)
24854
24855 GLOBAL(stack_start)
24856- .quad init_thread_union+THREAD_SIZE-8
24857+ .quad init_thread_union+THREAD_SIZE-16
24858 .word 0
24859 __FINITDATA
24860
24861@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
24862 call dump_stack
24863 #ifdef CONFIG_KALLSYMS
24864 leaq early_idt_ripmsg(%rip),%rdi
24865- movq 40(%rsp),%rsi # %rip again
24866+ movq 88(%rsp),%rsi # %rip again
24867 call __print_symbol
24868 #endif
24869 #endif /* EARLY_PRINTK */
24870@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
24871 early_recursion_flag:
24872 .long 0
24873
24874+ .section .rodata,"a",@progbits
24875 #ifdef CONFIG_EARLY_PRINTK
24876 early_idt_msg:
24877 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24878@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
24879 NEXT_PAGE(early_dynamic_pgts)
24880 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24881
24882- .data
24883+ .section .rodata,"a",@progbits
24884
24885-#ifndef CONFIG_XEN
24886 NEXT_PAGE(init_level4_pgt)
24887- .fill 512,8,0
24888-#else
24889-NEXT_PAGE(init_level4_pgt)
24890- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24891 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24892 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24893+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24894+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24895+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24896+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24897+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24898+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24899 .org init_level4_pgt + L4_START_KERNEL*8, 0
24900 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24901 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24902
24903+#ifdef CONFIG_PAX_PER_CPU_PGD
24904+NEXT_PAGE(cpu_pgd)
24905+ .rept 2*NR_CPUS
24906+ .fill 512,8,0
24907+ .endr
24908+#endif
24909+
24910 NEXT_PAGE(level3_ident_pgt)
24911 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24912+#ifdef CONFIG_XEN
24913 .fill 511, 8, 0
24914+#else
24915+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24916+ .fill 510,8,0
24917+#endif
24918+
24919+NEXT_PAGE(level3_vmalloc_start_pgt)
24920+ .fill 512,8,0
24921+
24922+NEXT_PAGE(level3_vmalloc_end_pgt)
24923+ .fill 512,8,0
24924+
24925+NEXT_PAGE(level3_vmemmap_pgt)
24926+ .fill L3_VMEMMAP_START,8,0
24927+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24928+
24929 NEXT_PAGE(level2_ident_pgt)
24930- /* Since I easily can, map the first 1G.
24931+ /* Since I easily can, map the first 2G.
24932 * Don't set NX because code runs from these pages.
24933 */
24934- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
24935-#endif
24936+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
24937
24938 NEXT_PAGE(level3_kernel_pgt)
24939 .fill L3_START_KERNEL,8,0
24940@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
24941 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
24942 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24943
24944+NEXT_PAGE(level2_vmemmap_pgt)
24945+ .fill 512,8,0
24946+
24947 NEXT_PAGE(level2_kernel_pgt)
24948 /*
24949 * 512 MB kernel mapping. We spend a full page on this pagetable
24950@@ -494,28 +553,64 @@ NEXT_PAGE(level2_kernel_pgt)
24951 NEXT_PAGE(level2_fixmap_pgt)
24952 .fill 506,8,0
24953 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24954- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
24955- .fill 5,8,0
24956+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
24957+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
24958+ .fill 4,8,0
24959
24960 NEXT_PAGE(level1_fixmap_pgt)
24961 .fill 512,8,0
24962
24963+NEXT_PAGE(level1_vsyscall_pgt)
24964+ .fill 512,8,0
24965+
24966 #undef PMDS
24967
24968- .data
24969+ .align PAGE_SIZE
24970+ENTRY(cpu_gdt_table)
24971+ .rept NR_CPUS
24972+ .quad 0x0000000000000000 /* NULL descriptor */
24973+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
24974+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
24975+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
24976+ .quad 0x00cffb000000ffff /* __USER32_CS */
24977+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
24978+ .quad 0x00affb000000ffff /* __USER_CS */
24979+
24980+#ifdef CONFIG_PAX_KERNEXEC
24981+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
24982+#else
24983+ .quad 0x0 /* unused */
24984+#endif
24985+
24986+ .quad 0,0 /* TSS */
24987+ .quad 0,0 /* LDT */
24988+ .quad 0,0,0 /* three TLS descriptors */
24989+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
24990+ /* asm/segment.h:GDT_ENTRIES must match this */
24991+
24992+#ifdef CONFIG_PAX_MEMORY_UDEREF
24993+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
24994+#else
24995+ .quad 0x0 /* unused */
24996+#endif
24997+
24998+ /* zero the remaining page */
24999+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25000+ .endr
25001+
25002 .align 16
25003 .globl early_gdt_descr
25004 early_gdt_descr:
25005 .word GDT_ENTRIES*8-1
25006 early_gdt_descr_base:
25007- .quad INIT_PER_CPU_VAR(gdt_page)
25008+ .quad cpu_gdt_table
25009
25010 ENTRY(phys_base)
25011 /* This must match the first entry in level2_kernel_pgt */
25012 .quad 0x0000000000000000
25013
25014 #include "../../x86/xen/xen-head.S"
25015-
25016- __PAGE_ALIGNED_BSS
25017+
25018+ .section .rodata,"a",@progbits
25019 NEXT_PAGE(empty_zero_page)
25020 .skip PAGE_SIZE
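The ".quad levelN_..._pgt - __START_KERNEL_map + _KERNPG_TABLE" lines throughout this file compute a page-table entry at link time: subtract __START_KERNEL_map to turn the virtual symbol address into a physical one, then add the flag bits. As arithmetic (flag values per the usual x86 definitions):

#include <stdint.h>

#define __START_KERNEL_map 0xffffffff80000000UL
#define _KERNPG_TABLE      0x063UL   /* PRESENT|RW|ACCESSED|DIRTY */
#define _PAGE_TABLE        0x067UL   /* same, plus USER */

static uint64_t make_table_entry(uint64_t table_virt, uint64_t flags)
{
    return (table_virt - __START_KERNEL_map) + flags;
}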
25021diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25022index 05fd74f..c3548b1 100644
25023--- a/arch/x86/kernel/i386_ksyms_32.c
25024+++ b/arch/x86/kernel/i386_ksyms_32.c
25025@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25026 EXPORT_SYMBOL(cmpxchg8b_emu);
25027 #endif
25028
25029+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25030+
25031 /* Networking helper routines. */
25032 EXPORT_SYMBOL(csum_partial_copy_generic);
25033+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25034+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25035
25036 EXPORT_SYMBOL(__get_user_1);
25037 EXPORT_SYMBOL(__get_user_2);
25038@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25039 EXPORT_SYMBOL(___preempt_schedule_context);
25040 #endif
25041 #endif
25042+
25043+#ifdef CONFIG_PAX_KERNEXEC
25044+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25045+#endif
25046+
25047+#ifdef CONFIG_PAX_PER_CPU_PGD
25048+EXPORT_SYMBOL(cpu_pgd);
25049+#endif
25050diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25051index a9a4229..6f4d476 100644
25052--- a/arch/x86/kernel/i387.c
25053+++ b/arch/x86/kernel/i387.c
25054@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25055 static inline bool interrupted_user_mode(void)
25056 {
25057 struct pt_regs *regs = get_irq_regs();
25058- return regs && user_mode_vm(regs);
25059+ return regs && user_mode(regs);
25060 }
25061
25062 /*
25063diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25064index e7cc537..67d7372 100644
25065--- a/arch/x86/kernel/i8259.c
25066+++ b/arch/x86/kernel/i8259.c
25067@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25068 static void make_8259A_irq(unsigned int irq)
25069 {
25070 disable_irq_nosync(irq);
25071- io_apic_irqs &= ~(1<<irq);
25072+ io_apic_irqs &= ~(1UL<<irq);
25073 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25074 enable_irq(irq);
25075 }
25076@@ -208,7 +208,7 @@ spurious_8259A_irq:
25077 "spurious 8259A interrupt: IRQ%d.\n", irq);
25078 spurious_irq_mask |= irqmask;
25079 }
25080- atomic_inc(&irq_err_count);
25081+ atomic_inc_unchecked(&irq_err_count);
25082 /*
25083 * Theoretically we do not have to handle this IRQ,
25084 * but in Linux this does not cause problems and is
25085@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25086 /* (slave's support for AEOI in flat mode is to be investigated) */
25087 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25088
25089+ pax_open_kernel();
25090 if (auto_eoi)
25091 /*
25092 * In AEOI mode we just have to mask the interrupt
25093 * when acking.
25094 */
25095- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25096+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25097 else
25098- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25099+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25100+ pax_close_kernel();
25101
25102 udelay(100); /* wait for 8259A to initialize */
25103
25104diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25105index a979b5b..1d6db75 100644
25106--- a/arch/x86/kernel/io_delay.c
25107+++ b/arch/x86/kernel/io_delay.c
25108@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25109 * Quirk table for systems that misbehave (lock up, etc.) if port
25110 * 0x80 is used:
25111 */
25112-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25113+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25114 {
25115 .callback = dmi_io_delay_0xed_port,
25116 .ident = "Compaq Presario V6000",
25117diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25118index 4ddaf66..49d5c18 100644
25119--- a/arch/x86/kernel/ioport.c
25120+++ b/arch/x86/kernel/ioport.c
25121@@ -6,6 +6,7 @@
25122 #include <linux/sched.h>
25123 #include <linux/kernel.h>
25124 #include <linux/capability.h>
25125+#include <linux/security.h>
25126 #include <linux/errno.h>
25127 #include <linux/types.h>
25128 #include <linux/ioport.h>
25129@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25130 return -EINVAL;
25131 if (turn_on && !capable(CAP_SYS_RAWIO))
25132 return -EPERM;
25133+#ifdef CONFIG_GRKERNSEC_IO
25134+ if (turn_on && grsec_disable_privio) {
25135+ gr_handle_ioperm();
25136+ return -ENODEV;
25137+ }
25138+#endif
25139
25140 /*
25141 * If it's the first ioperm() call in this thread's lifetime, set the
25142@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25143 * because the ->io_bitmap_max value must match the bitmap
25144 * contents:
25145 */
25146- tss = &per_cpu(init_tss, get_cpu());
25147+ tss = init_tss + get_cpu();
25148
25149 if (turn_on)
25150 bitmap_clear(t->io_bitmap_ptr, from, num);
25151@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25152 if (level > old) {
25153 if (!capable(CAP_SYS_RAWIO))
25154 return -EPERM;
25155+#ifdef CONFIG_GRKERNSEC_IO
25156+ if (grsec_disable_privio) {
25157+ gr_handle_iopl();
25158+ return -ENODEV;
25159+ }
25160+#endif
25161 }
25162 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25163 t->iopl = level << 12;
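The two lines above encode the privilege level into EFLAGS bits 12-13 (X86_EFLAGS_IOPL). A quick user-space check of the arithmetic:

#include <stdio.h>

#define X86_EFLAGS_IOPL 0x3000UL   /* bits 12-13 */

int main(void)
{
    unsigned long flags = 0x246;   /* a typical EFLAGS value */
    unsigned long level = 3;       /* iopl(3): all ports, cli/sti allowed */

    flags = (flags & ~X86_EFLAGS_IOPL) | (level << 12);
    printf("%#lx\n", flags);       /* prints 0x3246 */
    return 0;
}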
25164diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25165index 705ef8d..8672c9d 100644
25166--- a/arch/x86/kernel/irq.c
25167+++ b/arch/x86/kernel/irq.c
25168@@ -22,7 +22,7 @@
25169 #define CREATE_TRACE_POINTS
25170 #include <asm/trace/irq_vectors.h>
25171
25172-atomic_t irq_err_count;
25173+atomic_unchecked_t irq_err_count;
25174
25175 /* Function pointer for generic interrupt vector handling */
25176 void (*x86_platform_ipi_callback)(void) = NULL;
25177@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25178 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25179 seq_puts(p, " Hypervisor callback interrupts\n");
25180 #endif
25181- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25182+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25183 #if defined(CONFIG_X86_IO_APIC)
25184- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25185+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25186 #endif
25187 return 0;
25188 }
25189@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25190
25191 u64 arch_irq_stat(void)
25192 {
25193- u64 sum = atomic_read(&irq_err_count);
25194+ u64 sum = atomic_read_unchecked(&irq_err_count);
25195 return sum;
25196 }
25197
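
irq_err_count and its readers switch from atomic_t to atomic_unchecked_t here because PaX's REFCOUNT hardening makes ordinary atomic_t increments trap on signed overflow; pure event counters that may legitimately wrap are opted out through the _unchecked variants. A plain-C sketch of the two flavors, assuming -std=c11 and a GCC/Clang builtin (the checked version aborts where PAX_REFCOUNT would raise a trap):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Checked flavor: detect signed overflow the way a PAX_REFCOUNT-instrumented
 * atomic_inc() would, and bail instead of wrapping. */
static void inc_checked(int *v)
{
	int next;
	if (__builtin_add_overflow(*v, 1, &next)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	*v = next;
}

/* Unchecked flavor: plain wrapping increment, what atomic_inc_unchecked()
 * preserves for counters like irq_err_count that may legitimately wrap. */
static void inc_unchecked(atomic_int *v)
{
	atomic_fetch_add_explicit(v, 1, memory_order_relaxed);
}

int main(void)
{
	atomic_int err_count = INT_MAX;
	inc_unchecked(&err_count);              /* wraps silently, by design */
	printf("unchecked wrapped to %d\n", atomic_load(&err_count));

	int refs = INT_MAX;
	inc_checked(&refs);                     /* aborts before wrapping */
	return 0;
}
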
25198diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25199index 63ce838..2ea3e06 100644
25200--- a/arch/x86/kernel/irq_32.c
25201+++ b/arch/x86/kernel/irq_32.c
25202@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25203
25204 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25205
25206+extern void gr_handle_kernel_exploit(void);
25207+
25208 int sysctl_panic_on_stackoverflow __read_mostly;
25209
25210 /* Debugging check for stack overflow: is there less than 1KB free? */
25211@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25212 __asm__ __volatile__("andl %%esp,%0" :
25213 "=r" (sp) : "0" (THREAD_SIZE - 1));
25214
25215- return sp < (sizeof(struct thread_info) + STACK_WARN);
25216+ return sp < STACK_WARN;
25217 }
25218
25219 static void print_stack_overflow(void)
25220 {
25221 printk(KERN_WARNING "low stack detected by irq handler\n");
25222 dump_stack();
25223+ gr_handle_kernel_exploit();
25224 if (sysctl_panic_on_stackoverflow)
25225 panic("low stack detected by irq handler - check messages\n");
25226 }
25227@@ -84,10 +87,9 @@ static inline void *current_stack(void)
25228 static inline int
25229 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25230 {
25231- struct irq_stack *curstk, *irqstk;
25232+ struct irq_stack *irqstk;
25233 u32 *isp, *prev_esp, arg1, arg2;
25234
25235- curstk = (struct irq_stack *) current_stack();
25236 irqstk = __this_cpu_read(hardirq_stack);
25237
25238 /*
25239@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25240 * handler) we can't do that and just have to keep using the
25241 * current stack (which is the irq stack already after all)
25242 */
25243- if (unlikely(curstk == irqstk))
25244+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25245 return 0;
25246
25247- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25248+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25249
25250 /* Save the next esp at the bottom of the stack */
25251 prev_esp = (u32 *)irqstk;
25252 *prev_esp = current_stack_pointer;
25253
25254+#ifdef CONFIG_PAX_MEMORY_UDEREF
25255+ __set_fs(MAKE_MM_SEG(0));
25256+#endif
25257+
25258 if (unlikely(overflow))
25259 call_on_stack(print_stack_overflow, isp);
25260
25261@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25262 : "0" (irq), "1" (desc), "2" (isp),
25263 "D" (desc->handle_irq)
25264 : "memory", "cc", "ecx");
25265+
25266+#ifdef CONFIG_PAX_MEMORY_UDEREF
25267+ __set_fs(current_thread_info()->addr_limit);
25268+#endif
25269+
25270 return 1;
25271 }
25272
25273@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25274 */
25275 void irq_ctx_init(int cpu)
25276 {
25277- struct irq_stack *irqstk;
25278-
25279 if (per_cpu(hardirq_stack, cpu))
25280 return;
25281
25282- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25283- THREADINFO_GFP,
25284- THREAD_SIZE_ORDER));
25285- per_cpu(hardirq_stack, cpu) = irqstk;
25286-
25287- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25288- THREADINFO_GFP,
25289- THREAD_SIZE_ORDER));
25290- per_cpu(softirq_stack, cpu) = irqstk;
25291-
25292- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25293- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25294+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25295+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25296 }
25297
25298 void do_softirq_own_stack(void)
25299 {
25300- struct thread_info *curstk;
25301 struct irq_stack *irqstk;
25302 u32 *isp, *prev_esp;
25303
25304- curstk = current_stack();
25305 irqstk = __this_cpu_read(softirq_stack);
25306
25307 /* build the stack frame on the softirq stack */
25308@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
25309 prev_esp = (u32 *)irqstk;
25310 *prev_esp = current_stack_pointer;
25311
25312+#ifdef CONFIG_PAX_MEMORY_UDEREF
25313+ __set_fs(MAKE_MM_SEG(0));
25314+#endif
25315+
25316 call_on_stack(__do_softirq, isp);
25317+
25318+#ifdef CONFIG_PAX_MEMORY_UDEREF
25319+ __set_fs(current_thread_info()->addr_limit);
25320+#endif
25321+
25322 }
25323
25324 bool handle_irq(unsigned irq, struct pt_regs *regs)
25325@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25326 if (unlikely(!desc))
25327 return false;
25328
25329- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25330+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25331 if (unlikely(overflow))
25332 print_stack_overflow();
25333 desc->handle_irq(irq, desc);
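
The irq_32.c changes also bracket execution on the hard/soft IRQ stacks with __set_fs(MAKE_MM_SEG(0)) before and __set_fs(current_thread_info()->addr_limit) after, so that under PAX_MEMORY_UDEREF no userland segment window stays open while the handler runs. A scheme-only sketch with an invented thread-local addr_limit:

#include <stdio.h>

static __thread unsigned long addr_limit = 0xc0000000UL;  /* invented USER limit */

static void set_fs_sketch(unsigned long lim) { addr_limit = lim; }

static void run_on_irq_stack(void (*fn)(void))
{
	unsigned long saved = addr_limit;
	set_fs_sketch(0);        /* MAKE_MM_SEG(0): no user range reachable */
	fn();
	set_fs_sketch(saved);    /* the hunk restores the task's addr_limit */
}

static void handler(void) { printf("in handler, limit=%#lx\n", addr_limit); }

int main(void)
{
	run_on_irq_stack(handler);
	printf("after handler, limit=%#lx\n", addr_limit);
	return 0;
}
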
25334diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25335index e4b503d..824fce8 100644
25336--- a/arch/x86/kernel/irq_64.c
25337+++ b/arch/x86/kernel/irq_64.c
25338@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25339 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25340 EXPORT_PER_CPU_SYMBOL(irq_regs);
25341
25342+extern void gr_handle_kernel_exploit(void);
25343+
25344 int sysctl_panic_on_stackoverflow;
25345
25346 /*
25347@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25348 u64 estack_top, estack_bottom;
25349 u64 curbase = (u64)task_stack_page(current);
25350
25351- if (user_mode_vm(regs))
25352+ if (user_mode(regs))
25353 return;
25354
25355 if (regs->sp >= curbase + sizeof(struct thread_info) +
25356@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25357 irq_stack_top, irq_stack_bottom,
25358 estack_top, estack_bottom);
25359
25360+ gr_handle_kernel_exploit();
25361+
25362 if (sysctl_panic_on_stackoverflow)
25363 panic("low stack detected by irq handler - check messages\n");
25364 #endif
25365diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25366index 26d5a55..a01160a 100644
25367--- a/arch/x86/kernel/jump_label.c
25368+++ b/arch/x86/kernel/jump_label.c
25369@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25370 * Jump label is enabled for the first time.
25371 * So we expect a default_nop...
25372 */
25373- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25374+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25375 != 0))
25376 bug_at((void *)entry->code, __LINE__);
25377 } else {
25378@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25379 * ...otherwise expect an ideal_nop. Otherwise
25380 * something went horribly wrong.
25381 */
25382- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25383+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25384 != 0))
25385 bug_at((void *)entry->code, __LINE__);
25386 }
25387@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25388 * are converting the default nop to the ideal nop.
25389 */
25390 if (init) {
25391- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25392+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25393 bug_at((void *)entry->code, __LINE__);
25394 } else {
25395 code.jump = 0xe9;
25396 code.offset = entry->target -
25397 (entry->code + JUMP_LABEL_NOP_SIZE);
25398- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25399+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25400 bug_at((void *)entry->code, __LINE__);
25401 }
25402 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
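
ktla_ktva(), used throughout the jump_label hunk above and the kgdb/kprobes hunks that follow, converts a kernel text (linear) address into its readable/writable alias: under KERNEXEC on i386 the kernel image is visible at two addresses, and byte comparisons or patches must go through the alias. A user-space analogue with two mmap() views of one memfd, where a fixed delta plays the role of the kernel's constant offset (assumes a glibc with memfd_create()):

#define _GNU_SOURCE
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	int fd = memfd_create("text", 0);
	ftruncate(fd, pg);

	unsigned char *text  = mmap(NULL, pg, PROT_READ,
				    MAP_SHARED, fd, 0);    /* "execution" view */
	unsigned char *alias = mmap(NULL, pg, PROT_READ | PROT_WRITE,
				    MAP_SHARED, fd, 0);    /* writable alias */
	ptrdiff_t delta = alias - text;    /* ktla_ktva() adds a constant like this */

	memcpy(alias, "\x0f\x0b", 2);      /* "patch" code bytes via the alias */
	printf("seen through text view: %02x %02x\n", text[0], text[1]);

	/* jump_label-style compare, done on the alias of a text address: */
	unsigned char *ip = text;          /* entry->code analogue */
	printf("memcmp on ktla_ktva(ip): %d\n",
	       memcmp(ip + delta, "\x0f\x0b", 2));   /* 0: bytes match */
	return 0;
}
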
25403diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25404index 7ec1d5f..5a7d130 100644
25405--- a/arch/x86/kernel/kgdb.c
25406+++ b/arch/x86/kernel/kgdb.c
25407@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25408 #ifdef CONFIG_X86_32
25409 switch (regno) {
25410 case GDB_SS:
25411- if (!user_mode_vm(regs))
25412+ if (!user_mode(regs))
25413 *(unsigned long *)mem = __KERNEL_DS;
25414 break;
25415 case GDB_SP:
25416- if (!user_mode_vm(regs))
25417+ if (!user_mode(regs))
25418 *(unsigned long *)mem = kernel_stack_pointer(regs);
25419 break;
25420 case GDB_GS:
25421@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25422 bp->attr.bp_addr = breakinfo[breakno].addr;
25423 bp->attr.bp_len = breakinfo[breakno].len;
25424 bp->attr.bp_type = breakinfo[breakno].type;
25425- info->address = breakinfo[breakno].addr;
25426+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25427+ info->address = ktla_ktva(breakinfo[breakno].addr);
25428+ else
25429+ info->address = breakinfo[breakno].addr;
25430 info->len = breakinfo[breakno].len;
25431 info->type = breakinfo[breakno].type;
25432 val = arch_install_hw_breakpoint(bp);
25433@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25434 case 'k':
25435 /* clear the trace bit */
25436 linux_regs->flags &= ~X86_EFLAGS_TF;
25437- atomic_set(&kgdb_cpu_doing_single_step, -1);
25438+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25439
25440 /* set the trace bit if we're stepping */
25441 if (remcomInBuffer[0] == 's') {
25442 linux_regs->flags |= X86_EFLAGS_TF;
25443- atomic_set(&kgdb_cpu_doing_single_step,
25444+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25445 raw_smp_processor_id());
25446 }
25447
25448@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25449
25450 switch (cmd) {
25451 case DIE_DEBUG:
25452- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25453+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25454 if (user_mode(regs))
25455 return single_step_cont(regs, args);
25456 break;
25457@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25458 #endif /* CONFIG_DEBUG_RODATA */
25459
25460 bpt->type = BP_BREAKPOINT;
25461- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25462+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25463 BREAK_INSTR_SIZE);
25464 if (err)
25465 return err;
25466- err = probe_kernel_write((char *)bpt->bpt_addr,
25467+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25468 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25469 #ifdef CONFIG_DEBUG_RODATA
25470 if (!err)
25471@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25472 return -EBUSY;
25473 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25474 BREAK_INSTR_SIZE);
25475- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25476+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25477 if (err)
25478 return err;
25479 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25480@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25481 if (mutex_is_locked(&text_mutex))
25482 goto knl_write;
25483 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25484- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25485+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25486 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25487 goto knl_write;
25488 return err;
25489 knl_write:
25490 #endif /* CONFIG_DEBUG_RODATA */
25491- return probe_kernel_write((char *)bpt->bpt_addr,
25492+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25493 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25494 }
25495
25496diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25497index 98f654d..ac04352 100644
25498--- a/arch/x86/kernel/kprobes/core.c
25499+++ b/arch/x86/kernel/kprobes/core.c
25500@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25501 s32 raddr;
25502 } __packed *insn;
25503
25504- insn = (struct __arch_relative_insn *)from;
25505+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25506+
25507+ pax_open_kernel();
25508 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25509 insn->op = op;
25510+ pax_close_kernel();
25511 }
25512
25513 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25514@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25515 kprobe_opcode_t opcode;
25516 kprobe_opcode_t *orig_opcodes = opcodes;
25517
25518- if (search_exception_tables((unsigned long)opcodes))
25519+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25520 return 0; /* Page fault may occur on this address. */
25521
25522 retry:
25523@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25524 * for the first byte, we can recover the original instruction
25525 * from it and kp->opcode.
25526 */
25527- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25528+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25529 buf[0] = kp->opcode;
25530- return (unsigned long)buf;
25531+ return ktva_ktla((unsigned long)buf);
25532 }
25533
25534 /*
25535@@ -338,7 +341,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25536 /* Another subsystem puts a breakpoint, failed to recover */
25537 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25538 return 0;
25539+ pax_open_kernel();
25540 memcpy(dest, insn.kaddr, insn.length);
25541+ pax_close_kernel();
25542
25543 #ifdef CONFIG_X86_64
25544 if (insn_rip_relative(&insn)) {
25545@@ -365,7 +370,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25546 return 0;
25547 }
25548 disp = (u8 *) dest + insn_offset_displacement(&insn);
25549+ pax_open_kernel();
25550 *(s32 *) disp = (s32) newdisp;
25551+ pax_close_kernel();
25552 }
25553 #endif
25554 return insn.length;
25555@@ -507,7 +514,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25556 * nor set current_kprobe, because it doesn't use single
25557 * stepping.
25558 */
25559- regs->ip = (unsigned long)p->ainsn.insn;
25560+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25561 preempt_enable_no_resched();
25562 return;
25563 }
25564@@ -524,9 +531,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25565 regs->flags &= ~X86_EFLAGS_IF;
25566 /* single step inline if the instruction is an int3 */
25567 if (p->opcode == BREAKPOINT_INSTRUCTION)
25568- regs->ip = (unsigned long)p->addr;
25569+ regs->ip = ktla_ktva((unsigned long)p->addr);
25570 else
25571- regs->ip = (unsigned long)p->ainsn.insn;
25572+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25573 }
25574 NOKPROBE_SYMBOL(setup_singlestep);
25575
25576@@ -576,7 +583,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25577 struct kprobe *p;
25578 struct kprobe_ctlblk *kcb;
25579
25580- if (user_mode_vm(regs))
25581+ if (user_mode(regs))
25582 return 0;
25583
25584 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25585@@ -611,7 +618,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25586 setup_singlestep(p, regs, kcb, 0);
25587 return 1;
25588 }
25589- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25590+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25591 /*
25592 * The breakpoint instruction was removed right
25593 * after we hit it. Another cpu has removed
25594@@ -658,6 +665,9 @@ static void __used kretprobe_trampoline_holder(void)
25595 " movq %rax, 152(%rsp)\n"
25596 RESTORE_REGS_STRING
25597 " popfq\n"
25598+#ifdef KERNEXEC_PLUGIN
25599+ " btsq $63,(%rsp)\n"
25600+#endif
25601 #else
25602 " pushf\n"
25603 SAVE_REGS_STRING
25604@@ -798,7 +808,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25605 struct kprobe_ctlblk *kcb)
25606 {
25607 unsigned long *tos = stack_addr(regs);
25608- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25609+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25610 unsigned long orig_ip = (unsigned long)p->addr;
25611 kprobe_opcode_t *insn = p->ainsn.insn;
25612
25613@@ -981,7 +991,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25614 struct die_args *args = data;
25615 int ret = NOTIFY_DONE;
25616
25617- if (args->regs && user_mode_vm(args->regs))
25618+ if (args->regs && user_mode(args->regs))
25619 return ret;
25620
25621 if (val == DIE_GPF) {
25622diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25623index 7c523bb..01b051b 100644
25624--- a/arch/x86/kernel/kprobes/opt.c
25625+++ b/arch/x86/kernel/kprobes/opt.c
25626@@ -79,6 +79,7 @@ found:
25627 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25628 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25629 {
25630+ pax_open_kernel();
25631 #ifdef CONFIG_X86_64
25632 *addr++ = 0x48;
25633 *addr++ = 0xbf;
25634@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25635 *addr++ = 0xb8;
25636 #endif
25637 *(unsigned long *)addr = val;
25638+ pax_close_kernel();
25639 }
25640
25641 asm (
25642@@ -339,7 +341,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25643 * Verify if the address gap is in 2GB range, because this uses
25644 * a relative jump.
25645 */
25646- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25647+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25648 if (abs(rel) > 0x7fffffff) {
25649 __arch_remove_optimized_kprobe(op, 0);
25650 return -ERANGE;
25651@@ -356,16 +358,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25652 op->optinsn.size = ret;
25653
25654 /* Copy arch-dep-instance from template */
25655- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25656+ pax_open_kernel();
25657+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25658+ pax_close_kernel();
25659
25660 /* Set probe information */
25661 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25662
25663 /* Set probe function call */
25664- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25665+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25666
25667 /* Set returning jmp instruction at the tail of out-of-line buffer */
25668- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25669+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25670 (u8 *)op->kp.addr + op->optinsn.size);
25671
25672 flush_icache_range((unsigned long) buf,
25673@@ -390,7 +394,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25674 WARN_ON(kprobe_disabled(&op->kp));
25675
25676 /* Backup instructions which will be replaced by jump address */
25677- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25678+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25679 RELATIVE_ADDR_SIZE);
25680
25681 insn_buf[0] = RELATIVEJUMP_OPCODE;
25682@@ -438,7 +442,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25683 /* This kprobe is really able to run optimized path. */
25684 op = container_of(p, struct optimized_kprobe, kp);
25685 /* Detour through copied instructions */
25686- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25687+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25688 if (!reenter)
25689 reset_current_kprobe();
25690 preempt_enable_no_resched();
25691diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25692index c2bedae..25e7ab60 100644
25693--- a/arch/x86/kernel/ksysfs.c
25694+++ b/arch/x86/kernel/ksysfs.c
25695@@ -184,7 +184,7 @@ out:
25696
25697 static struct kobj_attribute type_attr = __ATTR_RO(type);
25698
25699-static struct bin_attribute data_attr = {
25700+static bin_attribute_no_const data_attr __read_only = {
25701 .attr = {
25702 .name = "data",
25703 .mode = S_IRUGO,
25704diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25705index c37886d..d851d32 100644
25706--- a/arch/x86/kernel/ldt.c
25707+++ b/arch/x86/kernel/ldt.c
25708@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25709 if (reload) {
25710 #ifdef CONFIG_SMP
25711 preempt_disable();
25712- load_LDT(pc);
25713+ load_LDT_nolock(pc);
25714 if (!cpumask_equal(mm_cpumask(current->mm),
25715 cpumask_of(smp_processor_id())))
25716 smp_call_function(flush_ldt, current->mm, 1);
25717 preempt_enable();
25718 #else
25719- load_LDT(pc);
25720+ load_LDT_nolock(pc);
25721 #endif
25722 }
25723 if (oldsize) {
25724@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25725 return err;
25726
25727 for (i = 0; i < old->size; i++)
25728- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25729+ write_ldt_entry(new->ldt, i, old->ldt + i);
25730 return 0;
25731 }
25732
25733@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25734 retval = copy_ldt(&mm->context, &old_mm->context);
25735 mutex_unlock(&old_mm->context.lock);
25736 }
25737+
25738+ if (tsk == current) {
25739+ mm->context.vdso = 0;
25740+
25741+#ifdef CONFIG_X86_32
25742+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25743+ mm->context.user_cs_base = 0UL;
25744+ mm->context.user_cs_limit = ~0UL;
25745+
25746+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25747+ cpus_clear(mm->context.cpu_user_cs_mask);
25748+#endif
25749+
25750+#endif
25751+#endif
25752+
25753+ }
25754+
25755 return retval;
25756 }
25757
25758@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25759 }
25760 }
25761
25762+#ifdef CONFIG_PAX_SEGMEXEC
25763+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25764+ error = -EINVAL;
25765+ goto out_unlock;
25766+ }
25767+#endif
25768+
25769 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25770 error = -EINVAL;
25771 goto out_unlock;
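
The write_ldt() hunk above rejects any LDT descriptor whose contents field marks it as a code segment when the task runs under SEGMEXEC, since a user-supplied code segment could bypass the code/data segmentation split. A sketch of just that filter; the MODIFY_LDT_CONTENTS_* values match asm/ldt.h, while the MF_PAX_SEGMEXEC value and the flag plumbing are illustrative:

#include <errno.h>
#include <stdio.h>

#define MODIFY_LDT_CONTENTS_DATA  0    /* values as in asm/ldt.h */
#define MODIFY_LDT_CONTENTS_STACK 1
#define MODIFY_LDT_CONTENTS_CODE  2
#define MF_PAX_SEGMEXEC 0x01           /* illustrative flag value only */

struct ldt_info_sketch { unsigned int contents; };

static int write_ldt_filter(unsigned long pax_flags,
			    const struct ldt_info_sketch *info)
{
	if ((pax_flags & MF_PAX_SEGMEXEC) &&
	    (info->contents & MODIFY_LDT_CONTENTS_CODE))
		return -EINVAL;        /* same rejection as the hunk */
	return 0;
}

int main(void)
{
	struct ldt_info_sketch code = { MODIFY_LDT_CONTENTS_CODE };
	struct ldt_info_sketch data = { MODIFY_LDT_CONTENTS_DATA };
	printf("code: %d, data: %d\n",
	       write_ldt_filter(MF_PAX_SEGMEXEC, &code),
	       write_ldt_filter(MF_PAX_SEGMEXEC, &data));
	return 0;
}
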
25772diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25773index 469b23d..5449cfe 100644
25774--- a/arch/x86/kernel/machine_kexec_32.c
25775+++ b/arch/x86/kernel/machine_kexec_32.c
25776@@ -26,7 +26,7 @@
25777 #include <asm/cacheflush.h>
25778 #include <asm/debugreg.h>
25779
25780-static void set_idt(void *newidt, __u16 limit)
25781+static void set_idt(struct desc_struct *newidt, __u16 limit)
25782 {
25783 struct desc_ptr curidt;
25784
25785@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25786 }
25787
25788
25789-static void set_gdt(void *newgdt, __u16 limit)
25790+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25791 {
25792 struct desc_ptr curgdt;
25793
25794@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25795 }
25796
25797 control_page = page_address(image->control_code_page);
25798- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25799+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25800
25801 relocate_kernel_ptr = control_page;
25802 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25803diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25804index 94ea120..4154cea 100644
25805--- a/arch/x86/kernel/mcount_64.S
25806+++ b/arch/x86/kernel/mcount_64.S
25807@@ -7,7 +7,7 @@
25808 #include <linux/linkage.h>
25809 #include <asm/ptrace.h>
25810 #include <asm/ftrace.h>
25811-
25812+#include <asm/alternative-asm.h>
25813
25814 .code64
25815 .section .entry.text, "ax"
25816@@ -148,8 +148,9 @@
25817 #ifdef CONFIG_DYNAMIC_FTRACE
25818
25819 ENTRY(function_hook)
25820+ pax_force_retaddr
25821 retq
25822-END(function_hook)
25823+ENDPROC(function_hook)
25824
25825 ENTRY(ftrace_caller)
25826 /* save_mcount_regs fills in first two parameters */
25827@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25828 #endif
25829
25830 GLOBAL(ftrace_stub)
25831+ pax_force_retaddr
25832 retq
25833-END(ftrace_caller)
25834+ENDPROC(ftrace_caller)
25835
25836 ENTRY(ftrace_regs_caller)
25837 /* Save the current flags before any operations that can change them */
25838@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
25839
25840 jmp ftrace_return
25841
25842-END(ftrace_regs_caller)
25843+ENDPROC(ftrace_regs_caller)
25844
25845
25846 #else /* ! CONFIG_DYNAMIC_FTRACE */
25847@@ -272,18 +274,20 @@ fgraph_trace:
25848 #endif
25849
25850 GLOBAL(ftrace_stub)
25851+ pax_force_retaddr
25852 retq
25853
25854 trace:
25855 /* save_mcount_regs fills in first two parameters */
25856 save_mcount_regs
25857
25858+ pax_force_fptr ftrace_trace_function
25859 call *ftrace_trace_function
25860
25861 restore_mcount_regs
25862
25863 jmp fgraph_trace
25864-END(function_hook)
25865+ENDPROC(function_hook)
25866 #endif /* CONFIG_DYNAMIC_FTRACE */
25867 #endif /* CONFIG_FUNCTION_TRACER */
25868
25869@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
25870
25871 restore_mcount_regs
25872
25873+ pax_force_retaddr
25874 retq
25875-END(ftrace_graph_caller)
25876+ENDPROC(ftrace_graph_caller)
25877
25878 GLOBAL(return_to_handler)
25879 subq $24, %rsp
25880@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
25881 movq 8(%rsp), %rdx
25882 movq (%rsp), %rax
25883 addq $24, %rsp
25884+ pax_force_fptr %rdi
25885 jmp *%rdi
25886+ENDPROC(return_to_handler)
25887 #endif
25888diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
25889index e69f988..72902b7 100644
25890--- a/arch/x86/kernel/module.c
25891+++ b/arch/x86/kernel/module.c
25892@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
25893 }
25894 #endif
25895
25896-void *module_alloc(unsigned long size)
25897+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
25898 {
25899- if (PAGE_ALIGN(size) > MODULES_LEN)
25900+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
25901 return NULL;
25902 return __vmalloc_node_range(size, 1,
25903 MODULES_VADDR + get_module_load_offset(),
25904- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
25905- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
25906+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
25907+ prot, NUMA_NO_NODE,
25908 __builtin_return_address(0));
25909 }
25910
25911+void *module_alloc(unsigned long size)
25912+{
25913+
25914+#ifdef CONFIG_PAX_KERNEXEC
25915+ return __module_alloc(size, PAGE_KERNEL);
25916+#else
25917+ return __module_alloc(size, PAGE_KERNEL_EXEC);
25918+#endif
25919+
25920+}
25921+
25922+#ifdef CONFIG_PAX_KERNEXEC
25923+#ifdef CONFIG_X86_32
25924+void *module_alloc_exec(unsigned long size)
25925+{
25926+ struct vm_struct *area;
25927+
25928+ if (size == 0)
25929+ return NULL;
25930+
25931+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25932+ return area ? area->addr : NULL;
25933+}
25934+EXPORT_SYMBOL(module_alloc_exec);
25935+
25936+void module_memfree_exec(void *module_region)
25937+{
25938+ vunmap(module_region);
25939+}
25940+EXPORT_SYMBOL(module_memfree_exec);
25941+#else
25942+void module_memfree_exec(void *module_region)
25943+{
25944+ module_memfree(module_region);
25945+}
25946+EXPORT_SYMBOL(module_memfree_exec);
25947+
25948+void *module_alloc_exec(unsigned long size)
25949+{
25950+ return __module_alloc(size, PAGE_KERNEL_RX);
25951+}
25952+EXPORT_SYMBOL(module_alloc_exec);
25953+#endif
25954+#endif
25955+
25956 #ifdef CONFIG_X86_32
25957 int apply_relocate(Elf32_Shdr *sechdrs,
25958 const char *strtab,
25959@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
25960 unsigned int i;
25961 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
25962 Elf32_Sym *sym;
25963- uint32_t *location;
25964+ uint32_t *plocation, location;
25965
25966 DEBUGP("Applying relocate section %u to %u\n",
25967 relsec, sechdrs[relsec].sh_info);
25968 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
25969 /* This is where to make the change */
25970- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
25971- + rel[i].r_offset;
25972+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
25973+ location = (uint32_t)plocation;
25974+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
25975+ plocation = ktla_ktva((void *)plocation);
25976 /* This is the symbol it is referring to. Note that all
25977 undefined symbols have been resolved. */
25978 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
25979@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
25980 switch (ELF32_R_TYPE(rel[i].r_info)) {
25981 case R_386_32:
25982 /* We add the value into the location given */
25983- *location += sym->st_value;
25984+ pax_open_kernel();
25985+ *plocation += sym->st_value;
25986+ pax_close_kernel();
25987 break;
25988 case R_386_PC32:
25989 /* Add the value, subtract its position */
25990- *location += sym->st_value - (uint32_t)location;
25991+ pax_open_kernel();
25992+ *plocation += sym->st_value - location;
25993+ pax_close_kernel();
25994 break;
25995 default:
25996 pr_err("%s: Unknown relocation: %u\n",
25997@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
25998 case R_X86_64_NONE:
25999 break;
26000 case R_X86_64_64:
26001+ pax_open_kernel();
26002 *(u64 *)loc = val;
26003+ pax_close_kernel();
26004 break;
26005 case R_X86_64_32:
26006+ pax_open_kernel();
26007 *(u32 *)loc = val;
26008+ pax_close_kernel();
26009 if (val != *(u32 *)loc)
26010 goto overflow;
26011 break;
26012 case R_X86_64_32S:
26013+ pax_open_kernel();
26014 *(s32 *)loc = val;
26015+ pax_close_kernel();
26016 if ((s64)val != *(s32 *)loc)
26017 goto overflow;
26018 break;
26019 case R_X86_64_PC32:
26020 val -= (u64)loc;
26021+ pax_open_kernel();
26022 *(u32 *)loc = val;
26023+ pax_close_kernel();
26024+
26025 #if 0
26026 if ((s64)val != *(s32 *)loc)
26027 goto overflow;
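
module.c now routes every module allocation through a single __module_alloc() that takes the page protection explicitly: plain module_alloc() hands out non-executable PAGE_KERNEL memory under KERNEXEC, while the new module_alloc_exec() supplies the RX mapping for module text. A user-space analogue of choosing the protection up front (the kernexec flag and helper name are stand-ins):

#include <stdio.h>
#include <sys/mman.h>

static void *module_alloc_prot(size_t size, int prot)
{
	if (size == 0)
		return NULL;           /* mirrors the added !size check */
	void *p = mmap(NULL, size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

int main(void)
{
	int kernexec = 1;              /* stand-in for CONFIG_PAX_KERNEXEC */

	/* module_alloc(): never executable when "kernexec" is on */
	void *core = module_alloc_prot(4096, kernexec
				       ? PROT_READ | PROT_WRITE
				       : PROT_READ | PROT_WRITE | PROT_EXEC);

	/* module_alloc_exec(): read+execute only, like PAGE_KERNEL_RX */
	void *text = module_alloc_prot(4096, PROT_READ | PROT_EXEC);

	printf("core=%p text=%p\n", core, text);
	return 0;
}
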
26028diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26029index 113e707..0a690e1 100644
26030--- a/arch/x86/kernel/msr.c
26031+++ b/arch/x86/kernel/msr.c
26032@@ -39,6 +39,7 @@
26033 #include <linux/notifier.h>
26034 #include <linux/uaccess.h>
26035 #include <linux/gfp.h>
26036+#include <linux/grsecurity.h>
26037
26038 #include <asm/processor.h>
26039 #include <asm/msr.h>
26040@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26041 int err = 0;
26042 ssize_t bytes = 0;
26043
26044+#ifdef CONFIG_GRKERNSEC_KMEM
26045+ gr_handle_msr_write();
26046+ return -EPERM;
26047+#endif
26048+
26049 if (count % 8)
26050 return -EINVAL; /* Invalid chunk size */
26051
26052@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26053 err = -EBADF;
26054 break;
26055 }
26056+#ifdef CONFIG_GRKERNSEC_KMEM
26057+ gr_handle_msr_write();
26058+ return -EPERM;
26059+#endif
26060 if (copy_from_user(&regs, uregs, sizeof regs)) {
26061 err = -EFAULT;
26062 break;
26063@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26064 return notifier_from_errno(err);
26065 }
26066
26067-static struct notifier_block __refdata msr_class_cpu_notifier = {
26068+static struct notifier_block msr_class_cpu_notifier = {
26069 .notifier_call = msr_class_cpu_callback,
26070 };
26071
26072diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26073index c3e985d..110a36a 100644
26074--- a/arch/x86/kernel/nmi.c
26075+++ b/arch/x86/kernel/nmi.c
26076@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26077
26078 static void nmi_max_handler(struct irq_work *w)
26079 {
26080- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26081+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26082 int remainder_ns, decimal_msecs;
26083- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26084+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26085
26086 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26087 decimal_msecs = remainder_ns / 1000;
26088
26089 printk_ratelimited(KERN_INFO
26090 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26091- a->handler, whole_msecs, decimal_msecs);
26092+ n->action->handler, whole_msecs, decimal_msecs);
26093 }
26094
26095 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26096@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26097 delta = sched_clock() - delta;
26098 trace_nmi_handler(a->handler, (int)delta, thishandled);
26099
26100- if (delta < nmi_longest_ns || delta < a->max_duration)
26101+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26102 continue;
26103
26104- a->max_duration = delta;
26105- irq_work_queue(&a->irq_work);
26106+ a->work->max_duration = delta;
26107+ irq_work_queue(&a->work->irq_work);
26108 }
26109
26110 rcu_read_unlock();
26111@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26112 }
26113 NOKPROBE_SYMBOL(nmi_handle);
26114
26115-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26116+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26117 {
26118 struct nmi_desc *desc = nmi_to_desc(type);
26119 unsigned long flags;
26120@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26121 if (!action->handler)
26122 return -EINVAL;
26123
26124- init_irq_work(&action->irq_work, nmi_max_handler);
26125+ action->work->action = action;
26126+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26127
26128 spin_lock_irqsave(&desc->lock, flags);
26129
26130@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26131 * event confuses some handlers (kdump uses this flag)
26132 */
26133 if (action->flags & NMI_FLAG_FIRST)
26134- list_add_rcu(&action->list, &desc->head);
26135+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26136 else
26137- list_add_tail_rcu(&action->list, &desc->head);
26138+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26139
26140 spin_unlock_irqrestore(&desc->lock, flags);
26141 return 0;
26142@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26143 if (!strcmp(n->name, name)) {
26144 WARN(in_nmi(),
26145 "Trying to free NMI (%s) from NMI context!\n", n->name);
26146- list_del_rcu(&n->list);
26147+ pax_list_del_rcu((struct list_head *)&n->list);
26148 break;
26149 }
26150 }
26151@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26152 dotraplinkage notrace void
26153 do_nmi(struct pt_regs *regs, long error_code)
26154 {
26155+
26156+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26157+ if (!user_mode(regs)) {
26158+ unsigned long cs = regs->cs & 0xFFFF;
26159+ unsigned long ip = ktva_ktla(regs->ip);
26160+
26161+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26162+ regs->ip = ip;
26163+ }
26164+#endif
26165+
26166 nmi_nesting_preprocess(regs);
26167
26168 nmi_enter();
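
The nmi.c changes split the mutable bookkeeping (max_duration and the irq_work) out of struct nmiaction into a separate struct nmiwork reached through a pointer, so registered actions themselves can be const and live in read-only memory. A struct-split sketch; the field names follow the hunk, the rest is scaffolding:

#include <stdio.h>

struct nmiwork;

struct nmiaction_sketch {              /* can now be const / live in .rodata */
	int (*handler)(void);
	const char *name;
	struct nmiwork *work;          /* single pointer to the mutable side */
};

struct nmiwork {                       /* writable bookkeeping */
	unsigned long long max_duration;
	const struct nmiaction_sketch *action;
};

static int demo_handler(void) { return 1; }
static struct nmiwork work;
static const struct nmiaction_sketch action = { demo_handler, "demo", &work };

int main(void)
{
	work.action = &action;         /* done in __register_nmi_handler() */
	work.max_duration = 42;        /* updated from nmi_handle() */
	printf("%s max=%llu\n", work.action->name,
	       work.action->work->max_duration);
	return 0;
}
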
26169diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26170index 6d9582e..f746287 100644
26171--- a/arch/x86/kernel/nmi_selftest.c
26172+++ b/arch/x86/kernel/nmi_selftest.c
26173@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26174 {
26175 /* trap all the unknown NMIs we may generate */
26176 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26177- __initdata);
26178+ __initconst);
26179 }
26180
26181 static void __init cleanup_nmi_testsuite(void)
26182@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26183 unsigned long timeout;
26184
26185 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26186- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26187+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26188 nmi_fail = FAILURE;
26189 return;
26190 }
26191diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26192index bbb6c73..24a58ef 100644
26193--- a/arch/x86/kernel/paravirt-spinlocks.c
26194+++ b/arch/x86/kernel/paravirt-spinlocks.c
26195@@ -8,7 +8,7 @@
26196
26197 #include <asm/paravirt.h>
26198
26199-struct pv_lock_ops pv_lock_ops = {
26200+struct pv_lock_ops pv_lock_ops __read_only = {
26201 #ifdef CONFIG_SMP
26202 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26203 .unlock_kick = paravirt_nop,
26204diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26205index 548d25f..f8fb99c 100644
26206--- a/arch/x86/kernel/paravirt.c
26207+++ b/arch/x86/kernel/paravirt.c
26208@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26209 {
26210 return x;
26211 }
26212+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26213+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26214+#endif
26215
26216 void __init default_banner(void)
26217 {
26218@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26219
26220 if (opfunc == NULL)
26221 /* If there's no function, patch it with a ud2a (BUG) */
26222- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26223- else if (opfunc == _paravirt_nop)
26224+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26225+ else if (opfunc == (void *)_paravirt_nop)
26226 /* If the operation is a nop, then nop the callsite */
26227 ret = paravirt_patch_nop();
26228
26229 /* identity functions just return their single argument */
26230- else if (opfunc == _paravirt_ident_32)
26231+ else if (opfunc == (void *)_paravirt_ident_32)
26232 ret = paravirt_patch_ident_32(insnbuf, len);
26233- else if (opfunc == _paravirt_ident_64)
26234+ else if (opfunc == (void *)_paravirt_ident_64)
26235 ret = paravirt_patch_ident_64(insnbuf, len);
26236+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26237+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26238+ ret = paravirt_patch_ident_64(insnbuf, len);
26239+#endif
26240
26241 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26242 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26243@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26244 if (insn_len > len || start == NULL)
26245 insn_len = len;
26246 else
26247- memcpy(insnbuf, start, insn_len);
26248+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26249
26250 return insn_len;
26251 }
26252@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26253 return this_cpu_read(paravirt_lazy_mode);
26254 }
26255
26256-struct pv_info pv_info = {
26257+struct pv_info pv_info __read_only = {
26258 .name = "bare hardware",
26259 .paravirt_enabled = 0,
26260 .kernel_rpl = 0,
26261@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26262 #endif
26263 };
26264
26265-struct pv_init_ops pv_init_ops = {
26266+struct pv_init_ops pv_init_ops __read_only = {
26267 .patch = native_patch,
26268 };
26269
26270-struct pv_time_ops pv_time_ops = {
26271+struct pv_time_ops pv_time_ops __read_only = {
26272 .sched_clock = native_sched_clock,
26273 .steal_clock = native_steal_clock,
26274 };
26275
26276-__visible struct pv_irq_ops pv_irq_ops = {
26277+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26278 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26279 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26280 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26281@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26282 #endif
26283 };
26284
26285-__visible struct pv_cpu_ops pv_cpu_ops = {
26286+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26287 .cpuid = native_cpuid,
26288 .get_debugreg = native_get_debugreg,
26289 .set_debugreg = native_set_debugreg,
26290@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26291 NOKPROBE_SYMBOL(native_set_debugreg);
26292 NOKPROBE_SYMBOL(native_load_idt);
26293
26294-struct pv_apic_ops pv_apic_ops = {
26295+struct pv_apic_ops pv_apic_ops __read_only = {
26296 #ifdef CONFIG_X86_LOCAL_APIC
26297 .startup_ipi_hook = paravirt_nop,
26298 #endif
26299 };
26300
26301-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26302+#ifdef CONFIG_X86_32
26303+#ifdef CONFIG_X86_PAE
26304+/* 64-bit pagetable entries */
26305+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26306+#else
26307 /* 32-bit pagetable entries */
26308 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26309+#endif
26310 #else
26311 /* 64-bit pagetable entries */
26312 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26313 #endif
26314
26315-struct pv_mmu_ops pv_mmu_ops = {
26316+struct pv_mmu_ops pv_mmu_ops __read_only = {
26317
26318 .read_cr2 = native_read_cr2,
26319 .write_cr2 = native_write_cr2,
26320@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26321 .make_pud = PTE_IDENT,
26322
26323 .set_pgd = native_set_pgd,
26324+ .set_pgd_batched = native_set_pgd_batched,
26325 #endif
26326 #endif /* PAGETABLE_LEVELS >= 3 */
26327
26328@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26329 },
26330
26331 .set_fixmap = native_set_fixmap,
26332+
26333+#ifdef CONFIG_PAX_KERNEXEC
26334+ .pax_open_kernel = native_pax_open_kernel,
26335+ .pax_close_kernel = native_pax_close_kernel,
26336+#endif
26337+
26338 };
26339
26340 EXPORT_SYMBOL_GPL(pv_time_ops);
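
All of the pv_*_ops dispatch tables above gain __read_only, closing off a classic kernel-exploitation target: rewriting a paravirt function pointer. The plain-C shape of the idea is a const table of function pointers (the kernel's __read_only additionally covers data written once during init and locked down afterwards):

#include <stdio.h>

struct pv_irq_ops_sketch {
	void (*irq_disable)(void);
	void (*irq_enable)(void);
};

static void native_irq_disable(void) { puts("cli"); }
static void native_irq_enable(void)  { puts("sti"); }

static const struct pv_irq_ops_sketch pv_irq_ops_ro = {
	.irq_disable = native_irq_disable,
	.irq_enable  = native_irq_enable,
};

int main(void)
{
	pv_irq_ops_ro.irq_disable();   /* dispatch through the table still works */
	/* pv_irq_ops_ro.irq_disable = NULL;  <- no longer compiles */
	pv_irq_ops_ro.irq_enable();
	return 0;
}
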
26341diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26342index a1da673..b6f5831 100644
26343--- a/arch/x86/kernel/paravirt_patch_64.c
26344+++ b/arch/x86/kernel/paravirt_patch_64.c
26345@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26346 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26347 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26348 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26349+
26350+#ifndef CONFIG_PAX_MEMORY_UDEREF
26351 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26352+#endif
26353+
26354 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26355 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26356
26357@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26358 PATCH_SITE(pv_mmu_ops, read_cr3);
26359 PATCH_SITE(pv_mmu_ops, write_cr3);
26360 PATCH_SITE(pv_cpu_ops, clts);
26361+
26362+#ifndef CONFIG_PAX_MEMORY_UDEREF
26363 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26364+#endif
26365+
26366 PATCH_SITE(pv_cpu_ops, wbinvd);
26367
26368 patch_site:
26369diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26370index 0497f71..7186c0d 100644
26371--- a/arch/x86/kernel/pci-calgary_64.c
26372+++ b/arch/x86/kernel/pci-calgary_64.c
26373@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26374 tce_space = be64_to_cpu(readq(target));
26375 tce_space = tce_space & TAR_SW_BITS;
26376
26377- tce_space = tce_space & (~specified_table_size);
26378+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26379 info->tce_space = (u64 *)__va(tce_space);
26380 }
26381 }
26382diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26383index 35ccf75..7a15747 100644
26384--- a/arch/x86/kernel/pci-iommu_table.c
26385+++ b/arch/x86/kernel/pci-iommu_table.c
26386@@ -2,7 +2,7 @@
26387 #include <asm/iommu_table.h>
26388 #include <linux/string.h>
26389 #include <linux/kallsyms.h>
26390-
26391+#include <linux/sched.h>
26392
26393 #define DEBUG 1
26394
26395diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26396index 77dd0ad..9ec4723 100644
26397--- a/arch/x86/kernel/pci-swiotlb.c
26398+++ b/arch/x86/kernel/pci-swiotlb.c
26399@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26400 struct dma_attrs *attrs)
26401 {
26402 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26403- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26404+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26405 else
26406 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26407 }
26408diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26409index e127dda..94e384d 100644
26410--- a/arch/x86/kernel/process.c
26411+++ b/arch/x86/kernel/process.c
26412@@ -36,7 +36,8 @@
26413 * section. Since TSS's are completely CPU-local, we want them
26414 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26415 */
26416-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26417+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26418+EXPORT_SYMBOL(init_tss);
26419
26420 #ifdef CONFIG_X86_64
26421 static DEFINE_PER_CPU(unsigned char, is_idle);
26422@@ -94,7 +95,7 @@ void arch_task_cache_init(void)
26423 task_xstate_cachep =
26424 kmem_cache_create("task_xstate", xstate_size,
26425 __alignof__(union thread_xstate),
26426- SLAB_PANIC | SLAB_NOTRACK, NULL);
26427+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26428 setup_xstate_comp();
26429 }
26430
26431@@ -108,7 +109,7 @@ void exit_thread(void)
26432 unsigned long *bp = t->io_bitmap_ptr;
26433
26434 if (bp) {
26435- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26436+ struct tss_struct *tss = init_tss + get_cpu();
26437
26438 t->io_bitmap_ptr = NULL;
26439 clear_thread_flag(TIF_IO_BITMAP);
26440@@ -128,6 +129,9 @@ void flush_thread(void)
26441 {
26442 struct task_struct *tsk = current;
26443
26444+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26445+ loadsegment(gs, 0);
26446+#endif
26447 flush_ptrace_hw_breakpoint(tsk);
26448 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26449 drop_init_fpu(tsk);
26450@@ -274,7 +278,7 @@ static void __exit_idle(void)
26451 void exit_idle(void)
26452 {
26453 /* idle loop has pid 0 */
26454- if (current->pid)
26455+ if (task_pid_nr(current))
26456 return;
26457 __exit_idle();
26458 }
26459@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
26460 return ret;
26461 }
26462 #endif
26463-void stop_this_cpu(void *dummy)
26464+__noreturn void stop_this_cpu(void *dummy)
26465 {
26466 local_irq_disable();
26467 /*
26468@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
26469 }
26470 early_param("idle", idle_setup);
26471
26472-unsigned long arch_align_stack(unsigned long sp)
26473+#ifdef CONFIG_PAX_RANDKSTACK
26474+void pax_randomize_kstack(struct pt_regs *regs)
26475 {
26476- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26477- sp -= get_random_int() % 8192;
26478- return sp & ~0xf;
26479-}
26480+ struct thread_struct *thread = &current->thread;
26481+ unsigned long time;
26482
26483-unsigned long arch_randomize_brk(struct mm_struct *mm)
26484-{
26485- unsigned long range_end = mm->brk + 0x02000000;
26486- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26487-}
26488+ if (!randomize_va_space)
26489+ return;
26490+
26491+ if (v8086_mode(regs))
26492+ return;
26493
26494+ rdtscl(time);
26495+
26496+ /* P4 seems to return a 0 LSB, ignore it */
26497+#ifdef CONFIG_MPENTIUM4
26498+ time &= 0x3EUL;
26499+ time <<= 2;
26500+#elif defined(CONFIG_X86_64)
26501+ time &= 0xFUL;
26502+ time <<= 4;
26503+#else
26504+ time &= 0x1FUL;
26505+ time <<= 3;
26506+#endif
26507+
26508+ thread->sp0 ^= time;
26509+ load_sp0(init_tss + smp_processor_id(), thread);
26510+
26511+#ifdef CONFIG_X86_64
26512+ this_cpu_write(kernel_stack, thread->sp0);
26513+#endif
26514+}
26515+#endif
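
pax_randomize_kstack(), added above in place of arch_align_stack()/arch_randomize_brk(), XORs a few low TSC bits into thread->sp0 so the kernel stack top shifts on each return to user mode, with per-CPU masks and shifts chosen to preserve stack alignment. A sketch of the bit manipulation, using clock_gettime() in place of rdtscl() and the masks from the x86-64 branch of the hunk (assumes a 64-bit build):

#include <stdio.h>
#include <time.h>

static unsigned long randomize_sp0(unsigned long sp0)
{
	struct timespec ts;
	unsigned long time;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	time = (unsigned long)ts.tv_nsec;

	time &= 0xFUL;      /* keep 4 fast-changing bits ...              */
	time <<= 4;         /* ... above the 16-byte stack-alignment bits */
	return sp0 ^ time;  /* perturb the kernel stack top               */
}

int main(void)
{
	unsigned long sp0 = 0xffff880012345000UL;   /* made-up 64-bit stack top */
	int i;

	for (i = 0; i < 4; i++)
		printf("sp0 -> %#lx\n", randomize_sp0(sp0));
	return 0;
}
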
26516diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26517index 8f3ebfe..cbc731b 100644
26518--- a/arch/x86/kernel/process_32.c
26519+++ b/arch/x86/kernel/process_32.c
26520@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26521 unsigned long thread_saved_pc(struct task_struct *tsk)
26522 {
26523 return ((unsigned long *)tsk->thread.sp)[3];
26524+//XXX return tsk->thread.eip;
26525 }
26526
26527 void __show_regs(struct pt_regs *regs, int all)
26528@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26529 unsigned long sp;
26530 unsigned short ss, gs;
26531
26532- if (user_mode_vm(regs)) {
26533+ if (user_mode(regs)) {
26534 sp = regs->sp;
26535 ss = regs->ss & 0xffff;
26536- gs = get_user_gs(regs);
26537 } else {
26538 sp = kernel_stack_pointer(regs);
26539 savesegment(ss, ss);
26540- savesegment(gs, gs);
26541 }
26542+ gs = get_user_gs(regs);
26543
26544 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26545 (u16)regs->cs, regs->ip, regs->flags,
26546- smp_processor_id());
26547+ raw_smp_processor_id());
26548 print_symbol("EIP is at %s\n", regs->ip);
26549
26550 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26551@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26552 int copy_thread(unsigned long clone_flags, unsigned long sp,
26553 unsigned long arg, struct task_struct *p)
26554 {
26555- struct pt_regs *childregs = task_pt_regs(p);
26556+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26557 struct task_struct *tsk;
26558 int err;
26559
26560 p->thread.sp = (unsigned long) childregs;
26561 p->thread.sp0 = (unsigned long) (childregs+1);
26562+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26563 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26564
26565 if (unlikely(p->flags & PF_KTHREAD)) {
26566 /* kernel thread */
26567 memset(childregs, 0, sizeof(struct pt_regs));
26568 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26569- task_user_gs(p) = __KERNEL_STACK_CANARY;
26570- childregs->ds = __USER_DS;
26571- childregs->es = __USER_DS;
26572+ savesegment(gs, childregs->gs);
26573+ childregs->ds = __KERNEL_DS;
26574+ childregs->es = __KERNEL_DS;
26575 childregs->fs = __KERNEL_PERCPU;
26576 childregs->bx = sp; /* function */
26577 childregs->bp = arg;
26578@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26579 struct thread_struct *prev = &prev_p->thread,
26580 *next = &next_p->thread;
26581 int cpu = smp_processor_id();
26582- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26583+ struct tss_struct *tss = init_tss + cpu;
26584 fpu_switch_t fpu;
26585
26586 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26587@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26588 */
26589 lazy_save_gs(prev->gs);
26590
26591+#ifdef CONFIG_PAX_MEMORY_UDEREF
26592+ __set_fs(task_thread_info(next_p)->addr_limit);
26593+#endif
26594+
26595 /*
26596 * Load the per-thread Thread-Local Storage descriptor.
26597 */
26598@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26599 */
26600 arch_end_context_switch(next_p);
26601
26602- this_cpu_write(kernel_stack,
26603- (unsigned long)task_stack_page(next_p) +
26604- THREAD_SIZE - KERNEL_STACK_OFFSET);
26605+ this_cpu_write(current_task, next_p);
26606+ this_cpu_write(current_tinfo, &next_p->tinfo);
26607+ this_cpu_write(kernel_stack, next->sp0);
26608
26609 /*
26610 * Restore %gs if needed (which is common)
26611@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26612
26613 switch_fpu_finish(next_p, fpu);
26614
26615- this_cpu_write(current_task, next_p);
26616-
26617 return prev_p;
26618 }
26619
26620@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26621 } while (count++ < 16);
26622 return 0;
26623 }
26624-
26625diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26626index 5a2c029..ec8611d 100644
26627--- a/arch/x86/kernel/process_64.c
26628+++ b/arch/x86/kernel/process_64.c
26629@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26630 struct pt_regs *childregs;
26631 struct task_struct *me = current;
26632
26633- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26634+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26635 childregs = task_pt_regs(p);
26636 p->thread.sp = (unsigned long) childregs;
26637 p->thread.usersp = me->thread.usersp;
26638+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26639 set_tsk_thread_flag(p, TIF_FORK);
26640 p->thread.io_bitmap_ptr = NULL;
26641
26642@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26643 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26644 savesegment(es, p->thread.es);
26645 savesegment(ds, p->thread.ds);
26646+ savesegment(ss, p->thread.ss);
26647+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26648 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26649
26650 if (unlikely(p->flags & PF_KTHREAD)) {
26651@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26652 struct thread_struct *prev = &prev_p->thread;
26653 struct thread_struct *next = &next_p->thread;
26654 int cpu = smp_processor_id();
26655- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26656+ struct tss_struct *tss = init_tss + cpu;
26657 unsigned fsindex, gsindex;
26658 fpu_switch_t fpu;
26659
26660@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26661 if (unlikely(next->ds | prev->ds))
26662 loadsegment(ds, next->ds);
26663
26664+ savesegment(ss, prev->ss);
26665+ if (unlikely(next->ss != prev->ss))
26666+ loadsegment(ss, next->ss);
26667+
26668 /*
26669 * Switch FS and GS.
26670 *
26671@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26672 prev->usersp = this_cpu_read(old_rsp);
26673 this_cpu_write(old_rsp, next->usersp);
26674 this_cpu_write(current_task, next_p);
26675+ this_cpu_write(current_tinfo, &next_p->tinfo);
26676
26677 /*
26678 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26679@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26680 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26681 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26682
26683- this_cpu_write(kernel_stack,
26684- (unsigned long)task_stack_page(next_p) +
26685- THREAD_SIZE - KERNEL_STACK_OFFSET);
26686+ this_cpu_write(kernel_stack, next->sp0);
26687
26688 /*
26689 * Now maybe reload the debug registers and handle I/O bitmaps
26690@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26691 if (!p || p == current || p->state == TASK_RUNNING)
26692 return 0;
26693 stack = (unsigned long)task_stack_page(p);
26694- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26695+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26696 return 0;
26697 fp = *(u64 *)(p->thread.sp);
26698 do {
26699- if (fp < (unsigned long)stack ||
26700- fp >= (unsigned long)stack+THREAD_SIZE)
26701+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26702 return 0;
26703 ip = *(u64 *)(fp+8);
26704 if (!in_sched_functions(ip))
26705diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26706index e510618..5165ac0 100644
26707--- a/arch/x86/kernel/ptrace.c
26708+++ b/arch/x86/kernel/ptrace.c
26709@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26710 unsigned long sp = (unsigned long)&regs->sp;
26711 u32 *prev_esp;
26712
26713- if (context == (sp & ~(THREAD_SIZE - 1)))
26714+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26715 return sp;
26716
26717- prev_esp = (u32 *)(context);
26718+ prev_esp = *(u32 **)(context);
26719 if (prev_esp)
26720 return (unsigned long)prev_esp;
26721
26722@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26723 if (child->thread.gs != value)
26724 return do_arch_prctl(child, ARCH_SET_GS, value);
26725 return 0;
26726+
26727+ case offsetof(struct user_regs_struct,ip):
26728+ /*
26729+ * Protect against any attempt to set ip to an
26730+ * impossible address. There are dragons lurking if the
26731+ * address is noncanonical. (This explicitly allows
26732+ * setting ip to TASK_SIZE_MAX, because user code can do
26733+ * that all by itself by running off the end of its
26734+ * address space.)
26735+ */
26736+ if (value > TASK_SIZE_MAX)
26737+ return -EIO;
26738+ break;
26739+
26740 #endif
26741 }
26742
26743@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26744 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26745 {
26746 int i;
26747- int dr7 = 0;
26748+ unsigned long dr7 = 0;
26749 struct arch_hw_breakpoint *info;
26750
26751 for (i = 0; i < HBP_NUM; i++) {
26752@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26753 unsigned long addr, unsigned long data)
26754 {
26755 int ret;
26756- unsigned long __user *datap = (unsigned long __user *)data;
26757+ unsigned long __user *datap = (__force unsigned long __user *)data;
26758
26759 switch (request) {
26760 /* read the word at location addr in the USER area. */
26761@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26762 if ((int) addr < 0)
26763 return -EIO;
26764 ret = do_get_thread_area(child, addr,
26765- (struct user_desc __user *)data);
26766+ (__force struct user_desc __user *) data);
26767 break;
26768
26769 case PTRACE_SET_THREAD_AREA:
26770 if ((int) addr < 0)
26771 return -EIO;
26772 ret = do_set_thread_area(child, addr,
26773- (struct user_desc __user *)data, 0);
26774+ (__force struct user_desc __user *) data, 0);
26775 break;
26776 #endif
26777
26778@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26779
26780 #ifdef CONFIG_X86_64
26781
26782-static struct user_regset x86_64_regsets[] __read_mostly = {
26783+static user_regset_no_const x86_64_regsets[] __read_only = {
26784 [REGSET_GENERAL] = {
26785 .core_note_type = NT_PRSTATUS,
26786 .n = sizeof(struct user_regs_struct) / sizeof(long),
26787@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26788 #endif /* CONFIG_X86_64 */
26789
26790 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26791-static struct user_regset x86_32_regsets[] __read_mostly = {
26792+static user_regset_no_const x86_32_regsets[] __read_only = {
26793 [REGSET_GENERAL] = {
26794 .core_note_type = NT_PRSTATUS,
26795 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26796@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26797 */
26798 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26799
26800-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26801+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26802 {
26803 #ifdef CONFIG_X86_64
26804 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26805@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26806 memset(info, 0, sizeof(*info));
26807 info->si_signo = SIGTRAP;
26808 info->si_code = si_code;
26809- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26810+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26811 }
26812
26813 void user_single_step_siginfo(struct task_struct *tsk,
26814@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26815 }
26816 }
26817
26818+#ifdef CONFIG_GRKERNSEC_SETXID
26819+extern void gr_delayed_cred_worker(void);
26820+#endif
26821+
26822 /*
26823 * We can return 0 to resume the syscall or anything else to go to phase
26824 * 2. If we resume the syscall, we need to put something appropriate in
26825@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26826
26827 BUG_ON(regs != task_pt_regs(current));
26828
26829+#ifdef CONFIG_GRKERNSEC_SETXID
26830+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26831+ gr_delayed_cred_worker();
26832+#endif
26833+
26834 /*
26835 * If we stepped into a sysenter/syscall insn, it trapped in
26836 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26837@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26838 */
26839 user_exit();
26840
26841+#ifdef CONFIG_GRKERNSEC_SETXID
26842+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26843+ gr_delayed_cred_worker();
26844+#endif
26845+
26846 audit_syscall_exit(regs);
26847
26848 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
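
A userspace model of the ip filter added to putreg() above; the TASK_SIZE_MAX value assumes the usual 4-level-paging x86-64 layout and is not read from kernel headers:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define TASK_SIZE_MAX ((1UL << 47) - 4096)  /* assumed user address limit */

static int check_new_ip(uint64_t value)
{
    if (value > TASK_SIZE_MAX)
        return -EIO;    /* kernel or noncanonical address: refuse */
    return 0;           /* plausible user-space ip */
}

int main(void)
{
    printf("%d\n", check_new_ip(0x400000));             /* 0 */
    printf("%d\n", check_new_ip(0xffff800000000000UL)); /* -EIO */
    return 0;
}
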
26849diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26850index 2f355d2..e75ed0a 100644
26851--- a/arch/x86/kernel/pvclock.c
26852+++ b/arch/x86/kernel/pvclock.c
26853@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26854 reset_hung_task_detector();
26855 }
26856
26857-static atomic64_t last_value = ATOMIC64_INIT(0);
26858+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26859
26860 void pvclock_resume(void)
26861 {
26862- atomic64_set(&last_value, 0);
26863+ atomic64_set_unchecked(&last_value, 0);
26864 }
26865
26866 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26867@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26868 * updating at the same time, and one of them could be slightly behind,
26869 * making the assumption that last_value always goes forward fail to hold.
26870 */
26871- last = atomic64_read(&last_value);
26872+ last = atomic64_read_unchecked(&last_value);
26873 do {
26874 if (ret < last)
26875 return last;
26876- last = atomic64_cmpxchg(&last_value, last, ret);
26877+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
26878 } while (unlikely(last != ret));
26879
26880 return ret;
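
A userspace sketch of the last_value publication loop above, using GCC __atomic builtins in place of the kernel's atomic64 API; it returns a value that never moves backwards even when the raw reading regresses slightly:

#include <stdint.h>
#include <stdio.h>

static uint64_t last_value;   /* stands in for the atomic64 above */

static uint64_t monotonic_read(uint64_t raw)
{
    uint64_t last = __atomic_load_n(&last_value, __ATOMIC_RELAXED);

    do {
        if (raw < last)
            return last;  /* another reader already published later */
    } while (!__atomic_compare_exchange_n(&last_value, &last, raw, 0,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
    return raw;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)monotonic_read(100)); /* 100 */
    printf("%llu\n", (unsigned long long)monotonic_read(90));  /* 100 */
    return 0;
}
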
26881diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
26882index bae6c60..b438619 100644
26883--- a/arch/x86/kernel/reboot.c
26884+++ b/arch/x86/kernel/reboot.c
26885@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
26886
26887 void __noreturn machine_real_restart(unsigned int type)
26888 {
26889+
26890+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
26891+ struct desc_struct *gdt;
26892+#endif
26893+
26894 local_irq_disable();
26895
26896 /*
26897@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
26898
26899 /* Jump to the identity-mapped low memory code */
26900 #ifdef CONFIG_X86_32
26901- asm volatile("jmpl *%0" : :
26902+
26903+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
26904+ gdt = get_cpu_gdt_table(smp_processor_id());
26905+ pax_open_kernel();
26906+#ifdef CONFIG_PAX_MEMORY_UDEREF
26907+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
26908+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
26909+ loadsegment(ds, __KERNEL_DS);
26910+ loadsegment(es, __KERNEL_DS);
26911+ loadsegment(ss, __KERNEL_DS);
26912+#endif
26913+#ifdef CONFIG_PAX_KERNEXEC
26914+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
26915+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
26916+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
26917+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
26918+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
26919+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
26920+#endif
26921+ pax_close_kernel();
26922+#endif
26923+
26924+ asm volatile("ljmpl *%0" : :
26925 "rm" (real_mode_header->machine_real_restart_asm),
26926 "a" (type));
26927 #else
26928@@ -501,7 +528,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
26929 * This means that this function can never return, it can misbehave
26930 * by not rebooting properly and hanging.
26931 */
26932-static void native_machine_emergency_restart(void)
26933+static void __noreturn native_machine_emergency_restart(void)
26934 {
26935 int i;
26936 int attempt = 0;
26937@@ -621,13 +648,13 @@ void native_machine_shutdown(void)
26938 #endif
26939 }
26940
26941-static void __machine_emergency_restart(int emergency)
26942+static void __noreturn __machine_emergency_restart(int emergency)
26943 {
26944 reboot_emergency = emergency;
26945 machine_ops.emergency_restart();
26946 }
26947
26948-static void native_machine_restart(char *__unused)
26949+static void __noreturn native_machine_restart(char *__unused)
26950 {
26951 pr_notice("machine restart\n");
26952
26953@@ -636,7 +663,7 @@ static void native_machine_restart(char *__unused)
26954 __machine_emergency_restart(0);
26955 }
26956
26957-static void native_machine_halt(void)
26958+static void __noreturn native_machine_halt(void)
26959 {
26960 /* Stop other cpus and apics */
26961 machine_shutdown();
26962@@ -646,7 +673,7 @@ static void native_machine_halt(void)
26963 stop_this_cpu(NULL);
26964 }
26965
26966-static void native_machine_power_off(void)
26967+static void __noreturn native_machine_power_off(void)
26968 {
26969 if (pm_power_off) {
26970 if (!reboot_force)
26971@@ -655,9 +682,10 @@ static void native_machine_power_off(void)
26972 }
26973 /* A fallback in case there is no PM info available */
26974 tboot_shutdown(TB_SHUTDOWN_HALT);
26975+ unreachable();
26976 }
26977
26978-struct machine_ops machine_ops = {
26979+struct machine_ops machine_ops __read_only = {
26980 .power_off = native_machine_power_off,
26981 .shutdown = native_machine_shutdown,
26982 .emergency_restart = native_machine_emergency_restart,
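
The __noreturn annotations added through the reboot chain above let the compiler verify that control cannot fall out of any shutdown path; a freestanding sketch of that contract, where halt_forever() is only a stand-in for the real stop_this_cpu() spin:

#include <stdio.h>

static void __attribute__((noreturn)) halt_forever(void)
{
    for (;;)
        ;                       /* never returns */
}

static void __attribute__((noreturn)) power_off(void)
{
    puts("power off requested");
    halt_forever();
    __builtin_unreachable();    /* mirrors the added unreachable() */
}

int main(void)
{
    power_off();
}
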
26983diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
26984index c8e41e9..64049ef 100644
26985--- a/arch/x86/kernel/reboot_fixups_32.c
26986+++ b/arch/x86/kernel/reboot_fixups_32.c
26987@@ -57,7 +57,7 @@ struct device_fixup {
26988 unsigned int vendor;
26989 unsigned int device;
26990 void (*reboot_fixup)(struct pci_dev *);
26991-};
26992+} __do_const;
26993
26994 /*
26995 * PCI ids solely used for fixups_table go here
26996diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
26997index 3fd2c69..a444264 100644
26998--- a/arch/x86/kernel/relocate_kernel_64.S
26999+++ b/arch/x86/kernel/relocate_kernel_64.S
27000@@ -96,8 +96,7 @@ relocate_kernel:
27001
27002 /* jump to identity mapped page */
27003 addq $(identity_mapped - relocate_kernel), %r8
27004- pushq %r8
27005- ret
27006+ jmp *%r8
27007
27008 identity_mapped:
27009 /* set return address to 0 if not preserving context */
27010diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27011index ab4734e..c4ca0eb 100644
27012--- a/arch/x86/kernel/setup.c
27013+++ b/arch/x86/kernel/setup.c
27014@@ -110,6 +110,7 @@
27015 #include <asm/mce.h>
27016 #include <asm/alternative.h>
27017 #include <asm/prom.h>
27018+#include <asm/boot.h>
27019
27020 /*
27021 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27022@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27023 #endif
27024
27025
27026-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27027-__visible unsigned long mmu_cr4_features;
27028+#ifdef CONFIG_X86_64
27029+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27030+#elif defined(CONFIG_X86_PAE)
27031+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27032 #else
27033-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27034+__visible unsigned long mmu_cr4_features __read_only;
27035 #endif
27036
27037+void set_in_cr4(unsigned long mask)
27038+{
27039+ unsigned long cr4 = read_cr4();
27040+
27041+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27042+ return;
27043+
27044+ pax_open_kernel();
27045+ mmu_cr4_features |= mask;
27046+ pax_close_kernel();
27047+
27048+ if (trampoline_cr4_features)
27049+ *trampoline_cr4_features = mmu_cr4_features;
27050+ cr4 |= mask;
27051+ write_cr4(cr4);
27052+}
27053+EXPORT_SYMBOL(set_in_cr4);
27054+
27055+void clear_in_cr4(unsigned long mask)
27056+{
27057+ unsigned long cr4 = read_cr4();
27058+
27059+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27060+ return;
27061+
27062+ pax_open_kernel();
27063+ mmu_cr4_features &= ~mask;
27064+ pax_close_kernel();
27065+
27066+ if (trampoline_cr4_features)
27067+ *trampoline_cr4_features = mmu_cr4_features;
27068+ cr4 &= ~mask;
27069+ write_cr4(cr4);
27070+}
27071+EXPORT_SYMBOL(clear_in_cr4);
27072+
27073 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27074 int bootloader_type, bootloader_version;
27075
27076@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27077 * area (640->1Mb) as ram even though it is not.
27078 * take them out.
27079 */
27080- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27081+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27082
27083 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27084 }
27085@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27086 /* called before trim_bios_range() to spare extra sanitize */
27087 static void __init e820_add_kernel_range(void)
27088 {
27089- u64 start = __pa_symbol(_text);
27090+ u64 start = __pa_symbol(ktla_ktva(_text));
27091 u64 size = __pa_symbol(_end) - start;
27092
27093 /*
27094@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27095
27096 void __init setup_arch(char **cmdline_p)
27097 {
27098+#ifdef CONFIG_X86_32
27099+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27100+#else
27101 memblock_reserve(__pa_symbol(_text),
27102 (unsigned long)__bss_stop - (unsigned long)_text);
27103+#endif
27104
27105 early_reserve_initrd();
27106
27107@@ -955,16 +998,16 @@ void __init setup_arch(char **cmdline_p)
27108
27109 if (!boot_params.hdr.root_flags)
27110 root_mountflags &= ~MS_RDONLY;
27111- init_mm.start_code = (unsigned long) _text;
27112- init_mm.end_code = (unsigned long) _etext;
27113+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27114+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27115 init_mm.end_data = (unsigned long) _edata;
27116 init_mm.brk = _brk_end;
27117
27118 mpx_mm_init(&init_mm);
27119
27120- code_resource.start = __pa_symbol(_text);
27121- code_resource.end = __pa_symbol(_etext)-1;
27122- data_resource.start = __pa_symbol(_etext);
27123+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27124+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27125+ data_resource.start = __pa_symbol(_sdata);
27126 data_resource.end = __pa_symbol(_edata)-1;
27127 bss_resource.start = __pa_symbol(__bss_start);
27128 bss_resource.end = __pa_symbol(__bss_stop)-1;
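
A userspace model of the set_in_cr4() helper introduced above: a shadow copy is kept in sync with the live register so later resume paths can restore the same bits. read_cr4()/write_cr4() are mocked here, and the trampoline update is omitted:

#include <stdio.h>

static unsigned long cr4;              /* stands in for the live %cr4 */
static unsigned long mmu_cr4_features; /* shadow, made __read_only above */

static unsigned long read_cr4(void)    { return cr4; }
static void write_cr4(unsigned long v) { cr4 = v; }

static void set_in_cr4(unsigned long mask)
{
    unsigned long v = read_cr4();

    if ((v & mask) == mask && v == mmu_cr4_features)
        return;              /* bits already set and shadow in sync */

    /* the kernel version brackets this with pax_open/close_kernel() */
    mmu_cr4_features |= mask;
    write_cr4(v | mask);
}

int main(void)
{
    set_in_cr4(1UL << 5);    /* e.g. X86_CR4_PAE */
    printf("cr4=%#lx shadow=%#lx\n", cr4, mmu_cr4_features);
    return 0;
}
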
27129diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27130index e4fcb87..9c06c55 100644
27131--- a/arch/x86/kernel/setup_percpu.c
27132+++ b/arch/x86/kernel/setup_percpu.c
27133@@ -21,19 +21,17 @@
27134 #include <asm/cpu.h>
27135 #include <asm/stackprotector.h>
27136
27137-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27138+#ifdef CONFIG_SMP
27139+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27140 EXPORT_PER_CPU_SYMBOL(cpu_number);
27141+#endif
27142
27143-#ifdef CONFIG_X86_64
27144 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27145-#else
27146-#define BOOT_PERCPU_OFFSET 0
27147-#endif
27148
27149 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27150 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27151
27152-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27153+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27154 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27155 };
27156 EXPORT_SYMBOL(__per_cpu_offset);
27157@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27158 {
27159 #ifdef CONFIG_NEED_MULTIPLE_NODES
27160 pg_data_t *last = NULL;
27161- unsigned int cpu;
27162+ int cpu;
27163
27164 for_each_possible_cpu(cpu) {
27165 int node = early_cpu_to_node(cpu);
27166@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27167 {
27168 #ifdef CONFIG_X86_32
27169 struct desc_struct gdt;
27170+ unsigned long base = per_cpu_offset(cpu);
27171
27172- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27173- 0x2 | DESCTYPE_S, 0x8);
27174- gdt.s = 1;
27175+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27176+ 0x83 | DESCTYPE_S, 0xC);
27177 write_gdt_entry(get_cpu_gdt_table(cpu),
27178 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27179 #endif
27180@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27181 /* alrighty, percpu areas up and running */
27182 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27183 for_each_possible_cpu(cpu) {
27184+#ifdef CONFIG_CC_STACKPROTECTOR
27185+#ifdef CONFIG_X86_32
27186+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27187+#endif
27188+#endif
27189 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27190 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27191 per_cpu(cpu_number, cpu) = cpu;
27192@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27193 */
27194 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27195 #endif
27196+#ifdef CONFIG_CC_STACKPROTECTOR
27197+#ifdef CONFIG_X86_32
27198+ if (!cpu)
27199+ per_cpu(stack_canary.canary, cpu) = canary;
27200+#endif
27201+#endif
27202 /*
27203 * Up to this point, the boot CPU has been using .init.data
27204 * area. Reload any changed state for the boot CPU.
27205diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27206index ed37a76..39f936e 100644
27207--- a/arch/x86/kernel/signal.c
27208+++ b/arch/x86/kernel/signal.c
27209@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27210 * Align the stack pointer according to the i386 ABI,
27211 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27212 */
27213- sp = ((sp + 4) & -16ul) - 4;
27214+ sp = ((sp - 12) & -16ul) - 4;
27215 #else /* !CONFIG_X86_32 */
27216 sp = round_down(sp, 16) - 8;
27217 #endif
27218@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27219 }
27220
27221 if (current->mm->context.vdso)
27222- restorer = current->mm->context.vdso +
27223- selected_vdso32->sym___kernel_sigreturn;
27224+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27225 else
27226- restorer = &frame->retcode;
27227+ restorer = (void __user *)&frame->retcode;
27228 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27229 restorer = ksig->ka.sa.sa_restorer;
27230
27231@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27232 * reasons and because gdb uses it as a signature to notice
27233 * signal handler stack frames.
27234 */
27235- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27236+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27237
27238 if (err)
27239 return -EFAULT;
27240@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27241 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27242
27243 /* Set up to return from userspace. */
27244- restorer = current->mm->context.vdso +
27245- selected_vdso32->sym___kernel_rt_sigreturn;
27246+ if (current->mm->context.vdso)
27247+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27248+ else
27249+ restorer = (void __user *)&frame->retcode;
27250 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27251 restorer = ksig->ka.sa.sa_restorer;
27252 put_user_ex(restorer, &frame->pretcode);
27253@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27254 * reasons and because gdb uses it as a signature to notice
27255 * signal handler stack frames.
27256 */
27257- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27258+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27259 } put_user_catch(err);
27260
27261 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27262@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27263 {
27264 int usig = signr_convert(ksig->sig);
27265 sigset_t *set = sigmask_to_save();
27266- compat_sigset_t *cset = (compat_sigset_t *) set;
27267+ sigset_t sigcopy;
27268+ compat_sigset_t *cset;
27269+
27270+ sigcopy = *set;
27271+
27272+ cset = (compat_sigset_t *) &sigcopy;
27273
27274 /* Set up the stack frame */
27275 if (is_ia32_frame()) {
27276@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27277 } else if (is_x32_frame()) {
27278 return x32_setup_rt_frame(ksig, cset, regs);
27279 } else {
27280- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27281+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27282 }
27283 }
27284
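
A worked comparison of the two i386 sigframe alignment formulas changed above; both satisfy the ABI condition ((sp + 4) & 15) == 0 quoted in the comment, but (as I read it) the patched form also lands strictly below the incoming sp, whereas the old form returns sp unchanged whenever sp % 16 == 12:

#include <stdio.h>

int main(void)
{
    unsigned long sp = 0xbffff00cUL;                /* sp % 16 == 12 */
    unsigned long old_sp = ((sp + 4) & -16UL) - 4;  /* == sp */
    unsigned long new_sp = ((sp - 12) & -16UL) - 4; /* sp - 16 */

    printf("old=%#lx new=%#lx\n", old_sp, new_sp);
    printf("abi ok: %d %d\n",
           (old_sp + 4) % 16 == 0, (new_sp + 4) % 16 == 0);
    return 0;
}
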
27285diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27286index be8e1bd..a3d93fa 100644
27287--- a/arch/x86/kernel/smp.c
27288+++ b/arch/x86/kernel/smp.c
27289@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27290
27291 __setup("nonmi_ipi", nonmi_ipi_setup);
27292
27293-struct smp_ops smp_ops = {
27294+struct smp_ops smp_ops __read_only = {
27295 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27296 .smp_prepare_cpus = native_smp_prepare_cpus,
27297 .smp_cpus_done = native_smp_cpus_done,
27298diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27299index 6d7022c..4feb6be 100644
27300--- a/arch/x86/kernel/smpboot.c
27301+++ b/arch/x86/kernel/smpboot.c
27302@@ -194,14 +194,17 @@ static void notrace start_secondary(void *unused)
27303
27304 enable_start_cpu0 = 0;
27305
27306-#ifdef CONFIG_X86_32
27307+ /* otherwise gcc will move smp_processor_id() up before the cpu_init() call */
27308+ barrier();
27309+
27310 /* switch away from the initial page table */
27311+#ifdef CONFIG_PAX_PER_CPU_PGD
27312+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27313+#else
27314 load_cr3(swapper_pg_dir);
27315+#endif
27316 __flush_tlb_all();
27317-#endif
27318
27319- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27320- barrier();
27321 /*
27322 * Check TSC synchronization with the BP:
27323 */
27324@@ -765,8 +768,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27325 alternatives_enable_smp();
27326
27327 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27328- (THREAD_SIZE + task_stack_page(idle))) - 1);
27329+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27330 per_cpu(current_task, cpu) = idle;
27331+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27332
27333 #ifdef CONFIG_X86_32
27334 /* Stack for startup_32 can be just as for start_secondary onwards */
27335@@ -775,10 +779,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27336 clear_tsk_thread_flag(idle, TIF_FORK);
27337 initial_gs = per_cpu_offset(cpu);
27338 #endif
27339- per_cpu(kernel_stack, cpu) =
27340- (unsigned long)task_stack_page(idle) -
27341- KERNEL_STACK_OFFSET + THREAD_SIZE;
27342+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27343+ pax_open_kernel();
27344 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27345+ pax_close_kernel();
27346 initial_code = (unsigned long)start_secondary;
27347 stack_start = idle->thread.sp;
27348
27349@@ -918,6 +922,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27350 /* the FPU context is blank, nobody can own it */
27351 __cpu_disable_lazy_restore(cpu);
27352
27353+#ifdef CONFIG_PAX_PER_CPU_PGD
27354+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27355+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27356+ KERNEL_PGD_PTRS);
27357+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27358+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27359+ KERNEL_PGD_PTRS);
27360+#endif
27361+
27362 err = do_boot_cpu(apicid, cpu, tidle);
27363 if (err) {
27364 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
27365diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27366index 9b4d51d..5d28b58 100644
27367--- a/arch/x86/kernel/step.c
27368+++ b/arch/x86/kernel/step.c
27369@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27370 struct desc_struct *desc;
27371 unsigned long base;
27372
27373- seg &= ~7UL;
27374+ seg >>= 3;
27375
27376 mutex_lock(&child->mm->context.lock);
27377- if (unlikely((seg >> 3) >= child->mm->context.size))
27378+ if (unlikely(seg >= child->mm->context.size))
27379 addr = -1L; /* bogus selector, access would fault */
27380 else {
27381 desc = child->mm->context.ldt + seg;
27382@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27383 addr += base;
27384 }
27385 mutex_unlock(&child->mm->context.lock);
27386- }
27387+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27388+ addr = ktla_ktva(addr);
27389
27390 return addr;
27391 }
27392@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27393 unsigned char opcode[15];
27394 unsigned long addr = convert_ip_to_linear(child, regs);
27395
27396+ if (addr == -EINVAL)
27397+ return 0;
27398+
27399 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27400 for (i = 0; i < copied; i++) {
27401 switch (opcode[i]) {
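
A sketch of the x86 segment selector layout behind the seg >>= 3 simplification above: the low two bits are the RPL, bit 2 selects LDT versus GDT, and the descriptor index, the value compared against context.size, is the selector shifted right by three:

#include <stdio.h>

int main(void)
{
    unsigned int seg = 0x37;    /* sample LDT selector: index 6, ti 1, rpl 3 */

    printf("rpl=%u ti=%u index=%u\n",
           seg & 3,             /* requested privilege level */
           (seg >> 2) & 1,      /* table indicator: 1 = LDT */
           seg >> 3);           /* descriptor index */
    return 0;
}
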
27402diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27403new file mode 100644
27404index 0000000..5877189
27405--- /dev/null
27406+++ b/arch/x86/kernel/sys_i386_32.c
27407@@ -0,0 +1,189 @@
27408+/*
27409+ * This file contains various random system calls that
27410+ * have a non-standard calling sequence on the Linux/i386
27411+ * platform.
27412+ */
27413+
27414+#include <linux/errno.h>
27415+#include <linux/sched.h>
27416+#include <linux/mm.h>
27417+#include <linux/fs.h>
27418+#include <linux/smp.h>
27419+#include <linux/sem.h>
27420+#include <linux/msg.h>
27421+#include <linux/shm.h>
27422+#include <linux/stat.h>
27423+#include <linux/syscalls.h>
27424+#include <linux/mman.h>
27425+#include <linux/file.h>
27426+#include <linux/utsname.h>
27427+#include <linux/ipc.h>
27428+#include <linux/elf.h>
27429+
27430+#include <linux/uaccess.h>
27431+#include <linux/unistd.h>
27432+
27433+#include <asm/syscalls.h>
27434+
27435+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27436+{
27437+ unsigned long pax_task_size = TASK_SIZE;
27438+
27439+#ifdef CONFIG_PAX_SEGMEXEC
27440+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27441+ pax_task_size = SEGMEXEC_TASK_SIZE;
27442+#endif
27443+
27444+ if (flags & MAP_FIXED)
27445+ if (len > pax_task_size || addr > pax_task_size - len)
27446+ return -EINVAL;
27447+
27448+ return 0;
27449+}
27450+
27451+/*
27452+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27453+ */
27454+static unsigned long get_align_mask(void)
27455+{
27456+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27457+ return 0;
27458+
27459+ if (!(current->flags & PF_RANDOMIZE))
27460+ return 0;
27461+
27462+ return va_align.mask;
27463+}
27464+
27465+unsigned long
27466+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27467+ unsigned long len, unsigned long pgoff, unsigned long flags)
27468+{
27469+ struct mm_struct *mm = current->mm;
27470+ struct vm_area_struct *vma;
27471+ unsigned long pax_task_size = TASK_SIZE;
27472+ struct vm_unmapped_area_info info;
27473+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27474+
27475+#ifdef CONFIG_PAX_SEGMEXEC
27476+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27477+ pax_task_size = SEGMEXEC_TASK_SIZE;
27478+#endif
27479+
27480+ pax_task_size -= PAGE_SIZE;
27481+
27482+ if (len > pax_task_size)
27483+ return -ENOMEM;
27484+
27485+ if (flags & MAP_FIXED)
27486+ return addr;
27487+
27488+#ifdef CONFIG_PAX_RANDMMAP
27489+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27490+#endif
27491+
27492+ if (addr) {
27493+ addr = PAGE_ALIGN(addr);
27494+ if (pax_task_size - len >= addr) {
27495+ vma = find_vma(mm, addr);
27496+ if (check_heap_stack_gap(vma, addr, len, offset))
27497+ return addr;
27498+ }
27499+ }
27500+
27501+ info.flags = 0;
27502+ info.length = len;
27503+ info.align_mask = filp ? get_align_mask() : 0;
27504+ info.align_offset = pgoff << PAGE_SHIFT;
27505+ info.threadstack_offset = offset;
27506+
27507+#ifdef CONFIG_PAX_PAGEEXEC
27508+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27509+ info.low_limit = 0x00110000UL;
27510+ info.high_limit = mm->start_code;
27511+
27512+#ifdef CONFIG_PAX_RANDMMAP
27513+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27514+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27515+#endif
27516+
27517+ if (info.low_limit < info.high_limit) {
27518+ addr = vm_unmapped_area(&info);
27519+ if (!IS_ERR_VALUE(addr))
27520+ return addr;
27521+ }
27522+ } else
27523+#endif
27524+
27525+ info.low_limit = mm->mmap_base;
27526+ info.high_limit = pax_task_size;
27527+
27528+ return vm_unmapped_area(&info);
27529+}
27530+
27531+unsigned long
27532+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27533+ const unsigned long len, const unsigned long pgoff,
27534+ const unsigned long flags)
27535+{
27536+ struct vm_area_struct *vma;
27537+ struct mm_struct *mm = current->mm;
27538+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27539+ struct vm_unmapped_area_info info;
27540+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27541+
27542+#ifdef CONFIG_PAX_SEGMEXEC
27543+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27544+ pax_task_size = SEGMEXEC_TASK_SIZE;
27545+#endif
27546+
27547+ pax_task_size -= PAGE_SIZE;
27548+
27549+ /* requested length too big for entire address space */
27550+ if (len > pax_task_size)
27551+ return -ENOMEM;
27552+
27553+ if (flags & MAP_FIXED)
27554+ return addr;
27555+
27556+#ifdef CONFIG_PAX_PAGEEXEC
27557+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27558+ goto bottomup;
27559+#endif
27560+
27561+#ifdef CONFIG_PAX_RANDMMAP
27562+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27563+#endif
27564+
27565+ /* requesting a specific address */
27566+ if (addr) {
27567+ addr = PAGE_ALIGN(addr);
27568+ if (pax_task_size - len >= addr) {
27569+ vma = find_vma(mm, addr);
27570+ if (check_heap_stack_gap(vma, addr, len, offset))
27571+ return addr;
27572+ }
27573+ }
27574+
27575+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27576+ info.length = len;
27577+ info.low_limit = PAGE_SIZE;
27578+ info.high_limit = mm->mmap_base;
27579+ info.align_mask = filp ? get_align_mask() : 0;
27580+ info.align_offset = pgoff << PAGE_SHIFT;
27581+ info.threadstack_offset = offset;
27582+
27583+ addr = vm_unmapped_area(&info);
27584+ if (!(addr & ~PAGE_MASK))
27585+ return addr;
27586+ VM_BUG_ON(addr != -ENOMEM);
27587+
27588+bottomup:
27589+ /*
27590+ * A failed mmap() very likely causes application failure,
27591+ * so fall back to the bottom-up function here. This scenario
27592+ * can happen with large stack limits and large mmap()
27593+ * allocations.
27594+ */
27595+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27596+}
27597diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27598index 30277e2..5664a29 100644
27599--- a/arch/x86/kernel/sys_x86_64.c
27600+++ b/arch/x86/kernel/sys_x86_64.c
27601@@ -81,8 +81,8 @@ out:
27602 return error;
27603 }
27604
27605-static void find_start_end(unsigned long flags, unsigned long *begin,
27606- unsigned long *end)
27607+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27608+ unsigned long *begin, unsigned long *end)
27609 {
27610 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27611 unsigned long new_begin;
27612@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27613 *begin = new_begin;
27614 }
27615 } else {
27616- *begin = current->mm->mmap_legacy_base;
27617+ *begin = mm->mmap_legacy_base;
27618 *end = TASK_SIZE;
27619 }
27620 }
27621@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27622 struct vm_area_struct *vma;
27623 struct vm_unmapped_area_info info;
27624 unsigned long begin, end;
27625+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27626
27627 if (flags & MAP_FIXED)
27628 return addr;
27629
27630- find_start_end(flags, &begin, &end);
27631+ find_start_end(mm, flags, &begin, &end);
27632
27633 if (len > end)
27634 return -ENOMEM;
27635
27636+#ifdef CONFIG_PAX_RANDMMAP
27637+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27638+#endif
27639+
27640 if (addr) {
27641 addr = PAGE_ALIGN(addr);
27642 vma = find_vma(mm, addr);
27643- if (end - len >= addr &&
27644- (!vma || addr + len <= vma->vm_start))
27645+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27646 return addr;
27647 }
27648
27649@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27650 info.high_limit = end;
27651 info.align_mask = filp ? get_align_mask() : 0;
27652 info.align_offset = pgoff << PAGE_SHIFT;
27653+ info.threadstack_offset = offset;
27654 return vm_unmapped_area(&info);
27655 }
27656
27657@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27658 struct mm_struct *mm = current->mm;
27659 unsigned long addr = addr0;
27660 struct vm_unmapped_area_info info;
27661+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27662
27663 /* requested length too big for entire address space */
27664 if (len > TASK_SIZE)
27665@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27666 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27667 goto bottomup;
27668
27669+#ifdef CONFIG_PAX_RANDMMAP
27670+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27671+#endif
27672+
27673 /* requesting a specific address */
27674 if (addr) {
27675 addr = PAGE_ALIGN(addr);
27676 vma = find_vma(mm, addr);
27677- if (TASK_SIZE - len >= addr &&
27678- (!vma || addr + len <= vma->vm_start))
27679+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27680 return addr;
27681 }
27682
27683@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27684 info.high_limit = mm->mmap_base;
27685 info.align_mask = filp ? get_align_mask() : 0;
27686 info.align_offset = pgoff << PAGE_SHIFT;
27687+ info.threadstack_offset = offset;
27688 addr = vm_unmapped_area(&info);
27689 if (!(addr & ~PAGE_MASK))
27690 return addr;
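
For contrast with the check_heap_stack_gap() calls substituted above, upstream's plain fit test: a candidate range is usable when no VMA lies above it, or when it ends at or before the next VMA's start. struct vma here is a stripped-down stand-in for vm_area_struct:

#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

static int range_fits(const struct vma *next, unsigned long addr,
                      unsigned long len)
{
    return !next || addr + len <= next->vm_start;
}

int main(void)
{
    struct vma next = { 0x700000UL, 0x800000UL };

    printf("%d\n", range_fits(&next, 0x600000UL, 0x100000UL)); /* 1 */
    printf("%d\n", range_fits(&next, 0x600000UL, 0x200000UL)); /* 0 */
    return 0;
}
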
27691diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27692index 91a4496..bb87552 100644
27693--- a/arch/x86/kernel/tboot.c
27694+++ b/arch/x86/kernel/tboot.c
27695@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27696
27697 void tboot_shutdown(u32 shutdown_type)
27698 {
27699- void (*shutdown)(void);
27700+ void (* __noreturn shutdown)(void);
27701
27702 if (!tboot_enabled())
27703 return;
27704@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27705
27706 switch_to_tboot_pt();
27707
27708- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27709+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27710 shutdown();
27711
27712 /* should not reach here */
27713@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27714 return -ENODEV;
27715 }
27716
27717-static atomic_t ap_wfs_count;
27718+static atomic_unchecked_t ap_wfs_count;
27719
27720 static int tboot_wait_for_aps(int num_aps)
27721 {
27722@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27723 {
27724 switch (action) {
27725 case CPU_DYING:
27726- atomic_inc(&ap_wfs_count);
27727+ atomic_inc_unchecked(&ap_wfs_count);
27728 if (num_online_cpus() == 1)
27729- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27730+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27731 return NOTIFY_BAD;
27732 break;
27733 }
27734@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27735
27736 tboot_create_trampoline();
27737
27738- atomic_set(&ap_wfs_count, 0);
27739+ atomic_set_unchecked(&ap_wfs_count, 0);
27740 register_hotcpu_notifier(&tboot_cpu_notifier);
27741
27742 #ifdef CONFIG_DEBUG_FS
27743diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27744index 25adc0e..1df4349 100644
27745--- a/arch/x86/kernel/time.c
27746+++ b/arch/x86/kernel/time.c
27747@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27748 {
27749 unsigned long pc = instruction_pointer(regs);
27750
27751- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27752+ if (!user_mode(regs) && in_lock_functions(pc)) {
27753 #ifdef CONFIG_FRAME_POINTER
27754- return *(unsigned long *)(regs->bp + sizeof(long));
27755+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27756 #else
27757 unsigned long *sp =
27758 (unsigned long *)kernel_stack_pointer(regs);
27759@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27760 * or above a saved flags image. EFLAGS has bits 22-31 zero;
27761 * kernel addresses don't.
27762 */
27763+
27764+#ifdef CONFIG_PAX_KERNEXEC
27765+ return ktla_ktva(sp[0]);
27766+#else
27767 if (sp[0] >> 22)
27768 return sp[0];
27769 if (sp[1] >> 22)
27770 return sp[1];
27771 #endif
27772+
27773+#endif
27774 }
27775 return pc;
27776 }
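
The heuristic profile_pc() relies on above, worked through with sample values: a saved i386 EFLAGS image always has bits 22-31 clear, while kernel text sits at 0xc0000000 and up under the common 3G/1G split (an assumption here), so value >> 22 is nonzero only for a code address:

#include <stdio.h>

int main(void)
{
    unsigned long eflags = 0x00000246UL;  /* typical saved flags image */
    unsigned long kaddr  = 0xc1047ab0UL;  /* typical i386 kernel text */

    printf("eflags>>22=%#lx kaddr>>22=%#lx\n", eflags >> 22, kaddr >> 22);
    return 0;
}
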
27777diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27778index 7fc5e84..c6e445a 100644
27779--- a/arch/x86/kernel/tls.c
27780+++ b/arch/x86/kernel/tls.c
27781@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27782 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27783 return -EINVAL;
27784
27785+#ifdef CONFIG_PAX_SEGMEXEC
27786+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27787+ return -EINVAL;
27788+#endif
27789+
27790 set_tls_desc(p, idx, &info, 1);
27791
27792 return 0;
27793@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27794
27795 if (kbuf)
27796 info = kbuf;
27797- else if (__copy_from_user(infobuf, ubuf, count))
27798+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27799 return -EFAULT;
27800 else
27801 info = infobuf;
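
A userspace model of the length guard added to regset_tls_set() above, with memcpy standing in for __copy_from_user(); the point is that the size check now runs before any bytes move:

#include <stdio.h>
#include <string.h>

#define EFAULT 14

static int bounded_copy(void *dst, size_t dstsz, const void *src, size_t count)
{
    if (count > dstsz)
        return -EFAULT;          /* refuse instead of overflowing dst */
    memcpy(dst, src, count);
    return 0;
}

int main(void)
{
    char infobuf[16], junk[64] = { 0 };

    printf("%d\n", bounded_copy(infobuf, sizeof infobuf, junk, 8));  /* 0 */
    printf("%d\n", bounded_copy(infobuf, sizeof infobuf, junk, 64)); /* -14 */
    return 0;
}
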
27802diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27803index 1c113db..287b42e 100644
27804--- a/arch/x86/kernel/tracepoint.c
27805+++ b/arch/x86/kernel/tracepoint.c
27806@@ -9,11 +9,11 @@
27807 #include <linux/atomic.h>
27808
27809 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27810-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27811+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27812 (unsigned long) trace_idt_table };
27813
27814 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27815-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27816+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27817
27818 static int trace_irq_vector_refcount;
27819 static DEFINE_MUTEX(irq_vector_mutex);
27820diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27821index 88900e2..aa4149d 100644
27822--- a/arch/x86/kernel/traps.c
27823+++ b/arch/x86/kernel/traps.c
27824@@ -68,7 +68,7 @@
27825 #include <asm/proto.h>
27826
27827 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27828-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27829+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27830 #else
27831 #include <asm/processor-flags.h>
27832 #include <asm/setup.h>
27833@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27834 #endif
27835
27836 /* Must be page-aligned because the real IDT is used in a fixmap. */
27837-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27838+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27839
27840 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27841 EXPORT_SYMBOL_GPL(used_vectors);
27842@@ -109,11 +109,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
27843 }
27844
27845 static nokprobe_inline int
27846-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27847+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27848 struct pt_regs *regs, long error_code)
27849 {
27850 #ifdef CONFIG_X86_32
27851- if (regs->flags & X86_VM_MASK) {
27852+ if (v8086_mode(regs)) {
27853 /*
27854 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27855 * On nmi (interrupt 2), do_trap should not be called.
27856@@ -126,12 +126,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27857 return -1;
27858 }
27859 #endif
27860- if (!user_mode(regs)) {
27861+ if (!user_mode_novm(regs)) {
27862 if (!fixup_exception(regs)) {
27863 tsk->thread.error_code = error_code;
27864 tsk->thread.trap_nr = trapnr;
27865+
27866+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27867+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
27868+ str = "PAX: suspicious stack segment fault";
27869+#endif
27870+
27871 die(str, regs, error_code);
27872 }
27873+
27874+#ifdef CONFIG_PAX_REFCOUNT
27875+ if (trapnr == X86_TRAP_OF)
27876+ pax_report_refcount_overflow(regs);
27877+#endif
27878+
27879 return 0;
27880 }
27881
27882@@ -170,7 +182,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
27883 }
27884
27885 static void
27886-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27887+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
27888 long error_code, siginfo_t *info)
27889 {
27890 struct task_struct *tsk = current;
27891@@ -194,7 +206,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27892 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
27893 printk_ratelimit()) {
27894 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
27895- tsk->comm, tsk->pid, str,
27896+ tsk->comm, task_pid_nr(tsk), str,
27897 regs->ip, regs->sp, error_code);
27898 print_vma_addr(" in ", regs->ip);
27899 pr_cont("\n");
27900@@ -274,6 +286,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
27901 tsk->thread.error_code = error_code;
27902 tsk->thread.trap_nr = X86_TRAP_DF;
27903
27904+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
27905+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
27906+ die("grsec: kernel stack overflow detected", regs, error_code);
27907+#endif
27908+
27909 #ifdef CONFIG_DOUBLEFAULT
27910 df_debug(regs, error_code);
27911 #endif
27912@@ -379,7 +396,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
27913 conditional_sti(regs);
27914
27915 #ifdef CONFIG_X86_32
27916- if (regs->flags & X86_VM_MASK) {
27917+ if (v8086_mode(regs)) {
27918 local_irq_enable();
27919 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
27920 goto exit;
27921@@ -387,18 +404,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
27922 #endif
27923
27924 tsk = current;
27925- if (!user_mode(regs)) {
27926+ if (!user_mode_novm(regs)) {
27927 if (fixup_exception(regs))
27928 goto exit;
27929
27930 tsk->thread.error_code = error_code;
27931 tsk->thread.trap_nr = X86_TRAP_GP;
27932 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
27933- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
27934+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
27935+
27936+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27937+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
27938+ die("PAX: suspicious general protection fault", regs, error_code);
27939+ else
27940+#endif
27941+
27942 die("general protection fault", regs, error_code);
27943+ }
27944 goto exit;
27945 }
27946
27947+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27948+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
27949+ struct mm_struct *mm = tsk->mm;
27950+ unsigned long limit;
27951+
27952+ down_write(&mm->mmap_sem);
27953+ limit = mm->context.user_cs_limit;
27954+ if (limit < TASK_SIZE) {
27955+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
27956+ up_write(&mm->mmap_sem);
27957+ return;
27958+ }
27959+ up_write(&mm->mmap_sem);
27960+ }
27961+#endif
27962+
27963 tsk->thread.error_code = error_code;
27964 tsk->thread.trap_nr = X86_TRAP_GP;
27965
27966@@ -510,13 +551,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
27967 container_of(task_pt_regs(current),
27968 struct bad_iret_stack, regs);
27969
27970+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
27971+ new_stack = s;
27972+
27973 /* Copy the IRET target to the new stack. */
27974 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
27975
27976 /* Copy the remainder of the stack from the current stack. */
27977 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
27978
27979- BUG_ON(!user_mode_vm(&new_stack->regs));
27980+ BUG_ON(!user_mode(&new_stack->regs));
27981 return new_stack;
27982 }
27983 NOKPROBE_SYMBOL(fixup_bad_iret);
27984@@ -602,7 +646,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
27985 /* It's safe to allow irq's after DR6 has been saved */
27986 preempt_conditional_sti(regs);
27987
27988- if (regs->flags & X86_VM_MASK) {
27989+ if (v8086_mode(regs)) {
27990 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
27991 X86_TRAP_DB);
27992 preempt_conditional_cli(regs);
27993@@ -617,7 +661,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
27994 * We already checked v86 mode above, so we can check for kernel mode
27995 * by just checking the CPL of CS.
27996 */
27997- if ((dr6 & DR_STEP) && !user_mode(regs)) {
27998+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
27999 tsk->thread.debugreg6 &= ~DR_STEP;
28000 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28001 regs->flags &= ~X86_EFLAGS_TF;
28002@@ -650,7 +694,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28003 return;
28004 conditional_sti(regs);
28005
28006- if (!user_mode_vm(regs))
28007+ if (!user_mode(regs))
28008 {
28009 if (!fixup_exception(regs)) {
28010 task->thread.error_code = error_code;
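
A model of the grsec double-fault check added above; tsk->stack is taken to be the lowest address of the stack allocation (an assumption from the patch context), so an sp within a page below it is treated as a kernel stack overflow:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* An sp still inside the stack makes the subtraction wrap to a huge
 * unsigned value and is not flagged; only a just-underflowed sp is. */
static int stack_overflowed(unsigned long stack_base, unsigned long sp)
{
    return stack_base - sp <= PAGE_SIZE;
}

int main(void)
{
    unsigned long base = 0x10000000UL;

    printf("%d\n", stack_overflowed(base, base - 512));   /* 1 */
    printf("%d\n", stack_overflowed(base, base + 8192));  /* 0 */
    return 0;
}
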
28011diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28012index 5054497..139f8f8 100644
28013--- a/arch/x86/kernel/tsc.c
28014+++ b/arch/x86/kernel/tsc.c
28015@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28016 */
28017 smp_wmb();
28018
28019- ACCESS_ONCE(c2n->head) = data;
28020+ ACCESS_ONCE_RW(c2n->head) = data;
28021 }
28022
28023 /*
28024diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28025index 8b96a94..792b410 100644
28026--- a/arch/x86/kernel/uprobes.c
28027+++ b/arch/x86/kernel/uprobes.c
28028@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28029 int ret = NOTIFY_DONE;
28030
28031 /* We are only interested in userspace traps */
28032- if (regs && !user_mode_vm(regs))
28033+ if (regs && !user_mode(regs))
28034 return NOTIFY_DONE;
28035
28036 switch (val) {
28037@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28038
28039 if (nleft != rasize) {
28040 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28041- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28042+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28043
28044 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28045 }
28046diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28047index b9242ba..50c5edd 100644
28048--- a/arch/x86/kernel/verify_cpu.S
28049+++ b/arch/x86/kernel/verify_cpu.S
28050@@ -20,6 +20,7 @@
28051 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28052 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28053 * arch/x86/kernel/head_32.S: processor startup
28054+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28055 *
28056 * verify_cpu, returns the status of longmode and SSE in register %eax.
28057 * 0: Success 1: Failure
28058diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28059index e8edcf5..27f9344 100644
28060--- a/arch/x86/kernel/vm86_32.c
28061+++ b/arch/x86/kernel/vm86_32.c
28062@@ -44,6 +44,7 @@
28063 #include <linux/ptrace.h>
28064 #include <linux/audit.h>
28065 #include <linux/stddef.h>
28066+#include <linux/grsecurity.h>
28067
28068 #include <asm/uaccess.h>
28069 #include <asm/io.h>
28070@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28071 do_exit(SIGSEGV);
28072 }
28073
28074- tss = &per_cpu(init_tss, get_cpu());
28075+ tss = init_tss + get_cpu();
28076 current->thread.sp0 = current->thread.saved_sp0;
28077 current->thread.sysenter_cs = __KERNEL_CS;
28078 load_sp0(tss, &current->thread);
28079@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28080
28081 if (tsk->thread.saved_sp0)
28082 return -EPERM;
28083+
28084+#ifdef CONFIG_GRKERNSEC_VM86
28085+ if (!capable(CAP_SYS_RAWIO)) {
28086+ gr_handle_vm86();
28087+ return -EPERM;
28088+ }
28089+#endif
28090+
28091 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28092 offsetof(struct kernel_vm86_struct, vm86plus) -
28093 sizeof(info.regs));
28094@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28095 int tmp;
28096 struct vm86plus_struct __user *v86;
28097
28098+#ifdef CONFIG_GRKERNSEC_VM86
28099+ if (!capable(CAP_SYS_RAWIO)) {
28100+ gr_handle_vm86();
28101+ return -EPERM;
28102+ }
28103+#endif
28104+
28105 tsk = current;
28106 switch (cmd) {
28107 case VM86_REQUEST_IRQ:
28108@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28109 tsk->thread.saved_fs = info->regs32->fs;
28110 tsk->thread.saved_gs = get_user_gs(info->regs32);
28111
28112- tss = &per_cpu(init_tss, get_cpu());
28113+ tss = init_tss + get_cpu();
28114 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28115 if (cpu_has_sep)
28116 tsk->thread.sysenter_cs = 0;
28117@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28118 goto cannot_handle;
28119 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28120 goto cannot_handle;
28121- intr_ptr = (unsigned long __user *) (i << 2);
28122+ intr_ptr = (__force unsigned long __user *) (i << 2);
28123 if (get_user(segoffs, intr_ptr))
28124 goto cannot_handle;
28125 if ((segoffs >> 16) == BIOSSEG)
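
The (i << 2) computation cast above indexes the real-mode interrupt vector table: each of the 256 vectors is a 4-byte segment:offset pair starting at linear address 0. A worked example with an arbitrary sample entry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned int i = 0x21;           /* sample vector (DOS services) */
    uint32_t segoffs = 0xf000ff53;   /* sample IVT entry: f000:ff53 */

    printf("IVT entry address: %#x\n", i << 2);  /* 0x84 */
    printf("segment=%#x offset=%#x\n", segoffs >> 16, segoffs & 0xffffu);
    return 0;
}
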
28126diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28127index 00bf300..129df8e 100644
28128--- a/arch/x86/kernel/vmlinux.lds.S
28129+++ b/arch/x86/kernel/vmlinux.lds.S
28130@@ -26,6 +26,13 @@
28131 #include <asm/page_types.h>
28132 #include <asm/cache.h>
28133 #include <asm/boot.h>
28134+#include <asm/segment.h>
28135+
28136+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28137+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28138+#else
28139+#define __KERNEL_TEXT_OFFSET 0
28140+#endif
28141
28142 #undef i386 /* in case the preprocessor is a 32bit one */
28143
28144@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28145
28146 PHDRS {
28147 text PT_LOAD FLAGS(5); /* R_E */
28148+#ifdef CONFIG_X86_32
28149+ module PT_LOAD FLAGS(5); /* R_E */
28150+#endif
28151+#ifdef CONFIG_XEN
28152+ rodata PT_LOAD FLAGS(5); /* R_E */
28153+#else
28154+ rodata PT_LOAD FLAGS(4); /* R__ */
28155+#endif
28156 data PT_LOAD FLAGS(6); /* RW_ */
28157-#ifdef CONFIG_X86_64
28158+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28159 #ifdef CONFIG_SMP
28160 percpu PT_LOAD FLAGS(6); /* RW_ */
28161 #endif
28162+ text.init PT_LOAD FLAGS(5); /* R_E */
28163+ text.exit PT_LOAD FLAGS(5); /* R_E */
28164 init PT_LOAD FLAGS(7); /* RWE */
28165-#endif
28166 note PT_NOTE FLAGS(0); /* ___ */
28167 }
28168
28169 SECTIONS
28170 {
28171 #ifdef CONFIG_X86_32
28172- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28173- phys_startup_32 = startup_32 - LOAD_OFFSET;
28174+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28175 #else
28176- . = __START_KERNEL;
28177- phys_startup_64 = startup_64 - LOAD_OFFSET;
28178+ . = __START_KERNEL;
28179 #endif
28180
28181 /* Text and read-only data */
28182- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28183- _text = .;
28184+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28185 /* bootstrapping code */
28186+#ifdef CONFIG_X86_32
28187+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28188+#else
28189+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28190+#endif
28191+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28192+ _text = .;
28193 HEAD_TEXT
28194 . = ALIGN(8);
28195 _stext = .;
28196@@ -104,13 +124,47 @@ SECTIONS
28197 IRQENTRY_TEXT
28198 *(.fixup)
28199 *(.gnu.warning)
28200- /* End of text section */
28201- _etext = .;
28202 } :text = 0x9090
28203
28204- NOTES :text :note
28205+ . += __KERNEL_TEXT_OFFSET;
28206
28207- EXCEPTION_TABLE(16) :text = 0x9090
28208+#ifdef CONFIG_X86_32
28209+ . = ALIGN(PAGE_SIZE);
28210+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28211+
28212+#ifdef CONFIG_PAX_KERNEXEC
28213+ MODULES_EXEC_VADDR = .;
28214+ BYTE(0)
28215+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28216+ . = ALIGN(HPAGE_SIZE) - 1;
28217+ MODULES_EXEC_END = .;
28218+#endif
28219+
28220+ } :module
28221+#endif
28222+
28223+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28224+ /* End of text section */
28225+ BYTE(0)
28226+ _etext = . - __KERNEL_TEXT_OFFSET;
28227+ }
28228+
28229+#ifdef CONFIG_X86_32
28230+ . = ALIGN(PAGE_SIZE);
28231+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28232+ . = ALIGN(PAGE_SIZE);
28233+ *(.empty_zero_page)
28234+ *(.initial_pg_fixmap)
28235+ *(.initial_pg_pmd)
28236+ *(.initial_page_table)
28237+ *(.swapper_pg_dir)
28238+ } :rodata
28239+#endif
28240+
28241+ . = ALIGN(PAGE_SIZE);
28242+ NOTES :rodata :note
28243+
28244+ EXCEPTION_TABLE(16) :rodata
28245
28246 #if defined(CONFIG_DEBUG_RODATA)
28247 /* .text should occupy whole number of pages */
28248@@ -122,16 +176,20 @@ SECTIONS
28249
28250 /* Data */
28251 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28252+
28253+#ifdef CONFIG_PAX_KERNEXEC
28254+ . = ALIGN(HPAGE_SIZE);
28255+#else
28256+ . = ALIGN(PAGE_SIZE);
28257+#endif
28258+
28259 /* Start of data section */
28260 _sdata = .;
28261
28262 /* init_task */
28263 INIT_TASK_DATA(THREAD_SIZE)
28264
28265-#ifdef CONFIG_X86_32
28266- /* 32 bit has nosave before _edata */
28267 NOSAVE_DATA
28268-#endif
28269
28270 PAGE_ALIGNED_DATA(PAGE_SIZE)
28271
28272@@ -174,12 +232,19 @@ SECTIONS
28273 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28274
28275 /* Init code and data - will be freed after init */
28276- . = ALIGN(PAGE_SIZE);
28277 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28278+ BYTE(0)
28279+
28280+#ifdef CONFIG_PAX_KERNEXEC
28281+ . = ALIGN(HPAGE_SIZE);
28282+#else
28283+ . = ALIGN(PAGE_SIZE);
28284+#endif
28285+
28286 __init_begin = .; /* paired with __init_end */
28287- }
28288+ } :init.begin
28289
28290-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28291+#ifdef CONFIG_SMP
28292 /*
28293 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28294 * output PHDR, so the next output section - .init.text - should
28295@@ -190,12 +255,27 @@ SECTIONS
28296 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28297 #endif
28298
28299- INIT_TEXT_SECTION(PAGE_SIZE)
28300-#ifdef CONFIG_X86_64
28301- :init
28302-#endif
28303+ . = ALIGN(PAGE_SIZE);
28304+ init_begin = .;
28305+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28306+ VMLINUX_SYMBOL(_sinittext) = .;
28307+ INIT_TEXT
28308+ . = ALIGN(PAGE_SIZE);
28309+ } :text.init
28310
28311- INIT_DATA_SECTION(16)
28312+ /*
27313+ * .exit.text is discarded at runtime, not link time, to deal with
28314+ * references from .altinstructions and .eh_frame
28315+ */
28316+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28317+ EXIT_TEXT
28318+ VMLINUX_SYMBOL(_einittext) = .;
28319+ . = ALIGN(16);
28320+ } :text.exit
28321+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28322+
28323+ . = ALIGN(PAGE_SIZE);
28324+ INIT_DATA_SECTION(16) :init
28325
28326 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28327 __x86_cpu_dev_start = .;
28328@@ -266,19 +346,12 @@ SECTIONS
28329 }
28330
28331 . = ALIGN(8);
28332- /*
28333- * .exit.text is discard at runtime, not link time, to deal with
28334- * references from .altinstructions and .eh_frame
28335- */
28336- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28337- EXIT_TEXT
28338- }
28339
28340 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28341 EXIT_DATA
28342 }
28343
28344-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28345+#ifndef CONFIG_SMP
28346 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28347 #endif
28348
28349@@ -297,16 +370,10 @@ SECTIONS
28350 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28351 __smp_locks = .;
28352 *(.smp_locks)
28353- . = ALIGN(PAGE_SIZE);
28354 __smp_locks_end = .;
28355+ . = ALIGN(PAGE_SIZE);
28356 }
28357
28358-#ifdef CONFIG_X86_64
28359- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28360- NOSAVE_DATA
28361- }
28362-#endif
28363-
28364 /* BSS */
28365 . = ALIGN(PAGE_SIZE);
28366 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28367@@ -322,6 +389,7 @@ SECTIONS
28368 __brk_base = .;
28369 . += 64 * 1024; /* 64k alignment slop space */
28370 *(.brk_reservation) /* areas brk users have reserved */
28371+ . = ALIGN(HPAGE_SIZE);
28372 __brk_limit = .;
28373 }
28374
28375@@ -348,13 +416,12 @@ SECTIONS
28376 * for the boot processor.
28377 */
28378 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28379-INIT_PER_CPU(gdt_page);
28380 INIT_PER_CPU(irq_stack_union);
28381
28382 /*
28383 * Build-time check on the image size:
28384 */
28385-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28386+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28387 "kernel image bigger than KERNEL_IMAGE_SIZE");
28388
28389 #ifdef CONFIG_SMP
28390diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28391index 2dcc6ff..082dc7a 100644
28392--- a/arch/x86/kernel/vsyscall_64.c
28393+++ b/arch/x86/kernel/vsyscall_64.c
28394@@ -38,15 +38,13 @@
28395 #define CREATE_TRACE_POINTS
28396 #include "vsyscall_trace.h"
28397
28398-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28399+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28400
28401 static int __init vsyscall_setup(char *str)
28402 {
28403 if (str) {
28404 if (!strcmp("emulate", str))
28405 vsyscall_mode = EMULATE;
28406- else if (!strcmp("native", str))
28407- vsyscall_mode = NATIVE;
28408 else if (!strcmp("none", str))
28409 vsyscall_mode = NONE;
28410 else
28411@@ -264,8 +262,7 @@ do_ret:
28412 return true;
28413
28414 sigsegv:
28415- force_sig(SIGSEGV, current);
28416- return true;
28417+ do_group_exit(SIGKILL);
28418 }
28419
28420 /*
28421@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28422 static struct vm_area_struct gate_vma = {
28423 .vm_start = VSYSCALL_ADDR,
28424 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28425- .vm_page_prot = PAGE_READONLY_EXEC,
28426- .vm_flags = VM_READ | VM_EXEC,
28427+ .vm_page_prot = PAGE_READONLY,
28428+ .vm_flags = VM_READ,
28429 .vm_ops = &gate_vma_ops,
28430 };
28431
28432@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28433 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28434
28435 if (vsyscall_mode != NONE)
28436- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28437- vsyscall_mode == NATIVE
28438- ? PAGE_KERNEL_VSYSCALL
28439- : PAGE_KERNEL_VVAR);
28440+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28441
28442 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28443 (unsigned long)VSYSCALL_ADDR);
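
From userland's point of view, the change above removes the executable (NATIVE) variant entirely: the legacy vsyscall entry points keep their fixed architectural address, but every call now faults and is emulated, and under this patch a bad access kills the task group outright (do_group_exit(SIGKILL)) instead of delivering SIGSEGV. A small userspace illustration; VSYSCALL_ADDR is the architectural fixed address:

    /* Userspace sketch: invoking the legacy vsyscall gettimeofday entry.
     * Under EMULATE the fault is trapped and emulated by the kernel; with
     * NATIVE removed there is no directly executable variant anymore. */
    #include <stdio.h>
    #include <sys/time.h>

    typedef int (*vgtod_t)(struct timeval *, void *);

    int main(void)
    {
        vgtod_t vgtod = (vgtod_t)0xffffffffff600000UL; /* VSYSCALL_ADDR */
        struct timeval tv;

        if (vgtod(&tv, NULL) == 0)
            printf("vsyscall gettimeofday: %ld\n", (long)tv.tv_sec);
        return 0;
    }
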
28444diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28445index 04068192..4d75aa6 100644
28446--- a/arch/x86/kernel/x8664_ksyms_64.c
28447+++ b/arch/x86/kernel/x8664_ksyms_64.c
28448@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28449 EXPORT_SYMBOL(copy_user_generic_unrolled);
28450 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28451 EXPORT_SYMBOL(__copy_user_nocache);
28452-EXPORT_SYMBOL(_copy_from_user);
28453-EXPORT_SYMBOL(_copy_to_user);
28454
28455 EXPORT_SYMBOL(copy_page);
28456 EXPORT_SYMBOL(clear_page);
28457@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28458 EXPORT_SYMBOL(___preempt_schedule_context);
28459 #endif
28460 #endif
28461+
28462+#ifdef CONFIG_PAX_PER_CPU_PGD
28463+EXPORT_SYMBOL(cpu_pgd);
28464+#endif
28465diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28466index 234b072..b7ab191 100644
28467--- a/arch/x86/kernel/x86_init.c
28468+++ b/arch/x86/kernel/x86_init.c
28469@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28470 static void default_nmi_init(void) { };
28471 static int default_i8042_detect(void) { return 1; };
28472
28473-struct x86_platform_ops x86_platform = {
28474+struct x86_platform_ops x86_platform __read_only = {
28475 .calibrate_tsc = native_calibrate_tsc,
28476 .get_wallclock = mach_get_cmos_time,
28477 .set_wallclock = mach_set_rtc_mmss,
28478@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28479 EXPORT_SYMBOL_GPL(x86_platform);
28480
28481 #if defined(CONFIG_PCI_MSI)
28482-struct x86_msi_ops x86_msi = {
28483+struct x86_msi_ops x86_msi __read_only = {
28484 .setup_msi_irqs = native_setup_msi_irqs,
28485 .compose_msi_msg = native_compose_msi_msg,
28486 .teardown_msi_irq = native_teardown_msi_irq,
28487@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28488 }
28489 #endif
28490
28491-struct x86_io_apic_ops x86_io_apic_ops = {
28492+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28493 .init = native_io_apic_init_mappings,
28494 .read = native_io_apic_read,
28495 .write = native_io_apic_write,
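
Marking these ops tables __read_only relies on the bracket used throughout the patch: the rare legitimate late writes happen between pax_open_kernel() and pax_close_kernel(). A conceptual sketch of what the bracket does on x86; the real implementation also deals with preemption and per-config details, so treat this as illustrative:

    /* Conceptual sketch only: temporarily clear CR0.WP so supervisor
     * writes may go through read-only mappings, then re-arm it. */
    static inline unsigned long pax_open_kernel_sketch(void)
    {
        unsigned long cr0 = read_cr0();

        write_cr0(cr0 & ~X86_CR0_WP);   /* RO-page writes now permitted */
        return cr0;
    }

    static inline void pax_close_kernel_sketch(void)
    {
        write_cr0(read_cr0() | X86_CR0_WP);  /* restore write protection */
    }
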
28496diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28497index 0de1fae..298d037 100644
28498--- a/arch/x86/kernel/xsave.c
28499+++ b/arch/x86/kernel/xsave.c
28500@@ -167,18 +167,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28501
28502 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28503 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28504- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28505+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28506
28507 if (!use_xsave())
28508 return err;
28509
28510- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28511+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28512
28513 /*
28514 * Read the xstate_bv which we copied (directly from the cpu or
28515 * from the state in task struct) to the user buffers.
28516 */
28517- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28518+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28519
28520 /*
28521 	 * For legacy compatibility, we always set FP/SSE bits in the bit
28522@@ -193,7 +193,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28523 */
28524 xstate_bv |= XSTATE_FPSSE;
28525
28526- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28527+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28528
28529 return err;
28530 }
28531@@ -202,6 +202,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28532 {
28533 int err;
28534
28535+ buf = (struct xsave_struct __user *)____m(buf);
28536 if (use_xsave())
28537 err = xsave_user(buf);
28538 else if (use_fxsr())
28539@@ -312,6 +313,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28540 */
28541 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28542 {
28543+ buf = (void __user *)____m(buf);
28544 if (use_xsave()) {
28545 if ((unsigned long)buf % 64 || fx_only) {
28546 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
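
The casts above exist purely for sparse: __user marks a pointer as living in the user address space, and mixing address spaces without a __user (or __force) cast is flagged when building with C=1. A minimal illustration of the annotation, mirroring the accessor usage in this hunk:

    /* Minimal sketch of the address-space annotation sparse checks. */
    static int write_magic(void __user *buf, unsigned long xstate_size)
    {
        /* OK: destination is annotated __user and accessed via __put_user */
        return __put_user(FP_XSTATE_MAGIC2,
                          (__u32 __user *)(buf + xstate_size));
    }
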
28547diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28548index 8a80737..bac4961 100644
28549--- a/arch/x86/kvm/cpuid.c
28550+++ b/arch/x86/kvm/cpuid.c
28551@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28552 struct kvm_cpuid2 *cpuid,
28553 struct kvm_cpuid_entry2 __user *entries)
28554 {
28555- int r;
28556+ int r, i;
28557
28558 r = -E2BIG;
28559 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28560 goto out;
28561 r = -EFAULT;
28562- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28563- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28564+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28565 goto out;
28566+ for (i = 0; i < cpuid->nent; ++i) {
28567+ struct kvm_cpuid_entry2 cpuid_entry;
28568+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28569+ goto out;
28570+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28571+ }
28572 vcpu->arch.cpuid_nent = cpuid->nent;
28573 kvm_apic_set_version(vcpu);
28574 kvm_x86_ops->cpuid_update(vcpu);
28575@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28576 struct kvm_cpuid2 *cpuid,
28577 struct kvm_cpuid_entry2 __user *entries)
28578 {
28579- int r;
28580+ int r, i;
28581
28582 r = -E2BIG;
28583 if (cpuid->nent < vcpu->arch.cpuid_nent)
28584 goto out;
28585 r = -EFAULT;
28586- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28587- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28588+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28589 goto out;
28590+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28591+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28592+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28593+ goto out;
28594+ }
28595 return 0;
28596
28597 out:
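
The rewrite above replaces one large copy_from_user()/copy_to_user() over the whole entries array with an access_ok() check followed by per-entry __copy_from_user()/__copy_to_user() calls, so each copied object has a fixed, compile-time size — the property the usercopy/size_overflow instrumentation elsewhere in this patch wants to enforce. The pattern, sketched generically with illustrative names:

    /* Generic sketch of the chunked user-copy pattern. */
    static int copy_entries_in(struct kvm_cpuid_entry2 *dst,
                               const struct kvm_cpuid_entry2 __user *src,
                               unsigned int nent)
    {
        unsigned int i;

        if (!access_ok(VERIFY_READ, src, nent * sizeof(*src)))
            return -EFAULT;
        for (i = 0; i < nent; i++)   /* fixed-size copy per element */
            if (__copy_from_user(&dst[i], &src[i], sizeof(dst[i])))
                return -EFAULT;
        return 0;
    }
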
28598diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28599index de12c1d..4031e2a 100644
28600--- a/arch/x86/kvm/emulate.c
28601+++ b/arch/x86/kvm/emulate.c
28602@@ -3503,7 +3503,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28603 int cr = ctxt->modrm_reg;
28604 u64 efer = 0;
28605
28606- static u64 cr_reserved_bits[] = {
28607+ static const u64 cr_reserved_bits[] = {
28608 0xffffffff00000000ULL,
28609 0, 0, 0, /* CR3 checked later */
28610 CR4_RESERVED_BITS,
28611diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28612index d52dcf0..cec7e84 100644
28613--- a/arch/x86/kvm/lapic.c
28614+++ b/arch/x86/kvm/lapic.c
28615@@ -55,7 +55,7 @@
28616 #define APIC_BUS_CYCLE_NS 1
28617
28618 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28619-#define apic_debug(fmt, arg...)
28620+#define apic_debug(fmt, arg...) do {} while (0)
28621
28622 #define APIC_LVT_NUM 6
28623 /* 14 is the version for Xeon and Pentium 8.4.8*/
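
Replacing the empty apic_debug expansion with do {} while (0) is the standard idiom for statement-like macros: the empty form leaves a bare ';' as the body of a conditional (tripping -Wempty-body style warnings) and would stop behaving as a single statement the moment the macro grew a real body. A short illustration:

    #define apic_debug_empty(fmt, arg...)            /* expands to nothing */
    #define apic_debug_safe(fmt, arg...)  do {} while (0)

    static void demo(int verbose)
    {
        if (verbose)
            apic_debug_empty("x");  /* body is a null statement ';' */
        if (verbose)
            apic_debug_safe("x");   /* always exactly one statement */
    }
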
28624diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28625index fd49c86..77e1aa0 100644
28626--- a/arch/x86/kvm/paging_tmpl.h
28627+++ b/arch/x86/kvm/paging_tmpl.h
28628@@ -343,7 +343,7 @@ retry_walk:
28629 if (unlikely(kvm_is_error_hva(host_addr)))
28630 goto error;
28631
28632- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28633+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28634 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28635 goto error;
28636 walker->ptep_user[walker->level - 1] = ptep_user;
28637diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28638index 41dd038..de331cf 100644
28639--- a/arch/x86/kvm/svm.c
28640+++ b/arch/x86/kvm/svm.c
28641@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28642 int cpu = raw_smp_processor_id();
28643
28644 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28645+
28646+ pax_open_kernel();
28647 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28648+ pax_close_kernel();
28649+
28650 load_TR_desc();
28651 }
28652
28653@@ -3969,6 +3973,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28654 #endif
28655 #endif
28656
28657+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28658+ __set_fs(current_thread_info()->addr_limit);
28659+#endif
28660+
28661 reload_tss(vcpu);
28662
28663 local_irq_disable();
28664diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28665index d4c58d8..eaf2568 100644
28666--- a/arch/x86/kvm/vmx.c
28667+++ b/arch/x86/kvm/vmx.c
28668@@ -1380,12 +1380,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28669 #endif
28670 }
28671
28672-static void vmcs_clear_bits(unsigned long field, u32 mask)
28673+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28674 {
28675 vmcs_writel(field, vmcs_readl(field) & ~mask);
28676 }
28677
28678-static void vmcs_set_bits(unsigned long field, u32 mask)
28679+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28680 {
28681 vmcs_writel(field, vmcs_readl(field) | mask);
28682 }
28683@@ -1645,7 +1645,11 @@ static void reload_tss(void)
28684 struct desc_struct *descs;
28685
28686 descs = (void *)gdt->address;
28687+
28688+ pax_open_kernel();
28689 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28690+ pax_close_kernel();
28691+
28692 load_TR_desc();
28693 }
28694
28695@@ -1881,6 +1885,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28696 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28697 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28698
28699+#ifdef CONFIG_PAX_PER_CPU_PGD
28700+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28701+#endif
28702+
28703 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28704 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28705 vmx->loaded_vmcs->cpu = cpu;
28706@@ -2170,7 +2178,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28707 * reads and returns guest's timestamp counter "register"
28708 * guest_tsc = host_tsc + tsc_offset -- 21.3
28709 */
28710-static u64 guest_read_tsc(void)
28711+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28712 {
28713 u64 host_tsc, tsc_offset;
28714
28715@@ -4252,7 +4260,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28716 unsigned long cr4;
28717
28718 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28719+
28720+#ifndef CONFIG_PAX_PER_CPU_PGD
28721 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28722+#endif
28723
28724 /* Save the most likely value for this task's CR4 in the VMCS. */
28725 cr4 = read_cr4();
28726@@ -4279,7 +4290,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28727 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28728 vmx->host_idt_base = dt.address;
28729
28730- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28731+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28732
28733 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28734 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28735@@ -5876,11 +5887,16 @@ static __init int hardware_setup(void)
28736 * page upon invalidation. No need to do anything if the
28737 * processor does not have the APIC_ACCESS_ADDR VMCS field.
28738 */
28739- kvm_x86_ops->set_apic_access_page_addr = NULL;
28740+ pax_open_kernel();
28741+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28742+ pax_close_kernel();
28743 }
28744
28745- if (!cpu_has_vmx_tpr_shadow())
28746- kvm_x86_ops->update_cr8_intercept = NULL;
28747+ if (!cpu_has_vmx_tpr_shadow()) {
28748+ pax_open_kernel();
28749+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28750+ pax_close_kernel();
28751+ }
28752
28753 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28754 kvm_disable_largepages();
28755@@ -5891,13 +5907,15 @@ static __init int hardware_setup(void)
28756 if (!cpu_has_vmx_apicv())
28757 enable_apicv = 0;
28758
28759+ pax_open_kernel();
28760 if (enable_apicv)
28761- kvm_x86_ops->update_cr8_intercept = NULL;
28762+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28763 else {
28764- kvm_x86_ops->hwapic_irr_update = NULL;
28765- kvm_x86_ops->deliver_posted_interrupt = NULL;
28766- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28767+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28768+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28769+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28770 }
28771+ pax_close_kernel();
28772
28773 if (nested)
28774 nested_vmx_setup_ctls_msrs();
28775@@ -7846,6 +7864,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28776 "jmp 2f \n\t"
28777 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28778 "2: "
28779+
28780+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28781+ "ljmp %[cs],$3f\n\t"
28782+ "3: "
28783+#endif
28784+
28785 /* Save guest registers, load host registers, keep flags */
28786 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28787 "pop %0 \n\t"
28788@@ -7898,6 +7922,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28789 #endif
28790 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28791 [wordsize]"i"(sizeof(ulong))
28792+
28793+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28794+ ,[cs]"i"(__KERNEL_CS)
28795+#endif
28796+
28797 : "cc", "memory"
28798 #ifdef CONFIG_X86_64
28799 , "rax", "rbx", "rdi", "rsi"
28800@@ -7911,7 +7940,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28801 if (debugctlmsr)
28802 update_debugctlmsr(debugctlmsr);
28803
28804-#ifndef CONFIG_X86_64
28805+#ifdef CONFIG_X86_32
28806 /*
28807 * The sysexit path does not restore ds/es, so we must set them to
28808 * a reasonable value ourselves.
28809@@ -7920,8 +7949,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28810 	 * may be executed in interrupt context, which saves and restores segments
28811 * around it, nullifying its effect.
28812 */
28813- loadsegment(ds, __USER_DS);
28814- loadsegment(es, __USER_DS);
28815+ loadsegment(ds, __KERNEL_DS);
28816+ loadsegment(es, __KERNEL_DS);
28817+ loadsegment(ss, __KERNEL_DS);
28818+
28819+#ifdef CONFIG_PAX_KERNEXEC
28820+ loadsegment(fs, __KERNEL_PERCPU);
28821+#endif
28822+
28823+#ifdef CONFIG_PAX_MEMORY_UDEREF
28824+ __set_fs(current_thread_info()->addr_limit);
28825+#endif
28826+
28827 #endif
28828
28829 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
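
The *(void **)& stores in the vmx hunks follow from the constify plugin used elsewhere in this patch: ops structures become const-qualified, so the handful of setup-time writes must cast the qualifier away inside a pax_open_kernel()/pax_close_kernel() window. A reduced sketch; the __do_const attribute and the shape follow the PaX convention, and the names here are illustrative:

    /* Illustrative: updating a function pointer in a constified ops table. */
    struct demo_ops {
        void (*hook)(void);
    } __do_const;                       /* constify plugin attribute */

    static struct demo_ops demo_ops;

    static void set_hook(void (*fn)(void))
    {
        pax_open_kernel();
        *(void **)&demo_ops.hook = fn;  /* bypasses the const qualifier */
        pax_close_kernel();
    }
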
28830diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28831index c259814..9a0345b 100644
28832--- a/arch/x86/kvm/x86.c
28833+++ b/arch/x86/kvm/x86.c
28834@@ -1882,8 +1882,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28835 {
28836 struct kvm *kvm = vcpu->kvm;
28837 int lm = is_long_mode(vcpu);
28838- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28839- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28840+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28841+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28842 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28843 : kvm->arch.xen_hvm_config.blob_size_32;
28844 u32 page_num = data & ~PAGE_MASK;
28845@@ -2810,6 +2810,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28846 if (n < msr_list.nmsrs)
28847 goto out;
28848 r = -EFAULT;
28849+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28850+ goto out;
28851 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28852 num_msrs_to_save * sizeof(u32)))
28853 goto out;
28854@@ -5746,7 +5748,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28855 };
28856 #endif
28857
28858-int kvm_arch_init(void *opaque)
28859+int kvm_arch_init(const void *opaque)
28860 {
28861 int r;
28862 struct kvm_x86_ops *ops = opaque;
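
The added num_msrs_to_save check closes a window in which the count could exceed the msrs_to_save array before the copy_to_user(); rejecting the inconsistent state before the multiply also keeps the size computation from overflowing. The defensive shape, sketched generically:

    /* Generic sketch: validate a count against its backing array before
     * using it to size a copy to userland. */
    static int export_list(u32 __user *dst, const u32 *src,
                           unsigned int n, unsigned int capacity)
    {
        if (n > capacity)               /* reject inconsistent state */
            return -EFAULT;
        if (copy_to_user(dst, src, n * sizeof(*src)))
            return -EFAULT;
        return 0;
    }
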
28863diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28864index c1c1544..f90c9d5 100644
28865--- a/arch/x86/lguest/boot.c
28866+++ b/arch/x86/lguest/boot.c
28867@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28868 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28869 * Launcher to reboot us.
28870 */
28871-static void lguest_restart(char *reason)
28872+static __noreturn void lguest_restart(char *reason)
28873 {
28874 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
28875+ BUG();
28876 }
28877
28878 /*G:050
28879diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
28880index 00933d5..3a64af9 100644
28881--- a/arch/x86/lib/atomic64_386_32.S
28882+++ b/arch/x86/lib/atomic64_386_32.S
28883@@ -48,6 +48,10 @@ BEGIN(read)
28884 movl (v), %eax
28885 movl 4(v), %edx
28886 RET_ENDP
28887+BEGIN(read_unchecked)
28888+ movl (v), %eax
28889+ movl 4(v), %edx
28890+RET_ENDP
28891 #undef v
28892
28893 #define v %esi
28894@@ -55,6 +59,10 @@ BEGIN(set)
28895 movl %ebx, (v)
28896 movl %ecx, 4(v)
28897 RET_ENDP
28898+BEGIN(set_unchecked)
28899+ movl %ebx, (v)
28900+ movl %ecx, 4(v)
28901+RET_ENDP
28902 #undef v
28903
28904 #define v %esi
28905@@ -70,6 +78,20 @@ RET_ENDP
28906 BEGIN(add)
28907 addl %eax, (v)
28908 adcl %edx, 4(v)
28909+
28910+#ifdef CONFIG_PAX_REFCOUNT
28911+ jno 0f
28912+ subl %eax, (v)
28913+ sbbl %edx, 4(v)
28914+ int $4
28915+0:
28916+ _ASM_EXTABLE(0b, 0b)
28917+#endif
28918+
28919+RET_ENDP
28920+BEGIN(add_unchecked)
28921+ addl %eax, (v)
28922+ adcl %edx, 4(v)
28923 RET_ENDP
28924 #undef v
28925
28926@@ -77,6 +99,24 @@ RET_ENDP
28927 BEGIN(add_return)
28928 addl (v), %eax
28929 adcl 4(v), %edx
28930+
28931+#ifdef CONFIG_PAX_REFCOUNT
28932+ into
28933+1234:
28934+ _ASM_EXTABLE(1234b, 2f)
28935+#endif
28936+
28937+ movl %eax, (v)
28938+ movl %edx, 4(v)
28939+
28940+#ifdef CONFIG_PAX_REFCOUNT
28941+2:
28942+#endif
28943+
28944+RET_ENDP
28945+BEGIN(add_return_unchecked)
28946+ addl (v), %eax
28947+ adcl 4(v), %edx
28948 movl %eax, (v)
28949 movl %edx, 4(v)
28950 RET_ENDP
28951@@ -86,6 +126,20 @@ RET_ENDP
28952 BEGIN(sub)
28953 subl %eax, (v)
28954 sbbl %edx, 4(v)
28955+
28956+#ifdef CONFIG_PAX_REFCOUNT
28957+ jno 0f
28958+ addl %eax, (v)
28959+ adcl %edx, 4(v)
28960+ int $4
28961+0:
28962+ _ASM_EXTABLE(0b, 0b)
28963+#endif
28964+
28965+RET_ENDP
28966+BEGIN(sub_unchecked)
28967+ subl %eax, (v)
28968+ sbbl %edx, 4(v)
28969 RET_ENDP
28970 #undef v
28971
28972@@ -96,6 +150,27 @@ BEGIN(sub_return)
28973 sbbl $0, %edx
28974 addl (v), %eax
28975 adcl 4(v), %edx
28976+
28977+#ifdef CONFIG_PAX_REFCOUNT
28978+ into
28979+1234:
28980+ _ASM_EXTABLE(1234b, 2f)
28981+#endif
28982+
28983+ movl %eax, (v)
28984+ movl %edx, 4(v)
28985+
28986+#ifdef CONFIG_PAX_REFCOUNT
28987+2:
28988+#endif
28989+
28990+RET_ENDP
28991+BEGIN(sub_return_unchecked)
28992+ negl %edx
28993+ negl %eax
28994+ sbbl $0, %edx
28995+ addl (v), %eax
28996+ adcl 4(v), %edx
28997 movl %eax, (v)
28998 movl %edx, 4(v)
28999 RET_ENDP
29000@@ -105,6 +180,20 @@ RET_ENDP
29001 BEGIN(inc)
29002 addl $1, (v)
29003 adcl $0, 4(v)
29004+
29005+#ifdef CONFIG_PAX_REFCOUNT
29006+ jno 0f
29007+ subl $1, (v)
29008+ sbbl $0, 4(v)
29009+ int $4
29010+0:
29011+ _ASM_EXTABLE(0b, 0b)
29012+#endif
29013+
29014+RET_ENDP
29015+BEGIN(inc_unchecked)
29016+ addl $1, (v)
29017+ adcl $0, 4(v)
29018 RET_ENDP
29019 #undef v
29020
29021@@ -114,6 +203,26 @@ BEGIN(inc_return)
29022 movl 4(v), %edx
29023 addl $1, %eax
29024 adcl $0, %edx
29025+
29026+#ifdef CONFIG_PAX_REFCOUNT
29027+ into
29028+1234:
29029+ _ASM_EXTABLE(1234b, 2f)
29030+#endif
29031+
29032+ movl %eax, (v)
29033+ movl %edx, 4(v)
29034+
29035+#ifdef CONFIG_PAX_REFCOUNT
29036+2:
29037+#endif
29038+
29039+RET_ENDP
29040+BEGIN(inc_return_unchecked)
29041+ movl (v), %eax
29042+ movl 4(v), %edx
29043+ addl $1, %eax
29044+ adcl $0, %edx
29045 movl %eax, (v)
29046 movl %edx, 4(v)
29047 RET_ENDP
29048@@ -123,6 +232,20 @@ RET_ENDP
29049 BEGIN(dec)
29050 subl $1, (v)
29051 sbbl $0, 4(v)
29052+
29053+#ifdef CONFIG_PAX_REFCOUNT
29054+ jno 0f
29055+ addl $1, (v)
29056+ adcl $0, 4(v)
29057+ int $4
29058+0:
29059+ _ASM_EXTABLE(0b, 0b)
29060+#endif
29061+
29062+RET_ENDP
29063+BEGIN(dec_unchecked)
29064+ subl $1, (v)
29065+ sbbl $0, 4(v)
29066 RET_ENDP
29067 #undef v
29068
29069@@ -132,6 +255,26 @@ BEGIN(dec_return)
29070 movl 4(v), %edx
29071 subl $1, %eax
29072 sbbl $0, %edx
29073+
29074+#ifdef CONFIG_PAX_REFCOUNT
29075+ into
29076+1234:
29077+ _ASM_EXTABLE(1234b, 2f)
29078+#endif
29079+
29080+ movl %eax, (v)
29081+ movl %edx, 4(v)
29082+
29083+#ifdef CONFIG_PAX_REFCOUNT
29084+2:
29085+#endif
29086+
29087+RET_ENDP
29088+BEGIN(dec_return_unchecked)
29089+ movl (v), %eax
29090+ movl 4(v), %edx
29091+ subl $1, %eax
29092+ sbbl $0, %edx
29093 movl %eax, (v)
29094 movl %edx, 4(v)
29095 RET_ENDP
29096@@ -143,6 +286,13 @@ BEGIN(add_unless)
29097 adcl %edx, %edi
29098 addl (v), %eax
29099 adcl 4(v), %edx
29100+
29101+#ifdef CONFIG_PAX_REFCOUNT
29102+ into
29103+1234:
29104+ _ASM_EXTABLE(1234b, 2f)
29105+#endif
29106+
29107 cmpl %eax, %ecx
29108 je 3f
29109 1:
29110@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29111 1:
29112 addl $1, %eax
29113 adcl $0, %edx
29114+
29115+#ifdef CONFIG_PAX_REFCOUNT
29116+ into
29117+1234:
29118+ _ASM_EXTABLE(1234b, 2f)
29119+#endif
29120+
29121 movl %eax, (v)
29122 movl %edx, 4(v)
29123 movl $1, %eax
29124@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29125 movl 4(v), %edx
29126 subl $1, %eax
29127 sbbl $0, %edx
29128+
29129+#ifdef CONFIG_PAX_REFCOUNT
29130+ into
29131+1234:
29132+ _ASM_EXTABLE(1234b, 1f)
29133+#endif
29134+
29135 js 1f
29136 movl %eax, (v)
29137 movl %edx, 4(v)
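
All of the checked variants above follow one template: do the 64-bit add/sub, then use the overflow flag ("jno" over the in-place forms, "into" over the register forms) to detect signed wrap, undo the update, and raise #OF (int $4), which the exception table routes to the PaX refcount handler; the *_unchecked entry points keep the old semantics for counters that may legitimately wrap. Ignoring atomicity, the checked behaviour is roughly this C (a sketch using the modern GCC overflow builtin; the real code is the asm above and traps instead of calling anything):

    /* Rough, non-atomic C equivalent of the checked add. */
    static void atomic64_add_checked_sketch(long long i, long long *v)
    {
        long long res;

        if (__builtin_add_overflow(*v, i, &res))
            __builtin_trap();   /* stand-in for int $4 / #OF; *v unchanged */
        else
            *v = res;
    }
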
29138diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29139index f5cc9eb..51fa319 100644
29140--- a/arch/x86/lib/atomic64_cx8_32.S
29141+++ b/arch/x86/lib/atomic64_cx8_32.S
29142@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29143 CFI_STARTPROC
29144
29145 read64 %ecx
29146+ pax_force_retaddr
29147 ret
29148 CFI_ENDPROC
29149 ENDPROC(atomic64_read_cx8)
29150
29151+ENTRY(atomic64_read_unchecked_cx8)
29152+ CFI_STARTPROC
29153+
29154+ read64 %ecx
29155+ pax_force_retaddr
29156+ ret
29157+ CFI_ENDPROC
29158+ENDPROC(atomic64_read_unchecked_cx8)
29159+
29160 ENTRY(atomic64_set_cx8)
29161 CFI_STARTPROC
29162
29163@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29164 cmpxchg8b (%esi)
29165 jne 1b
29166
29167+ pax_force_retaddr
29168 ret
29169 CFI_ENDPROC
29170 ENDPROC(atomic64_set_cx8)
29171
29172+ENTRY(atomic64_set_unchecked_cx8)
29173+ CFI_STARTPROC
29174+
29175+1:
29176+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29177+ * are atomic on 586 and newer */
29178+ cmpxchg8b (%esi)
29179+ jne 1b
29180+
29181+ pax_force_retaddr
29182+ ret
29183+ CFI_ENDPROC
29184+ENDPROC(atomic64_set_unchecked_cx8)
29185+
29186 ENTRY(atomic64_xchg_cx8)
29187 CFI_STARTPROC
29188
29189@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29190 cmpxchg8b (%esi)
29191 jne 1b
29192
29193+ pax_force_retaddr
29194 ret
29195 CFI_ENDPROC
29196 ENDPROC(atomic64_xchg_cx8)
29197
29198-.macro addsub_return func ins insc
29199-ENTRY(atomic64_\func\()_return_cx8)
29200+.macro addsub_return func ins insc unchecked=""
29201+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29202 CFI_STARTPROC
29203 SAVE ebp
29204 SAVE ebx
29205@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29206 movl %edx, %ecx
29207 \ins\()l %esi, %ebx
29208 \insc\()l %edi, %ecx
29209+
29210+.ifb \unchecked
29211+#ifdef CONFIG_PAX_REFCOUNT
29212+ into
29213+2:
29214+ _ASM_EXTABLE(2b, 3f)
29215+#endif
29216+.endif
29217+
29218 LOCK_PREFIX
29219 cmpxchg8b (%ebp)
29220 jne 1b
29221-
29222-10:
29223 movl %ebx, %eax
29224 movl %ecx, %edx
29225+
29226+.ifb \unchecked
29227+#ifdef CONFIG_PAX_REFCOUNT
29228+3:
29229+#endif
29230+.endif
29231+
29232 RESTORE edi
29233 RESTORE esi
29234 RESTORE ebx
29235 RESTORE ebp
29236+ pax_force_retaddr
29237 ret
29238 CFI_ENDPROC
29239-ENDPROC(atomic64_\func\()_return_cx8)
29240+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29241 .endm
29242
29243 addsub_return add add adc
29244 addsub_return sub sub sbb
29245+addsub_return add add adc _unchecked
29246+addsub_return sub sub sbb _unchecked
29247
29248-.macro incdec_return func ins insc
29249-ENTRY(atomic64_\func\()_return_cx8)
29250+.macro incdec_return func ins insc unchecked=""
29251+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29252 CFI_STARTPROC
29253 SAVE ebx
29254
29255@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29256 movl %edx, %ecx
29257 \ins\()l $1, %ebx
29258 \insc\()l $0, %ecx
29259+
29260+.ifb \unchecked
29261+#ifdef CONFIG_PAX_REFCOUNT
29262+ into
29263+2:
29264+ _ASM_EXTABLE(2b, 3f)
29265+#endif
29266+.endif
29267+
29268 LOCK_PREFIX
29269 cmpxchg8b (%esi)
29270 jne 1b
29271
29272-10:
29273 movl %ebx, %eax
29274 movl %ecx, %edx
29275+
29276+.ifb \unchecked
29277+#ifdef CONFIG_PAX_REFCOUNT
29278+3:
29279+#endif
29280+.endif
29281+
29282 RESTORE ebx
29283+ pax_force_retaddr
29284 ret
29285 CFI_ENDPROC
29286-ENDPROC(atomic64_\func\()_return_cx8)
29287+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29288 .endm
29289
29290 incdec_return inc add adc
29291 incdec_return dec sub sbb
29292+incdec_return inc add adc _unchecked
29293+incdec_return dec sub sbb _unchecked
29294
29295 ENTRY(atomic64_dec_if_positive_cx8)
29296 CFI_STARTPROC
29297@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29298 movl %edx, %ecx
29299 subl $1, %ebx
29300 sbb $0, %ecx
29301+
29302+#ifdef CONFIG_PAX_REFCOUNT
29303+ into
29304+1234:
29305+ _ASM_EXTABLE(1234b, 2f)
29306+#endif
29307+
29308 js 2f
29309 LOCK_PREFIX
29310 cmpxchg8b (%esi)
29311@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29312 movl %ebx, %eax
29313 movl %ecx, %edx
29314 RESTORE ebx
29315+ pax_force_retaddr
29316 ret
29317 CFI_ENDPROC
29318 ENDPROC(atomic64_dec_if_positive_cx8)
29319@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29320 movl %edx, %ecx
29321 addl %ebp, %ebx
29322 adcl %edi, %ecx
29323+
29324+#ifdef CONFIG_PAX_REFCOUNT
29325+ into
29326+1234:
29327+ _ASM_EXTABLE(1234b, 3f)
29328+#endif
29329+
29330 LOCK_PREFIX
29331 cmpxchg8b (%esi)
29332 jne 1b
29333@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29334 CFI_ADJUST_CFA_OFFSET -8
29335 RESTORE ebx
29336 RESTORE ebp
29337+ pax_force_retaddr
29338 ret
29339 4:
29340 cmpl %edx, 4(%esp)
29341@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29342 xorl %ecx, %ecx
29343 addl $1, %ebx
29344 adcl %edx, %ecx
29345+
29346+#ifdef CONFIG_PAX_REFCOUNT
29347+ into
29348+1234:
29349+ _ASM_EXTABLE(1234b, 3f)
29350+#endif
29351+
29352 LOCK_PREFIX
29353 cmpxchg8b (%esi)
29354 jne 1b
29355@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29356 movl $1, %eax
29357 3:
29358 RESTORE ebx
29359+ pax_force_retaddr
29360 ret
29361 CFI_ENDPROC
29362 ENDPROC(atomic64_inc_not_zero_cx8)
29363diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29364index e78b8eee..7e173a8 100644
29365--- a/arch/x86/lib/checksum_32.S
29366+++ b/arch/x86/lib/checksum_32.S
29367@@ -29,7 +29,8 @@
29368 #include <asm/dwarf2.h>
29369 #include <asm/errno.h>
29370 #include <asm/asm.h>
29371-
29372+#include <asm/segment.h>
29373+
29374 /*
29375 * computes a partial checksum, e.g. for TCP/UDP fragments
29376 */
29377@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29378
29379 #define ARGBASE 16
29380 #define FP 12
29381-
29382-ENTRY(csum_partial_copy_generic)
29383+
29384+ENTRY(csum_partial_copy_generic_to_user)
29385 CFI_STARTPROC
29386+
29387+#ifdef CONFIG_PAX_MEMORY_UDEREF
29388+ pushl_cfi %gs
29389+ popl_cfi %es
29390+ jmp csum_partial_copy_generic
29391+#endif
29392+
29393+ENTRY(csum_partial_copy_generic_from_user)
29394+
29395+#ifdef CONFIG_PAX_MEMORY_UDEREF
29396+ pushl_cfi %gs
29397+ popl_cfi %ds
29398+#endif
29399+
29400+ENTRY(csum_partial_copy_generic)
29401 subl $4,%esp
29402 CFI_ADJUST_CFA_OFFSET 4
29403 pushl_cfi %edi
29404@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29405 jmp 4f
29406 SRC(1: movw (%esi), %bx )
29407 addl $2, %esi
29408-DST( movw %bx, (%edi) )
29409+DST( movw %bx, %es:(%edi) )
29410 addl $2, %edi
29411 addw %bx, %ax
29412 adcl $0, %eax
29413@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29414 SRC(1: movl (%esi), %ebx )
29415 SRC( movl 4(%esi), %edx )
29416 adcl %ebx, %eax
29417-DST( movl %ebx, (%edi) )
29418+DST( movl %ebx, %es:(%edi) )
29419 adcl %edx, %eax
29420-DST( movl %edx, 4(%edi) )
29421+DST( movl %edx, %es:4(%edi) )
29422
29423 SRC( movl 8(%esi), %ebx )
29424 SRC( movl 12(%esi), %edx )
29425 adcl %ebx, %eax
29426-DST( movl %ebx, 8(%edi) )
29427+DST( movl %ebx, %es:8(%edi) )
29428 adcl %edx, %eax
29429-DST( movl %edx, 12(%edi) )
29430+DST( movl %edx, %es:12(%edi) )
29431
29432 SRC( movl 16(%esi), %ebx )
29433 SRC( movl 20(%esi), %edx )
29434 adcl %ebx, %eax
29435-DST( movl %ebx, 16(%edi) )
29436+DST( movl %ebx, %es:16(%edi) )
29437 adcl %edx, %eax
29438-DST( movl %edx, 20(%edi) )
29439+DST( movl %edx, %es:20(%edi) )
29440
29441 SRC( movl 24(%esi), %ebx )
29442 SRC( movl 28(%esi), %edx )
29443 adcl %ebx, %eax
29444-DST( movl %ebx, 24(%edi) )
29445+DST( movl %ebx, %es:24(%edi) )
29446 adcl %edx, %eax
29447-DST( movl %edx, 28(%edi) )
29448+DST( movl %edx, %es:28(%edi) )
29449
29450 lea 32(%esi), %esi
29451 lea 32(%edi), %edi
29452@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29453 shrl $2, %edx # This clears CF
29454 SRC(3: movl (%esi), %ebx )
29455 adcl %ebx, %eax
29456-DST( movl %ebx, (%edi) )
29457+DST( movl %ebx, %es:(%edi) )
29458 lea 4(%esi), %esi
29459 lea 4(%edi), %edi
29460 dec %edx
29461@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29462 jb 5f
29463 SRC( movw (%esi), %cx )
29464 leal 2(%esi), %esi
29465-DST( movw %cx, (%edi) )
29466+DST( movw %cx, %es:(%edi) )
29467 leal 2(%edi), %edi
29468 je 6f
29469 shll $16,%ecx
29470 SRC(5: movb (%esi), %cl )
29471-DST( movb %cl, (%edi) )
29472+DST( movb %cl, %es:(%edi) )
29473 6: addl %ecx, %eax
29474 adcl $0, %eax
29475 7:
29476@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29477
29478 6001:
29479 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29480- movl $-EFAULT, (%ebx)
29481+ movl $-EFAULT, %ss:(%ebx)
29482
29483 # zero the complete destination - computing the rest
29484 # is too much work
29485@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29486
29487 6002:
29488 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29489- movl $-EFAULT,(%ebx)
29490+ movl $-EFAULT,%ss:(%ebx)
29491 jmp 5000b
29492
29493 .previous
29494
29495+ pushl_cfi %ss
29496+ popl_cfi %ds
29497+ pushl_cfi %ss
29498+ popl_cfi %es
29499 popl_cfi %ebx
29500 CFI_RESTORE ebx
29501 popl_cfi %esi
29502@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29503 popl_cfi %ecx # equivalent to addl $4,%esp
29504 ret
29505 CFI_ENDPROC
29506-ENDPROC(csum_partial_copy_generic)
29507+ENDPROC(csum_partial_copy_generic_to_user)
29508
29509 #else
29510
29511 /* Version for PentiumII/PPro */
29512
29513 #define ROUND1(x) \
29514+ nop; nop; nop; \
29515 SRC(movl x(%esi), %ebx ) ; \
29516 addl %ebx, %eax ; \
29517- DST(movl %ebx, x(%edi) ) ;
29518+ DST(movl %ebx, %es:x(%edi)) ;
29519
29520 #define ROUND(x) \
29521+ nop; nop; nop; \
29522 SRC(movl x(%esi), %ebx ) ; \
29523 adcl %ebx, %eax ; \
29524- DST(movl %ebx, x(%edi) ) ;
29525+ DST(movl %ebx, %es:x(%edi)) ;
29526
29527 #define ARGBASE 12
29528-
29529-ENTRY(csum_partial_copy_generic)
29530+
29531+ENTRY(csum_partial_copy_generic_to_user)
29532 CFI_STARTPROC
29533+
29534+#ifdef CONFIG_PAX_MEMORY_UDEREF
29535+ pushl_cfi %gs
29536+ popl_cfi %es
29537+ jmp csum_partial_copy_generic
29538+#endif
29539+
29540+ENTRY(csum_partial_copy_generic_from_user)
29541+
29542+#ifdef CONFIG_PAX_MEMORY_UDEREF
29543+ pushl_cfi %gs
29544+ popl_cfi %ds
29545+#endif
29546+
29547+ENTRY(csum_partial_copy_generic)
29548 pushl_cfi %ebx
29549 CFI_REL_OFFSET ebx, 0
29550 pushl_cfi %edi
29551@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29552 subl %ebx, %edi
29553 lea -1(%esi),%edx
29554 andl $-32,%edx
29555- lea 3f(%ebx,%ebx), %ebx
29556+ lea 3f(%ebx,%ebx,2), %ebx
29557 testl %esi, %esi
29558 jmp *%ebx
29559 1: addl $64,%esi
29560@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29561 jb 5f
29562 SRC( movw (%esi), %dx )
29563 leal 2(%esi), %esi
29564-DST( movw %dx, (%edi) )
29565+DST( movw %dx, %es:(%edi) )
29566 leal 2(%edi), %edi
29567 je 6f
29568 shll $16,%edx
29569 5:
29570 SRC( movb (%esi), %dl )
29571-DST( movb %dl, (%edi) )
29572+DST( movb %dl, %es:(%edi) )
29573 6: addl %edx, %eax
29574 adcl $0, %eax
29575 7:
29576 .section .fixup, "ax"
29577 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29578- movl $-EFAULT, (%ebx)
29579+ movl $-EFAULT, %ss:(%ebx)
29580 # zero the complete destination (computing the rest is too much work)
29581 movl ARGBASE+8(%esp),%edi # dst
29582 movl ARGBASE+12(%esp),%ecx # len
29583@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29584 rep; stosb
29585 jmp 7b
29586 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29587- movl $-EFAULT, (%ebx)
29588+ movl $-EFAULT, %ss:(%ebx)
29589 jmp 7b
29590 .previous
29591
29592+#ifdef CONFIG_PAX_MEMORY_UDEREF
29593+ pushl_cfi %ss
29594+ popl_cfi %ds
29595+ pushl_cfi %ss
29596+ popl_cfi %es
29597+#endif
29598+
29599 popl_cfi %esi
29600 CFI_RESTORE esi
29601 popl_cfi %edi
29602@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29603 CFI_RESTORE ebx
29604 ret
29605 CFI_ENDPROC
29606-ENDPROC(csum_partial_copy_generic)
29607+ENDPROC(csum_partial_copy_generic_to_user)
29608
29609 #undef ROUND
29610 #undef ROUND1
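
The new _to_user/_from_user entry points encode the i386 UDEREF model: userland is only reachable through a dedicated segment, so the copy loads %gs into %es (for destination writes) or %ds (for source reads) for the duration of the loop, error-pointer stores go through %ss: so they hit kernel data regardless of %ds, and the exit and fixup paths restore the kernel segments from %ss. Conceptually, each user access becomes a segment-prefixed one:

    /* Conceptual sketch of a %gs-relative userland load under UDEREF on
     * i386; illustrative, not the kernel's actual accessor. */
    static inline unsigned char uderef_read_u8(const void __user *p)
    {
        unsigned int v;

        asm volatile("movzbl %%gs:(%1), %0" : "=r" (v) : "r" (p));
        return (unsigned char)v;
    }
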
29611diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29612index f2145cf..cea889d 100644
29613--- a/arch/x86/lib/clear_page_64.S
29614+++ b/arch/x86/lib/clear_page_64.S
29615@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29616 movl $4096/8,%ecx
29617 xorl %eax,%eax
29618 rep stosq
29619+ pax_force_retaddr
29620 ret
29621 CFI_ENDPROC
29622 ENDPROC(clear_page_c)
29623@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29624 movl $4096,%ecx
29625 xorl %eax,%eax
29626 rep stosb
29627+ pax_force_retaddr
29628 ret
29629 CFI_ENDPROC
29630 ENDPROC(clear_page_c_e)
29631@@ -43,6 +45,7 @@ ENTRY(clear_page)
29632 leaq 64(%rdi),%rdi
29633 jnz .Lloop
29634 nop
29635+ pax_force_retaddr
29636 ret
29637 CFI_ENDPROC
29638 .Lclear_page_end:
29639@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29640
29641 #include <asm/cpufeature.h>
29642
29643- .section .altinstr_replacement,"ax"
29644+ .section .altinstr_replacement,"a"
29645 1: .byte 0xeb /* jmp <disp8> */
29646 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29647 2: .byte 0xeb /* jmp <disp8> */
29648diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29649index 40a1725..5d12ac4 100644
29650--- a/arch/x86/lib/cmpxchg16b_emu.S
29651+++ b/arch/x86/lib/cmpxchg16b_emu.S
29652@@ -8,6 +8,7 @@
29653 #include <linux/linkage.h>
29654 #include <asm/dwarf2.h>
29655 #include <asm/percpu.h>
29656+#include <asm/alternative-asm.h>
29657
29658 .text
29659
29660@@ -46,12 +47,14 @@ CFI_STARTPROC
29661 CFI_REMEMBER_STATE
29662 popfq_cfi
29663 mov $1, %al
29664+ pax_force_retaddr
29665 ret
29666
29667 CFI_RESTORE_STATE
29668 .Lnot_same:
29669 popfq_cfi
29670 xor %al,%al
29671+ pax_force_retaddr
29672 ret
29673
29674 CFI_ENDPROC
29675diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29676index 176cca6..e0d658e 100644
29677--- a/arch/x86/lib/copy_page_64.S
29678+++ b/arch/x86/lib/copy_page_64.S
29679@@ -9,6 +9,7 @@ copy_page_rep:
29680 CFI_STARTPROC
29681 movl $4096/8, %ecx
29682 rep movsq
29683+ pax_force_retaddr
29684 ret
29685 CFI_ENDPROC
29686 ENDPROC(copy_page_rep)
29687@@ -24,8 +25,8 @@ ENTRY(copy_page)
29688 CFI_ADJUST_CFA_OFFSET 2*8
29689 movq %rbx, (%rsp)
29690 CFI_REL_OFFSET rbx, 0
29691- movq %r12, 1*8(%rsp)
29692- CFI_REL_OFFSET r12, 1*8
29693+ movq %r13, 1*8(%rsp)
29694+ CFI_REL_OFFSET r13, 1*8
29695
29696 movl $(4096/64)-5, %ecx
29697 .p2align 4
29698@@ -38,7 +39,7 @@ ENTRY(copy_page)
29699 movq 0x8*4(%rsi), %r9
29700 movq 0x8*5(%rsi), %r10
29701 movq 0x8*6(%rsi), %r11
29702- movq 0x8*7(%rsi), %r12
29703+ movq 0x8*7(%rsi), %r13
29704
29705 prefetcht0 5*64(%rsi)
29706
29707@@ -49,7 +50,7 @@ ENTRY(copy_page)
29708 movq %r9, 0x8*4(%rdi)
29709 movq %r10, 0x8*5(%rdi)
29710 movq %r11, 0x8*6(%rdi)
29711- movq %r12, 0x8*7(%rdi)
29712+ movq %r13, 0x8*7(%rdi)
29713
29714 leaq 64 (%rsi), %rsi
29715 leaq 64 (%rdi), %rdi
29716@@ -68,7 +69,7 @@ ENTRY(copy_page)
29717 movq 0x8*4(%rsi), %r9
29718 movq 0x8*5(%rsi), %r10
29719 movq 0x8*6(%rsi), %r11
29720- movq 0x8*7(%rsi), %r12
29721+ movq 0x8*7(%rsi), %r13
29722
29723 movq %rax, 0x8*0(%rdi)
29724 movq %rbx, 0x8*1(%rdi)
29725@@ -77,7 +78,7 @@ ENTRY(copy_page)
29726 movq %r9, 0x8*4(%rdi)
29727 movq %r10, 0x8*5(%rdi)
29728 movq %r11, 0x8*6(%rdi)
29729- movq %r12, 0x8*7(%rdi)
29730+ movq %r13, 0x8*7(%rdi)
29731
29732 leaq 64(%rdi), %rdi
29733 leaq 64(%rsi), %rsi
29734@@ -85,10 +86,11 @@ ENTRY(copy_page)
29735
29736 movq (%rsp), %rbx
29737 CFI_RESTORE rbx
29738- movq 1*8(%rsp), %r12
29739- CFI_RESTORE r12
29740+ movq 1*8(%rsp), %r13
29741+ CFI_RESTORE r13
29742 addq $2*8, %rsp
29743 CFI_ADJUST_CFA_OFFSET -2*8
29744+ pax_force_retaddr
29745 ret
29746 .Lcopy_page_end:
29747 CFI_ENDPROC
29748@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29749
29750 #include <asm/cpufeature.h>
29751
29752- .section .altinstr_replacement,"ax"
29753+ .section .altinstr_replacement,"a"
29754 1: .byte 0xeb /* jmp <disp8> */
29755 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29756 2:
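
The %r12 -> %r13 rename above (and the %r12 -> %r15 rename in csum-copy_64.S below) is not cosmetic: under the KERNEXEC gcc-plugin "or" method, %r12 is reserved to hold a mask that pax_force_retaddr ORs into the saved return address before every ret, so hand-written assembly must not clobber it. A heavily hedged conceptual sketch — the exact mask and mechanism vary by configuration and are emitted by the plugin:

    /* Assumption-flagged concept: OR a kernel-space mask (kept in the
     * reserved %r12 under the "or" method) into a saved return address,
     * so a corrupted slot still points into the kernel half. */
    static inline unsigned long force_retaddr(unsigned long ret,
                                              unsigned long mask)
    {
        return ret | mask;   /* e.g. a mask with the sign bit set */
    }
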
29757diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29758index dee945d..a84067b 100644
29759--- a/arch/x86/lib/copy_user_64.S
29760+++ b/arch/x86/lib/copy_user_64.S
29761@@ -18,31 +18,7 @@
29762 #include <asm/alternative-asm.h>
29763 #include <asm/asm.h>
29764 #include <asm/smap.h>
29765-
29766-/*
29767- * By placing feature2 after feature1 in altinstructions section, we logically
29768- * implement:
29769- * If CPU has feature2, jmp to alt2 is used
29770- * else if CPU has feature1, jmp to alt1 is used
29771- * else jmp to orig is used.
29772- */
29773- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29774-0:
29775- .byte 0xe9 /* 32bit jump */
29776- .long \orig-1f /* by default jump to orig */
29777-1:
29778- .section .altinstr_replacement,"ax"
29779-2: .byte 0xe9 /* near jump with 32bit immediate */
29780- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29781-3: .byte 0xe9 /* near jump with 32bit immediate */
29782- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29783- .previous
29784-
29785- .section .altinstructions,"a"
29786- altinstruction_entry 0b,2b,\feature1,5,5
29787- altinstruction_entry 0b,3b,\feature2,5,5
29788- .previous
29789- .endm
29790+#include <asm/pgtable.h>
29791
29792 .macro ALIGN_DESTINATION
29793 #ifdef FIX_ALIGNMENT
29794@@ -70,52 +46,6 @@
29795 #endif
29796 .endm
29797
29798-/* Standard copy_to_user with segment limit checking */
29799-ENTRY(_copy_to_user)
29800- CFI_STARTPROC
29801- GET_THREAD_INFO(%rax)
29802- movq %rdi,%rcx
29803- addq %rdx,%rcx
29804- jc bad_to_user
29805- cmpq TI_addr_limit(%rax),%rcx
29806- ja bad_to_user
29807- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29808- copy_user_generic_unrolled,copy_user_generic_string, \
29809- copy_user_enhanced_fast_string
29810- CFI_ENDPROC
29811-ENDPROC(_copy_to_user)
29812-
29813-/* Standard copy_from_user with segment limit checking */
29814-ENTRY(_copy_from_user)
29815- CFI_STARTPROC
29816- GET_THREAD_INFO(%rax)
29817- movq %rsi,%rcx
29818- addq %rdx,%rcx
29819- jc bad_from_user
29820- cmpq TI_addr_limit(%rax),%rcx
29821- ja bad_from_user
29822- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29823- copy_user_generic_unrolled,copy_user_generic_string, \
29824- copy_user_enhanced_fast_string
29825- CFI_ENDPROC
29826-ENDPROC(_copy_from_user)
29827-
29828- .section .fixup,"ax"
29829- /* must zero dest */
29830-ENTRY(bad_from_user)
29831-bad_from_user:
29832- CFI_STARTPROC
29833- movl %edx,%ecx
29834- xorl %eax,%eax
29835- rep
29836- stosb
29837-bad_to_user:
29838- movl %edx,%eax
29839- ret
29840- CFI_ENDPROC
29841-ENDPROC(bad_from_user)
29842- .previous
29843-
29844 /*
29845 * copy_user_generic_unrolled - memory copy with exception handling.
29846 * This version is for CPUs like P4 that don't have efficient micro
29847@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29848 */
29849 ENTRY(copy_user_generic_unrolled)
29850 CFI_STARTPROC
29851+ ASM_PAX_OPEN_USERLAND
29852 ASM_STAC
29853 cmpl $8,%edx
29854 	jb 20f /* less than 8 bytes, go to byte copy loop */
29855@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29856 jnz 21b
29857 23: xor %eax,%eax
29858 ASM_CLAC
29859+ ASM_PAX_CLOSE_USERLAND
29860+ pax_force_retaddr
29861 ret
29862
29863 .section .fixup,"ax"
29864@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29865 */
29866 ENTRY(copy_user_generic_string)
29867 CFI_STARTPROC
29868+ ASM_PAX_OPEN_USERLAND
29869 ASM_STAC
29870 cmpl $8,%edx
29871 jb 2f /* less than 8 bytes, go to byte copy loop */
29872@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
29873 movsb
29874 xorl %eax,%eax
29875 ASM_CLAC
29876+ ASM_PAX_CLOSE_USERLAND
29877+ pax_force_retaddr
29878 ret
29879
29880 .section .fixup,"ax"
29881@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
29882 */
29883 ENTRY(copy_user_enhanced_fast_string)
29884 CFI_STARTPROC
29885+ ASM_PAX_OPEN_USERLAND
29886 ASM_STAC
29887 movl %edx,%ecx
29888 1: rep
29889 movsb
29890 xorl %eax,%eax
29891 ASM_CLAC
29892+ ASM_PAX_CLOSE_USERLAND
29893+ pax_force_retaddr
29894 ret
29895
29896 .section .fixup,"ax"
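
With the assembly _copy_to_user/_copy_from_user entry points (and their bad_*_user fixups) deleted, the range checking moves into C, where the patch can also fold in its usercopy object checks. A reduced sketch of the replacement shape, omitting the PaX object-size checks for brevity:

    /* Sketch of a C-level _copy_to_user replacing the removed asm entry
     * point; the real patch additionally runs usercopy object checks. */
    static unsigned long _copy_to_user_sketch(void __user *to,
                                              const void *from,
                                              unsigned long n)
    {
        if (access_ok(VERIFY_WRITE, to, n))
            n = __copy_to_user(to, from, n); /* returns bytes NOT copied */
        return n;
    }
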
29897diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
29898index 6a4f43c..c70fb52 100644
29899--- a/arch/x86/lib/copy_user_nocache_64.S
29900+++ b/arch/x86/lib/copy_user_nocache_64.S
29901@@ -8,6 +8,7 @@
29902
29903 #include <linux/linkage.h>
29904 #include <asm/dwarf2.h>
29905+#include <asm/alternative-asm.h>
29906
29907 #define FIX_ALIGNMENT 1
29908
29909@@ -16,6 +17,7 @@
29910 #include <asm/thread_info.h>
29911 #include <asm/asm.h>
29912 #include <asm/smap.h>
29913+#include <asm/pgtable.h>
29914
29915 .macro ALIGN_DESTINATION
29916 #ifdef FIX_ALIGNMENT
29917@@ -49,6 +51,16 @@
29918 */
29919 ENTRY(__copy_user_nocache)
29920 CFI_STARTPROC
29921+
29922+#ifdef CONFIG_PAX_MEMORY_UDEREF
29923+ mov pax_user_shadow_base,%rcx
29924+ cmp %rcx,%rsi
29925+ jae 1f
29926+ add %rcx,%rsi
29927+1:
29928+#endif
29929+
29930+ ASM_PAX_OPEN_USERLAND
29931 ASM_STAC
29932 cmpl $8,%edx
29933 	jb 20f /* less than 8 bytes, go to byte copy loop */
29934@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
29935 jnz 21b
29936 23: xorl %eax,%eax
29937 ASM_CLAC
29938+ ASM_PAX_CLOSE_USERLAND
29939 sfence
29940+ pax_force_retaddr
29941 ret
29942
29943 .section .fixup,"ax"
29944diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
29945index 2419d5f..fe52d0e 100644
29946--- a/arch/x86/lib/csum-copy_64.S
29947+++ b/arch/x86/lib/csum-copy_64.S
29948@@ -9,6 +9,7 @@
29949 #include <asm/dwarf2.h>
29950 #include <asm/errno.h>
29951 #include <asm/asm.h>
29952+#include <asm/alternative-asm.h>
29953
29954 /*
29955 * Checksum copy with exception handling.
29956@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
29957 CFI_ADJUST_CFA_OFFSET 7*8
29958 movq %rbx, 2*8(%rsp)
29959 CFI_REL_OFFSET rbx, 2*8
29960- movq %r12, 3*8(%rsp)
29961- CFI_REL_OFFSET r12, 3*8
29962+ movq %r15, 3*8(%rsp)
29963+ CFI_REL_OFFSET r15, 3*8
29964 movq %r14, 4*8(%rsp)
29965 CFI_REL_OFFSET r14, 4*8
29966 movq %r13, 5*8(%rsp)
29967@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
29968 movl %edx, %ecx
29969
29970 xorl %r9d, %r9d
29971- movq %rcx, %r12
29972+ movq %rcx, %r15
29973
29974- shrq $6, %r12
29975+ shrq $6, %r15
29976 jz .Lhandle_tail /* < 64 */
29977
29978 clc
29979
29980 /* main loop. clear in 64 byte blocks */
29981 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
29982- /* r11: temp3, rdx: temp4, r12 loopcnt */
29983+ /* r11: temp3, rdx: temp4, r15 loopcnt */
29984 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
29985 .p2align 4
29986 .Lloop:
29987@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
29988 adcq %r14, %rax
29989 adcq %r13, %rax
29990
29991- decl %r12d
29992+ decl %r15d
29993
29994 dest
29995 movq %rbx, (%rsi)
29996@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
29997 .Lende:
29998 movq 2*8(%rsp), %rbx
29999 CFI_RESTORE rbx
30000- movq 3*8(%rsp), %r12
30001- CFI_RESTORE r12
30002+ movq 3*8(%rsp), %r15
30003+ CFI_RESTORE r15
30004 movq 4*8(%rsp), %r14
30005 CFI_RESTORE r14
30006 movq 5*8(%rsp), %r13
30007@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30008 CFI_RESTORE rbp
30009 addq $7*8, %rsp
30010 CFI_ADJUST_CFA_OFFSET -7*8
30011+ pax_force_retaddr
30012 ret
30013 CFI_RESTORE_STATE
30014
30015diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30016index 1318f75..44c30fd 100644
30017--- a/arch/x86/lib/csum-wrappers_64.c
30018+++ b/arch/x86/lib/csum-wrappers_64.c
30019@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30020 len -= 2;
30021 }
30022 }
30023+ pax_open_userland();
30024 stac();
30025- isum = csum_partial_copy_generic((__force const void *)src,
30026+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30027 dst, len, isum, errp, NULL);
30028 clac();
30029+ pax_close_userland();
30030 if (unlikely(*errp))
30031 goto out_err;
30032
30033@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30034 }
30035
30036 *errp = 0;
30037+ pax_open_userland();
30038 stac();
30039- ret = csum_partial_copy_generic(src, (void __force *)dst,
30040+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30041 len, isum, NULL, errp);
30042 clac();
30043+ pax_close_userland();
30044 return ret;
30045 }
30046 EXPORT_SYMBOL(csum_partial_copy_to_user);
30047diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30048index a451235..1daa956 100644
30049--- a/arch/x86/lib/getuser.S
30050+++ b/arch/x86/lib/getuser.S
30051@@ -33,17 +33,40 @@
30052 #include <asm/thread_info.h>
30053 #include <asm/asm.h>
30054 #include <asm/smap.h>
30055+#include <asm/segment.h>
30056+#include <asm/pgtable.h>
30057+#include <asm/alternative-asm.h>
30058+
30059+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30060+#define __copyuser_seg gs;
30061+#else
30062+#define __copyuser_seg
30063+#endif
30064
30065 .text
30066 ENTRY(__get_user_1)
30067 CFI_STARTPROC
30068+
30069+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30070 GET_THREAD_INFO(%_ASM_DX)
30071 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30072 jae bad_get_user
30073 ASM_STAC
30074-1: movzbl (%_ASM_AX),%edx
30075+
30076+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30077+ mov pax_user_shadow_base,%_ASM_DX
30078+ cmp %_ASM_DX,%_ASM_AX
30079+ jae 1234f
30080+ add %_ASM_DX,%_ASM_AX
30081+1234:
30082+#endif
30083+
30084+#endif
30085+
30086+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30087 xor %eax,%eax
30088 ASM_CLAC
30089+ pax_force_retaddr
30090 ret
30091 CFI_ENDPROC
30092 ENDPROC(__get_user_1)
30093@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30094 ENTRY(__get_user_2)
30095 CFI_STARTPROC
30096 add $1,%_ASM_AX
30097+
30098+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30099 jc bad_get_user
30100 GET_THREAD_INFO(%_ASM_DX)
30101 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30102 jae bad_get_user
30103 ASM_STAC
30104-2: movzwl -1(%_ASM_AX),%edx
30105+
30106+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30107+ mov pax_user_shadow_base,%_ASM_DX
30108+ cmp %_ASM_DX,%_ASM_AX
30109+ jae 1234f
30110+ add %_ASM_DX,%_ASM_AX
30111+1234:
30112+#endif
30113+
30114+#endif
30115+
30116+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30117 xor %eax,%eax
30118 ASM_CLAC
30119+ pax_force_retaddr
30120 ret
30121 CFI_ENDPROC
30122 ENDPROC(__get_user_2)
30123@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30124 ENTRY(__get_user_4)
30125 CFI_STARTPROC
30126 add $3,%_ASM_AX
30127+
30128+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30129 jc bad_get_user
30130 GET_THREAD_INFO(%_ASM_DX)
30131 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30132 jae bad_get_user
30133 ASM_STAC
30134-3: movl -3(%_ASM_AX),%edx
30135+
30136+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30137+ mov pax_user_shadow_base,%_ASM_DX
30138+ cmp %_ASM_DX,%_ASM_AX
30139+ jae 1234f
30140+ add %_ASM_DX,%_ASM_AX
30141+1234:
30142+#endif
30143+
30144+#endif
30145+
30146+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30147 xor %eax,%eax
30148 ASM_CLAC
30149+ pax_force_retaddr
30150 ret
30151 CFI_ENDPROC
30152 ENDPROC(__get_user_4)
30153@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30154 GET_THREAD_INFO(%_ASM_DX)
30155 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30156 jae bad_get_user
30157+
30158+#ifdef CONFIG_PAX_MEMORY_UDEREF
30159+ mov pax_user_shadow_base,%_ASM_DX
30160+ cmp %_ASM_DX,%_ASM_AX
30161+ jae 1234f
30162+ add %_ASM_DX,%_ASM_AX
30163+1234:
30164+#endif
30165+
30166 ASM_STAC
30167 4: movq -7(%_ASM_AX),%rdx
30168 xor %eax,%eax
30169 ASM_CLAC
30170+ pax_force_retaddr
30171 ret
30172 #else
30173 add $7,%_ASM_AX
30174@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30175 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30176 jae bad_get_user_8
30177 ASM_STAC
30178-4: movl -7(%_ASM_AX),%edx
30179-5: movl -3(%_ASM_AX),%ecx
30180+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30181+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30182 xor %eax,%eax
30183 ASM_CLAC
30184+ pax_force_retaddr
30185 ret
30186 #endif
30187 CFI_ENDPROC
30188@@ -113,6 +175,7 @@ bad_get_user:
30189 xor %edx,%edx
30190 mov $(-EFAULT),%_ASM_AX
30191 ASM_CLAC
30192+ pax_force_retaddr
30193 ret
30194 CFI_ENDPROC
30195 END(bad_get_user)
30196@@ -124,6 +187,7 @@ bad_get_user_8:
30197 xor %ecx,%ecx
30198 mov $(-EFAULT),%_ASM_AX
30199 ASM_CLAC
30200+ pax_force_retaddr
30201 ret
30202 CFI_ENDPROC
30203 END(bad_get_user_8)
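
The pax_user_shadow_base compare/add pairs above implement UDEREF on amd64: a raw user pointer below the shadow base is rebased into a shadow mapping of userland before the actual load, so an unguarded dereference of a user address from kernel context faults instead of silently reading user memory. The remap, rendered as plain C:

    /* C rendering of the asm remap above (sketch). */
    extern unsigned long pax_user_shadow_base;

    static inline unsigned long uderef_remap(unsigned long addr)
    {
        if (addr < pax_user_shadow_base)
            addr += pax_user_shadow_base;  /* rebase into the shadow map */
        return addr;
    }
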
30204diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30205index 1313ae6..84f25ea 100644
30206--- a/arch/x86/lib/insn.c
30207+++ b/arch/x86/lib/insn.c
30208@@ -20,8 +20,10 @@
30209
30210 #ifdef __KERNEL__
30211 #include <linux/string.h>
30212+#include <asm/pgtable_types.h>
30213 #else
30214 #include <string.h>
30215+#define ktla_ktva(addr) addr
30216 #endif
30217 #include <asm/inat.h>
30218 #include <asm/insn.h>
30219@@ -53,9 +55,9 @@
30220 void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30221 {
30222 memset(insn, 0, sizeof(*insn));
30223- insn->kaddr = kaddr;
30224- insn->end_kaddr = kaddr + buf_len;
30225- insn->next_byte = kaddr;
30226+ insn->kaddr = ktla_ktva(kaddr);
30227+ insn->end_kaddr = insn->kaddr + buf_len;
30228+ insn->next_byte = insn->kaddr;
30229 insn->x86_64 = x86_64 ? 1 : 0;
30230 insn->opnd_bytes = 4;
30231 if (x86_64)
30232diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30233index 05a95e7..326f2fa 100644
30234--- a/arch/x86/lib/iomap_copy_64.S
30235+++ b/arch/x86/lib/iomap_copy_64.S
30236@@ -17,6 +17,7 @@
30237
30238 #include <linux/linkage.h>
30239 #include <asm/dwarf2.h>
30240+#include <asm/alternative-asm.h>
30241
30242 /*
30243 * override generic version in lib/iomap_copy.c
30244@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30245 CFI_STARTPROC
30246 movl %edx,%ecx
30247 rep movsd
30248+ pax_force_retaddr
30249 ret
30250 CFI_ENDPROC
30251 ENDPROC(__iowrite32_copy)
30252diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30253index 56313a3..0db417e 100644
30254--- a/arch/x86/lib/memcpy_64.S
30255+++ b/arch/x86/lib/memcpy_64.S
30256@@ -24,7 +24,7 @@
30257 * This gets patched over the unrolled variant (below) via the
30258 * alternative instructions framework:
30259 */
30260- .section .altinstr_replacement, "ax", @progbits
30261+ .section .altinstr_replacement, "a", @progbits
30262 .Lmemcpy_c:
30263 movq %rdi, %rax
30264 movq %rdx, %rcx
30265@@ -33,6 +33,7 @@
30266 rep movsq
30267 movl %edx, %ecx
30268 rep movsb
30269+ pax_force_retaddr
30270 ret
30271 .Lmemcpy_e:
30272 .previous
30273@@ -44,11 +45,12 @@
30274 * This gets patched over the unrolled variant (below) via the
30275 * alternative instructions framework:
30276 */
30277- .section .altinstr_replacement, "ax", @progbits
30278+ .section .altinstr_replacement, "a", @progbits
30279 .Lmemcpy_c_e:
30280 movq %rdi, %rax
30281 movq %rdx, %rcx
30282 rep movsb
30283+ pax_force_retaddr
30284 ret
30285 .Lmemcpy_e_e:
30286 .previous
30287@@ -136,6 +138,7 @@ ENTRY(memcpy)
30288 movq %r9, 1*8(%rdi)
30289 movq %r10, -2*8(%rdi, %rdx)
30290 movq %r11, -1*8(%rdi, %rdx)
30291+ pax_force_retaddr
30292 retq
30293 .p2align 4
30294 .Lless_16bytes:
30295@@ -148,6 +151,7 @@ ENTRY(memcpy)
30296 movq -1*8(%rsi, %rdx), %r9
30297 movq %r8, 0*8(%rdi)
30298 movq %r9, -1*8(%rdi, %rdx)
30299+ pax_force_retaddr
30300 retq
30301 .p2align 4
30302 .Lless_8bytes:
30303@@ -161,6 +165,7 @@ ENTRY(memcpy)
30304 movl -4(%rsi, %rdx), %r8d
30305 movl %ecx, (%rdi)
30306 movl %r8d, -4(%rdi, %rdx)
30307+ pax_force_retaddr
30308 retq
30309 .p2align 4
30310 .Lless_3bytes:
30311@@ -179,6 +184,7 @@ ENTRY(memcpy)
30312 movb %cl, (%rdi)
30313
30314 .Lend:
30315+ pax_force_retaddr
30316 retq
30317 CFI_ENDPROC
30318 ENDPROC(memcpy)
30319diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30320index 65268a6..dd1de11 100644
30321--- a/arch/x86/lib/memmove_64.S
30322+++ b/arch/x86/lib/memmove_64.S
30323@@ -202,14 +202,16 @@ ENTRY(memmove)
30324 movb (%rsi), %r11b
30325 movb %r11b, (%rdi)
30326 13:
30327+ pax_force_retaddr
30328 retq
30329 CFI_ENDPROC
30330
30331- .section .altinstr_replacement,"ax"
30332+ .section .altinstr_replacement,"a"
30333 .Lmemmove_begin_forward_efs:
30334 /* Forward moving data. */
30335 movq %rdx, %rcx
30336 rep movsb
30337+ pax_force_retaddr
30338 retq
30339 .Lmemmove_end_forward_efs:
30340 .previous
30341diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30342index 2dcb380..2eb79fe 100644
30343--- a/arch/x86/lib/memset_64.S
30344+++ b/arch/x86/lib/memset_64.S
30345@@ -16,7 +16,7 @@
30346 *
30347 * rax original destination
30348 */
30349- .section .altinstr_replacement, "ax", @progbits
30350+ .section .altinstr_replacement, "a", @progbits
30351 .Lmemset_c:
30352 movq %rdi,%r9
30353 movq %rdx,%rcx
30354@@ -30,6 +30,7 @@
30355 movl %edx,%ecx
30356 rep stosb
30357 movq %r9,%rax
30358+ pax_force_retaddr
30359 ret
30360 .Lmemset_e:
30361 .previous
30362@@ -45,13 +46,14 @@
30363 *
30364 * rax original destination
30365 */
30366- .section .altinstr_replacement, "ax", @progbits
30367+ .section .altinstr_replacement, "a", @progbits
30368 .Lmemset_c_e:
30369 movq %rdi,%r9
30370 movb %sil,%al
30371 movq %rdx,%rcx
30372 rep stosb
30373 movq %r9,%rax
30374+ pax_force_retaddr
30375 ret
30376 .Lmemset_e_e:
30377 .previous
30378@@ -118,6 +120,7 @@ ENTRY(__memset)
30379
30380 .Lende:
30381 movq %r10,%rax
30382+ pax_force_retaddr
30383 ret
30384
30385 CFI_RESTORE_STATE
30386diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30387index c9f2d9b..e7fd2c0 100644
30388--- a/arch/x86/lib/mmx_32.c
30389+++ b/arch/x86/lib/mmx_32.c
30390@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30391 {
30392 void *p;
30393 int i;
30394+ unsigned long cr0;
30395
30396 if (unlikely(in_interrupt()))
30397 return __memcpy(to, from, len);
30398@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30399 kernel_fpu_begin();
30400
30401 __asm__ __volatile__ (
30402- "1: prefetch (%0)\n" /* This set is 28 bytes */
30403- " prefetch 64(%0)\n"
30404- " prefetch 128(%0)\n"
30405- " prefetch 192(%0)\n"
30406- " prefetch 256(%0)\n"
30407+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30408+ " prefetch 64(%1)\n"
30409+ " prefetch 128(%1)\n"
30410+ " prefetch 192(%1)\n"
30411+ " prefetch 256(%1)\n"
30412 "2: \n"
30413 ".section .fixup, \"ax\"\n"
30414- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30415+ "3: \n"
30416+
30417+#ifdef CONFIG_PAX_KERNEXEC
30418+ " movl %%cr0, %0\n"
30419+ " movl %0, %%eax\n"
30420+ " andl $0xFFFEFFFF, %%eax\n"
30421+ " movl %%eax, %%cr0\n"
30422+#endif
30423+
30424+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30425+
30426+#ifdef CONFIG_PAX_KERNEXEC
30427+ " movl %0, %%cr0\n"
30428+#endif
30429+
30430 " jmp 2b\n"
30431 ".previous\n"
30432 _ASM_EXTABLE(1b, 3b)
30433- : : "r" (from));
30434+ : "=&r" (cr0) : "r" (from) : "ax");
30435
30436 for ( ; i > 5; i--) {
30437 __asm__ __volatile__ (
30438- "1: prefetch 320(%0)\n"
30439- "2: movq (%0), %%mm0\n"
30440- " movq 8(%0), %%mm1\n"
30441- " movq 16(%0), %%mm2\n"
30442- " movq 24(%0), %%mm3\n"
30443- " movq %%mm0, (%1)\n"
30444- " movq %%mm1, 8(%1)\n"
30445- " movq %%mm2, 16(%1)\n"
30446- " movq %%mm3, 24(%1)\n"
30447- " movq 32(%0), %%mm0\n"
30448- " movq 40(%0), %%mm1\n"
30449- " movq 48(%0), %%mm2\n"
30450- " movq 56(%0), %%mm3\n"
30451- " movq %%mm0, 32(%1)\n"
30452- " movq %%mm1, 40(%1)\n"
30453- " movq %%mm2, 48(%1)\n"
30454- " movq %%mm3, 56(%1)\n"
30455+ "1: prefetch 320(%1)\n"
30456+ "2: movq (%1), %%mm0\n"
30457+ " movq 8(%1), %%mm1\n"
30458+ " movq 16(%1), %%mm2\n"
30459+ " movq 24(%1), %%mm3\n"
30460+ " movq %%mm0, (%2)\n"
30461+ " movq %%mm1, 8(%2)\n"
30462+ " movq %%mm2, 16(%2)\n"
30463+ " movq %%mm3, 24(%2)\n"
30464+ " movq 32(%1), %%mm0\n"
30465+ " movq 40(%1), %%mm1\n"
30466+ " movq 48(%1), %%mm2\n"
30467+ " movq 56(%1), %%mm3\n"
30468+ " movq %%mm0, 32(%2)\n"
30469+ " movq %%mm1, 40(%2)\n"
30470+ " movq %%mm2, 48(%2)\n"
30471+ " movq %%mm3, 56(%2)\n"
30472 ".section .fixup, \"ax\"\n"
30473- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30474+ "3:\n"
30475+
30476+#ifdef CONFIG_PAX_KERNEXEC
30477+ " movl %%cr0, %0\n"
30478+ " movl %0, %%eax\n"
30479+ " andl $0xFFFEFFFF, %%eax\n"
30480+ " movl %%eax, %%cr0\n"
30481+#endif
30482+
30483+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30484+
30485+#ifdef CONFIG_PAX_KERNEXEC
30486+ " movl %0, %%cr0\n"
30487+#endif
30488+
30489 " jmp 2b\n"
30490 ".previous\n"
30491 _ASM_EXTABLE(1b, 3b)
30492- : : "r" (from), "r" (to) : "memory");
30493+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30494
30495 from += 64;
30496 to += 64;
30497@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30498 static void fast_copy_page(void *to, void *from)
30499 {
30500 int i;
30501+ unsigned long cr0;
30502
30503 kernel_fpu_begin();
30504
30505@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30506 * but that is for later. -AV
30507 */
30508 __asm__ __volatile__(
30509- "1: prefetch (%0)\n"
30510- " prefetch 64(%0)\n"
30511- " prefetch 128(%0)\n"
30512- " prefetch 192(%0)\n"
30513- " prefetch 256(%0)\n"
30514+ "1: prefetch (%1)\n"
30515+ " prefetch 64(%1)\n"
30516+ " prefetch 128(%1)\n"
30517+ " prefetch 192(%1)\n"
30518+ " prefetch 256(%1)\n"
30519 "2: \n"
30520 ".section .fixup, \"ax\"\n"
30521- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30522+ "3: \n"
30523+
30524+#ifdef CONFIG_PAX_KERNEXEC
30525+ " movl %%cr0, %0\n"
30526+ " movl %0, %%eax\n"
30527+ " andl $0xFFFEFFFF, %%eax\n"
30528+ " movl %%eax, %%cr0\n"
30529+#endif
30530+
30531+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30532+
30533+#ifdef CONFIG_PAX_KERNEXEC
30534+ " movl %0, %%cr0\n"
30535+#endif
30536+
30537 " jmp 2b\n"
30538 ".previous\n"
30539- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30540+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30541
30542 for (i = 0; i < (4096-320)/64; i++) {
30543 __asm__ __volatile__ (
30544- "1: prefetch 320(%0)\n"
30545- "2: movq (%0), %%mm0\n"
30546- " movntq %%mm0, (%1)\n"
30547- " movq 8(%0), %%mm1\n"
30548- " movntq %%mm1, 8(%1)\n"
30549- " movq 16(%0), %%mm2\n"
30550- " movntq %%mm2, 16(%1)\n"
30551- " movq 24(%0), %%mm3\n"
30552- " movntq %%mm3, 24(%1)\n"
30553- " movq 32(%0), %%mm4\n"
30554- " movntq %%mm4, 32(%1)\n"
30555- " movq 40(%0), %%mm5\n"
30556- " movntq %%mm5, 40(%1)\n"
30557- " movq 48(%0), %%mm6\n"
30558- " movntq %%mm6, 48(%1)\n"
30559- " movq 56(%0), %%mm7\n"
30560- " movntq %%mm7, 56(%1)\n"
30561+ "1: prefetch 320(%1)\n"
30562+ "2: movq (%1), %%mm0\n"
30563+ " movntq %%mm0, (%2)\n"
30564+ " movq 8(%1), %%mm1\n"
30565+ " movntq %%mm1, 8(%2)\n"
30566+ " movq 16(%1), %%mm2\n"
30567+ " movntq %%mm2, 16(%2)\n"
30568+ " movq 24(%1), %%mm3\n"
30569+ " movntq %%mm3, 24(%2)\n"
30570+ " movq 32(%1), %%mm4\n"
30571+ " movntq %%mm4, 32(%2)\n"
30572+ " movq 40(%1), %%mm5\n"
30573+ " movntq %%mm5, 40(%2)\n"
30574+ " movq 48(%1), %%mm6\n"
30575+ " movntq %%mm6, 48(%2)\n"
30576+ " movq 56(%1), %%mm7\n"
30577+ " movntq %%mm7, 56(%2)\n"
30578 ".section .fixup, \"ax\"\n"
30579- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30580+ "3:\n"
30581+
30582+#ifdef CONFIG_PAX_KERNEXEC
30583+ " movl %%cr0, %0\n"
30584+ " movl %0, %%eax\n"
30585+ " andl $0xFFFEFFFF, %%eax\n"
30586+ " movl %%eax, %%cr0\n"
30587+#endif
30588+
30589+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30590+
30591+#ifdef CONFIG_PAX_KERNEXEC
30592+ " movl %0, %%cr0\n"
30593+#endif
30594+
30595 " jmp 2b\n"
30596 ".previous\n"
30597- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30598+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30599
30600 from += 64;
30601 to += 64;
30602@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30603 static void fast_copy_page(void *to, void *from)
30604 {
30605 int i;
30606+ unsigned long cr0;
30607
30608 kernel_fpu_begin();
30609
30610 __asm__ __volatile__ (
30611- "1: prefetch (%0)\n"
30612- " prefetch 64(%0)\n"
30613- " prefetch 128(%0)\n"
30614- " prefetch 192(%0)\n"
30615- " prefetch 256(%0)\n"
30616+ "1: prefetch (%1)\n"
30617+ " prefetch 64(%1)\n"
30618+ " prefetch 128(%1)\n"
30619+ " prefetch 192(%1)\n"
30620+ " prefetch 256(%1)\n"
30621 "2: \n"
30622 ".section .fixup, \"ax\"\n"
30623- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30624+ "3: \n"
30625+
30626+#ifdef CONFIG_PAX_KERNEXEC
30627+ " movl %%cr0, %0\n"
30628+ " movl %0, %%eax\n"
30629+ " andl $0xFFFEFFFF, %%eax\n"
30630+ " movl %%eax, %%cr0\n"
30631+#endif
30632+
30633+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30634+
30635+#ifdef CONFIG_PAX_KERNEXEC
30636+ " movl %0, %%cr0\n"
30637+#endif
30638+
30639 " jmp 2b\n"
30640 ".previous\n"
30641- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30642+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30643
30644 for (i = 0; i < 4096/64; i++) {
30645 __asm__ __volatile__ (
30646- "1: prefetch 320(%0)\n"
30647- "2: movq (%0), %%mm0\n"
30648- " movq 8(%0), %%mm1\n"
30649- " movq 16(%0), %%mm2\n"
30650- " movq 24(%0), %%mm3\n"
30651- " movq %%mm0, (%1)\n"
30652- " movq %%mm1, 8(%1)\n"
30653- " movq %%mm2, 16(%1)\n"
30654- " movq %%mm3, 24(%1)\n"
30655- " movq 32(%0), %%mm0\n"
30656- " movq 40(%0), %%mm1\n"
30657- " movq 48(%0), %%mm2\n"
30658- " movq 56(%0), %%mm3\n"
30659- " movq %%mm0, 32(%1)\n"
30660- " movq %%mm1, 40(%1)\n"
30661- " movq %%mm2, 48(%1)\n"
30662- " movq %%mm3, 56(%1)\n"
30663+ "1: prefetch 320(%1)\n"
30664+ "2: movq (%1), %%mm0\n"
30665+ " movq 8(%1), %%mm1\n"
30666+ " movq 16(%1), %%mm2\n"
30667+ " movq 24(%1), %%mm3\n"
30668+ " movq %%mm0, (%2)\n"
30669+ " movq %%mm1, 8(%2)\n"
30670+ " movq %%mm2, 16(%2)\n"
30671+ " movq %%mm3, 24(%2)\n"
30672+ " movq 32(%1), %%mm0\n"
30673+ " movq 40(%1), %%mm1\n"
30674+ " movq 48(%1), %%mm2\n"
30675+ " movq 56(%1), %%mm3\n"
30676+ " movq %%mm0, 32(%2)\n"
30677+ " movq %%mm1, 40(%2)\n"
30678+ " movq %%mm2, 48(%2)\n"
30679+ " movq %%mm3, 56(%2)\n"
30680 ".section .fixup, \"ax\"\n"
30681- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30682+ "3:\n"
30683+
30684+#ifdef CONFIG_PAX_KERNEXEC
30685+ " movl %%cr0, %0\n"
30686+ " movl %0, %%eax\n"
30687+ " andl $0xFFFEFFFF, %%eax\n"
30688+ " movl %%eax, %%cr0\n"
30689+#endif
30690+
30691+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30692+
30693+#ifdef CONFIG_PAX_KERNEXEC
30694+ " movl %0, %%cr0\n"
30695+#endif
30696+
30697 " jmp 2b\n"
30698 ".previous\n"
30699 _ASM_EXTABLE(1b, 3b)
30700- : : "r" (from), "r" (to) : "memory");
30701+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30702
30703 from += 64;
30704 to += 64;
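
The repeated #ifdef CONFIG_PAX_KERNEXEC blocks in mmx_32.c all serve the .fixup paths, which self-patch kernel text (the movw turns the faulting prefetch at label 1 into a short jmp). With KERNEXEC that text is read-only, so each write is bracketed by clearing CR0.WP, bit 16, which the 0xFFFEFFFF mask strips, and restoring the saved value afterwards; this is also why every asm gains a cr0 output operand and an "ax" clobber. The pair in kernel C, as a sketch with made-up helper names (the real code must stay inside the fixup asm):

	#include <asm/processor-flags.h>	/* X86_CR0_WP */
	#include <asm/special_insns.h>		/* read_cr0(), write_cr0() */

	/* clear CR0.WP so a store to read-only kernel text is honoured */
	static inline unsigned long kernexec_open_kernel(void)
	{
		unsigned long cr0 = read_cr0();

		write_cr0(cr0 & ~X86_CR0_WP);	/* ~(1UL << 16) == 0xFFFEFFFF */
		return cr0;
	}

	/* restore the caller's CR0, re-enabling write protection */
	static inline void kernexec_close_kernel(unsigned long cr0)
	{
		write_cr0(cr0);
	}
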
30705diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30706index f6d13ee..d789440 100644
30707--- a/arch/x86/lib/msr-reg.S
30708+++ b/arch/x86/lib/msr-reg.S
30709@@ -3,6 +3,7 @@
30710 #include <asm/dwarf2.h>
30711 #include <asm/asm.h>
30712 #include <asm/msr.h>
30713+#include <asm/alternative-asm.h>
30714
30715 #ifdef CONFIG_X86_64
30716 /*
30717@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30718 movl %edi, 28(%r10)
30719 popq_cfi %rbp
30720 popq_cfi %rbx
30721+ pax_force_retaddr
30722 ret
30723 3:
30724 CFI_RESTORE_STATE
30725diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30726index fc6ba17..d4d989d 100644
30727--- a/arch/x86/lib/putuser.S
30728+++ b/arch/x86/lib/putuser.S
30729@@ -16,7 +16,9 @@
30730 #include <asm/errno.h>
30731 #include <asm/asm.h>
30732 #include <asm/smap.h>
30733-
30734+#include <asm/segment.h>
30735+#include <asm/pgtable.h>
30736+#include <asm/alternative-asm.h>
30737
30738 /*
30739 * __put_user_X
30740@@ -30,57 +32,125 @@
30741 * as they get called from within inline assembly.
30742 */
30743
30744-#define ENTER CFI_STARTPROC ; \
30745- GET_THREAD_INFO(%_ASM_BX)
30746-#define EXIT ASM_CLAC ; \
30747- ret ; \
30748+#define ENTER CFI_STARTPROC
30749+#define EXIT ASM_CLAC ; \
30750+ pax_force_retaddr ; \
30751+ ret ; \
30752 CFI_ENDPROC
30753
30754+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30755+#define _DEST %_ASM_CX,%_ASM_BX
30756+#else
30757+#define _DEST %_ASM_CX
30758+#endif
30759+
30760+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30761+#define __copyuser_seg gs;
30762+#else
30763+#define __copyuser_seg
30764+#endif
30765+
30766 .text
30767 ENTRY(__put_user_1)
30768 ENTER
30769+
30770+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30771+ GET_THREAD_INFO(%_ASM_BX)
30772 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30773 jae bad_put_user
30774 ASM_STAC
30775-1: movb %al,(%_ASM_CX)
30776+
30777+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30778+ mov pax_user_shadow_base,%_ASM_BX
30779+ cmp %_ASM_BX,%_ASM_CX
30780+ jb 1234f
30781+ xor %ebx,%ebx
30782+1234:
30783+#endif
30784+
30785+#endif
30786+
30787+1: __copyuser_seg movb %al,(_DEST)
30788 xor %eax,%eax
30789 EXIT
30790 ENDPROC(__put_user_1)
30791
30792 ENTRY(__put_user_2)
30793 ENTER
30794+
30795+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30796+ GET_THREAD_INFO(%_ASM_BX)
30797 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30798 sub $1,%_ASM_BX
30799 cmp %_ASM_BX,%_ASM_CX
30800 jae bad_put_user
30801 ASM_STAC
30802-2: movw %ax,(%_ASM_CX)
30803+
30804+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30805+ mov pax_user_shadow_base,%_ASM_BX
30806+ cmp %_ASM_BX,%_ASM_CX
30807+ jb 1234f
30808+ xor %ebx,%ebx
30809+1234:
30810+#endif
30811+
30812+#endif
30813+
30814+2: __copyuser_seg movw %ax,(_DEST)
30815 xor %eax,%eax
30816 EXIT
30817 ENDPROC(__put_user_2)
30818
30819 ENTRY(__put_user_4)
30820 ENTER
30821+
30822+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30823+ GET_THREAD_INFO(%_ASM_BX)
30824 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30825 sub $3,%_ASM_BX
30826 cmp %_ASM_BX,%_ASM_CX
30827 jae bad_put_user
30828 ASM_STAC
30829-3: movl %eax,(%_ASM_CX)
30830+
30831+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30832+ mov pax_user_shadow_base,%_ASM_BX
30833+ cmp %_ASM_BX,%_ASM_CX
30834+ jb 1234f
30835+ xor %ebx,%ebx
30836+1234:
30837+#endif
30838+
30839+#endif
30840+
30841+3: __copyuser_seg movl %eax,(_DEST)
30842 xor %eax,%eax
30843 EXIT
30844 ENDPROC(__put_user_4)
30845
30846 ENTRY(__put_user_8)
30847 ENTER
30848+
30849+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30850+ GET_THREAD_INFO(%_ASM_BX)
30851 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30852 sub $7,%_ASM_BX
30853 cmp %_ASM_BX,%_ASM_CX
30854 jae bad_put_user
30855 ASM_STAC
30856-4: mov %_ASM_AX,(%_ASM_CX)
30857+
30858+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30859+ mov pax_user_shadow_base,%_ASM_BX
30860+ cmp %_ASM_BX,%_ASM_CX
30861+ jb 1234f
30862+ xor %ebx,%ebx
30863+1234:
30864+#endif
30865+
30866+#endif
30867+
30868+4: __copyuser_seg mov %_ASM_AX,(_DEST)
30869 #ifdef CONFIG_X86_32
30870-5: movl %edx,4(%_ASM_CX)
30871+5: __copyuser_seg movl %edx,4(_DEST)
30872 #endif
30873 xor %eax,%eax
30874 EXIT
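
Two UDEREF mechanisms are layered into __put_user_* above. On i386 the addr_limit check (and the GET_THREAD_INFO it needed) disappears and the store simply gains a %gs override via __copyuser_seg, because the user segment enforces the limit in hardware. On amd64 the userland is remapped at pax_user_shadow_base and the store goes through _DEST, i.e. (%_ASM_CX,%_ASM_BX): %rbx holds the shadow base for a genuine low user pointer and zero for a pointer already inside the shadow. The cmp/jb/xor sequence, restated in C:

	extern unsigned long pax_user_shadow_base;	/* PaX symbol, amd64 UDEREF */

	/* illustrative helper mirroring the address computation above */
	static inline unsigned long uderef_shadow(unsigned long uaddr)
	{
		return uaddr + (uaddr < pax_user_shadow_base ?
				pax_user_shadow_base : 0);
	}
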
30875diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
30876index 5dff5f0..cadebf4 100644
30877--- a/arch/x86/lib/rwsem.S
30878+++ b/arch/x86/lib/rwsem.S
30879@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
30880 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30881 CFI_RESTORE __ASM_REG(dx)
30882 restore_common_regs
30883+ pax_force_retaddr
30884 ret
30885 CFI_ENDPROC
30886 ENDPROC(call_rwsem_down_read_failed)
30887@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
30888 movq %rax,%rdi
30889 call rwsem_down_write_failed
30890 restore_common_regs
30891+ pax_force_retaddr
30892 ret
30893 CFI_ENDPROC
30894 ENDPROC(call_rwsem_down_write_failed)
30895@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
30896 movq %rax,%rdi
30897 call rwsem_wake
30898 restore_common_regs
30899-1: ret
30900+1: pax_force_retaddr
30901+ ret
30902 CFI_ENDPROC
30903 ENDPROC(call_rwsem_wake)
30904
30905@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
30906 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30907 CFI_RESTORE __ASM_REG(dx)
30908 restore_common_regs
30909+ pax_force_retaddr
30910 ret
30911 CFI_ENDPROC
30912 ENDPROC(call_rwsem_downgrade_wake)
30913diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
30914index b30b5eb..2b57052 100644
30915--- a/arch/x86/lib/thunk_64.S
30916+++ b/arch/x86/lib/thunk_64.S
30917@@ -9,6 +9,7 @@
30918 #include <asm/dwarf2.h>
30919 #include <asm/calling.h>
30920 #include <asm/asm.h>
30921+#include <asm/alternative-asm.h>
30922
30923 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
30924 .macro THUNK name, func, put_ret_addr_in_rdi=0
30925@@ -16,11 +17,11 @@
30926 \name:
30927 CFI_STARTPROC
30928
30929- /* this one pushes 9 elems, the next one would be %rIP */
30930- SAVE_ARGS
30931+ /* this one pushes 15+1 elems, the next one would be %rIP */
30932+ SAVE_ARGS 8
30933
30934 .if \put_ret_addr_in_rdi
30935- movq_cfi_restore 9*8, rdi
30936+ movq_cfi_restore RIP, rdi
30937 .endif
30938
30939 call \func
30940@@ -47,9 +48,10 @@
30941
30942 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
30943 CFI_STARTPROC
30944- SAVE_ARGS
30945+ SAVE_ARGS 8
30946 restore:
30947- RESTORE_ARGS
30948+ RESTORE_ARGS 1,8
30949+ pax_force_retaddr
30950 ret
30951 CFI_ENDPROC
30952 _ASM_NOKPROBE(restore)
30953diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
30954index e2f5e21..4b22130 100644
30955--- a/arch/x86/lib/usercopy_32.c
30956+++ b/arch/x86/lib/usercopy_32.c
30957@@ -42,11 +42,13 @@ do { \
30958 int __d0; \
30959 might_fault(); \
30960 __asm__ __volatile__( \
30961+ __COPYUSER_SET_ES \
30962 ASM_STAC "\n" \
30963 "0: rep; stosl\n" \
30964 " movl %2,%0\n" \
30965 "1: rep; stosb\n" \
30966 "2: " ASM_CLAC "\n" \
30967+ __COPYUSER_RESTORE_ES \
30968 ".section .fixup,\"ax\"\n" \
30969 "3: lea 0(%2,%0,4),%0\n" \
30970 " jmp 2b\n" \
30971@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
30972
30973 #ifdef CONFIG_X86_INTEL_USERCOPY
30974 static unsigned long
30975-__copy_user_intel(void __user *to, const void *from, unsigned long size)
30976+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
30977 {
30978 int d0, d1;
30979 __asm__ __volatile__(
30980@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
30981 " .align 2,0x90\n"
30982 "3: movl 0(%4), %%eax\n"
30983 "4: movl 4(%4), %%edx\n"
30984- "5: movl %%eax, 0(%3)\n"
30985- "6: movl %%edx, 4(%3)\n"
30986+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
30987+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
30988 "7: movl 8(%4), %%eax\n"
30989 "8: movl 12(%4),%%edx\n"
30990- "9: movl %%eax, 8(%3)\n"
30991- "10: movl %%edx, 12(%3)\n"
30992+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
30993+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
30994 "11: movl 16(%4), %%eax\n"
30995 "12: movl 20(%4), %%edx\n"
30996- "13: movl %%eax, 16(%3)\n"
30997- "14: movl %%edx, 20(%3)\n"
30998+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
30999+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31000 "15: movl 24(%4), %%eax\n"
31001 "16: movl 28(%4), %%edx\n"
31002- "17: movl %%eax, 24(%3)\n"
31003- "18: movl %%edx, 28(%3)\n"
31004+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31005+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31006 "19: movl 32(%4), %%eax\n"
31007 "20: movl 36(%4), %%edx\n"
31008- "21: movl %%eax, 32(%3)\n"
31009- "22: movl %%edx, 36(%3)\n"
31010+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31011+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31012 "23: movl 40(%4), %%eax\n"
31013 "24: movl 44(%4), %%edx\n"
31014- "25: movl %%eax, 40(%3)\n"
31015- "26: movl %%edx, 44(%3)\n"
31016+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31017+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31018 "27: movl 48(%4), %%eax\n"
31019 "28: movl 52(%4), %%edx\n"
31020- "29: movl %%eax, 48(%3)\n"
31021- "30: movl %%edx, 52(%3)\n"
31022+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31023+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31024 "31: movl 56(%4), %%eax\n"
31025 "32: movl 60(%4), %%edx\n"
31026- "33: movl %%eax, 56(%3)\n"
31027- "34: movl %%edx, 60(%3)\n"
31028+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31029+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31030 " addl $-64, %0\n"
31031 " addl $64, %4\n"
31032 " addl $64, %3\n"
31033@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31034 " shrl $2, %0\n"
31035 " andl $3, %%eax\n"
31036 " cld\n"
31037+ __COPYUSER_SET_ES
31038 "99: rep; movsl\n"
31039 "36: movl %%eax, %0\n"
31040 "37: rep; movsb\n"
31041 "100:\n"
31042+ __COPYUSER_RESTORE_ES
31043+ ".section .fixup,\"ax\"\n"
31044+ "101: lea 0(%%eax,%0,4),%0\n"
31045+ " jmp 100b\n"
31046+ ".previous\n"
31047+ _ASM_EXTABLE(1b,100b)
31048+ _ASM_EXTABLE(2b,100b)
31049+ _ASM_EXTABLE(3b,100b)
31050+ _ASM_EXTABLE(4b,100b)
31051+ _ASM_EXTABLE(5b,100b)
31052+ _ASM_EXTABLE(6b,100b)
31053+ _ASM_EXTABLE(7b,100b)
31054+ _ASM_EXTABLE(8b,100b)
31055+ _ASM_EXTABLE(9b,100b)
31056+ _ASM_EXTABLE(10b,100b)
31057+ _ASM_EXTABLE(11b,100b)
31058+ _ASM_EXTABLE(12b,100b)
31059+ _ASM_EXTABLE(13b,100b)
31060+ _ASM_EXTABLE(14b,100b)
31061+ _ASM_EXTABLE(15b,100b)
31062+ _ASM_EXTABLE(16b,100b)
31063+ _ASM_EXTABLE(17b,100b)
31064+ _ASM_EXTABLE(18b,100b)
31065+ _ASM_EXTABLE(19b,100b)
31066+ _ASM_EXTABLE(20b,100b)
31067+ _ASM_EXTABLE(21b,100b)
31068+ _ASM_EXTABLE(22b,100b)
31069+ _ASM_EXTABLE(23b,100b)
31070+ _ASM_EXTABLE(24b,100b)
31071+ _ASM_EXTABLE(25b,100b)
31072+ _ASM_EXTABLE(26b,100b)
31073+ _ASM_EXTABLE(27b,100b)
31074+ _ASM_EXTABLE(28b,100b)
31075+ _ASM_EXTABLE(29b,100b)
31076+ _ASM_EXTABLE(30b,100b)
31077+ _ASM_EXTABLE(31b,100b)
31078+ _ASM_EXTABLE(32b,100b)
31079+ _ASM_EXTABLE(33b,100b)
31080+ _ASM_EXTABLE(34b,100b)
31081+ _ASM_EXTABLE(35b,100b)
31082+ _ASM_EXTABLE(36b,100b)
31083+ _ASM_EXTABLE(37b,100b)
31084+ _ASM_EXTABLE(99b,101b)
31085+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31086+ : "1"(to), "2"(from), "0"(size)
31087+ : "eax", "edx", "memory");
31088+ return size;
31089+}
31090+
31091+static unsigned long
31092+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31093+{
31094+ int d0, d1;
31095+ __asm__ __volatile__(
31096+ " .align 2,0x90\n"
31097+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31098+ " cmpl $67, %0\n"
31099+ " jbe 3f\n"
31100+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31101+ " .align 2,0x90\n"
31102+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31103+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31104+ "5: movl %%eax, 0(%3)\n"
31105+ "6: movl %%edx, 4(%3)\n"
31106+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31107+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31108+ "9: movl %%eax, 8(%3)\n"
31109+ "10: movl %%edx, 12(%3)\n"
31110+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31111+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31112+ "13: movl %%eax, 16(%3)\n"
31113+ "14: movl %%edx, 20(%3)\n"
31114+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31115+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31116+ "17: movl %%eax, 24(%3)\n"
31117+ "18: movl %%edx, 28(%3)\n"
31118+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31119+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31120+ "21: movl %%eax, 32(%3)\n"
31121+ "22: movl %%edx, 36(%3)\n"
31122+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31123+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31124+ "25: movl %%eax, 40(%3)\n"
31125+ "26: movl %%edx, 44(%3)\n"
31126+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31127+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31128+ "29: movl %%eax, 48(%3)\n"
31129+ "30: movl %%edx, 52(%3)\n"
31130+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31131+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31132+ "33: movl %%eax, 56(%3)\n"
31133+ "34: movl %%edx, 60(%3)\n"
31134+ " addl $-64, %0\n"
31135+ " addl $64, %4\n"
31136+ " addl $64, %3\n"
31137+ " cmpl $63, %0\n"
31138+ " ja 1b\n"
31139+ "35: movl %0, %%eax\n"
31140+ " shrl $2, %0\n"
31141+ " andl $3, %%eax\n"
31142+ " cld\n"
31143+ "99: rep; "__copyuser_seg" movsl\n"
31144+ "36: movl %%eax, %0\n"
31145+ "37: rep; "__copyuser_seg" movsb\n"
31146+ "100:\n"
31147 ".section .fixup,\"ax\"\n"
31148 "101: lea 0(%%eax,%0,4),%0\n"
31149 " jmp 100b\n"
31150@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31151 int d0, d1;
31152 __asm__ __volatile__(
31153 " .align 2,0x90\n"
31154- "0: movl 32(%4), %%eax\n"
31155+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31156 " cmpl $67, %0\n"
31157 " jbe 2f\n"
31158- "1: movl 64(%4), %%eax\n"
31159+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31160 " .align 2,0x90\n"
31161- "2: movl 0(%4), %%eax\n"
31162- "21: movl 4(%4), %%edx\n"
31163+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31164+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31165 " movl %%eax, 0(%3)\n"
31166 " movl %%edx, 4(%3)\n"
31167- "3: movl 8(%4), %%eax\n"
31168- "31: movl 12(%4),%%edx\n"
31169+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31170+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31171 " movl %%eax, 8(%3)\n"
31172 " movl %%edx, 12(%3)\n"
31173- "4: movl 16(%4), %%eax\n"
31174- "41: movl 20(%4), %%edx\n"
31175+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31176+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31177 " movl %%eax, 16(%3)\n"
31178 " movl %%edx, 20(%3)\n"
31179- "10: movl 24(%4), %%eax\n"
31180- "51: movl 28(%4), %%edx\n"
31181+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31182+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31183 " movl %%eax, 24(%3)\n"
31184 " movl %%edx, 28(%3)\n"
31185- "11: movl 32(%4), %%eax\n"
31186- "61: movl 36(%4), %%edx\n"
31187+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31188+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31189 " movl %%eax, 32(%3)\n"
31190 " movl %%edx, 36(%3)\n"
31191- "12: movl 40(%4), %%eax\n"
31192- "71: movl 44(%4), %%edx\n"
31193+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31194+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31195 " movl %%eax, 40(%3)\n"
31196 " movl %%edx, 44(%3)\n"
31197- "13: movl 48(%4), %%eax\n"
31198- "81: movl 52(%4), %%edx\n"
31199+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31200+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31201 " movl %%eax, 48(%3)\n"
31202 " movl %%edx, 52(%3)\n"
31203- "14: movl 56(%4), %%eax\n"
31204- "91: movl 60(%4), %%edx\n"
31205+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31206+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31207 " movl %%eax, 56(%3)\n"
31208 " movl %%edx, 60(%3)\n"
31209 " addl $-64, %0\n"
31210@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31211 " shrl $2, %0\n"
31212 " andl $3, %%eax\n"
31213 " cld\n"
31214- "6: rep; movsl\n"
31215+ "6: rep; "__copyuser_seg" movsl\n"
31216 " movl %%eax,%0\n"
31217- "7: rep; movsb\n"
31218+ "7: rep; "__copyuser_seg" movsb\n"
31219 "8:\n"
31220 ".section .fixup,\"ax\"\n"
31221 "9: lea 0(%%eax,%0,4),%0\n"
31222@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31223
31224 __asm__ __volatile__(
31225 " .align 2,0x90\n"
31226- "0: movl 32(%4), %%eax\n"
31227+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31228 " cmpl $67, %0\n"
31229 " jbe 2f\n"
31230- "1: movl 64(%4), %%eax\n"
31231+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31232 " .align 2,0x90\n"
31233- "2: movl 0(%4), %%eax\n"
31234- "21: movl 4(%4), %%edx\n"
31235+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31236+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31237 " movnti %%eax, 0(%3)\n"
31238 " movnti %%edx, 4(%3)\n"
31239- "3: movl 8(%4), %%eax\n"
31240- "31: movl 12(%4),%%edx\n"
31241+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31242+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31243 " movnti %%eax, 8(%3)\n"
31244 " movnti %%edx, 12(%3)\n"
31245- "4: movl 16(%4), %%eax\n"
31246- "41: movl 20(%4), %%edx\n"
31247+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31248+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31249 " movnti %%eax, 16(%3)\n"
31250 " movnti %%edx, 20(%3)\n"
31251- "10: movl 24(%4), %%eax\n"
31252- "51: movl 28(%4), %%edx\n"
31253+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31254+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31255 " movnti %%eax, 24(%3)\n"
31256 " movnti %%edx, 28(%3)\n"
31257- "11: movl 32(%4), %%eax\n"
31258- "61: movl 36(%4), %%edx\n"
31259+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31260+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31261 " movnti %%eax, 32(%3)\n"
31262 " movnti %%edx, 36(%3)\n"
31263- "12: movl 40(%4), %%eax\n"
31264- "71: movl 44(%4), %%edx\n"
31265+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31266+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31267 " movnti %%eax, 40(%3)\n"
31268 " movnti %%edx, 44(%3)\n"
31269- "13: movl 48(%4), %%eax\n"
31270- "81: movl 52(%4), %%edx\n"
31271+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31272+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31273 " movnti %%eax, 48(%3)\n"
31274 " movnti %%edx, 52(%3)\n"
31275- "14: movl 56(%4), %%eax\n"
31276- "91: movl 60(%4), %%edx\n"
31277+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31278+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31279 " movnti %%eax, 56(%3)\n"
31280 " movnti %%edx, 60(%3)\n"
31281 " addl $-64, %0\n"
31282@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31283 " shrl $2, %0\n"
31284 " andl $3, %%eax\n"
31285 " cld\n"
31286- "6: rep; movsl\n"
31287+ "6: rep; "__copyuser_seg" movsl\n"
31288 " movl %%eax,%0\n"
31289- "7: rep; movsb\n"
31290+ "7: rep; "__copyuser_seg" movsb\n"
31291 "8:\n"
31292 ".section .fixup,\"ax\"\n"
31293 "9: lea 0(%%eax,%0,4),%0\n"
31294@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31295
31296 __asm__ __volatile__(
31297 " .align 2,0x90\n"
31298- "0: movl 32(%4), %%eax\n"
31299+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31300 " cmpl $67, %0\n"
31301 " jbe 2f\n"
31302- "1: movl 64(%4), %%eax\n"
31303+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31304 " .align 2,0x90\n"
31305- "2: movl 0(%4), %%eax\n"
31306- "21: movl 4(%4), %%edx\n"
31307+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31308+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31309 " movnti %%eax, 0(%3)\n"
31310 " movnti %%edx, 4(%3)\n"
31311- "3: movl 8(%4), %%eax\n"
31312- "31: movl 12(%4),%%edx\n"
31313+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31314+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31315 " movnti %%eax, 8(%3)\n"
31316 " movnti %%edx, 12(%3)\n"
31317- "4: movl 16(%4), %%eax\n"
31318- "41: movl 20(%4), %%edx\n"
31319+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31320+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31321 " movnti %%eax, 16(%3)\n"
31322 " movnti %%edx, 20(%3)\n"
31323- "10: movl 24(%4), %%eax\n"
31324- "51: movl 28(%4), %%edx\n"
31325+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31326+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31327 " movnti %%eax, 24(%3)\n"
31328 " movnti %%edx, 28(%3)\n"
31329- "11: movl 32(%4), %%eax\n"
31330- "61: movl 36(%4), %%edx\n"
31331+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31332+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31333 " movnti %%eax, 32(%3)\n"
31334 " movnti %%edx, 36(%3)\n"
31335- "12: movl 40(%4), %%eax\n"
31336- "71: movl 44(%4), %%edx\n"
31337+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31338+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31339 " movnti %%eax, 40(%3)\n"
31340 " movnti %%edx, 44(%3)\n"
31341- "13: movl 48(%4), %%eax\n"
31342- "81: movl 52(%4), %%edx\n"
31343+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31344+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31345 " movnti %%eax, 48(%3)\n"
31346 " movnti %%edx, 52(%3)\n"
31347- "14: movl 56(%4), %%eax\n"
31348- "91: movl 60(%4), %%edx\n"
31349+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31350+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31351 " movnti %%eax, 56(%3)\n"
31352 " movnti %%edx, 60(%3)\n"
31353 " addl $-64, %0\n"
31354@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31355 " shrl $2, %0\n"
31356 " andl $3, %%eax\n"
31357 " cld\n"
31358- "6: rep; movsl\n"
31359+ "6: rep; "__copyuser_seg" movsl\n"
31360 " movl %%eax,%0\n"
31361- "7: rep; movsb\n"
31362+ "7: rep; "__copyuser_seg" movsb\n"
31363 "8:\n"
31364 ".section .fixup,\"ax\"\n"
31365 "9: lea 0(%%eax,%0,4),%0\n"
31366@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31367 */
31368 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31369 unsigned long size);
31370-unsigned long __copy_user_intel(void __user *to, const void *from,
31371+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31372+ unsigned long size);
31373+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31374 unsigned long size);
31375 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31376 const void __user *from, unsigned long size);
31377 #endif /* CONFIG_X86_INTEL_USERCOPY */
31378
31379 /* Generic arbitrary sized copy. */
31380-#define __copy_user(to, from, size) \
31381+#define __copy_user(to, from, size, prefix, set, restore) \
31382 do { \
31383 int __d0, __d1, __d2; \
31384 __asm__ __volatile__( \
31385+ set \
31386 " cmp $7,%0\n" \
31387 " jbe 1f\n" \
31388 " movl %1,%0\n" \
31389 " negl %0\n" \
31390 " andl $7,%0\n" \
31391 " subl %0,%3\n" \
31392- "4: rep; movsb\n" \
31393+ "4: rep; "prefix"movsb\n" \
31394 " movl %3,%0\n" \
31395 " shrl $2,%0\n" \
31396 " andl $3,%3\n" \
31397 " .align 2,0x90\n" \
31398- "0: rep; movsl\n" \
31399+ "0: rep; "prefix"movsl\n" \
31400 " movl %3,%0\n" \
31401- "1: rep; movsb\n" \
31402+ "1: rep; "prefix"movsb\n" \
31403 "2:\n" \
31404+ restore \
31405 ".section .fixup,\"ax\"\n" \
31406 "5: addl %3,%0\n" \
31407 " jmp 2b\n" \
31408@@ -538,14 +650,14 @@ do { \
31409 " negl %0\n" \
31410 " andl $7,%0\n" \
31411 " subl %0,%3\n" \
31412- "4: rep; movsb\n" \
31413+ "4: rep; "__copyuser_seg"movsb\n" \
31414 " movl %3,%0\n" \
31415 " shrl $2,%0\n" \
31416 " andl $3,%3\n" \
31417 " .align 2,0x90\n" \
31418- "0: rep; movsl\n" \
31419+ "0: rep; "__copyuser_seg"movsl\n" \
31420 " movl %3,%0\n" \
31421- "1: rep; movsb\n" \
31422+ "1: rep; "__copyuser_seg"movsb\n" \
31423 "2:\n" \
31424 ".section .fixup,\"ax\"\n" \
31425 "5: addl %3,%0\n" \
31426@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31427 {
31428 stac();
31429 if (movsl_is_ok(to, from, n))
31430- __copy_user(to, from, n);
31431+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31432 else
31433- n = __copy_user_intel(to, from, n);
31434+ n = __generic_copy_to_user_intel(to, from, n);
31435 clac();
31436 return n;
31437 }
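
The reworked __copy_user() macro takes a per-instruction segment prefix plus set/restore asm fragments, so a single body covers both directions under 32-bit UDEREF; a segment override on the string moves redirects only their DS-relative source operand, which is why the two directions need different plumbing. These are the literal call shapes used in this file:

	/* kernel -> user: %es is switched to the user segment around the
	 * string stores */
	__copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);

	/* user -> kernel: the "gs;" prefix redirects the loads, while the
	 * stores land in kernel space unprefixed */
	__copy_user(to, from, n, __copyuser_seg, "", "");
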
31438@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31439 {
31440 stac();
31441 if (movsl_is_ok(to, from, n))
31442- __copy_user(to, from, n);
31443+ __copy_user(to, from, n, __copyuser_seg, "", "");
31444 else
31445- n = __copy_user_intel((void __user *)to,
31446- (const void *)from, n);
31447+ n = __generic_copy_from_user_intel(to, from, n);
31448 clac();
31449 return n;
31450 }
31451@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31452 if (n > 64 && cpu_has_xmm2)
31453 n = __copy_user_intel_nocache(to, from, n);
31454 else
31455- __copy_user(to, from, n);
31456+ __copy_user(to, from, n, __copyuser_seg, "", "");
31457 #else
31458- __copy_user(to, from, n);
31459+ __copy_user(to, from, n, __copyuser_seg, "", "");
31460 #endif
31461 clac();
31462 return n;
31463 }
31464 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31465
31466-/**
31467- * copy_to_user: - Copy a block of data into user space.
31468- * @to: Destination address, in user space.
31469- * @from: Source address, in kernel space.
31470- * @n: Number of bytes to copy.
31471- *
31472- * Context: User context only. This function may sleep.
31473- *
31474- * Copy data from kernel space to user space.
31475- *
31476- * Returns number of bytes that could not be copied.
31477- * On success, this will be zero.
31478- */
31479-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31480+#ifdef CONFIG_PAX_MEMORY_UDEREF
31481+void __set_fs(mm_segment_t x)
31482 {
31483- if (access_ok(VERIFY_WRITE, to, n))
31484- n = __copy_to_user(to, from, n);
31485- return n;
31486+ switch (x.seg) {
31487+ case 0:
31488+ loadsegment(gs, 0);
31489+ break;
31490+ case TASK_SIZE_MAX:
31491+ loadsegment(gs, __USER_DS);
31492+ break;
31493+ case -1UL:
31494+ loadsegment(gs, __KERNEL_DS);
31495+ break;
31496+ default:
31497+ BUG();
31498+ }
31499 }
31500-EXPORT_SYMBOL(_copy_to_user);
31501+EXPORT_SYMBOL(__set_fs);
31502
31503-/**
31504- * copy_from_user: - Copy a block of data from user space.
31505- * @to: Destination address, in kernel space.
31506- * @from: Source address, in user space.
31507- * @n: Number of bytes to copy.
31508- *
31509- * Context: User context only. This function may sleep.
31510- *
31511- * Copy data from user space to kernel space.
31512- *
31513- * Returns number of bytes that could not be copied.
31514- * On success, this will be zero.
31515- *
31516- * If some data could not be copied, this function will pad the copied
31517- * data to the requested size using zero bytes.
31518- */
31519-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31520+void set_fs(mm_segment_t x)
31521 {
31522- if (access_ok(VERIFY_READ, from, n))
31523- n = __copy_from_user(to, from, n);
31524- else
31525- memset(to, 0, n);
31526- return n;
31527+ current_thread_info()->addr_limit = x;
31528+ __set_fs(x);
31529 }
31530-EXPORT_SYMBOL(_copy_from_user);
31531+EXPORT_SYMBOL(set_fs);
31532+#endif
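
In place of the generic _copy_to_user()/_copy_from_user() wrappers dropped here, the 32-bit UDEREF build gains set_fs()/__set_fs(), mapping each possible addr_limit onto %gs: a null selector for 0, __USER_DS for TASK_SIZE_MAX, __KERNEL_DS for KERNEL_DS (-1UL). That last case is what keeps the classic set_fs(KERNEL_DS) idiom working, since gs-prefixed "user" accessors must then reach kernel memory. A sketch of such a consumer against the 3.19-era API:

	#include <linux/fs.h>
	#include <linux/uaccess.h>

	static ssize_t kernel_read_sketch(struct file *file, void *kbuf,
					  size_t len, loff_t *pos)
	{
		mm_segment_t old_fs = get_fs();
		ssize_t ret;

		set_fs(KERNEL_DS);	/* __set_fs() loads __KERNEL_DS into %gs */
		ret = vfs_read(file, (char __user *)kbuf, len, pos);
		set_fs(old_fs);		/* back to the userland segment */
		return ret;
	}
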
31533diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31534index c905e89..01ab928 100644
31535--- a/arch/x86/lib/usercopy_64.c
31536+++ b/arch/x86/lib/usercopy_64.c
31537@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31538 might_fault();
31539 /* no memory constraint because it doesn't change any memory gcc knows
31540 about */
31541+ pax_open_userland();
31542 stac();
31543 asm volatile(
31544 " testq %[size8],%[size8]\n"
31545@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31546 _ASM_EXTABLE(0b,3b)
31547 _ASM_EXTABLE(1b,2b)
31548 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31549- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31550+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31551 [zero] "r" (0UL), [eight] "r" (8UL));
31552 clac();
31553+ pax_close_userland();
31554 return size;
31555 }
31556 EXPORT_SYMBOL(__clear_user);
31557@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31558 }
31559 EXPORT_SYMBOL(clear_user);
31560
31561-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31562+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31563 {
31564- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31565- return copy_user_generic((__force void *)to, (__force void *)from, len);
31566- }
31567- return len;
31568+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31569+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31570+ return len;
31571 }
31572 EXPORT_SYMBOL(copy_in_user);
31573
31574@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31575 * it is not necessary to optimize tail handling.
31576 */
31577 __visible unsigned long
31578-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31579+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31580 {
31581 char c;
31582 unsigned zero_len;
31583
31584+ clac();
31585+ pax_close_userland();
31586 for (; len; --len, to++) {
31587 if (__get_user_nocheck(c, from++, sizeof(char)))
31588 break;
31589@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31590 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31591 if (__put_user_nocheck(c, to++, sizeof(char)))
31592 break;
31593- clac();
31594 return len;
31595 }
31596diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31597index ecfdc46..55b9309 100644
31598--- a/arch/x86/mm/Makefile
31599+++ b/arch/x86/mm/Makefile
31600@@ -32,3 +32,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31601 obj-$(CONFIG_MEMTEST) += memtest.o
31602
31603 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31604+
31605+quote:="
31606+obj-$(CONFIG_X86_64) += uderef_64.o
31607+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31608diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31609index 903ec1e..c4166b2 100644
31610--- a/arch/x86/mm/extable.c
31611+++ b/arch/x86/mm/extable.c
31612@@ -6,12 +6,24 @@
31613 static inline unsigned long
31614 ex_insn_addr(const struct exception_table_entry *x)
31615 {
31616- return (unsigned long)&x->insn + x->insn;
31617+ unsigned long reloc = 0;
31618+
31619+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31620+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31621+#endif
31622+
31623+ return (unsigned long)&x->insn + x->insn + reloc;
31624 }
31625 static inline unsigned long
31626 ex_fixup_addr(const struct exception_table_entry *x)
31627 {
31628- return (unsigned long)&x->fixup + x->fixup;
31629+ unsigned long reloc = 0;
31630+
31631+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31632+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31633+#endif
31634+
31635+ return (unsigned long)&x->fixup + x->fixup + reloc;
31636 }
31637
31638 int fixup_exception(struct pt_regs *regs)
31639@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31640 unsigned long new_ip;
31641
31642 #ifdef CONFIG_PNPBIOS
31643- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31644+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31645 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31646 extern u32 pnp_bios_is_utter_crap;
31647 pnp_bios_is_utter_crap = 1;
31648@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31649 i += 4;
31650 p->fixup -= i;
31651 i += 4;
31652+
31653+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31654+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31655+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31656+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31657+#endif
31658+
31659 }
31660 }
31661
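
Exception-table entries hold 32-bit self-relative offsets; once KERNEXEC on i386 relocates the image by ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR, the decoded addresses are off by exactly that delta. The hunk adds the delta back in the decoders and pre-subtracts it from entries that still pass through sort_extable() at runtime (the BUILD_BUG_ON insists the main table was already sorted at build time), so both paths decode consistently. Reduced to a sketch:

	/* mirrors struct exception_table_entry; reloc is zero on
	 * non-KERNEXEC builds */
	struct extentry { int insn; int fixup; };

	static unsigned long ex_insn_addr_sketch(const struct extentry *x,
						 long reloc)
	{
		return (unsigned long)&x->insn + x->insn + reloc;
	}
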
31662diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31663index e3ff27a..f38f7c0 100644
31664--- a/arch/x86/mm/fault.c
31665+++ b/arch/x86/mm/fault.c
31666@@ -13,12 +13,19 @@
31667 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31668 #include <linux/prefetch.h> /* prefetchw */
31669 #include <linux/context_tracking.h> /* exception_enter(), ... */
31670+#include <linux/unistd.h>
31671+#include <linux/compiler.h>
31672
31673 #include <asm/traps.h> /* dotraplinkage, ... */
31674 #include <asm/pgalloc.h> /* pgd_*(), ... */
31675 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31676 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31677 #include <asm/vsyscall.h> /* emulate_vsyscall */
31678+#include <asm/tlbflush.h>
31679+
31680+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31681+#include <asm/stacktrace.h>
31682+#endif
31683
31684 #define CREATE_TRACE_POINTS
31685 #include <asm/trace/exceptions.h>
31686@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31687 int ret = 0;
31688
31689 /* kprobe_running() needs smp_processor_id() */
31690- if (kprobes_built_in() && !user_mode_vm(regs)) {
31691+ if (kprobes_built_in() && !user_mode(regs)) {
31692 preempt_disable();
31693 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31694 ret = 1;
31695@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31696 return !instr_lo || (instr_lo>>1) == 1;
31697 case 0x00:
31698 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31699- if (probe_kernel_address(instr, opcode))
31700+ if (user_mode(regs)) {
31701+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31702+ return 0;
31703+ } else if (probe_kernel_address(instr, opcode))
31704 return 0;
31705
31706 *prefetch = (instr_lo == 0xF) &&
31707@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31708 while (instr < max_instr) {
31709 unsigned char opcode;
31710
31711- if (probe_kernel_address(instr, opcode))
31712+ if (user_mode(regs)) {
31713+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31714+ break;
31715+ } else if (probe_kernel_address(instr, opcode))
31716 break;
31717
31718 instr++;
31719@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31720 force_sig_info(si_signo, &info, tsk);
31721 }
31722
31723+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31724+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31725+#endif
31726+
31727+#ifdef CONFIG_PAX_EMUTRAMP
31728+static int pax_handle_fetch_fault(struct pt_regs *regs);
31729+#endif
31730+
31731+#ifdef CONFIG_PAX_PAGEEXEC
31732+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31733+{
31734+ pgd_t *pgd;
31735+ pud_t *pud;
31736+ pmd_t *pmd;
31737+
31738+ pgd = pgd_offset(mm, address);
31739+ if (!pgd_present(*pgd))
31740+ return NULL;
31741+ pud = pud_offset(pgd, address);
31742+ if (!pud_present(*pud))
31743+ return NULL;
31744+ pmd = pmd_offset(pud, address);
31745+ if (!pmd_present(*pmd))
31746+ return NULL;
31747+ return pmd;
31748+}
31749+#endif
31750+
31751 DEFINE_SPINLOCK(pgd_lock);
31752 LIST_HEAD(pgd_list);
31753
31754@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31755 for (address = VMALLOC_START & PMD_MASK;
31756 address >= TASK_SIZE && address < FIXADDR_TOP;
31757 address += PMD_SIZE) {
31758+
31759+#ifdef CONFIG_PAX_PER_CPU_PGD
31760+ unsigned long cpu;
31761+#else
31762 struct page *page;
31763+#endif
31764
31765 spin_lock(&pgd_lock);
31766+
31767+#ifdef CONFIG_PAX_PER_CPU_PGD
31768+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31769+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31770+ pmd_t *ret;
31771+
31772+ ret = vmalloc_sync_one(pgd, address);
31773+ if (!ret)
31774+ break;
31775+ pgd = get_cpu_pgd(cpu, kernel);
31776+#else
31777 list_for_each_entry(page, &pgd_list, lru) {
31778+ pgd_t *pgd;
31779 spinlock_t *pgt_lock;
31780 pmd_t *ret;
31781
31782@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31783 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31784
31785 spin_lock(pgt_lock);
31786- ret = vmalloc_sync_one(page_address(page), address);
31787+ pgd = page_address(page);
31788+#endif
31789+
31790+ ret = vmalloc_sync_one(pgd, address);
31791+
31792+#ifndef CONFIG_PAX_PER_CPU_PGD
31793 spin_unlock(pgt_lock);
31794+#endif
31795
31796 if (!ret)
31797 break;
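
Under CONFIG_PAX_PER_CPU_PGD there is no global pgd_list to walk: each CPU owns a user/kernel pair of page-global directories, and a vmalloc sync must visit both halves on every CPU. The hunk folds the kernel PGD into the shared tail of the loop body; unrolled, the walk is equivalent to this sketch (cpu and address as in vmalloc_sync_all(), get_cpu_pgd() as used above):

	unsigned long cpu;

	for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
		if (!vmalloc_sync_one(get_cpu_pgd(cpu, user), address))
			break;
		if (!vmalloc_sync_one(get_cpu_pgd(cpu, kernel), address))
			break;
	}
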
31798@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
31799 * an interrupt in the middle of a task switch..
31800 */
31801 pgd_paddr = read_cr3();
31802+
31803+#ifdef CONFIG_PAX_PER_CPU_PGD
31804+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31805+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31806+#endif
31807+
31808 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31809 if (!pmd_k)
31810 return -1;
31811@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
31812 * happen within a race in page table update. In the later
31813 * case just flush:
31814 */
31815- pgd = pgd_offset(current->active_mm, address);
31816+
31817 pgd_ref = pgd_offset_k(address);
31818 if (pgd_none(*pgd_ref))
31819 return -1;
31820
31821+#ifdef CONFIG_PAX_PER_CPU_PGD
31822+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31823+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31824+ if (pgd_none(*pgd)) {
31825+ set_pgd(pgd, *pgd_ref);
31826+ arch_flush_lazy_mmu_mode();
31827+ } else {
31828+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31829+ }
31830+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31831+#else
31832+ pgd = pgd_offset(current->active_mm, address);
31833+#endif
31834+
31835 if (pgd_none(*pgd)) {
31836 set_pgd(pgd, *pgd_ref);
31837 arch_flush_lazy_mmu_mode();
31838@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31839 static int is_errata100(struct pt_regs *regs, unsigned long address)
31840 {
31841 #ifdef CONFIG_X86_64
31842- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31843+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31844 return 1;
31845 #endif
31846 return 0;
31847@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
31848 }
31849
31850 static const char nx_warning[] = KERN_CRIT
31851-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
31852+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
31853 static const char smep_warning[] = KERN_CRIT
31854-"unable to execute userspace code (SMEP?) (uid: %d)\n";
31855+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
31856
31857 static void
31858 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31859@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31860 if (!oops_may_print())
31861 return;
31862
31863- if (error_code & PF_INSTR) {
31864+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
31865 unsigned int level;
31866 pgd_t *pgd;
31867 pte_t *pte;
31868@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31869 pte = lookup_address_in_pgd(pgd, address, &level);
31870
31871 if (pte && pte_present(*pte) && !pte_exec(*pte))
31872- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
31873+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31874 if (pte && pte_present(*pte) && pte_exec(*pte) &&
31875 (pgd_flags(*pgd) & _PAGE_USER) &&
31876 (read_cr4() & X86_CR4_SMEP))
31877- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
31878+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31879 }
31880
31881+#ifdef CONFIG_PAX_KERNEXEC
31882+ if (init_mm.start_code <= address && address < init_mm.end_code) {
31883+ if (current->signal->curr_ip)
31884+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
31885+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
31886+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31887+ else
31888+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
31889+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31890+ }
31891+#endif
31892+
31893 printk(KERN_ALERT "BUG: unable to handle kernel ");
31894 if (address < PAGE_SIZE)
31895 printk(KERN_CONT "NULL pointer dereference");
31896@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
31897 return;
31898 }
31899 #endif
31900+
31901+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31902+ if (pax_is_fetch_fault(regs, error_code, address)) {
31903+
31904+#ifdef CONFIG_PAX_EMUTRAMP
31905+ switch (pax_handle_fetch_fault(regs)) {
31906+ case 2:
31907+ return;
31908+ }
31909+#endif
31910+
31911+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
31912+ do_group_exit(SIGKILL);
31913+ }
31914+#endif
31915+
31916 /* Kernel addresses are always protection faults: */
31917 if (address >= TASK_SIZE)
31918 error_code |= PF_PROT;
31919@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
31920 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
31921 printk(KERN_ERR
31922 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
31923- tsk->comm, tsk->pid, address);
31924+ tsk->comm, task_pid_nr(tsk), address);
31925 code = BUS_MCEERR_AR;
31926 }
31927 #endif
31928@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
31929 return 1;
31930 }
31931
31932+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
31933+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
31934+{
31935+ pte_t *pte;
31936+ pmd_t *pmd;
31937+ spinlock_t *ptl;
31938+ unsigned char pte_mask;
31939+
31940+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
31941+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
31942+ return 0;
31943+
31944+ /* PaX: it's our fault, let's handle it if we can */
31945+
31946+ /* PaX: take a look at read faults before acquiring any locks */
31947+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
31948+ /* instruction fetch attempt from a protected page in user mode */
31949+ up_read(&mm->mmap_sem);
31950+
31951+#ifdef CONFIG_PAX_EMUTRAMP
31952+ switch (pax_handle_fetch_fault(regs)) {
31953+ case 2:
31954+ return 1;
31955+ }
31956+#endif
31957+
31958+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
31959+ do_group_exit(SIGKILL);
31960+ }
31961+
31962+ pmd = pax_get_pmd(mm, address);
31963+ if (unlikely(!pmd))
31964+ return 0;
31965+
31966+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
31967+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
31968+ pte_unmap_unlock(pte, ptl);
31969+ return 0;
31970+ }
31971+
31972+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
31973+ /* write attempt to a protected page in user mode */
31974+ pte_unmap_unlock(pte, ptl);
31975+ return 0;
31976+ }
31977+
31978+#ifdef CONFIG_SMP
31979+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
31980+#else
31981+ if (likely(address > get_limit(regs->cs)))
31982+#endif
31983+ {
31984+ set_pte(pte, pte_mkread(*pte));
31985+ __flush_tlb_one(address);
31986+ pte_unmap_unlock(pte, ptl);
31987+ up_read(&mm->mmap_sem);
31988+ return 1;
31989+ }
31990+
31991+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
31992+
31993+ /*
31994+ * PaX: fill DTLB with user rights and retry
31995+ */
31996+ __asm__ __volatile__ (
31997+ "orb %2,(%1)\n"
31998+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
31999+/*
32000+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
32001+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32002+ * page fault when examined during a TLB load attempt. this is true not only
32003+ * for PTEs holding a non-present entry but also present entries that will
32004+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32005+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32006+ * for our target pages since their PTEs are simply not in the TLBs at all.
32007+
32008+ * the best thing in omitting it is that we gain around 15-20% speed in the
32009+ * fast path of the page fault handler and can get rid of tracing since we
32010+ * can no longer flush unintended entries.
32011+ */
32012+ "invlpg (%0)\n"
32013+#endif
32014+ __copyuser_seg"testb $0,(%0)\n"
32015+ "xorb %3,(%1)\n"
32016+ :
32017+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32018+ : "memory", "cc");
32019+ pte_unmap_unlock(pte, ptl);
32020+ up_read(&mm->mmap_sem);
32021+ return 1;
32022+}
32023+#endif
32024+
32025 /*
32026 * Handle a spurious fault caused by a stale TLB entry.
32027 *
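
The orb/testb/xorb asm closing pax_handle_pageexec_fault() is the PAGEEXEC fast path for CPUs without NX, and the long comment explains the missing TLB flush: faulting PTEs are never cached, so there is nothing to evict first. Restated as pseudo-C (the gs-prefixed testb has no C spelling; the dummy read stands in for it):

	#define _PAGE_USER 0x004UL	/* bit 2 of an x86 PTE */

	/* prime the data TLB with user rights, then take them back: the
	 * retried data access is served by the stale-but-permissive DTLB
	 * entry while instruction fetches keep faulting, since the ITLB
	 * was never primed */
	static void dtlb_prime_sketch(unsigned long *ptep,
				      unsigned long pte_mask,
				      const volatile char *uaddr)
	{
		*ptep |= pte_mask;	/* orb: user + accessed (+ dirty on writes) */
		(void)*uaddr;		/* testb through %gs in the real asm */
		*ptep ^= _PAGE_USER;	/* xorb: revoke user rights in the PTE only */
	}
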
32028@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32029 static inline int
32030 access_error(unsigned long error_code, struct vm_area_struct *vma)
32031 {
32032+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32033+ return 1;
32034+
32035 if (error_code & PF_WRITE) {
32036 /* write, present and write, not present: */
32037 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32038@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32039 if (error_code & PF_USER)
32040 return false;
32041
32042- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32043+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32044 return false;
32045
32046 return true;
32047@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32048 tsk = current;
32049 mm = tsk->mm;
32050
32051+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32052+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32053+ if (!search_exception_tables(regs->ip)) {
32054+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32055+ bad_area_nosemaphore(regs, error_code, address);
32056+ return;
32057+ }
32058+ if (address < pax_user_shadow_base) {
32059+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32060+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32061+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32062+ } else
32063+ address -= pax_user_shadow_base;
32064+ }
32065+#endif
32066+
32067 /*
32068 * Detect and handle instructions that would cause a page fault for
32069 * both a tracked kernel page and a userspace page.
32070@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32071 * User-mode registers count as a user access even for any
32072 * potential system fault or CPU buglet:
32073 */
32074- if (user_mode_vm(regs)) {
32075+ if (user_mode(regs)) {
32076 local_irq_enable();
32077 error_code |= PF_USER;
32078 flags |= FAULT_FLAG_USER;
32079@@ -1187,6 +1411,11 @@ retry:
32080 might_sleep();
32081 }
32082
32083+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32084+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32085+ return;
32086+#endif
32087+
32088 vma = find_vma(mm, address);
32089 if (unlikely(!vma)) {
32090 bad_area(regs, error_code, address);
32091@@ -1198,18 +1427,24 @@ retry:
32092 bad_area(regs, error_code, address);
32093 return;
32094 }
32095- if (error_code & PF_USER) {
32096- /*
32097- * Accessing the stack below %sp is always a bug.
32098- * The large cushion allows instructions like enter
32099- * and pusha to work. ("enter $65535, $31" pushes
32100- * 32 pointers and then decrements %sp by 65535.)
32101- */
32102- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32103- bad_area(regs, error_code, address);
32104- return;
32105- }
32106+ /*
32107+ * Accessing the stack below %sp is always a bug.
32108+ * The large cushion allows instructions like enter
32109+ * and pusha to work. ("enter $65535, $31" pushes
32110+ * 32 pointers and then decrements %sp by 65535.)
32111+ */
32112+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32113+ bad_area(regs, error_code, address);
32114+ return;
32115 }
32116+
32117+#ifdef CONFIG_PAX_SEGMEXEC
32118+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32119+ bad_area(regs, error_code, address);
32120+ return;
32121+ }
32122+#endif
32123+
32124 if (unlikely(expand_stack(vma, address))) {
32125 bad_area(regs, error_code, address);
32126 return;
32127@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32128 }
32129 NOKPROBE_SYMBOL(trace_do_page_fault);
32130 #endif /* CONFIG_TRACING */
32131+
32132+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32133+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32134+{
32135+ struct mm_struct *mm = current->mm;
32136+ unsigned long ip = regs->ip;
32137+
32138+ if (v8086_mode(regs))
32139+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32140+
32141+#ifdef CONFIG_PAX_PAGEEXEC
32142+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32143+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32144+ return true;
32145+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32146+ return true;
32147+ return false;
32148+ }
32149+#endif
32150+
32151+#ifdef CONFIG_PAX_SEGMEXEC
32152+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32153+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32154+ return true;
32155+ return false;
32156+ }
32157+#endif
32158+
32159+ return false;
32160+}
32161+#endif
32162+
32163+#ifdef CONFIG_PAX_EMUTRAMP
32164+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32165+{
32166+ int err;
32167+
32168+ do { /* PaX: libffi trampoline emulation */
32169+ unsigned char mov, jmp;
32170+ unsigned int addr1, addr2;
32171+
32172+#ifdef CONFIG_X86_64
32173+ if ((regs->ip + 9) >> 32)
32174+ break;
32175+#endif
32176+
32177+ err = get_user(mov, (unsigned char __user *)regs->ip);
32178+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32179+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32180+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32181+
32182+ if (err)
32183+ break;
32184+
32185+ if (mov == 0xB8 && jmp == 0xE9) {
32186+ regs->ax = addr1;
32187+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32188+ return 2;
32189+ }
32190+ } while (0);
32191+
32192+ do { /* PaX: gcc trampoline emulation #1 */
32193+ unsigned char mov1, mov2;
32194+ unsigned short jmp;
32195+ unsigned int addr1, addr2;
32196+
32197+#ifdef CONFIG_X86_64
32198+ if ((regs->ip + 11) >> 32)
32199+ break;
32200+#endif
32201+
32202+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32203+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32204+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32205+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32206+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32207+
32208+ if (err)
32209+ break;
32210+
32211+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32212+ regs->cx = addr1;
32213+ regs->ax = addr2;
32214+ regs->ip = addr2;
32215+ return 2;
32216+ }
32217+ } while (0);
32218+
32219+ do { /* PaX: gcc trampoline emulation #2 */
32220+ unsigned char mov, jmp;
32221+ unsigned int addr1, addr2;
32222+
32223+#ifdef CONFIG_X86_64
32224+ if ((regs->ip + 9) >> 32)
32225+ break;
32226+#endif
32227+
32228+ err = get_user(mov, (unsigned char __user *)regs->ip);
32229+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32230+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32231+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32232+
32233+ if (err)
32234+ break;
32235+
32236+ if (mov == 0xB9 && jmp == 0xE9) {
32237+ regs->cx = addr1;
32238+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32239+ return 2;
32240+ }
32241+ } while (0);
32242+
32243+ return 1; /* PaX in action */
32244+}
32245+
32246+#ifdef CONFIG_X86_64
32247+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32248+{
32249+ int err;
32250+
32251+ do { /* PaX: libffi trampoline emulation */
32252+ unsigned short mov1, mov2, jmp1;
32253+ unsigned char stcclc, jmp2;
32254+ unsigned long addr1, addr2;
32255+
32256+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32257+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32258+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32259+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32260+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32261+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32262+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32263+
32264+ if (err)
32265+ break;
32266+
32267+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32268+ regs->r11 = addr1;
32269+ regs->r10 = addr2;
32270+ if (stcclc == 0xF8)
32271+ regs->flags &= ~X86_EFLAGS_CF;
32272+ else
32273+ regs->flags |= X86_EFLAGS_CF;
32274+ regs->ip = addr1;
32275+ return 2;
32276+ }
32277+ } while (0);
32278+
32279+ do { /* PaX: gcc trampoline emulation #1 */
32280+ unsigned short mov1, mov2, jmp1;
32281+ unsigned char jmp2;
32282+ unsigned int addr1;
32283+ unsigned long addr2;
32284+
32285+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32286+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32287+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32288+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32289+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32290+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32291+
32292+ if (err)
32293+ break;
32294+
32295+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32296+ regs->r11 = addr1;
32297+ regs->r10 = addr2;
32298+ regs->ip = addr1;
32299+ return 2;
32300+ }
32301+ } while (0);
32302+
32303+ do { /* PaX: gcc trampoline emulation #2 */
32304+ unsigned short mov1, mov2, jmp1;
32305+ unsigned char jmp2;
32306+ unsigned long addr1, addr2;
32307+
32308+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32309+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32310+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32311+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32312+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32313+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32314+
32315+ if (err)
32316+ break;
32317+
32318+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32319+ regs->r11 = addr1;
32320+ regs->r10 = addr2;
32321+ regs->ip = addr1;
32322+ return 2;
32323+ }
32324+ } while (0);
32325+
32326+ return 1; /* PaX in action */
32327+}
32328+#endif
32329+
32330+/*
32331+ * PaX: decide what to do with offenders (regs->ip = fault address)
32332+ *
32333+ * returns 1 when task should be killed
32334+ * 2 when gcc trampoline was detected
32335+ */
32336+static int pax_handle_fetch_fault(struct pt_regs *regs)
32337+{
32338+ if (v8086_mode(regs))
32339+ return 1;
32340+
32341+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32342+ return 1;
32343+
32344+#ifdef CONFIG_X86_32
32345+ return pax_handle_fetch_fault_32(regs);
32346+#else
32347+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32348+ return pax_handle_fetch_fault_32(regs);
32349+ else
32350+ return pax_handle_fetch_fault_64(regs);
32351+#endif
32352+}
32353+#endif
32354+
32355+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32356+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32357+{
32358+ long i;
32359+
32360+ printk(KERN_ERR "PAX: bytes at PC: ");
32361+ for (i = 0; i < 20; i++) {
32362+ unsigned char c;
32363+ if (get_user(c, (unsigned char __force_user *)pc+i))
32364+ printk(KERN_CONT "?? ");
32365+ else
32366+ printk(KERN_CONT "%02x ", c);
32367+ }
32368+ printk("\n");
32369+
32370+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32371+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32372+ unsigned long c;
32373+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32374+#ifdef CONFIG_X86_32
32375+ printk(KERN_CONT "???????? ");
32376+#else
32377+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32378+ printk(KERN_CONT "???????? ???????? ");
32379+ else
32380+ printk(KERN_CONT "???????????????? ");
32381+#endif
32382+ } else {
32383+#ifdef CONFIG_X86_64
32384+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32385+ printk(KERN_CONT "%08x ", (unsigned int)c);
32386+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32387+ } else
32388+#endif
32389+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32390+ }
32391+ }
32392+ printk("\n");
32393+}
32394+#endif
32395+
32396+/**
32397+ * probe_kernel_write(): safely attempt to write to a location
32398+ * @dst: address to write to
32399+ * @src: pointer to the data that shall be written
32400+ * @size: size of the data chunk
32401+ *
32402+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32403+ * happens, handle that and return -EFAULT.
32404+ */
32405+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32406+{
32407+ long ret;
32408+ mm_segment_t old_fs = get_fs();
32409+
32410+ set_fs(KERNEL_DS);
32411+ pagefault_disable();
32412+ pax_open_kernel();
32413+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32414+ pax_close_kernel();
32415+ pagefault_enable();
32416+ set_fs(old_fs);
32417+
32418+ return ret ? -EFAULT : 0;
32419+}
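
The fault.c hunks above recognize a handful of fixed trampoline byte sequences (libffi and gcc nested-function stubs) and emulate them instead of killing the task. Below is a minimal userspace sketch of the 32-bit libffi match: only the 0xB8/0xE9 opcodes and the register/ip updates are taken from the hunk, while struct fake_regs, the byte buffer and main() are invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_regs { uint32_t ax, ip; };

/* Returns 2 on a successful emulation, 1 when the bytes do not match
 * ("PaX in action"), mirroring the return convention of the hunk. */
static int emulate_libffi_tramp32(struct fake_regs *regs, const uint8_t *mem)
{
	uint8_t mov = mem[regs->ip];
	uint8_t jmp = mem[regs->ip + 5];
	uint32_t addr1, addr2;

	memcpy(&addr1, mem + regs->ip + 1, 4);	/* imm32 of the mov */
	memcpy(&addr2, mem + regs->ip + 6, 4);	/* rel32 of the jmp */

	if (mov == 0xB8 && jmp == 0xE9) {	/* mov $imm32,%eax ; jmp rel32 */
		regs->ax = addr1;
		regs->ip += addr2 + 10;		/* rel32 counts from insn end */
		return 2;
	}
	return 1;
}

int main(void)
{
	/* mov $0x11223344,%eax ; jmp .+0x10 */
	uint8_t buf[64] = { 0xB8, 0x44, 0x33, 0x22, 0x11,
			    0xE9, 0x10, 0x00, 0x00, 0x00 };
	struct fake_regs r = { 0, 0 };

	printf("match=%d ax=%#x ip=%#x\n",
	       emulate_libffi_tramp32(&r, buf), r.ax, r.ip);
	return 0;
}
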
32420diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32421index 224b142..c2c9423 100644
32422--- a/arch/x86/mm/gup.c
32423+++ b/arch/x86/mm/gup.c
32424@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32425 addr = start;
32426 len = (unsigned long) nr_pages << PAGE_SHIFT;
32427 end = start + len;
32428- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32429+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32430 (void __user *)start, len)))
32431 return 0;
32432
32433@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32434 goto slow_irqon;
32435 #endif
32436
32437+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32438+ (void __user *)start, len)))
32439+ return 0;
32440+
32441 /*
32442 * XXX: batch / limit 'nr', to avoid large irq off latency
32443 * needs some instrumenting to determine the common sizes used by
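
The gup.c change adds an up-front, non-prefaulting range check before the fast path touches any page tables. As a hedged userspace model of such a check (access_ok_noprefault() itself is defined elsewhere in this patch; TASK_LIMIT and range_ok() below are assumptions for the sketch):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define TASK_LIMIT 0x00007fffffffffffUL	/* assumed user-space ceiling */

static int range_ok(uint64_t start, unsigned long nr_pages)
{
	uint64_t len = (uint64_t)nr_pages << PAGE_SHIFT;

	/* reject wrap-around as well as out-of-range ends */
	return start + len >= start && start + len <= TASK_LIMIT;
}

int main(void)
{
	printf("%d\n", range_ok(0x400000, 16));		/* 1: plausible */
	printf("%d\n", range_ok(0xffffffffffffffffUL, 1)); /* 0: wraps */
	return 0;
}
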
32444diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32445index 4500142..53a363c 100644
32446--- a/arch/x86/mm/highmem_32.c
32447+++ b/arch/x86/mm/highmem_32.c
32448@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32449 idx = type + KM_TYPE_NR*smp_processor_id();
32450 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32451 BUG_ON(!pte_none(*(kmap_pte-idx)));
32452+
32453+ pax_open_kernel();
32454 set_pte(kmap_pte-idx, mk_pte(page, prot));
32455+ pax_close_kernel();
32456+
32457 arch_flush_lazy_mmu_mode();
32458
32459 return (void *)vaddr;
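
The kmap_atomic_prot() hunk brackets set_pte() with pax_open_kernel()/pax_close_kernel(), so the page-table write happens inside a short writable window. As a loose userspace analogy only, with mprotect() standing in for the CR0.WP/KERNEXEC toggle:

#include <stdio.h>
#include <sys/mman.h>

static long *table;

static void open_window(void)  { mprotect(table, 4096, PROT_READ | PROT_WRITE); }
static void close_window(void) { mprotect(table, 4096, PROT_READ); }

int main(void)
{
	table = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (table == MAP_FAILED)
		return 1;
	mprotect(table, 4096, PROT_READ);	/* normally sealed read-only */

	open_window();
	table[0] = 42;				/* the set_pte() analogue */
	close_window();

	printf("%ld\n", table[0]);
	return 0;
}
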
32460diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32461index 006cc91..bf05a83 100644
32462--- a/arch/x86/mm/hugetlbpage.c
32463+++ b/arch/x86/mm/hugetlbpage.c
32464@@ -86,23 +86,24 @@ int pud_huge(pud_t pud)
32465 #ifdef CONFIG_HUGETLB_PAGE
32466 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32467 unsigned long addr, unsigned long len,
32468- unsigned long pgoff, unsigned long flags)
32469+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32470 {
32471 struct hstate *h = hstate_file(file);
32472 struct vm_unmapped_area_info info;
32473-
32474+
32475 info.flags = 0;
32476 info.length = len;
32477 info.low_limit = current->mm->mmap_legacy_base;
32478 info.high_limit = TASK_SIZE;
32479 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32480 info.align_offset = 0;
32481+ info.threadstack_offset = offset;
32482 return vm_unmapped_area(&info);
32483 }
32484
32485 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32486 unsigned long addr0, unsigned long len,
32487- unsigned long pgoff, unsigned long flags)
32488+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32489 {
32490 struct hstate *h = hstate_file(file);
32491 struct vm_unmapped_area_info info;
32492@@ -114,6 +115,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32493 info.high_limit = current->mm->mmap_base;
32494 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32495 info.align_offset = 0;
32496+ info.threadstack_offset = offset;
32497 addr = vm_unmapped_area(&info);
32498
32499 /*
32500@@ -126,6 +128,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32501 VM_BUG_ON(addr != -ENOMEM);
32502 info.flags = 0;
32503 info.low_limit = TASK_UNMAPPED_BASE;
32504+
32505+#ifdef CONFIG_PAX_RANDMMAP
32506+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32507+ info.low_limit += current->mm->delta_mmap;
32508+#endif
32509+
32510 info.high_limit = TASK_SIZE;
32511 addr = vm_unmapped_area(&info);
32512 }
32513@@ -140,10 +148,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32514 struct hstate *h = hstate_file(file);
32515 struct mm_struct *mm = current->mm;
32516 struct vm_area_struct *vma;
32517+ unsigned long pax_task_size = TASK_SIZE;
32518+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32519
32520 if (len & ~huge_page_mask(h))
32521 return -EINVAL;
32522- if (len > TASK_SIZE)
32523+
32524+#ifdef CONFIG_PAX_SEGMEXEC
32525+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32526+ pax_task_size = SEGMEXEC_TASK_SIZE;
32527+#endif
32528+
32529+ pax_task_size -= PAGE_SIZE;
32530+
32531+ if (len > pax_task_size)
32532 return -ENOMEM;
32533
32534 if (flags & MAP_FIXED) {
32535@@ -152,19 +170,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32536 return addr;
32537 }
32538
32539+#ifdef CONFIG_PAX_RANDMMAP
32540+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32541+#endif
32542+
32543 if (addr) {
32544 addr = ALIGN(addr, huge_page_size(h));
32545 vma = find_vma(mm, addr);
32546- if (TASK_SIZE - len >= addr &&
32547- (!vma || addr + len <= vma->vm_start))
32548+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32549 return addr;
32550 }
32551 if (mm->get_unmapped_area == arch_get_unmapped_area)
32552 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32553- pgoff, flags);
32554+ pgoff, flags, offset);
32555 else
32556 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32557- pgoff, flags);
32558+ pgoff, flags, offset);
32559 }
32560 #endif /* CONFIG_HUGETLB_PAGE */
32561
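
hugetlb_get_unmapped_area() now threads a randomized thread-stack offset through the helpers and replaces the plain "addr + len <= vma->vm_start" test with check_heap_stack_gap(). A sketch of what such a gap test computes, under an assumed struct layout and cushion value (the real helper lives elsewhere in this patch):

#include <stdint.h>
#include <stdio.h>

struct vma { uint64_t start, end; };

#define THREADSTACK_GAP 0x10000UL	/* assumed reserved cushion */

static int gap_ok(const struct vma *next, uint64_t addr, uint64_t len,
		  uint64_t offset)
{
	if (!next)			/* nothing mapped above the request */
		return 1;
	/* the request plus the randomized offset must stay clear of it */
	return addr + len + THREADSTACK_GAP + offset <= next->start;
}

int main(void)
{
	struct vma v = { 0x800000, 0x900000 };

	printf("%d\n", gap_ok(&v, 0x400000, 0x1000, 0));	/* 1 */
	printf("%d\n", gap_ok(&v, 0x7fffff, 0x1000, 0));	/* 0 */
	return 0;
}
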
32562diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32563index 079c3b6..7069023 100644
32564--- a/arch/x86/mm/init.c
32565+++ b/arch/x86/mm/init.c
32566@@ -4,6 +4,7 @@
32567 #include <linux/swap.h>
32568 #include <linux/memblock.h>
32569 #include <linux/bootmem.h> /* for max_low_pfn */
32570+#include <linux/tboot.h>
32571
32572 #include <asm/cacheflush.h>
32573 #include <asm/e820.h>
32574@@ -17,6 +18,8 @@
32575 #include <asm/proto.h>
32576 #include <asm/dma.h> /* for MAX_DMA_PFN */
32577 #include <asm/microcode.h>
32578+#include <asm/desc.h>
32579+#include <asm/bios_ebda.h>
32580
32581 /*
32582 * We need to define the tracepoints somewhere, and tlb.c
32583@@ -596,7 +599,18 @@ void __init init_mem_mapping(void)
32584 early_ioremap_page_table_range_init();
32585 #endif
32586
32587+#ifdef CONFIG_PAX_PER_CPU_PGD
32588+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32589+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32590+ KERNEL_PGD_PTRS);
32591+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32592+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32593+ KERNEL_PGD_PTRS);
32594+ load_cr3(get_cpu_pgd(0, kernel));
32595+#else
32596 load_cr3(swapper_pg_dir);
32597+#endif
32598+
32599 __flush_tlb_all();
32600
32601 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32602@@ -612,10 +626,40 @@ void __init init_mem_mapping(void)
32603 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32604 * mmio resources as well as potential bios/acpi data regions.
32605 */
32606+
32607+#ifdef CONFIG_GRKERNSEC_KMEM
32608+static unsigned int ebda_start __read_only;
32609+static unsigned int ebda_end __read_only;
32610+#endif
32611+
32612 int devmem_is_allowed(unsigned long pagenr)
32613 {
32614- if (pagenr < 256)
32615+#ifdef CONFIG_GRKERNSEC_KMEM
32616+ /* allow BDA */
32617+ if (!pagenr)
32618 return 1;
32619+ /* allow EBDA */
32620+ if (pagenr >= ebda_start && pagenr < ebda_end)
32621+ return 1;
32622+ /* if tboot is in use, allow access to its hardcoded serial log range */
32623+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32624+ return 1;
32625+#else
32626+ if (!pagenr)
32627+ return 1;
32628+#ifdef CONFIG_VM86
32629+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32630+ return 1;
32631+#endif
32632+#endif
32633+
32634+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32635+ return 1;
32636+#ifdef CONFIG_GRKERNSEC_KMEM
32637+ /* throw out everything else below 1MB */
32638+ if (pagenr <= 256)
32639+ return 0;
32640+#endif
32641 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32642 return 0;
32643 if (!page_is_ram(pagenr))
32644@@ -661,8 +705,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32645 #endif
32646 }
32647
32648+#ifdef CONFIG_GRKERNSEC_KMEM
32649+static inline void gr_init_ebda(void)
32650+{
32651+ unsigned int ebda_addr;
32652+ unsigned int ebda_size = 0;
32653+
32654+ ebda_addr = get_bios_ebda();
32655+ if (ebda_addr) {
32656+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32657+ ebda_size <<= 10;
32658+ }
32659+ if (ebda_addr && ebda_size) {
32660+ ebda_start = ebda_addr >> PAGE_SHIFT;
32661+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32662+ } else {
32663+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32664+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32665+ }
32666+}
32667+#else
32668+static inline void gr_init_ebda(void) { }
32669+#endif
32670+
32671 void free_initmem(void)
32672 {
32673+#ifdef CONFIG_PAX_KERNEXEC
32674+#ifdef CONFIG_X86_32
32675+ /* PaX: limit KERNEL_CS to actual size */
32676+ unsigned long addr, limit;
32677+ struct desc_struct d;
32678+ int cpu;
32679+#else
32680+ pgd_t *pgd;
32681+ pud_t *pud;
32682+ pmd_t *pmd;
32683+ unsigned long addr, end;
32684+#endif
32685+#endif
32686+
32687+ gr_init_ebda();
32688+
32689+#ifdef CONFIG_PAX_KERNEXEC
32690+#ifdef CONFIG_X86_32
32691+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32692+ limit = (limit - 1UL) >> PAGE_SHIFT;
32693+
32694+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32695+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32696+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32697+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32698+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32699+ }
32700+
32701+ /* PaX: make KERNEL_CS read-only */
32702+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32703+ if (!paravirt_enabled())
32704+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32705+/*
32706+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32707+ pgd = pgd_offset_k(addr);
32708+ pud = pud_offset(pgd, addr);
32709+ pmd = pmd_offset(pud, addr);
32710+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32711+ }
32712+*/
32713+#ifdef CONFIG_X86_PAE
32714+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32715+/*
32716+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32717+ pgd = pgd_offset_k(addr);
32718+ pud = pud_offset(pgd, addr);
32719+ pmd = pmd_offset(pud, addr);
32720+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32721+ }
32722+*/
32723+#endif
32724+
32725+#ifdef CONFIG_MODULES
32726+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32727+#endif
32728+
32729+#else
32730+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32731+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32732+ pgd = pgd_offset_k(addr);
32733+ pud = pud_offset(pgd, addr);
32734+ pmd = pmd_offset(pud, addr);
32735+ if (!pmd_present(*pmd))
32736+ continue;
32737+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32738+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32739+ else
32740+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32741+ }
32742+
32743+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32744+ end = addr + KERNEL_IMAGE_SIZE;
32745+ for (; addr < end; addr += PMD_SIZE) {
32746+ pgd = pgd_offset_k(addr);
32747+ pud = pud_offset(pgd, addr);
32748+ pmd = pmd_offset(pud, addr);
32749+ if (!pmd_present(*pmd))
32750+ continue;
32751+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32752+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32753+ }
32754+#endif
32755+
32756+ flush_tlb_all();
32757+#endif
32758+
32759 free_init_pages("unused kernel",
32760 (unsigned long)(&__init_begin),
32761 (unsigned long)(&__init_end));
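
The devmem_is_allowed() rework above narrows /dev/mem below 1MB to the BDA, the detected EBDA window and the ISA hole. The same policy as a self-contained userspace function, with the EBDA bounds hard-coded to the patch's fallback values instead of being derived at boot as gr_init_ebda() does:

#include <stdio.h>

#define PAGE_SHIFT 12
#define ISA_START (0xa0000 >> PAGE_SHIFT)
#define ISA_END   (0x100000 >> PAGE_SHIFT)

static unsigned ebda_start = 0x9f000 >> PAGE_SHIFT;	/* fallback values */
static unsigned ebda_end   = 0xa0000 >> PAGE_SHIFT;

static int devmem_allowed(unsigned long pagenr)
{
	if (!pagenr)					/* BDA */
		return 1;
	if (pagenr >= ebda_start && pagenr < ebda_end)	/* EBDA */
		return 1;
	if (pagenr >= ISA_START && pagenr < ISA_END)	/* ISA hole */
		return 1;
	if (pagenr <= 256)		/* everything else below 1MB */
		return 0;
	return 1;	/* higher pages fall through to other checks */
}

int main(void)
{
	printf("%d %d %d\n", devmem_allowed(0),
	       devmem_allowed(0x9f), devmem_allowed(0xb8));
	return 0;
}
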
32762diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32763index c8140e1..59257fc 100644
32764--- a/arch/x86/mm/init_32.c
32765+++ b/arch/x86/mm/init_32.c
32766@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32767 bool __read_mostly __vmalloc_start_set = false;
32768
32769 /*
32770- * Creates a middle page table and puts a pointer to it in the
32771- * given global directory entry. This only returns the gd entry
32772- * in non-PAE compilation mode, since the middle layer is folded.
32773- */
32774-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32775-{
32776- pud_t *pud;
32777- pmd_t *pmd_table;
32778-
32779-#ifdef CONFIG_X86_PAE
32780- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32781- pmd_table = (pmd_t *)alloc_low_page();
32782- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32783- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32784- pud = pud_offset(pgd, 0);
32785- BUG_ON(pmd_table != pmd_offset(pud, 0));
32786-
32787- return pmd_table;
32788- }
32789-#endif
32790- pud = pud_offset(pgd, 0);
32791- pmd_table = pmd_offset(pud, 0);
32792-
32793- return pmd_table;
32794-}
32795-
32796-/*
32797 * Create a page table and place a pointer to it in a middle page
32798 * directory entry:
32799 */
32800@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32801 pte_t *page_table = (pte_t *)alloc_low_page();
32802
32803 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32804+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32805+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32806+#else
32807 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32808+#endif
32809 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32810 }
32811
32812 return pte_offset_kernel(pmd, 0);
32813 }
32814
32815+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32816+{
32817+ pud_t *pud;
32818+ pmd_t *pmd_table;
32819+
32820+ pud = pud_offset(pgd, 0);
32821+ pmd_table = pmd_offset(pud, 0);
32822+
32823+ return pmd_table;
32824+}
32825+
32826 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32827 {
32828 int pgd_idx = pgd_index(vaddr);
32829@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32830 int pgd_idx, pmd_idx;
32831 unsigned long vaddr;
32832 pgd_t *pgd;
32833+ pud_t *pud;
32834 pmd_t *pmd;
32835 pte_t *pte = NULL;
32836 unsigned long count = page_table_range_init_count(start, end);
32837@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32838 pgd = pgd_base + pgd_idx;
32839
32840 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32841- pmd = one_md_table_init(pgd);
32842- pmd = pmd + pmd_index(vaddr);
32843+ pud = pud_offset(pgd, vaddr);
32844+ pmd = pmd_offset(pud, vaddr);
32845+
32846+#ifdef CONFIG_X86_PAE
32847+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32848+#endif
32849+
32850 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
32851 pmd++, pmd_idx++) {
32852 pte = page_table_kmap_check(one_page_table_init(pmd),
32853@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32854 }
32855 }
32856
32857-static inline int is_kernel_text(unsigned long addr)
32858+static inline int is_kernel_text(unsigned long start, unsigned long end)
32859 {
32860- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
32861- return 1;
32862- return 0;
32863+ if ((start >= ktla_ktva((unsigned long)_etext) ||
32864+ end <= ktla_ktva((unsigned long)_stext)) &&
32865+ (start >= ktla_ktva((unsigned long)_einittext) ||
32866+ end <= ktla_ktva((unsigned long)_sinittext)) &&
32867+
32868+#ifdef CONFIG_ACPI_SLEEP
32869+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
32870+#endif
32871+
32872+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
32873+ return 0;
32874+ return 1;
32875 }
32876
32877 /*
32878@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
32879 unsigned long last_map_addr = end;
32880 unsigned long start_pfn, end_pfn;
32881 pgd_t *pgd_base = swapper_pg_dir;
32882- int pgd_idx, pmd_idx, pte_ofs;
32883+ unsigned int pgd_idx, pmd_idx, pte_ofs;
32884 unsigned long pfn;
32885 pgd_t *pgd;
32886+ pud_t *pud;
32887 pmd_t *pmd;
32888 pte_t *pte;
32889 unsigned pages_2m, pages_4k;
32890@@ -291,8 +295,13 @@ repeat:
32891 pfn = start_pfn;
32892 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
32893 pgd = pgd_base + pgd_idx;
32894- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
32895- pmd = one_md_table_init(pgd);
32896+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
32897+ pud = pud_offset(pgd, 0);
32898+ pmd = pmd_offset(pud, 0);
32899+
32900+#ifdef CONFIG_X86_PAE
32901+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32902+#endif
32903
32904 if (pfn >= end_pfn)
32905 continue;
32906@@ -304,14 +313,13 @@ repeat:
32907 #endif
32908 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
32909 pmd++, pmd_idx++) {
32910- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
32911+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
32912
32913 /*
32914 * Map with big pages if possible, otherwise
32915 * create normal page tables:
32916 */
32917 if (use_pse) {
32918- unsigned int addr2;
32919 pgprot_t prot = PAGE_KERNEL_LARGE;
32920 /*
32921 * first pass will use the same initial
32922@@ -322,11 +330,7 @@ repeat:
32923 _PAGE_PSE);
32924
32925 pfn &= PMD_MASK >> PAGE_SHIFT;
32926- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
32927- PAGE_OFFSET + PAGE_SIZE-1;
32928-
32929- if (is_kernel_text(addr) ||
32930- is_kernel_text(addr2))
32931+ if (is_kernel_text(address, address + PMD_SIZE))
32932 prot = PAGE_KERNEL_LARGE_EXEC;
32933
32934 pages_2m++;
32935@@ -343,7 +347,7 @@ repeat:
32936 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
32937 pte += pte_ofs;
32938 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
32939- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
32940+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
32941 pgprot_t prot = PAGE_KERNEL;
32942 /*
32943 * first pass will use the same initial
32944@@ -351,7 +355,7 @@ repeat:
32945 */
32946 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
32947
32948- if (is_kernel_text(addr))
32949+ if (is_kernel_text(address, address + PAGE_SIZE))
32950 prot = PAGE_KERNEL_EXEC;
32951
32952 pages_4k++;
32953@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
32954
32955 pud = pud_offset(pgd, va);
32956 pmd = pmd_offset(pud, va);
32957- if (!pmd_present(*pmd))
32958+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
32959 break;
32960
32961 /* should not be large page here */
32962@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
32963
32964 static void __init pagetable_init(void)
32965 {
32966- pgd_t *pgd_base = swapper_pg_dir;
32967-
32968- permanent_kmaps_init(pgd_base);
32969+ permanent_kmaps_init(swapper_pg_dir);
32970 }
32971
32972-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
32973+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
32974 EXPORT_SYMBOL_GPL(__supported_pte_mask);
32975
32976 /* user-defined highmem size */
32977@@ -787,10 +789,10 @@ void __init mem_init(void)
32978 ((unsigned long)&__init_end -
32979 (unsigned long)&__init_begin) >> 10,
32980
32981- (unsigned long)&_etext, (unsigned long)&_edata,
32982- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
32983+ (unsigned long)&_sdata, (unsigned long)&_edata,
32984+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
32985
32986- (unsigned long)&_text, (unsigned long)&_etext,
32987+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
32988 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
32989
32990 /*
32991@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
32992 if (!kernel_set_to_readonly)
32993 return;
32994
32995+ start = ktla_ktva(start);
32996 pr_debug("Set kernel text: %lx - %lx for read write\n",
32997 start, start+size);
32998
32999@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33000 if (!kernel_set_to_readonly)
33001 return;
33002
33003+ start = ktla_ktva(start);
33004 pr_debug("Set kernel text: %lx - %lx for read only\n",
33005 start, start+size);
33006
33007@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33008 unsigned long start = PFN_ALIGN(_text);
33009 unsigned long size = PFN_ALIGN(_etext) - start;
33010
33011+ start = ktla_ktva(start);
33012 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33013 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33014 size >> 10);
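
The rewritten is_kernel_text() above is an interval-overlap test: a candidate [start,end) range counts as text unless it is disjoint from every protected window (text, inittext, the ACPI wakeup page, the BIOS range). A minimal model of that test, with made-up window addresses:

#include <stdio.h>

struct range { unsigned long lo, hi; };

static int overlaps(struct range a, struct range b)
{
	/* disjoint iff a ends at or before b, or starts at or after it */
	return !(a.hi <= b.lo || a.lo >= b.hi);
}

int main(void)
{
	struct range text = { 0xc1000000, 0xc1400000 }; /* assumed _stext.._etext */
	struct range req1 = { 0xc1100000, 0xc1101000 };
	struct range req2 = { 0xc2000000, 0xc2001000 };

	printf("%d %d\n", overlaps(req1, text), overlaps(req2, text));
	return 0;
}
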
33015diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33016index 30eb05a..ae671ac 100644
33017--- a/arch/x86/mm/init_64.c
33018+++ b/arch/x86/mm/init_64.c
33019@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33020 * around without checking the pgd every time.
33021 */
33022
33023-pteval_t __supported_pte_mask __read_mostly = ~0;
33024+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33025 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33026
33027 int force_personality32;
33028@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33029
33030 for (address = start; address <= end; address += PGDIR_SIZE) {
33031 const pgd_t *pgd_ref = pgd_offset_k(address);
33032+
33033+#ifdef CONFIG_PAX_PER_CPU_PGD
33034+ unsigned long cpu;
33035+#else
33036 struct page *page;
33037+#endif
33038
33039 /*
33040 * When it is called after memory hot remove, pgd_none()
33041@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33042 continue;
33043
33044 spin_lock(&pgd_lock);
33045+
33046+#ifdef CONFIG_PAX_PER_CPU_PGD
33047+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33048+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33049+
33050+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33051+ BUG_ON(pgd_page_vaddr(*pgd)
33052+ != pgd_page_vaddr(*pgd_ref));
33053+
33054+ if (removed) {
33055+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33056+ pgd_clear(pgd);
33057+ } else {
33058+ if (pgd_none(*pgd))
33059+ set_pgd(pgd, *pgd_ref);
33060+ }
33061+
33062+ pgd = pgd_offset_cpu(cpu, kernel, address);
33063+#else
33064 list_for_each_entry(page, &pgd_list, lru) {
33065 pgd_t *pgd;
33066 spinlock_t *pgt_lock;
33067@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33068 /* the pgt_lock only for Xen */
33069 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33070 spin_lock(pgt_lock);
33071+#endif
33072
33073 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33074 BUG_ON(pgd_page_vaddr(*pgd)
33075@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33076 set_pgd(pgd, *pgd_ref);
33077 }
33078
33079+#ifndef CONFIG_PAX_PER_CPU_PGD
33080 spin_unlock(pgt_lock);
33081+#endif
33082+
33083 }
33084 spin_unlock(&pgd_lock);
33085 }
33086@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33087 {
33088 if (pgd_none(*pgd)) {
33089 pud_t *pud = (pud_t *)spp_getpage();
33090- pgd_populate(&init_mm, pgd, pud);
33091+ pgd_populate_kernel(&init_mm, pgd, pud);
33092 if (pud != pud_offset(pgd, 0))
33093 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33094 pud, pud_offset(pgd, 0));
33095@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33096 {
33097 if (pud_none(*pud)) {
33098 pmd_t *pmd = (pmd_t *) spp_getpage();
33099- pud_populate(&init_mm, pud, pmd);
33100+ pud_populate_kernel(&init_mm, pud, pmd);
33101 if (pmd != pmd_offset(pud, 0))
33102 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33103 pmd, pmd_offset(pud, 0));
33104@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33105 pmd = fill_pmd(pud, vaddr);
33106 pte = fill_pte(pmd, vaddr);
33107
33108+ pax_open_kernel();
33109 set_pte(pte, new_pte);
33110+ pax_close_kernel();
33111
33112 /*
33113 * It's enough to flush this one mapping.
33114@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33115 pgd = pgd_offset_k((unsigned long)__va(phys));
33116 if (pgd_none(*pgd)) {
33117 pud = (pud_t *) spp_getpage();
33118- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33119- _PAGE_USER));
33120+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33121 }
33122 pud = pud_offset(pgd, (unsigned long)__va(phys));
33123 if (pud_none(*pud)) {
33124 pmd = (pmd_t *) spp_getpage();
33125- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33126- _PAGE_USER));
33127+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33128 }
33129 pmd = pmd_offset(pud, phys);
33130 BUG_ON(!pmd_none(*pmd));
33131@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33132 prot);
33133
33134 spin_lock(&init_mm.page_table_lock);
33135- pud_populate(&init_mm, pud, pmd);
33136+ pud_populate_kernel(&init_mm, pud, pmd);
33137 spin_unlock(&init_mm.page_table_lock);
33138 }
33139 __flush_tlb_all();
33140@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33141 page_size_mask);
33142
33143 spin_lock(&init_mm.page_table_lock);
33144- pgd_populate(&init_mm, pgd, pud);
33145+ pgd_populate_kernel(&init_mm, pgd, pud);
33146 spin_unlock(&init_mm.page_table_lock);
33147 pgd_changed = true;
33148 }
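
With CONFIG_PAX_PER_CPU_PGD, sync_global_pgds() above iterates CPUs rather than pgd_list, copying a reference entry into each private top-level table. A toy model of that propagation; the array sizes and entry values are demo assumptions, not kernel layout:

#include <stdio.h>

#define NR_CPUS 4
#define PTRS    8

static unsigned long ref[PTRS];
static unsigned long percpu[NR_CPUS][PTRS];

static void sync_entry(int idx)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (!percpu[cpu][idx])		/* pgd_none() analogue */
			percpu[cpu][idx] = ref[idx];
}

int main(void)
{
	ref[3] = 0xdeadb000;
	sync_entry(3);
	printf("%lx %lx\n", percpu[0][3], percpu[3][3]);
	return 0;
}
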
33149diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33150index 9ca35fc..4b2b7b7 100644
33151--- a/arch/x86/mm/iomap_32.c
33152+++ b/arch/x86/mm/iomap_32.c
33153@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33154 type = kmap_atomic_idx_push();
33155 idx = type + KM_TYPE_NR * smp_processor_id();
33156 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33157+
33158+ pax_open_kernel();
33159 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33160+ pax_close_kernel();
33161+
33162 arch_flush_lazy_mmu_mode();
33163
33164 return (void *)vaddr;
33165diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33166index fdf617c..b9e85bc 100644
33167--- a/arch/x86/mm/ioremap.c
33168+++ b/arch/x86/mm/ioremap.c
33169@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33170 unsigned long i;
33171
33172 for (i = 0; i < nr_pages; ++i)
33173- if (pfn_valid(start_pfn + i) &&
33174- !PageReserved(pfn_to_page(start_pfn + i)))
33175+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33176+ !PageReserved(pfn_to_page(start_pfn + i))))
33177 return 1;
33178
33179 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33180@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33181 *
33182 * Caller must ensure there is only one unmapping for the same pointer.
33183 */
33184-void iounmap(volatile void __iomem *addr)
33185+void iounmap(const volatile void __iomem *addr)
33186 {
33187 struct vm_struct *p, *o;
33188
33189@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33190 */
33191 void *xlate_dev_mem_ptr(phys_addr_t phys)
33192 {
33193- void *addr;
33194- unsigned long start = phys & PAGE_MASK;
33195-
33196 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33197- if (page_is_ram(start >> PAGE_SHIFT))
33198+ if (page_is_ram(phys >> PAGE_SHIFT))
33199+#ifdef CONFIG_HIGHMEM
33200+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33201+#endif
33202 return __va(phys);
33203
33204- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33205- if (addr)
33206- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33207-
33208- return addr;
33209+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33210 }
33211
33212 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33213 {
33214 if (page_is_ram(phys >> PAGE_SHIFT))
33215+#ifdef CONFIG_HIGHMEM
33216+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33217+#endif
33218 return;
33219
33220 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33221 return;
33222 }
33223
33224-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33225+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33226
33227 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33228 {
33229@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33230 early_ioremap_setup();
33231
33232 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33233- memset(bm_pte, 0, sizeof(bm_pte));
33234- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33235+ pmd_populate_user(&init_mm, pmd, bm_pte);
33236
33237 /*
33238 * The boot-ioremap range spans multiple pmds, for which
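
The reworked xlate_dev_mem_ptr() above returns the direct-map address only for lowmem RAM and falls back to ioremap_cache() otherwise. The decision logic, extracted into a sketch with an assumed lowmem boundary (the kernel uses max_low_pfn and page_is_ram()):

#include <stdio.h>

#define PAGE_SHIFT  12
#define MAX_LOW_PFN 0x38000UL	/* assumed ~896MB lowmem boundary */

enum how { USE_DIRECT_MAP, USE_IOREMAP };

static enum how xlate(unsigned long phys, int is_ram)
{
	if (is_ram && (phys >> PAGE_SHIFT) < MAX_LOW_PFN)
		return USE_DIRECT_MAP;	/* __va(phys) */
	return USE_IOREMAP;		/* ioremap_cache(phys, PAGE_SIZE) */
}

int main(void)
{
	printf("%d %d\n", xlate(0x100000, 1), xlate(0xfee00000UL, 0));
	return 0;
}
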
33239diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33240index b4f2e7e..96c9c3e 100644
33241--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33242+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33243@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33244 * memory (e.g. tracked pages)? For now, we need this to avoid
33245 * invoking kmemcheck for PnP BIOS calls.
33246 */
33247- if (regs->flags & X86_VM_MASK)
33248+ if (v8086_mode(regs))
33249 return false;
33250- if (regs->cs != __KERNEL_CS)
33251+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33252 return false;
33253
33254 pte = kmemcheck_pte_lookup(address);
33255diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33256index df4552b..12c129c 100644
33257--- a/arch/x86/mm/mmap.c
33258+++ b/arch/x86/mm/mmap.c
33259@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33260 * Leave an at least ~128 MB hole with possible stack randomization.
33261 */
33262 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33263-#define MAX_GAP (TASK_SIZE/6*5)
33264+#define MAX_GAP (pax_task_size/6*5)
33265
33266 static int mmap_is_legacy(void)
33267 {
33268@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33269 return rnd << PAGE_SHIFT;
33270 }
33271
33272-static unsigned long mmap_base(void)
33273+static unsigned long mmap_base(struct mm_struct *mm)
33274 {
33275 unsigned long gap = rlimit(RLIMIT_STACK);
33276+ unsigned long pax_task_size = TASK_SIZE;
33277+
33278+#ifdef CONFIG_PAX_SEGMEXEC
33279+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33280+ pax_task_size = SEGMEXEC_TASK_SIZE;
33281+#endif
33282
33283 if (gap < MIN_GAP)
33284 gap = MIN_GAP;
33285 else if (gap > MAX_GAP)
33286 gap = MAX_GAP;
33287
33288- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33289+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33290 }
33291
33292 /*
33293 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33294 * does, but not when emulating X86_32
33295 */
33296-static unsigned long mmap_legacy_base(void)
33297+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33298 {
33299- if (mmap_is_ia32())
33300+ if (mmap_is_ia32()) {
33301+
33302+#ifdef CONFIG_PAX_SEGMEXEC
33303+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33304+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33305+ else
33306+#endif
33307+
33308 return TASK_UNMAPPED_BASE;
33309- else
33310+ } else
33311 return TASK_UNMAPPED_BASE + mmap_rnd();
33312 }
33313
33314@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33315 */
33316 void arch_pick_mmap_layout(struct mm_struct *mm)
33317 {
33318- mm->mmap_legacy_base = mmap_legacy_base();
33319- mm->mmap_base = mmap_base();
33320+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33321+ mm->mmap_base = mmap_base(mm);
33322+
33323+#ifdef CONFIG_PAX_RANDMMAP
33324+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33325+ mm->mmap_legacy_base += mm->delta_mmap;
33326+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33327+ }
33328+#endif
33329
33330 if (mmap_is_legacy()) {
33331 mm->mmap_base = mm->mmap_legacy_base;
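
The mmap.c hunks make the base calculation SEGMEXEC-aware and shift it by the RANDMMAP deltas. The arithmetic, folded into one userspace function with sample constants standing in for the kernel's task size, rlimit and delta values:

#include <stdio.h>

#define PAGE_MASK (~0xfffUL)
#define MIN_GAP (128UL * 1024 * 1024)

static unsigned long mmap_base(unsigned long task_size, unsigned long stack_rlim,
			       unsigned long rnd, unsigned long delta_mmap,
			       unsigned long delta_stack)
{
	unsigned long gap = stack_rlim;
	unsigned long max_gap = task_size / 6 * 5;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > max_gap)
		gap = max_gap;

	/* RANDMMAP pulls the base down by both deltas */
	return ((task_size - gap - rnd) & PAGE_MASK) - delta_mmap - delta_stack;
}

int main(void)
{
	printf("%#lx\n", mmap_base(0xc0000000UL, 8UL << 20, 0x400000,
				   0x1000000, 0x800000));
	return 0;
}
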
33332diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33333index 0057a7a..95c7edd 100644
33334--- a/arch/x86/mm/mmio-mod.c
33335+++ b/arch/x86/mm/mmio-mod.c
33336@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33337 break;
33338 default:
33339 {
33340- unsigned char *ip = (unsigned char *)instptr;
33341+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33342 my_trace->opcode = MMIO_UNKNOWN_OP;
33343 my_trace->width = 0;
33344 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33345@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33346 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33347 void __iomem *addr)
33348 {
33349- static atomic_t next_id;
33350+ static atomic_unchecked_t next_id;
33351 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33352 /* These are page-unaligned. */
33353 struct mmiotrace_map map = {
33354@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33355 .private = trace
33356 },
33357 .phys = offset,
33358- .id = atomic_inc_return(&next_id)
33359+ .id = atomic_inc_return_unchecked(&next_id)
33360 };
33361 map.map_id = trace->id;
33362
33363@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33364 ioremap_trace_core(offset, size, addr);
33365 }
33366
33367-static void iounmap_trace_core(volatile void __iomem *addr)
33368+static void iounmap_trace_core(const volatile void __iomem *addr)
33369 {
33370 struct mmiotrace_map map = {
33371 .phys = 0,
33372@@ -328,7 +328,7 @@ not_enabled:
33373 }
33374 }
33375
33376-void mmiotrace_iounmap(volatile void __iomem *addr)
33377+void mmiotrace_iounmap(const volatile void __iomem *addr)
33378 {
33379 might_sleep();
33380 if (is_enabled()) /* recheck and proper locking in *_core() */
33381diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33382index 1a88370..3f598b5 100644
33383--- a/arch/x86/mm/numa.c
33384+++ b/arch/x86/mm/numa.c
33385@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33386 }
33387 }
33388
33389-static int __init numa_register_memblks(struct numa_meminfo *mi)
33390+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33391 {
33392 unsigned long uninitialized_var(pfn_align);
33393 int i, nid;
33394diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33395index 536ea2f..f42c293 100644
33396--- a/arch/x86/mm/pageattr.c
33397+++ b/arch/x86/mm/pageattr.c
33398@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33399 */
33400 #ifdef CONFIG_PCI_BIOS
33401 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33402- pgprot_val(forbidden) |= _PAGE_NX;
33403+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33404 #endif
33405
33406 /*
33407@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33408 * Does not cover __inittext since that is gone later on. On
33409 * 64bit we do not enforce !NX on the low mapping
33410 */
33411- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33412- pgprot_val(forbidden) |= _PAGE_NX;
33413+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33414+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33415
33416+#ifdef CONFIG_DEBUG_RODATA
33417 /*
33418 * The .rodata section needs to be read-only. Using the pfn
33419 * catches all aliases.
33420@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33421 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33422 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33423 pgprot_val(forbidden) |= _PAGE_RW;
33424+#endif
33425
33426 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33427 /*
33428@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33429 }
33430 #endif
33431
33432+#ifdef CONFIG_PAX_KERNEXEC
33433+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33434+ pgprot_val(forbidden) |= _PAGE_RW;
33435+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33436+ }
33437+#endif
33438+
33439 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33440
33441 return prot;
33442@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33443 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33444 {
33445 /* change init_mm */
33446+ pax_open_kernel();
33447 set_pte_atomic(kpte, pte);
33448+
33449 #ifdef CONFIG_X86_32
33450 if (!SHARED_KERNEL_PMD) {
33451+
33452+#ifdef CONFIG_PAX_PER_CPU_PGD
33453+ unsigned long cpu;
33454+#else
33455 struct page *page;
33456+#endif
33457
33458+#ifdef CONFIG_PAX_PER_CPU_PGD
33459+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33460+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33461+#else
33462 list_for_each_entry(page, &pgd_list, lru) {
33463- pgd_t *pgd;
33464+ pgd_t *pgd = (pgd_t *)page_address(page);
33465+#endif
33466+
33467 pud_t *pud;
33468 pmd_t *pmd;
33469
33470- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33471+ pgd += pgd_index(address);
33472 pud = pud_offset(pgd, address);
33473 pmd = pmd_offset(pud, address);
33474 set_pte_atomic((pte_t *)pmd, pte);
33475 }
33476 }
33477 #endif
33478+ pax_close_kernel();
33479 }
33480
33481 static int
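
static_protections() works by accumulating a "forbidden" mask and stripping it from the requested protection; the KERNEXEC hunk above adds RW and NX to that mask for pfns inside kernel text, keeping it read-only and executable. The mask arithmetic in isolation, with invented bit values in place of the real _PAGE_* flags:

#include <stdio.h>

#define PAGE_RW 0x2ULL
#define PAGE_NX 0x8000000000000000ULL

static unsigned long long protect(unsigned long long prot, int in_kernel_text)
{
	unsigned long long forbidden = 0;

	if (in_kernel_text)		/* text stays RO and executable */
		forbidden |= PAGE_RW | PAGE_NX;

	return prot & ~forbidden;
}

int main(void)
{
	printf("%#llx\n", protect(PAGE_RW | PAGE_NX, 1));	/* 0: both stripped */
	return 0;
}
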
33482diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33483index 7ac6869..c0ba541 100644
33484--- a/arch/x86/mm/pat.c
33485+++ b/arch/x86/mm/pat.c
33486@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33487 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33488
33489 if (pg_flags == _PGMT_DEFAULT)
33490- return -1;
33491+ return _PAGE_CACHE_MODE_NUM;
33492 else if (pg_flags == _PGMT_WC)
33493 return _PAGE_CACHE_MODE_WC;
33494 else if (pg_flags == _PGMT_UC_MINUS)
33495@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33496
33497 page = pfn_to_page(pfn);
33498 type = get_page_memtype(page);
33499- if (type != -1) {
33500+ if (type != _PAGE_CACHE_MODE_NUM) {
33501 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33502 start, end - 1, type, req_type);
33503 if (new_type)
33504@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33505
33506 if (!entry) {
33507 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33508- current->comm, current->pid, start, end - 1);
33509+ current->comm, task_pid_nr(current), start, end - 1);
33510 return -EINVAL;
33511 }
33512
33513@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33514 page = pfn_to_page(paddr >> PAGE_SHIFT);
33515 rettype = get_page_memtype(page);
33516 /*
33517- * -1 from get_page_memtype() implies RAM page is in its
33518+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33519 * default state and not reserved, and hence of type WB
33520 */
33521- if (rettype == -1)
33522+ if (rettype == _PAGE_CACHE_MODE_NUM)
33523 rettype = _PAGE_CACHE_MODE_WB;
33524
33525 return rettype;
33526@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33527
33528 while (cursor < to) {
33529 if (!devmem_is_allowed(pfn)) {
33530- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33531- current->comm, from, to - 1);
33532+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33533+ current->comm, from, to - 1, cursor);
33534 return 0;
33535 }
33536 cursor += PAGE_SIZE;
33537@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33538 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33539 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33540 "for [mem %#010Lx-%#010Lx]\n",
33541- current->comm, current->pid,
33542+ current->comm, task_pid_nr(current),
33543 cattr_name(pcm),
33544 base, (unsigned long long)(base + size-1));
33545 return -EINVAL;
33546@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33547 pcm = lookup_memtype(paddr);
33548 if (want_pcm != pcm) {
33549 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33550- current->comm, current->pid,
33551+ current->comm, task_pid_nr(current),
33552 cattr_name(want_pcm),
33553 (unsigned long long)paddr,
33554 (unsigned long long)(paddr + size - 1),
33555@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33556 free_memtype(paddr, paddr + size);
33557 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33558 " for [mem %#010Lx-%#010Lx], got %s\n",
33559- current->comm, current->pid,
33560+ current->comm, task_pid_nr(current),
33561 cattr_name(want_pcm),
33562 (unsigned long long)paddr,
33563 (unsigned long long)(paddr + size - 1),
33564diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33565index 6582adc..fcc5d0b 100644
33566--- a/arch/x86/mm/pat_rbtree.c
33567+++ b/arch/x86/mm/pat_rbtree.c
33568@@ -161,7 +161,7 @@ success:
33569
33570 failure:
33571 printk(KERN_INFO "%s:%d conflicting memory types "
33572- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33573+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33574 end, cattr_name(found_type), cattr_name(match->type));
33575 return -EBUSY;
33576 }
33577diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33578index 9f0614d..92ae64a 100644
33579--- a/arch/x86/mm/pf_in.c
33580+++ b/arch/x86/mm/pf_in.c
33581@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33582 int i;
33583 enum reason_type rv = OTHERS;
33584
33585- p = (unsigned char *)ins_addr;
33586+ p = (unsigned char *)ktla_ktva(ins_addr);
33587 p += skip_prefix(p, &prf);
33588 p += get_opcode(p, &opcode);
33589
33590@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33591 struct prefix_bits prf;
33592 int i;
33593
33594- p = (unsigned char *)ins_addr;
33595+ p = (unsigned char *)ktla_ktva(ins_addr);
33596 p += skip_prefix(p, &prf);
33597 p += get_opcode(p, &opcode);
33598
33599@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33600 struct prefix_bits prf;
33601 int i;
33602
33603- p = (unsigned char *)ins_addr;
33604+ p = (unsigned char *)ktla_ktva(ins_addr);
33605 p += skip_prefix(p, &prf);
33606 p += get_opcode(p, &opcode);
33607
33608@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33609 struct prefix_bits prf;
33610 int i;
33611
33612- p = (unsigned char *)ins_addr;
33613+ p = (unsigned char *)ktla_ktva(ins_addr);
33614 p += skip_prefix(p, &prf);
33615 p += get_opcode(p, &opcode);
33616 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33617@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33618 struct prefix_bits prf;
33619 int i;
33620
33621- p = (unsigned char *)ins_addr;
33622+ p = (unsigned char *)ktla_ktva(ins_addr);
33623 p += skip_prefix(p, &prf);
33624 p += get_opcode(p, &opcode);
33625 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
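
pf_in.c consistently rebases instruction addresses through ktla_ktva() before reading their bytes, since under KERNEXEC kernel text is reached through a shifted alias. A trivial model of that translation; the delta below is an arbitrary demo value, not the patch's actual alias offset:

#include <stdint.h>
#include <stdio.h>

#define KTEXT_DELTA 0x10000000UL	/* assumed text-alias offset */

static uintptr_t ktla_ktva_model(uintptr_t addr)
{
	return addr + KTEXT_DELTA;	/* rebase before dereferencing */
}

int main(void)
{
	uintptr_t ip = 0xc1000000UL;
	printf("%#lx -> %#lx\n", (unsigned long)ip,
	       (unsigned long)ktla_ktva_model(ip));
	return 0;
}
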
33626diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33627index 6fb6927..4fc13c0 100644
33628--- a/arch/x86/mm/pgtable.c
33629+++ b/arch/x86/mm/pgtable.c
33630@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33631 list_del(&page->lru);
33632 }
33633
33634-#define UNSHARED_PTRS_PER_PGD \
33635- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33636+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33637+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33638
33639+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33640+{
33641+ unsigned int count = USER_PGD_PTRS;
33642
33643+ if (!pax_user_shadow_base)
33644+ return;
33645+
33646+ while (count--)
33647+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33648+}
33649+#endif
33650+
33651+#ifdef CONFIG_PAX_PER_CPU_PGD
33652+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33653+{
33654+ unsigned int count = USER_PGD_PTRS;
33655+
33656+ while (count--) {
33657+ pgd_t pgd;
33658+
33659+#ifdef CONFIG_X86_64
33660+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33661+#else
33662+ pgd = *src++;
33663+#endif
33664+
33665+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33666+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33667+#endif
33668+
33669+ *dst++ = pgd;
33670+ }
33671+
33672+}
33673+#endif
33674+
33675+#ifdef CONFIG_X86_64
33676+#define pxd_t pud_t
33677+#define pyd_t pgd_t
33678+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33679+#define pgtable_pxd_page_ctor(page) true
33680+#define pgtable_pxd_page_dtor(page)
33681+#define pxd_free(mm, pud) pud_free((mm), (pud))
33682+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33683+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33684+#define PYD_SIZE PGDIR_SIZE
33685+#else
33686+#define pxd_t pmd_t
33687+#define pyd_t pud_t
33688+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33689+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33690+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33691+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33692+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33693+#define pyd_offset(mm, address) pud_offset((mm), (address))
33694+#define PYD_SIZE PUD_SIZE
33695+#endif
33696+
33697+#ifdef CONFIG_PAX_PER_CPU_PGD
33698+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33699+static inline void pgd_dtor(pgd_t *pgd) {}
33700+#else
33701 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33702 {
33703 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33704@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33705 pgd_list_del(pgd);
33706 spin_unlock(&pgd_lock);
33707 }
33708+#endif
33709
33710 /*
33711 * List of all pgd's needed for non-PAE so it can invalidate entries
33712@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33713 * -- nyc
33714 */
33715
33716-#ifdef CONFIG_X86_PAE
33717+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33718 /*
33719 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33720 * updating the top-level pagetable entries to guarantee the
33721@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33722 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33723 * and initialize the kernel pmds here.
33724 */
33725-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33726+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33727
33728 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33729 {
33730@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33731 */
33732 flush_tlb_mm(mm);
33733 }
33734+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33735+#define PREALLOCATED_PXDS USER_PGD_PTRS
33736 #else /* !CONFIG_X86_PAE */
33737
33738 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33739-#define PREALLOCATED_PMDS 0
33740+#define PREALLOCATED_PXDS 0
33741
33742 #endif /* CONFIG_X86_PAE */
33743
33744-static void free_pmds(pmd_t *pmds[])
33745+static void free_pxds(pxd_t *pxds[])
33746 {
33747 int i;
33748
33749- for(i = 0; i < PREALLOCATED_PMDS; i++)
33750- if (pmds[i]) {
33751- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33752- free_page((unsigned long)pmds[i]);
33753+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33754+ if (pxds[i]) {
33755+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33756+ free_page((unsigned long)pxds[i]);
33757 }
33758 }
33759
33760-static int preallocate_pmds(pmd_t *pmds[])
33761+static int preallocate_pxds(pxd_t *pxds[])
33762 {
33763 int i;
33764 bool failed = false;
33765
33766- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33767- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33768- if (!pmd)
33769+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33770+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33771+ if (!pxd)
33772 failed = true;
33773- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33774- free_page((unsigned long)pmd);
33775- pmd = NULL;
33776+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33777+ free_page((unsigned long)pxd);
33778+ pxd = NULL;
33779 failed = true;
33780 }
33781- pmds[i] = pmd;
33782+ pxds[i] = pxd;
33783 }
33784
33785 if (failed) {
33786- free_pmds(pmds);
33787+ free_pxds(pxds);
33788 return -ENOMEM;
33789 }
33790
33791@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
33792 * preallocate which never got a corresponding vma will need to be
33793 * freed manually.
33794 */
33795-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33796+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33797 {
33798 int i;
33799
33800- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33801+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33802 pgd_t pgd = pgdp[i];
33803
33804 if (pgd_val(pgd) != 0) {
33805- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33806+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33807
33808- pgdp[i] = native_make_pgd(0);
33809+ set_pgd(pgdp + i, native_make_pgd(0));
33810
33811- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33812- pmd_free(mm, pmd);
33813+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33814+ pxd_free(mm, pxd);
33815 }
33816 }
33817 }
33818
33819-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33820+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33821 {
33822- pud_t *pud;
33823+ pyd_t *pyd;
33824 int i;
33825
33826- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33827+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33828 return;
33829
33830- pud = pud_offset(pgd, 0);
33831-
33832- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33833- pmd_t *pmd = pmds[i];
33834+#ifdef CONFIG_X86_64
33835+ pyd = pyd_offset(mm, 0L);
33836+#else
33837+ pyd = pyd_offset(pgd, 0L);
33838+#endif
33839
33840+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33841+ pxd_t *pxd = pxds[i];
33842 if (i >= KERNEL_PGD_BOUNDARY)
33843- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33844- sizeof(pmd_t) * PTRS_PER_PMD);
33845+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33846+ sizeof(pxd_t) * PTRS_PER_PMD);
33847
33848- pud_populate(mm, pud, pmd);
33849+ pyd_populate(mm, pyd, pxd);
33850 }
33851 }
33852
33853 pgd_t *pgd_alloc(struct mm_struct *mm)
33854 {
33855 pgd_t *pgd;
33856- pmd_t *pmds[PREALLOCATED_PMDS];
33857+ pxd_t *pxds[PREALLOCATED_PXDS];
33858
33859 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
33860
33861@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33862
33863 mm->pgd = pgd;
33864
33865- if (preallocate_pmds(pmds) != 0)
33866+ if (preallocate_pxds(pxds) != 0)
33867 goto out_free_pgd;
33868
33869 if (paravirt_pgd_alloc(mm) != 0)
33870- goto out_free_pmds;
33871+ goto out_free_pxds;
33872
33873 /*
33874 * Make sure that pre-populating the pmds is atomic with
33875@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33876 spin_lock(&pgd_lock);
33877
33878 pgd_ctor(mm, pgd);
33879- pgd_prepopulate_pmd(mm, pgd, pmds);
33880+ pgd_prepopulate_pxd(mm, pgd, pxds);
33881
33882 spin_unlock(&pgd_lock);
33883
33884 return pgd;
33885
33886-out_free_pmds:
33887- free_pmds(pmds);
33888+out_free_pxds:
33889+ free_pxds(pxds);
33890 out_free_pgd:
33891 free_page((unsigned long)pgd);
33892 out:
33893@@ -313,7 +380,7 @@ out:
33894
33895 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
33896 {
33897- pgd_mop_up_pmds(mm, pgd);
33898+ pgd_mop_up_pxds(mm, pgd);
33899 pgd_dtor(pgd);
33900 paravirt_pgd_free(mm, pgd);
33901 free_page((unsigned long)pgd);
33902diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
33903index 75cc097..79a097f 100644
33904--- a/arch/x86/mm/pgtable_32.c
33905+++ b/arch/x86/mm/pgtable_32.c
33906@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
33907 return;
33908 }
33909 pte = pte_offset_kernel(pmd, vaddr);
33910+
33911+ pax_open_kernel();
33912 if (pte_val(pteval))
33913 set_pte_at(&init_mm, vaddr, pte, pteval);
33914 else
33915 pte_clear(&init_mm, vaddr, pte);
33916+ pax_close_kernel();
33917
33918 /*
33919 * It's enough to flush this one mapping.
33920diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
33921index e666cbb..61788c45 100644
33922--- a/arch/x86/mm/physaddr.c
33923+++ b/arch/x86/mm/physaddr.c
33924@@ -10,7 +10,7 @@
33925 #ifdef CONFIG_X86_64
33926
33927 #ifdef CONFIG_DEBUG_VIRTUAL
33928-unsigned long __phys_addr(unsigned long x)
33929+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
33930 {
33931 unsigned long y = x - __START_KERNEL_map;
33932
33933@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
33934 #else
33935
33936 #ifdef CONFIG_DEBUG_VIRTUAL
33937-unsigned long __phys_addr(unsigned long x)
33938+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
33939 {
33940 unsigned long phys_addr = x - PAGE_OFFSET;
33941 /* VMALLOC_* aren't constants */
33942diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
33943index 90555bf..f5f1828 100644
33944--- a/arch/x86/mm/setup_nx.c
33945+++ b/arch/x86/mm/setup_nx.c
33946@@ -5,8 +5,10 @@
33947 #include <asm/pgtable.h>
33948 #include <asm/proto.h>
33949
33950+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
33951 static int disable_nx;
33952
33953+#ifndef CONFIG_PAX_PAGEEXEC
33954 /*
33955 * noexec = on|off
33956 *
33957@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
33958 return 0;
33959 }
33960 early_param("noexec", noexec_setup);
33961+#endif
33962+
33963+#endif
33964
33965 void x86_configure_nx(void)
33966 {
33967+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
33968 if (cpu_has_nx && !disable_nx)
33969 __supported_pte_mask |= _PAGE_NX;
33970 else
33971+#endif
33972 __supported_pte_mask &= ~_PAGE_NX;
33973 }
33974
33975diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
33976index ee61c36..e6fedeb 100644
33977--- a/arch/x86/mm/tlb.c
33978+++ b/arch/x86/mm/tlb.c
33979@@ -48,7 +48,11 @@ void leave_mm(int cpu)
33980 BUG();
33981 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
33982 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
33983+
33984+#ifndef CONFIG_PAX_PER_CPU_PGD
33985 load_cr3(swapper_pg_dir);
33986+#endif
33987+
33988 /*
33989 * This gets called in the idle path where RCU
33990 * functions differently. Tracing normally
33991diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
33992new file mode 100644
33993index 0000000..dace51c
33994--- /dev/null
33995+++ b/arch/x86/mm/uderef_64.c
33996@@ -0,0 +1,37 @@
33997+#include <linux/mm.h>
33998+#include <asm/pgtable.h>
33999+#include <asm/uaccess.h>
34000+
34001+#ifdef CONFIG_PAX_MEMORY_UDEREF
34002+/* PaX: due to the special call convention these functions must
34003+ * - remain leaf functions under all configurations,
34004+ * - never be called directly, only dereferenced from the wrappers.
34005+ */
34006+void __pax_open_userland(void)
34007+{
34008+ unsigned int cpu;
34009+
34010+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34011+ return;
34012+
34013+ cpu = raw_get_cpu();
34014+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34015+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34016+ raw_put_cpu_no_resched();
34017+}
34018+EXPORT_SYMBOL(__pax_open_userland);
34019+
34020+void __pax_close_userland(void)
34021+{
34022+ unsigned int cpu;
34023+
34024+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34025+ return;
34026+
34027+ cpu = raw_get_cpu();
34028+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34029+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34030+ raw_put_cpu_no_resched();
34031+}
34032+EXPORT_SYMBOL(__pax_close_userland);
34033+#endif
34034diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34035index 6440221..f84b5c7 100644
34036--- a/arch/x86/net/bpf_jit.S
34037+++ b/arch/x86/net/bpf_jit.S
34038@@ -9,6 +9,7 @@
34039 */
34040 #include <linux/linkage.h>
34041 #include <asm/dwarf2.h>
34042+#include <asm/alternative-asm.h>
34043
34044 /*
34045 * Calling convention :
34046@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34047 jle bpf_slow_path_word
34048 mov (SKBDATA,%rsi),%eax
34049 bswap %eax /* ntohl() */
34050+ pax_force_retaddr
34051 ret
34052
34053 sk_load_half:
34054@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34055 jle bpf_slow_path_half
34056 movzwl (SKBDATA,%rsi),%eax
34057 rol $8,%ax # ntohs()
34058+ pax_force_retaddr
34059 ret
34060
34061 sk_load_byte:
34062@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34063 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34064 jle bpf_slow_path_byte
34065 movzbl (SKBDATA,%rsi),%eax
34066+ pax_force_retaddr
34067 ret
34068
34069 /* rsi contains offset and can be scratched */
34070@@ -90,6 +94,7 @@ bpf_slow_path_word:
34071 js bpf_error
34072 mov - MAX_BPF_STACK + 32(%rbp),%eax
34073 bswap %eax
34074+ pax_force_retaddr
34075 ret
34076
34077 bpf_slow_path_half:
34078@@ -98,12 +103,14 @@ bpf_slow_path_half:
34079 mov - MAX_BPF_STACK + 32(%rbp),%ax
34080 rol $8,%ax
34081 movzwl %ax,%eax
34082+ pax_force_retaddr
34083 ret
34084
34085 bpf_slow_path_byte:
34086 bpf_slow_path_common(1)
34087 js bpf_error
34088 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34089+ pax_force_retaddr
34090 ret
34091
34092 #define sk_negative_common(SIZE) \
34093@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34094 sk_negative_common(4)
34095 mov (%rax), %eax
34096 bswap %eax
34097+ pax_force_retaddr
34098 ret
34099
34100 bpf_slow_path_half_neg:
34101@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34102 mov (%rax),%ax
34103 rol $8,%ax
34104 movzwl %ax,%eax
34105+ pax_force_retaddr
34106 ret
34107
34108 bpf_slow_path_byte_neg:
34109@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34110 .globl sk_load_byte_negative_offset
34111 sk_negative_common(1)
34112 movzbl (%rax), %eax
34113+ pax_force_retaddr
34114 ret
34115
34116 bpf_error:
34117@@ -156,4 +166,5 @@ bpf_error:
34118 mov - MAX_BPF_STACK + 16(%rbp),%r14
34119 mov - MAX_BPF_STACK + 24(%rbp),%r15
34120 leaveq
34121+ pax_force_retaddr
34122 ret
34123diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34124index 9875143..00f6656 100644
34125--- a/arch/x86/net/bpf_jit_comp.c
34126+++ b/arch/x86/net/bpf_jit_comp.c
34127@@ -13,7 +13,11 @@
34128 #include <linux/if_vlan.h>
34129 #include <asm/cacheflush.h>
34130
34131+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34132+int bpf_jit_enable __read_only;
34133+#else
34134 int bpf_jit_enable __read_mostly;
34135+#endif
34136
34137 /*
34138 * assembly code in arch/x86/net/bpf_jit.S
34139@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34140 static void jit_fill_hole(void *area, unsigned int size)
34141 {
34142 /* fill whole space with int3 instructions */
34143+ pax_open_kernel();
34144 memset(area, 0xcc, size);
34145+ pax_close_kernel();
34146 }
34147
34148 struct jit_context {
34149@@ -896,7 +902,9 @@ common_load:
34150 pr_err("bpf_jit_compile fatal error\n");
34151 return -EFAULT;
34152 }
34153+ pax_open_kernel();
34154 memcpy(image + proglen, temp, ilen);
34155+ pax_close_kernel();
34156 }
34157 proglen += ilen;
34158 addrs[i] = proglen;
34159@@ -968,7 +976,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34160
34161 if (image) {
34162 bpf_flush_icache(header, image + proglen);
34163- set_memory_ro((unsigned long)header, header->pages);
34164 prog->bpf_func = (void *)image;
34165 prog->jited = true;
34166 }
34167@@ -981,12 +988,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34168 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34169 struct bpf_binary_header *header = (void *)addr;
34170
34171- if (!fp->jited)
34172- goto free_filter;
34173+ if (fp->jited)
34174+ bpf_jit_binary_free(header);
34175
34176- set_memory_rw(addr, header->pages);
34177- bpf_jit_binary_free(header);
34178-
34179-free_filter:
34180 bpf_prog_unlock_free(fp);
34181 }
34182diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34183index 5d04be5..2beeaa2 100644
34184--- a/arch/x86/oprofile/backtrace.c
34185+++ b/arch/x86/oprofile/backtrace.c
34186@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34187 struct stack_frame_ia32 *fp;
34188 unsigned long bytes;
34189
34190- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34191+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34192 if (bytes != 0)
34193 return NULL;
34194
34195- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34196+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34197
34198 oprofile_add_trace(bufhead[0].return_address);
34199
34200@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34201 struct stack_frame bufhead[2];
34202 unsigned long bytes;
34203
34204- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34205+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34206 if (bytes != 0)
34207 return NULL;
34208
34209@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34210 {
34211 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34212
34213- if (!user_mode_vm(regs)) {
34214+ if (!user_mode(regs)) {
34215 unsigned long stack = kernel_stack_pointer(regs);
34216 if (depth)
34217 dump_trace(NULL, regs, (unsigned long *)stack, 0,
34218diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34219index 1d2e639..f6ef82a 100644
34220--- a/arch/x86/oprofile/nmi_int.c
34221+++ b/arch/x86/oprofile/nmi_int.c
34222@@ -23,6 +23,7 @@
34223 #include <asm/nmi.h>
34224 #include <asm/msr.h>
34225 #include <asm/apic.h>
34226+#include <asm/pgtable.h>
34227
34228 #include "op_counter.h"
34229 #include "op_x86_model.h"
34230@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34231 if (ret)
34232 return ret;
34233
34234- if (!model->num_virt_counters)
34235- model->num_virt_counters = model->num_counters;
34236+ if (!model->num_virt_counters) {
34237+ pax_open_kernel();
34238+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34239+ pax_close_kernel();
34240+ }
34241
34242 mux_init(ops);
34243
34244diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34245index 50d86c0..7985318 100644
34246--- a/arch/x86/oprofile/op_model_amd.c
34247+++ b/arch/x86/oprofile/op_model_amd.c
34248@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34249 num_counters = AMD64_NUM_COUNTERS;
34250 }
34251
34252- op_amd_spec.num_counters = num_counters;
34253- op_amd_spec.num_controls = num_counters;
34254- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34255+ pax_open_kernel();
34256+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34257+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34258+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34259+ pax_close_kernel();
34260
34261 return 0;
34262 }
34263diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34264index d90528e..0127e2b 100644
34265--- a/arch/x86/oprofile/op_model_ppro.c
34266+++ b/arch/x86/oprofile/op_model_ppro.c
34267@@ -19,6 +19,7 @@
34268 #include <asm/msr.h>
34269 #include <asm/apic.h>
34270 #include <asm/nmi.h>
34271+#include <asm/pgtable.h>
34272
34273 #include "op_x86_model.h"
34274 #include "op_counter.h"
34275@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34276
34277 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34278
34279- op_arch_perfmon_spec.num_counters = num_counters;
34280- op_arch_perfmon_spec.num_controls = num_counters;
34281+ pax_open_kernel();
34282+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34283+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34284+ pax_close_kernel();
34285 }
34286
34287 static int arch_perfmon_init(struct oprofile_operations *ignore)
34288diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34289index 71e8a67..6a313bb 100644
34290--- a/arch/x86/oprofile/op_x86_model.h
34291+++ b/arch/x86/oprofile/op_x86_model.h
34292@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34293 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34294 struct op_msrs const * const msrs);
34295 #endif
34296-};
34297+} __do_const;
34298
34299 struct op_counter_config;
34300
34301diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34302index 44b9271..4c5a988 100644
34303--- a/arch/x86/pci/intel_mid_pci.c
34304+++ b/arch/x86/pci/intel_mid_pci.c
34305@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34306 pci_mmcfg_late_init();
34307 pcibios_enable_irq = intel_mid_pci_irq_enable;
34308 pcibios_disable_irq = intel_mid_pci_irq_disable;
34309- pci_root_ops = intel_mid_pci_ops;
34310+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34311 pci_soc_mode = 1;
34312 /* Continue with standard init */
34313 return 1;
34314diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34315index 5dc6ca5..25c03f5 100644
34316--- a/arch/x86/pci/irq.c
34317+++ b/arch/x86/pci/irq.c
34318@@ -51,7 +51,7 @@ struct irq_router {
34319 struct irq_router_handler {
34320 u16 vendor;
34321 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34322-};
34323+} __do_const;
34324
34325 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34326 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34327@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34328 return 0;
34329 }
34330
34331-static __initdata struct irq_router_handler pirq_routers[] = {
34332+static __initconst const struct irq_router_handler pirq_routers[] = {
34333 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34334 { PCI_VENDOR_ID_AL, ali_router_probe },
34335 { PCI_VENDOR_ID_ITE, ite_router_probe },
34336@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34337 static void __init pirq_find_router(struct irq_router *r)
34338 {
34339 struct irq_routing_table *rt = pirq_table;
34340- struct irq_router_handler *h;
34341+ const struct irq_router_handler *h;
34342
34343 #ifdef CONFIG_PCI_BIOS
34344 if (!rt->signature) {
34345@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34346 return 0;
34347 }
34348
34349-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34350+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34351 {
34352 .callback = fix_broken_hp_bios_irq9,
34353 .ident = "HP Pavilion N5400 Series Laptop",
34354diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34355index 9b83b90..4112152 100644
34356--- a/arch/x86/pci/pcbios.c
34357+++ b/arch/x86/pci/pcbios.c
34358@@ -79,7 +79,7 @@ union bios32 {
34359 static struct {
34360 unsigned long address;
34361 unsigned short segment;
34362-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34363+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34364
34365 /*
34366 * Returns the entry point for the given service, NULL on error
34367@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34368 unsigned long length; /* %ecx */
34369 unsigned long entry; /* %edx */
34370 unsigned long flags;
34371+ struct desc_struct d, *gdt;
34372
34373 local_irq_save(flags);
34374- __asm__("lcall *(%%edi); cld"
34375+
34376+ gdt = get_cpu_gdt_table(smp_processor_id());
34377+
34378+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34379+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34380+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34381+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34382+
34383+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34384 : "=a" (return_code),
34385 "=b" (address),
34386 "=c" (length),
34387 "=d" (entry)
34388 : "0" (service),
34389 "1" (0),
34390- "D" (&bios32_indirect));
34391+ "D" (&bios32_indirect),
34392+ "r"(__PCIBIOS_DS)
34393+ : "memory");
34394+
34395+ pax_open_kernel();
34396+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34397+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34398+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34399+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34400+ pax_close_kernel();
34401+
34402 local_irq_restore(flags);
34403
34404 switch (return_code) {
34405- case 0:
34406- return address + entry;
34407- case 0x80: /* Not present */
34408- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34409- return 0;
34410- default: /* Shouldn't happen */
34411- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34412- service, return_code);
34413+ case 0: {
34414+ int cpu;
34415+ unsigned char flags;
34416+
34417+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34418+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34419+ printk(KERN_WARNING "bios32_service: not valid\n");
34420 return 0;
34421+ }
34422+ address = address + PAGE_OFFSET;
34423+ length += 16UL; /* some BIOSs underreport this... */
34424+ flags = 4;
34425+ if (length >= 64*1024*1024) {
34426+ length >>= PAGE_SHIFT;
34427+ flags |= 8;
34428+ }
34429+
34430+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34431+ gdt = get_cpu_gdt_table(cpu);
34432+ pack_descriptor(&d, address, length, 0x9b, flags);
34433+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34434+ pack_descriptor(&d, address, length, 0x93, flags);
34435+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34436+ }
34437+ return entry;
34438+ }
34439+ case 0x80: /* Not present */
34440+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34441+ return 0;
34442+ default: /* Shouldn't happen */
34443+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34444+ service, return_code);
34445+ return 0;
34446 }
34447 }
34448
34449 static struct {
34450 unsigned long address;
34451 unsigned short segment;
34452-} pci_indirect = { 0, __KERNEL_CS };
34453+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34454
34455-static int pci_bios_present;
34456+static int pci_bios_present __read_only;
34457
34458 static int __init check_pcibios(void)
34459 {
34460@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34461 unsigned long flags, pcibios_entry;
34462
34463 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34464- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34465+ pci_indirect.address = pcibios_entry;
34466
34467 local_irq_save(flags);
34468- __asm__(
34469- "lcall *(%%edi); cld\n\t"
34470+ __asm__("movw %w6, %%ds\n\t"
34471+ "lcall *%%ss:(%%edi); cld\n\t"
34472+ "push %%ss\n\t"
34473+ "pop %%ds\n\t"
34474 "jc 1f\n\t"
34475 "xor %%ah, %%ah\n"
34476 "1:"
34477@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34478 "=b" (ebx),
34479 "=c" (ecx)
34480 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34481- "D" (&pci_indirect)
34482+ "D" (&pci_indirect),
34483+ "r" (__PCIBIOS_DS)
34484 : "memory");
34485 local_irq_restore(flags);
34486
34487@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34488
34489 switch (len) {
34490 case 1:
34491- __asm__("lcall *(%%esi); cld\n\t"
34492+ __asm__("movw %w6, %%ds\n\t"
34493+ "lcall *%%ss:(%%esi); cld\n\t"
34494+ "push %%ss\n\t"
34495+ "pop %%ds\n\t"
34496 "jc 1f\n\t"
34497 "xor %%ah, %%ah\n"
34498 "1:"
34499@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34500 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34501 "b" (bx),
34502 "D" ((long)reg),
34503- "S" (&pci_indirect));
34504+ "S" (&pci_indirect),
34505+ "r" (__PCIBIOS_DS));
34506 /*
34507 * Zero-extend the result beyond 8 bits, do not trust the
34508 * BIOS having done it:
34509@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34510 *value &= 0xff;
34511 break;
34512 case 2:
34513- __asm__("lcall *(%%esi); cld\n\t"
34514+ __asm__("movw %w6, %%ds\n\t"
34515+ "lcall *%%ss:(%%esi); cld\n\t"
34516+ "push %%ss\n\t"
34517+ "pop %%ds\n\t"
34518 "jc 1f\n\t"
34519 "xor %%ah, %%ah\n"
34520 "1:"
34521@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34522 : "1" (PCIBIOS_READ_CONFIG_WORD),
34523 "b" (bx),
34524 "D" ((long)reg),
34525- "S" (&pci_indirect));
34526+ "S" (&pci_indirect),
34527+ "r" (__PCIBIOS_DS));
34528 /*
34529 * Zero-extend the result beyond 16 bits, do not trust the
34530 * BIOS having done it:
34531@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34532 *value &= 0xffff;
34533 break;
34534 case 4:
34535- __asm__("lcall *(%%esi); cld\n\t"
34536+ __asm__("movw %w6, %%ds\n\t"
34537+ "lcall *%%ss:(%%esi); cld\n\t"
34538+ "push %%ss\n\t"
34539+ "pop %%ds\n\t"
34540 "jc 1f\n\t"
34541 "xor %%ah, %%ah\n"
34542 "1:"
34543@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34544 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34545 "b" (bx),
34546 "D" ((long)reg),
34547- "S" (&pci_indirect));
34548+ "S" (&pci_indirect),
34549+ "r" (__PCIBIOS_DS));
34550 break;
34551 }
34552
34553@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34554
34555 switch (len) {
34556 case 1:
34557- __asm__("lcall *(%%esi); cld\n\t"
34558+ __asm__("movw %w6, %%ds\n\t"
34559+ "lcall *%%ss:(%%esi); cld\n\t"
34560+ "push %%ss\n\t"
34561+ "pop %%ds\n\t"
34562 "jc 1f\n\t"
34563 "xor %%ah, %%ah\n"
34564 "1:"
34565@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34566 "c" (value),
34567 "b" (bx),
34568 "D" ((long)reg),
34569- "S" (&pci_indirect));
34570+ "S" (&pci_indirect),
34571+ "r" (__PCIBIOS_DS));
34572 break;
34573 case 2:
34574- __asm__("lcall *(%%esi); cld\n\t"
34575+ __asm__("movw %w6, %%ds\n\t"
34576+ "lcall *%%ss:(%%esi); cld\n\t"
34577+ "push %%ss\n\t"
34578+ "pop %%ds\n\t"
34579 "jc 1f\n\t"
34580 "xor %%ah, %%ah\n"
34581 "1:"
34582@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34583 "c" (value),
34584 "b" (bx),
34585 "D" ((long)reg),
34586- "S" (&pci_indirect));
34587+ "S" (&pci_indirect),
34588+ "r" (__PCIBIOS_DS));
34589 break;
34590 case 4:
34591- __asm__("lcall *(%%esi); cld\n\t"
34592+ __asm__("movw %w6, %%ds\n\t"
34593+ "lcall *%%ss:(%%esi); cld\n\t"
34594+ "push %%ss\n\t"
34595+ "pop %%ds\n\t"
34596 "jc 1f\n\t"
34597 "xor %%ah, %%ah\n"
34598 "1:"
34599@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34600 "c" (value),
34601 "b" (bx),
34602 "D" ((long)reg),
34603- "S" (&pci_indirect));
34604+ "S" (&pci_indirect),
34605+ "r" (__PCIBIOS_DS));
34606 break;
34607 }
34608
34609@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34610
34611 DBG("PCI: Fetching IRQ routing table... ");
34612 __asm__("push %%es\n\t"
34613+ "movw %w8, %%ds\n\t"
34614 "push %%ds\n\t"
34615 "pop %%es\n\t"
34616- "lcall *(%%esi); cld\n\t"
34617+ "lcall *%%ss:(%%esi); cld\n\t"
34618 "pop %%es\n\t"
34619+ "push %%ss\n\t"
34620+ "pop %%ds\n"
34621 "jc 1f\n\t"
34622 "xor %%ah, %%ah\n"
34623 "1:"
34624@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34625 "1" (0),
34626 "D" ((long) &opt),
34627 "S" (&pci_indirect),
34628- "m" (opt)
34629+ "m" (opt),
34630+ "r" (__PCIBIOS_DS)
34631 : "memory");
34632 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34633 if (ret & 0xff00)
34634@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34635 {
34636 int ret;
34637
34638- __asm__("lcall *(%%esi); cld\n\t"
34639+ __asm__("movw %w5, %%ds\n\t"
34640+ "lcall *%%ss:(%%esi); cld\n\t"
34641+ "push %%ss\n\t"
34642+ "pop %%ds\n"
34643 "jc 1f\n\t"
34644 "xor %%ah, %%ah\n"
34645 "1:"
34646@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34647 : "0" (PCIBIOS_SET_PCI_HW_INT),
34648 "b" ((dev->bus->number << 8) | dev->devfn),
34649 "c" ((irq << 8) | (pin + 10)),
34650- "S" (&pci_indirect));
34651+ "S" (&pci_indirect),
34652+ "r" (__PCIBIOS_DS));
34653 return !(ret & 0xff00);
34654 }
34655 EXPORT_SYMBOL(pcibios_set_irq_routing);
34656diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34657index 40e7cda..c7e6672 100644
34658--- a/arch/x86/platform/efi/efi_32.c
34659+++ b/arch/x86/platform/efi/efi_32.c
34660@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34661 {
34662 struct desc_ptr gdt_descr;
34663
34664+#ifdef CONFIG_PAX_KERNEXEC
34665+ struct desc_struct d;
34666+#endif
34667+
34668 local_irq_save(efi_rt_eflags);
34669
34670 load_cr3(initial_page_table);
34671 __flush_tlb_all();
34672
34673+#ifdef CONFIG_PAX_KERNEXEC
34674+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34675+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34676+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34677+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34678+#endif
34679+
34680 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34681 gdt_descr.size = GDT_SIZE - 1;
34682 load_gdt(&gdt_descr);
34683@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34684 {
34685 struct desc_ptr gdt_descr;
34686
34687+#ifdef CONFIG_PAX_KERNEXEC
34688+ struct desc_struct d;
34689+
34690+ memset(&d, 0, sizeof d);
34691+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34692+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34693+#endif
34694+
34695 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34696 gdt_descr.size = GDT_SIZE - 1;
34697 load_gdt(&gdt_descr);
34698
34699+#ifdef CONFIG_PAX_PER_CPU_PGD
34700+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34701+#else
34702 load_cr3(swapper_pg_dir);
34703+#endif
34704+
34705 __flush_tlb_all();
34706
34707 local_irq_restore(efi_rt_eflags);
34708diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34709index 17e80d8..9fa6e41 100644
34710--- a/arch/x86/platform/efi/efi_64.c
34711+++ b/arch/x86/platform/efi/efi_64.c
34712@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34713 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34714 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34715 }
34716+
34717+#ifdef CONFIG_PAX_PER_CPU_PGD
34718+ load_cr3(swapper_pg_dir);
34719+#endif
34720+
34721 __flush_tlb_all();
34722 }
34723
34724@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34725 for (pgd = 0; pgd < n_pgds; pgd++)
34726 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34727 kfree(save_pgd);
34728+
34729+#ifdef CONFIG_PAX_PER_CPU_PGD
34730+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34731+#endif
34732+
34733 __flush_tlb_all();
34734 local_irq_restore(efi_flags);
34735 early_code_mapping_set_exec(0);
34736@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34737 unsigned npages;
34738 pgd_t *pgd;
34739
34740- if (efi_enabled(EFI_OLD_MEMMAP))
34741+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34742+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34743+ * able to execute the EFI services.
34744+ */
34745+ if (__supported_pte_mask & _PAGE_NX) {
34746+ unsigned long addr = (unsigned long) __va(0);
34747+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34748+
34749+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34750+#ifdef CONFIG_PAX_PER_CPU_PGD
34751+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34752+#endif
34753+ set_pgd(pgd_offset_k(addr), pe);
34754+ }
34755+
34756 return 0;
34757+ }
34758
34759 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34760 pgd = __va(efi_scratch.efi_pgt);
34761diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34762index 040192b..7d3300f 100644
34763--- a/arch/x86/platform/efi/efi_stub_32.S
34764+++ b/arch/x86/platform/efi/efi_stub_32.S
34765@@ -6,7 +6,9 @@
34766 */
34767
34768 #include <linux/linkage.h>
34769+#include <linux/init.h>
34770 #include <asm/page_types.h>
34771+#include <asm/segment.h>
34772
34773 /*
34774 * efi_call_phys(void *, ...) is a function with variable parameters.
34775@@ -20,7 +22,7 @@
34776 * service functions will comply with gcc calling convention, too.
34777 */
34778
34779-.text
34780+__INIT
34781 ENTRY(efi_call_phys)
34782 /*
34783 * 0. The function can only be called in Linux kernel. So CS has been
34784@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34785 * The mapping of lower virtual memory has been created in prolog and
34786 * epilog.
34787 */
34788- movl $1f, %edx
34789- subl $__PAGE_OFFSET, %edx
34790- jmp *%edx
34791+#ifdef CONFIG_PAX_KERNEXEC
34792+ movl $(__KERNEXEC_EFI_DS), %edx
34793+ mov %edx, %ds
34794+ mov %edx, %es
34795+ mov %edx, %ss
34796+ addl $2f,(1f)
34797+ ljmp *(1f)
34798+
34799+__INITDATA
34800+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34801+.previous
34802+
34803+2:
34804+ subl $2b,(1b)
34805+#else
34806+ jmp 1f-__PAGE_OFFSET
34807 1:
34808+#endif
34809
34810 /*
34811 * 2. Now on the top of stack is the return
34812@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34813 * parameter 2, ..., param n. To make things easy, we save the return
34814 * address of efi_call_phys in a global variable.
34815 */
34816- popl %edx
34817- movl %edx, saved_return_addr
34818- /* get the function pointer into ECX*/
34819- popl %ecx
34820- movl %ecx, efi_rt_function_ptr
34821- movl $2f, %edx
34822- subl $__PAGE_OFFSET, %edx
34823- pushl %edx
34824+ popl (saved_return_addr)
34825+ popl (efi_rt_function_ptr)
34826
34827 /*
34828 * 3. Clear PG bit in %CR0.
34829@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34830 /*
34831 * 5. Call the physical function.
34832 */
34833- jmp *%ecx
34834+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
34835
34836-2:
34837 /*
34838 * 6. After EFI runtime service returns, control will return to
34839 * following instruction. We'd better readjust stack pointer first.
34840@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
34841 movl %cr0, %edx
34842 orl $0x80000000, %edx
34843 movl %edx, %cr0
34844- jmp 1f
34845-1:
34846+
34847 /*
34848 * 8. Now restore the virtual mode from flat mode by
34849 * adding EIP with PAGE_OFFSET.
34850 */
34851- movl $1f, %edx
34852- jmp *%edx
34853+#ifdef CONFIG_PAX_KERNEXEC
34854+ movl $(__KERNEL_DS), %edx
34855+ mov %edx, %ds
34856+ mov %edx, %es
34857+ mov %edx, %ss
34858+ ljmp $(__KERNEL_CS),$1f
34859+#else
34860+ jmp 1f+__PAGE_OFFSET
34861+#endif
34862 1:
34863
34864 /*
34865 * 9. Balance the stack. And because EAX contain the return value,
34866 * we'd better not clobber it.
34867 */
34868- leal efi_rt_function_ptr, %edx
34869- movl (%edx), %ecx
34870- pushl %ecx
34871+ pushl (efi_rt_function_ptr)
34872
34873 /*
34874- * 10. Push the saved return address onto the stack and return.
34875+ * 10. Return to the saved return address.
34876 */
34877- leal saved_return_addr, %edx
34878- movl (%edx), %ecx
34879- pushl %ecx
34880- ret
34881+ jmpl *(saved_return_addr)
34882 ENDPROC(efi_call_phys)
34883 .previous
34884
34885-.data
34886+__INITDATA
34887 saved_return_addr:
34888 .long 0
34889 efi_rt_function_ptr:
34890diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
34891index 86d0f9e..6d499f4 100644
34892--- a/arch/x86/platform/efi/efi_stub_64.S
34893+++ b/arch/x86/platform/efi/efi_stub_64.S
34894@@ -11,6 +11,7 @@
34895 #include <asm/msr.h>
34896 #include <asm/processor-flags.h>
34897 #include <asm/page_types.h>
34898+#include <asm/alternative-asm.h>
34899
34900 #define SAVE_XMM \
34901 mov %rsp, %rax; \
34902@@ -88,6 +89,7 @@ ENTRY(efi_call)
34903 RESTORE_PGT
34904 addq $48, %rsp
34905 RESTORE_XMM
34906+ pax_force_retaddr 0, 1
34907 ret
34908 ENDPROC(efi_call)
34909
34910diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
34911index 1bbedc4..eb795b5 100644
34912--- a/arch/x86/platform/intel-mid/intel-mid.c
34913+++ b/arch/x86/platform/intel-mid/intel-mid.c
34914@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
34915 {
34916 };
34917
34918-static void intel_mid_reboot(void)
34919+static void __noreturn intel_mid_reboot(void)
34920 {
34921 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
34922+ BUG();
34923 }
34924
34925 static unsigned long __init intel_mid_calibrate_tsc(void)
34926diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
34927index 3c1c386..59a68ed 100644
34928--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
34929+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
34930@@ -13,6 +13,6 @@
34931 /* For every CPU addition a new get_<cpuname>_ops interface needs
34932 * to be added.
34933 */
34934-extern void *get_penwell_ops(void);
34935-extern void *get_cloverview_ops(void);
34936-extern void *get_tangier_ops(void);
34937+extern const void *get_penwell_ops(void);
34938+extern const void *get_cloverview_ops(void);
34939+extern const void *get_tangier_ops(void);
34940diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
34941index 23381d2..8ddc10e 100644
34942--- a/arch/x86/platform/intel-mid/mfld.c
34943+++ b/arch/x86/platform/intel-mid/mfld.c
34944@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
34945 pm_power_off = mfld_power_off;
34946 }
34947
34948-void *get_penwell_ops(void)
34949+const void *get_penwell_ops(void)
34950 {
34951 return &penwell_ops;
34952 }
34953
34954-void *get_cloverview_ops(void)
34955+const void *get_cloverview_ops(void)
34956 {
34957 return &penwell_ops;
34958 }
34959diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
34960index aaca917..66eadbc 100644
34961--- a/arch/x86/platform/intel-mid/mrfl.c
34962+++ b/arch/x86/platform/intel-mid/mrfl.c
34963@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
34964 .arch_setup = tangier_arch_setup,
34965 };
34966
34967-void *get_tangier_ops(void)
34968+const void *get_tangier_ops(void)
34969 {
34970 return &tangier_ops;
34971 }
34972diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
34973index d6ee929..3637cb5 100644
34974--- a/arch/x86/platform/olpc/olpc_dt.c
34975+++ b/arch/x86/platform/olpc/olpc_dt.c
34976@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
34977 return res;
34978 }
34979
34980-static struct of_pdt_ops prom_olpc_ops __initdata = {
34981+static struct of_pdt_ops prom_olpc_ops __initconst = {
34982 .nextprop = olpc_dt_nextprop,
34983 .getproplen = olpc_dt_getproplen,
34984 .getproperty = olpc_dt_getproperty,
34985diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
34986index 6ec7910..ecdbb11 100644
34987--- a/arch/x86/power/cpu.c
34988+++ b/arch/x86/power/cpu.c
34989@@ -137,11 +137,8 @@ static void do_fpu_end(void)
34990 static void fix_processor_context(void)
34991 {
34992 int cpu = smp_processor_id();
34993- struct tss_struct *t = &per_cpu(init_tss, cpu);
34994-#ifdef CONFIG_X86_64
34995- struct desc_struct *desc = get_cpu_gdt_table(cpu);
34996- tss_desc tss;
34997-#endif
34998+ struct tss_struct *t = init_tss + cpu;
34999+
35000 set_tss_desc(cpu, t); /*
35001 * This just modifies memory; should not be
35002 * necessary. But... This is necessary, because
35003@@ -150,10 +147,6 @@ static void fix_processor_context(void)
35004 */
35005
35006 #ifdef CONFIG_X86_64
35007- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35008- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35009- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35010-
35011 syscall_init(); /* This sets MSR_*STAR and related */
35012 #endif
35013 load_TR_desc(); /* This does ltr */
35014diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35015index bad628a..a102610 100644
35016--- a/arch/x86/realmode/init.c
35017+++ b/arch/x86/realmode/init.c
35018@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35019 __va(real_mode_header->trampoline_header);
35020
35021 #ifdef CONFIG_X86_32
35022- trampoline_header->start = __pa_symbol(startup_32_smp);
35023+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35024+
35025+#ifdef CONFIG_PAX_KERNEXEC
35026+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35027+#endif
35028+
35029+ trampoline_header->boot_cs = __BOOT_CS;
35030 trampoline_header->gdt_limit = __BOOT_DS + 7;
35031 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35032 #else
35033@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35034 *trampoline_cr4_features = read_cr4();
35035
35036 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35037- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35038+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35039 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35040 #endif
35041 }
35042diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35043index 7c0d7be..d24dc88 100644
35044--- a/arch/x86/realmode/rm/Makefile
35045+++ b/arch/x86/realmode/rm/Makefile
35046@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35047
35048 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35049 -I$(srctree)/arch/x86/boot
35050+ifdef CONSTIFY_PLUGIN
35051+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35052+endif
35053 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35054 GCOV_PROFILE := n
35055diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35056index a28221d..93c40f1 100644
35057--- a/arch/x86/realmode/rm/header.S
35058+++ b/arch/x86/realmode/rm/header.S
35059@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35060 #endif
35061 /* APM/BIOS reboot */
35062 .long pa_machine_real_restart_asm
35063-#ifdef CONFIG_X86_64
35064+#ifdef CONFIG_X86_32
35065+ .long __KERNEL_CS
35066+#else
35067 .long __KERNEL32_CS
35068 #endif
35069 END(real_mode_header)
35070diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35071index 48ddd76..c26749f 100644
35072--- a/arch/x86/realmode/rm/trampoline_32.S
35073+++ b/arch/x86/realmode/rm/trampoline_32.S
35074@@ -24,6 +24,12 @@
35075 #include <asm/page_types.h>
35076 #include "realmode.h"
35077
35078+#ifdef CONFIG_PAX_KERNEXEC
35079+#define ta(X) (X)
35080+#else
35081+#define ta(X) (pa_ ## X)
35082+#endif
35083+
35084 .text
35085 .code16
35086
35087@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35088
35089 cli # We should be safe anyway
35090
35091- movl tr_start, %eax # where we need to go
35092-
35093 movl $0xA5A5A5A5, trampoline_status
35094 # write marker for master knows we're running
35095
35096@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35097 movw $1, %dx # protected mode (PE) bit
35098 lmsw %dx # into protected mode
35099
35100- ljmpl $__BOOT_CS, $pa_startup_32
35101+ ljmpl *(trampoline_header)
35102
35103 .section ".text32","ax"
35104 .code32
35105@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35106 .balign 8
35107 GLOBAL(trampoline_header)
35108 tr_start: .space 4
35109- tr_gdt_pad: .space 2
35110+ tr_boot_cs: .space 2
35111 tr_gdt: .space 6
35112 END(trampoline_header)
35113
35114diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35115index dac7b20..72dbaca 100644
35116--- a/arch/x86/realmode/rm/trampoline_64.S
35117+++ b/arch/x86/realmode/rm/trampoline_64.S
35118@@ -93,6 +93,7 @@ ENTRY(startup_32)
35119 movl %edx, %gs
35120
35121 movl pa_tr_cr4, %eax
35122+ andl $~X86_CR4_PCIDE, %eax
35123 movl %eax, %cr4 # Enable PAE mode
35124
35125 # Setup trampoline 4 level pagetables
35126@@ -106,7 +107,7 @@ ENTRY(startup_32)
35127 wrmsr
35128
35129 # Enable paging and in turn activate Long Mode
35130- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35131+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35132 movl %eax, %cr0
35133
35134 /*
35135diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35136index 9e7e147..25a4158 100644
35137--- a/arch/x86/realmode/rm/wakeup_asm.S
35138+++ b/arch/x86/realmode/rm/wakeup_asm.S
35139@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35140 lgdtl pmode_gdt
35141
35142 /* This really couldn't... */
35143- movl pmode_entry, %eax
35144 movl pmode_cr0, %ecx
35145 movl %ecx, %cr0
35146- ljmpl $__KERNEL_CS, $pa_startup_32
35147- /* -> jmp *%eax in trampoline_32.S */
35148+
35149+ ljmpl *pmode_entry
35150 #else
35151 jmp trampoline_start
35152 #endif
35153diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35154index 604a37e..e49702a 100644
35155--- a/arch/x86/tools/Makefile
35156+++ b/arch/x86/tools/Makefile
35157@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35158
35159 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35160
35161-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35162+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35163 hostprogs-y += relocs
35164 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35165 PHONY += relocs
35166diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35167index 0c2fae8..88036b7 100644
35168--- a/arch/x86/tools/relocs.c
35169+++ b/arch/x86/tools/relocs.c
35170@@ -1,5 +1,7 @@
35171 /* This is included from relocs_32/64.c */
35172
35173+#include "../../../include/generated/autoconf.h"
35174+
35175 #define ElfW(type) _ElfW(ELF_BITS, type)
35176 #define _ElfW(bits, type) __ElfW(bits, type)
35177 #define __ElfW(bits, type) Elf##bits##_##type
35178@@ -11,6 +13,7 @@
35179 #define Elf_Sym ElfW(Sym)
35180
35181 static Elf_Ehdr ehdr;
35182+static Elf_Phdr *phdr;
35183
35184 struct relocs {
35185 uint32_t *offset;
35186@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35187 }
35188 }
35189
35190+static void read_phdrs(FILE *fp)
35191+{
35192+ unsigned int i;
35193+
35194+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35195+ if (!phdr) {
35196+ die("Unable to allocate %d program headers\n",
35197+ ehdr.e_phnum);
35198+ }
35199+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35200+ die("Seek to %d failed: %s\n",
35201+ ehdr.e_phoff, strerror(errno));
35202+ }
35203+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35204+ die("Cannot read ELF program headers: %s\n",
35205+ strerror(errno));
35206+ }
35207+ for(i = 0; i < ehdr.e_phnum; i++) {
35208+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35209+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35210+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35211+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35212+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35213+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35214+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35215+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35216+ }
35217+
35218+}
35219+
35220 static void read_shdrs(FILE *fp)
35221 {
35222- int i;
35223+ unsigned int i;
35224 Elf_Shdr shdr;
35225
35226 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35227@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35228
35229 static void read_strtabs(FILE *fp)
35230 {
35231- int i;
35232+ unsigned int i;
35233 for (i = 0; i < ehdr.e_shnum; i++) {
35234 struct section *sec = &secs[i];
35235 if (sec->shdr.sh_type != SHT_STRTAB) {
35236@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35237
35238 static void read_symtabs(FILE *fp)
35239 {
35240- int i,j;
35241+ unsigned int i,j;
35242 for (i = 0; i < ehdr.e_shnum; i++) {
35243 struct section *sec = &secs[i];
35244 if (sec->shdr.sh_type != SHT_SYMTAB) {
35245@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35246 }
35247
35248
35249-static void read_relocs(FILE *fp)
35250+static void read_relocs(FILE *fp, int use_real_mode)
35251 {
35252- int i,j;
35253+ unsigned int i,j;
35254+ uint32_t base;
35255+
35256 for (i = 0; i < ehdr.e_shnum; i++) {
35257 struct section *sec = &secs[i];
35258 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35259@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35260 die("Cannot read symbol table: %s\n",
35261 strerror(errno));
35262 }
35263+ base = 0;
35264+
35265+#ifdef CONFIG_X86_32
35266+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35267+ if (phdr[j].p_type != PT_LOAD )
35268+ continue;
35269+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35270+ continue;
35271+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35272+ break;
35273+ }
35274+#endif
35275+
35276 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35277 Elf_Rel *rel = &sec->reltab[j];
35278- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35279+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35280 rel->r_info = elf_xword_to_cpu(rel->r_info);
35281 #if (SHT_REL_TYPE == SHT_RELA)
35282 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35283@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35284
35285 static void print_absolute_symbols(void)
35286 {
35287- int i;
35288+ unsigned int i;
35289 const char *format;
35290
35291 if (ELF_BITS == 64)
35292@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35293 for (i = 0; i < ehdr.e_shnum; i++) {
35294 struct section *sec = &secs[i];
35295 char *sym_strtab;
35296- int j;
35297+ unsigned int j;
35298
35299 if (sec->shdr.sh_type != SHT_SYMTAB) {
35300 continue;
35301@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35302
35303 static void print_absolute_relocs(void)
35304 {
35305- int i, printed = 0;
35306+ unsigned int i, printed = 0;
35307 const char *format;
35308
35309 if (ELF_BITS == 64)
35310@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35311 struct section *sec_applies, *sec_symtab;
35312 char *sym_strtab;
35313 Elf_Sym *sh_symtab;
35314- int j;
35315+ unsigned int j;
35316 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35317 continue;
35318 }
35319@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35320 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35321 Elf_Sym *sym, const char *symname))
35322 {
35323- int i;
35324+ unsigned int i;
35325 /* Walk through the relocations */
35326 for (i = 0; i < ehdr.e_shnum; i++) {
35327 char *sym_strtab;
35328 Elf_Sym *sh_symtab;
35329 struct section *sec_applies, *sec_symtab;
35330- int j;
35331+ unsigned int j;
35332 struct section *sec = &secs[i];
35333
35334 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35335@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35336 {
35337 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35338 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35339+ char *sym_strtab = sec->link->link->strtab;
35340+
35341+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35342+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35343+ return 0;
35344+
35345+#ifdef CONFIG_PAX_KERNEXEC
35346+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35347+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35348+ return 0;
35349+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35350+ return 0;
35351+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35352+ return 0;
35353+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35354+ return 0;
35355+#endif
35356
35357 switch (r_type) {
35358 case R_386_NONE:
35359@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35360
35361 static void emit_relocs(int as_text, int use_real_mode)
35362 {
35363- int i;
35364+ unsigned int i;
35365 int (*write_reloc)(uint32_t, FILE *) = write32;
35366 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35367 const char *symname);
35368@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35369 {
35370 regex_init(use_real_mode);
35371 read_ehdr(fp);
35372+ read_phdrs(fp);
35373 read_shdrs(fp);
35374 read_strtabs(fp);
35375 read_symtabs(fp);
35376- read_relocs(fp);
35377+ read_relocs(fp, use_real_mode);
35378 if (ELF_BITS == 64)
35379 percpu_init();
35380 if (show_absolute_syms) {
35381diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35382index f40281e..92728c9 100644
35383--- a/arch/x86/um/mem_32.c
35384+++ b/arch/x86/um/mem_32.c
35385@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35386 gate_vma.vm_start = FIXADDR_USER_START;
35387 gate_vma.vm_end = FIXADDR_USER_END;
35388 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35389- gate_vma.vm_page_prot = __P101;
35390+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35391
35392 return 0;
35393 }
35394diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35395index 80ffa5b..a33bd15 100644
35396--- a/arch/x86/um/tls_32.c
35397+++ b/arch/x86/um/tls_32.c
35398@@ -260,7 +260,7 @@ out:
35399 if (unlikely(task == current &&
35400 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35401 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35402- "without flushed TLS.", current->pid);
35403+ "without flushed TLS.", task_pid_nr(current));
35404 }
35405
35406 return 0;
35407diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35408index 5a4affe..9e2d522 100644
35409--- a/arch/x86/vdso/Makefile
35410+++ b/arch/x86/vdso/Makefile
35411@@ -174,7 +174,7 @@ quiet_cmd_vdso = VDSO $@
35412 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35413 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35414
35415-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35416+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35417 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35418 GCOV_PROFILE := n
35419
35420diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35421index 0224987..c7d65a5 100644
35422--- a/arch/x86/vdso/vdso2c.h
35423+++ b/arch/x86/vdso/vdso2c.h
35424@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35425 unsigned long load_size = -1; /* Work around bogus warning */
35426 unsigned long mapping_size;
35427 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35428- int i;
35429+ unsigned int i;
35430 unsigned long j;
35431 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35432 *alt_sec = NULL;
35433diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35434index e904c27..b9eaa03 100644
35435--- a/arch/x86/vdso/vdso32-setup.c
35436+++ b/arch/x86/vdso/vdso32-setup.c
35437@@ -14,6 +14,7 @@
35438 #include <asm/cpufeature.h>
35439 #include <asm/processor.h>
35440 #include <asm/vdso.h>
35441+#include <asm/mman.h>
35442
35443 #ifdef CONFIG_COMPAT_VDSO
35444 #define VDSO_DEFAULT 0
35445diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35446index 1c9f750..cfddb1a 100644
35447--- a/arch/x86/vdso/vma.c
35448+++ b/arch/x86/vdso/vma.c
35449@@ -19,10 +19,7 @@
35450 #include <asm/page.h>
35451 #include <asm/hpet.h>
35452 #include <asm/desc.h>
35453-
35454-#if defined(CONFIG_X86_64)
35455-unsigned int __read_mostly vdso64_enabled = 1;
35456-#endif
35457+#include <asm/mman.h>
35458
35459 void __init init_vdso_image(const struct vdso_image *image)
35460 {
35461@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35462 .pages = no_pages,
35463 };
35464
35465+#ifdef CONFIG_PAX_RANDMMAP
35466+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35467+ calculate_addr = false;
35468+#endif
35469+
35470 if (calculate_addr) {
35471 addr = vdso_addr(current->mm->start_stack,
35472 image->size - image->sym_vvar_start);
35473@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35474 down_write(&mm->mmap_sem);
35475
35476 addr = get_unmapped_area(NULL, addr,
35477- image->size - image->sym_vvar_start, 0, 0);
35478+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35479 if (IS_ERR_VALUE(addr)) {
35480 ret = addr;
35481 goto up_fail;
35482 }
35483
35484 text_start = addr - image->sym_vvar_start;
35485- current->mm->context.vdso = (void __user *)text_start;
35486+ mm->context.vdso = text_start;
35487
35488 /*
35489 * MAYWRITE to allow gdb to COW and set breakpoints
35490@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35491 hpet_address >> PAGE_SHIFT,
35492 PAGE_SIZE,
35493 pgprot_noncached(PAGE_READONLY));
35494-
35495- if (ret)
35496- goto up_fail;
35497 }
35498 #endif
35499
35500 up_fail:
35501 if (ret)
35502- current->mm->context.vdso = NULL;
35503+ current->mm->context.vdso = 0;
35504
35505 up_write(&mm->mmap_sem);
35506 return ret;
35507@@ -191,8 +190,8 @@ static int load_vdso32(void)
35508
35509 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35510 current_thread_info()->sysenter_return =
35511- current->mm->context.vdso +
35512- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35513+ (void __force_user *)(current->mm->context.vdso +
35514+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35515
35516 return 0;
35517 }
35518@@ -201,9 +200,6 @@ static int load_vdso32(void)
35519 #ifdef CONFIG_X86_64
35520 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35521 {
35522- if (!vdso64_enabled)
35523- return 0;
35524-
35525 return map_vdso(&vdso_image_64, true);
35526 }
35527
35528@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35529 int uses_interp)
35530 {
35531 #ifdef CONFIG_X86_X32_ABI
35532- if (test_thread_flag(TIF_X32)) {
35533- if (!vdso64_enabled)
35534- return 0;
35535-
35536+ if (test_thread_flag(TIF_X32))
35537 return map_vdso(&vdso_image_x32, true);
35538- }
35539 #endif
35540
35541 return load_vdso32();
35542@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35543 #endif
35544
35545 #ifdef CONFIG_X86_64
35546-static __init int vdso_setup(char *s)
35547-{
35548- vdso64_enabled = simple_strtoul(s, NULL, 0);
35549- return 0;
35550-}
35551-__setup("vdso=", vdso_setup);
35552-#endif
35553-
35554-#ifdef CONFIG_X86_64
35555 static void vgetcpu_cpu_init(void *arg)
35556 {
35557 int cpu = smp_processor_id();
35558diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35559index e88fda8..76ce7ce 100644
35560--- a/arch/x86/xen/Kconfig
35561+++ b/arch/x86/xen/Kconfig
35562@@ -9,6 +9,7 @@ config XEN
35563 select XEN_HAVE_PVMMU
35564 depends on X86_64 || (X86_32 && X86_PAE)
35565 depends on X86_TSC
35566+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35567 help
35568 This is the Linux Xen port. Enabling this will allow the
35569 kernel to boot in a paravirtualized environment under the
35570diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35571index 78a881b..9994bbb 100644
35572--- a/arch/x86/xen/enlighten.c
35573+++ b/arch/x86/xen/enlighten.c
35574@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35575
35576 struct shared_info xen_dummy_shared_info;
35577
35578-void *xen_initial_gdt;
35579-
35580 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35581 __read_mostly int xen_have_vector_callback;
35582 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35583@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35584 {
35585 unsigned long va = dtr->address;
35586 unsigned int size = dtr->size + 1;
35587- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35588- unsigned long frames[pages];
35589+ unsigned long frames[65536 / PAGE_SIZE];
35590 int f;
35591
35592 /*
35593@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35594 {
35595 unsigned long va = dtr->address;
35596 unsigned int size = dtr->size + 1;
35597- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35598- unsigned long frames[pages];
35599+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35600 int f;
35601
35602 /*
35603@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35604 * 8-byte entries, or 16 4k pages..
35605 */
35606
35607- BUG_ON(size > 65536);
35608+ BUG_ON(size > GDT_SIZE);
35609 BUG_ON(va & ~PAGE_MASK);
35610
35611 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35612@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35613 return 0;
35614 }
35615
35616-static void set_xen_basic_apic_ops(void)
35617+static void __init set_xen_basic_apic_ops(void)
35618 {
35619 apic->read = xen_apic_read;
35620 apic->write = xen_apic_write;
35621@@ -1291,30 +1287,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35622 #endif
35623 };
35624
35625-static void xen_reboot(int reason)
35626+static __noreturn void xen_reboot(int reason)
35627 {
35628 struct sched_shutdown r = { .reason = reason };
35629
35630- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35631- BUG();
35632+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35633+ BUG();
35634 }
35635
35636-static void xen_restart(char *msg)
35637+static __noreturn void xen_restart(char *msg)
35638 {
35639 xen_reboot(SHUTDOWN_reboot);
35640 }
35641
35642-static void xen_emergency_restart(void)
35643+static __noreturn void xen_emergency_restart(void)
35644 {
35645 xen_reboot(SHUTDOWN_reboot);
35646 }
35647
35648-static void xen_machine_halt(void)
35649+static __noreturn void xen_machine_halt(void)
35650 {
35651 xen_reboot(SHUTDOWN_poweroff);
35652 }
35653
35654-static void xen_machine_power_off(void)
35655+static __noreturn void xen_machine_power_off(void)
35656 {
35657 if (pm_power_off)
35658 pm_power_off();
35659@@ -1467,8 +1463,11 @@ static void __ref xen_setup_gdt(int cpu)
35660 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35661 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35662
35663- setup_stack_canary_segment(0);
35664- switch_to_new_gdt(0);
35665+ setup_stack_canary_segment(cpu);
35666+#ifdef CONFIG_X86_64
35667+ load_percpu_segment(cpu);
35668+#endif
35669+ switch_to_new_gdt(cpu);
35670
35671 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35672 pv_cpu_ops.load_gdt = xen_load_gdt;
35673@@ -1583,7 +1582,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35674 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35675
35676 /* Work out if we support NX */
35677- x86_configure_nx();
35678+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35679+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35680+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35681+ unsigned l, h;
35682+
35683+ __supported_pte_mask |= _PAGE_NX;
35684+ rdmsr(MSR_EFER, l, h);
35685+ l |= EFER_NX;
35686+ wrmsr(MSR_EFER, l, h);
35687+ }
35688+#endif
35689
35690 /* Get mfn list */
35691 xen_build_dynamic_phys_to_machine();
35692@@ -1611,13 +1620,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35693
35694 machine_ops = xen_machine_ops;
35695
35696- /*
35697- * The only reliable way to retain the initial address of the
35698- * percpu gdt_page is to remember it here, so we can go and
35699- * mark it RW later, when the initial percpu area is freed.
35700- */
35701- xen_initial_gdt = &per_cpu(gdt_page, 0);
35702-
35703 xen_smp_init();
35704
35705 #ifdef CONFIG_ACPI_NUMA
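
Both xen_load_gdt hunks above remove on-stack variable-length arrays (unsigned long frames[pages], with pages derived from the descriptor) in favour of fixed arrays sized for the worst case the code already asserts with BUG_ON(size > GDT_SIZE): stack usage becomes a compile-time constant instead of tracking an externally influenced size. The same file also gains __noreturn annotations on the shutdown paths and an explicit CPUID/EFER NX probe in place of x86_configure_nx(). A runnable miniature of the VLA conversion (65536 stands in for the asserted bound; the kernel's real GDT_SIZE is smaller):

#include <stdio.h>

#define PAGE_SIZE 4096
#define GDT_SIZE  65536   /* bound from the BUG_ON(); illustrative */

int main(void)
{
    /* Before: unsigned long frames[pages]; stack use follows 'size'.
     * After: a fixed array sized for the asserted maximum. */
    unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
    unsigned int size  = 24 * PAGE_SIZE;   /* example descriptor size */
    unsigned int pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;

    if (size > GDT_SIZE)                   /* mirrors the BUG_ON() */
        return 1;

    frames[0] = pages;
    printf("using %lu of %zu frame slots\n", frames[0],
           sizeof(frames) / sizeof(frames[0]));
    return 0;
}
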
35706diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35707index 5c1f9ac..0e15f5c 100644
35708--- a/arch/x86/xen/mmu.c
35709+++ b/arch/x86/xen/mmu.c
35710@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35711 return val;
35712 }
35713
35714-static pteval_t pte_pfn_to_mfn(pteval_t val)
35715+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35716 {
35717 if (val & _PAGE_PRESENT) {
35718 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35719@@ -1836,7 +1836,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35720 * L3_k[511] -> level2_fixmap_pgt */
35721 convert_pfn_mfn(level3_kernel_pgt);
35722
35723+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35724+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35725+ convert_pfn_mfn(level3_vmemmap_pgt);
35726 /* L3_k[511][506] -> level1_fixmap_pgt */
35727+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35728 convert_pfn_mfn(level2_fixmap_pgt);
35729 }
35730 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35731@@ -1861,11 +1865,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35732 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35733 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35734 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35735+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35736+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35737+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35738 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35739 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35740+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35741 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35742 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35743 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
35744+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
35745
35746 /* Pin down new L4 */
35747 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
35748@@ -2049,6 +2058,7 @@ static void __init xen_post_allocator_init(void)
35749 pv_mmu_ops.set_pud = xen_set_pud;
35750 #if PAGETABLE_LEVELS == 4
35751 pv_mmu_ops.set_pgd = xen_set_pgd;
35752+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35753 #endif
35754
35755 /* This will work as long as patching hasn't happened yet
35756@@ -2127,6 +2137,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35757 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35758 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35759 .set_pgd = xen_set_pgd_hyper,
35760+ .set_pgd_batched = xen_set_pgd_hyper,
35761
35762 .alloc_pud = xen_alloc_pmd_init,
35763 .release_pud = xen_release_pmd_init,
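
The mmu.c hunks wire the PaX-introduced set_pgd_batched hook to the same Xen backends as set_pgd: once pv_mmu_ops grows a new function pointer, every paravirt implementation must fill it in, or calls through the ops table would hit a NULL pointer. A runnable miniature of that wiring (names with a _sketch suffix are illustrative):

#include <stdio.h>

typedef unsigned long pgd_sketch_t;

struct pv_mmu_ops_sketch {
    void (*set_pgd)(pgd_sketch_t *pgdp, pgd_sketch_t val);
    void (*set_pgd_batched)(pgd_sketch_t *pgdp, pgd_sketch_t val);
};

static void xen_set_pgd_sketch(pgd_sketch_t *pgdp, pgd_sketch_t val)
{
    *pgdp = val;   /* the real backend goes through a hypercall path */
}

/* Both hooks point at the same backend, as in the patch: a batched
 * PGD update under Xen still has to take the hypervisor-aware path. */
static struct pv_mmu_ops_sketch ops = {
    .set_pgd         = xen_set_pgd_sketch,
    .set_pgd_batched = xen_set_pgd_sketch,
};

int main(void)
{
    pgd_sketch_t pgd = 0;
    ops.set_pgd_batched(&pgd, 0x42);
    printf("pgd = %#lx\n", pgd);
    return 0;
}
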
35764diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35765index 4c071ae..00e7049 100644
35766--- a/arch/x86/xen/smp.c
35767+++ b/arch/x86/xen/smp.c
35768@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35769
35770 if (xen_pv_domain()) {
35771 if (!xen_feature(XENFEAT_writable_page_tables))
35772- /* We've switched to the "real" per-cpu gdt, so make
35773- * sure the old memory can be recycled. */
35774- make_lowmem_page_readwrite(xen_initial_gdt);
35775-
35776 #ifdef CONFIG_X86_32
35777 /*
35778 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35779 * expects __USER_DS
35780 */
35781- loadsegment(ds, __USER_DS);
35782- loadsegment(es, __USER_DS);
35783+ loadsegment(ds, __KERNEL_DS);
35784+ loadsegment(es, __KERNEL_DS);
35785 #endif
35786
35787 xen_filter_cpu_maps();
35788@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35789 #ifdef CONFIG_X86_32
35790 /* Note: PVH is not yet supported on x86_32. */
35791 ctxt->user_regs.fs = __KERNEL_PERCPU;
35792- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
35793+ savesegment(gs, ctxt->user_regs.gs);
35794 #endif
35795 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
35796
35797@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35798 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
35799 ctxt->flags = VGCF_IN_KERNEL;
35800 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
35801- ctxt->user_regs.ds = __USER_DS;
35802- ctxt->user_regs.es = __USER_DS;
35803+ ctxt->user_regs.ds = __KERNEL_DS;
35804+ ctxt->user_regs.es = __KERNEL_DS;
35805 ctxt->user_regs.ss = __KERNEL_DS;
35806
35807 xen_copy_trap_info(ctxt->trap_ctxt);
35808@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
35809 int rc;
35810
35811 per_cpu(current_task, cpu) = idle;
35812+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
35813 #ifdef CONFIG_X86_32
35814 irq_ctx_init(cpu);
35815 #else
35816 clear_tsk_thread_flag(idle, TIF_FORK);
35817 #endif
35818- per_cpu(kernel_stack, cpu) =
35819- (unsigned long)task_stack_page(idle) -
35820- KERNEL_STACK_OFFSET + THREAD_SIZE;
35821+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
35822
35823 xen_setup_runstate_info(cpu);
35824 xen_setup_timer(cpu);
35825@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
35826
35827 void __init xen_smp_init(void)
35828 {
35829- smp_ops = xen_smp_ops;
35830+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
35831 xen_fill_possible_map();
35832 }
35833
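xen_smp_init() now memcpy()s over smp_ops instead of assigning, because under grsecurity's constification smp_ops is a const object destined for read-only memory; plain assignment no longer compiles, and the cast-plus-memcpy runs early enough that the page is still writable. A runnable miniature; here the destination is left genuinely writable so the demo runs, which is what the kernel's early-boot window effectively guarantees:

#include <stdio.h>
#include <string.h>

struct smp_ops_sketch { int (*cpu_up)(unsigned int cpu); };

static int xen_cpu_up_sketch(unsigned int cpu) { return (int)cpu; }

static const struct smp_ops_sketch xen_smp_ops_sketch = {
    .cpu_up = xen_cpu_up_sketch,
};

/* In the kernel this object is const too; it is writable here only so
 * the demo does not fault, standing in for the early-boot window. */
static struct smp_ops_sketch smp_ops_sketch;

int main(void)
{
    /* smp_ops = xen_smp_ops;  would not compile against a const LHS */
    memcpy((void *)&smp_ops_sketch, &xen_smp_ops_sketch,
           sizeof smp_ops_sketch);
    printf("cpu_up(3) -> %d\n", smp_ops_sketch.cpu_up(3));
    return 0;
}
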
35834diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
35835index fd92a64..1f72641 100644
35836--- a/arch/x86/xen/xen-asm_32.S
35837+++ b/arch/x86/xen/xen-asm_32.S
35838@@ -99,7 +99,7 @@ ENTRY(xen_iret)
35839 pushw %fs
35840 movl $(__KERNEL_PERCPU), %eax
35841 movl %eax, %fs
35842- movl %fs:xen_vcpu, %eax
35843+ mov PER_CPU_VAR(xen_vcpu), %eax
35844 POP_FS
35845 #else
35846 movl %ss:xen_vcpu, %eax
35847diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
35848index 674b2225..f1f5dc1 100644
35849--- a/arch/x86/xen/xen-head.S
35850+++ b/arch/x86/xen/xen-head.S
35851@@ -39,6 +39,17 @@ ENTRY(startup_xen)
35852 #ifdef CONFIG_X86_32
35853 mov %esi,xen_start_info
35854 mov $init_thread_union+THREAD_SIZE,%esp
35855+#ifdef CONFIG_SMP
35856+ movl $cpu_gdt_table,%edi
35857+ movl $__per_cpu_load,%eax
35858+ movw %ax,__KERNEL_PERCPU + 2(%edi)
35859+ rorl $16,%eax
35860+ movb %al,__KERNEL_PERCPU + 4(%edi)
35861+ movb %ah,__KERNEL_PERCPU + 7(%edi)
35862+ movl $__per_cpu_end - 1,%eax
35863+ subl $__per_cpu_start,%eax
35864+ movw %ax,__KERNEL_PERCPU + 0(%edi)
35865+#endif
35866 #else
35867 mov %rsi,xen_start_info
35868 mov $init_thread_union+THREAD_SIZE,%rsp
35869diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
35870index 5686bd9..0c8b6ee 100644
35871--- a/arch/x86/xen/xen-ops.h
35872+++ b/arch/x86/xen/xen-ops.h
35873@@ -10,8 +10,6 @@
35874 extern const char xen_hypervisor_callback[];
35875 extern const char xen_failsafe_callback[];
35876
35877-extern void *xen_initial_gdt;
35878-
35879 struct trap_info;
35880 void xen_copy_trap_info(struct trap_info *traps);
35881
35882diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
35883index 525bd3d..ef888b1 100644
35884--- a/arch/xtensa/variants/dc232b/include/variant/core.h
35885+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
35886@@ -119,9 +119,9 @@
35887 ----------------------------------------------------------------------*/
35888
35889 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
35890-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
35891 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
35892 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
35893+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35894
35895 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
35896 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
35897diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
35898index 2f33760..835e50a 100644
35899--- a/arch/xtensa/variants/fsf/include/variant/core.h
35900+++ b/arch/xtensa/variants/fsf/include/variant/core.h
35901@@ -11,6 +11,7 @@
35902 #ifndef _XTENSA_CORE_H
35903 #define _XTENSA_CORE_H
35904
35905+#include <linux/const.h>
35906
35907 /****************************************************************************
35908 Parameters Useful for Any Code, USER or PRIVILEGED
35909@@ -112,9 +113,9 @@
35910 ----------------------------------------------------------------------*/
35911
35912 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
35913-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
35914 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
35915 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
35916+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35917
35918 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
35919 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
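
The two xtensa hunks derive XCHAL_DCACHE_LINESIZE from XCHAL_DCACHE_LINEWIDTH instead of stating the value twice, so the pair can never drift apart, and use _AC() so the constant carries a UL suffix in C while staying bare for the assembler (hence the new <linux/const.h> include). A runnable check with a simplified stand-in for _AC() (the real macro also handles __ASSEMBLY__):

#include <stdio.h>

#define _AC(x, y) x##y                 /* C-side behaviour of _AC() */
#define XCHAL_DCACHE_LINEWIDTH 5
#define XCHAL_DCACHE_LINESIZE (_AC(1, UL) << XCHAL_DCACHE_LINEWIDTH)

int main(void)
{
    /* prints 32, matching the value the old hard-coded define had */
    printf("D-cache line size: %lu bytes\n", XCHAL_DCACHE_LINESIZE);
    return 0;
}
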
35920diff --git a/block/bio.c b/block/bio.c
35921index 471d738..bd3da0d 100644
35922--- a/block/bio.c
35923+++ b/block/bio.c
35924@@ -1169,7 +1169,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
35925 /*
35926 * Overflow, abort
35927 */
35928- if (end < start)
35929+ if (end < start || end - start > INT_MAX - nr_pages)
35930 return ERR_PTR(-EINVAL);
35931
35932 nr_pages += end - start;
35933@@ -1303,7 +1303,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
35934 /*
35935 * Overflow, abort
35936 */
35937- if (end < start)
35938+ if (end < start || end - start > INT_MAX - nr_pages)
35939 return ERR_PTR(-EINVAL);
35940
35941 nr_pages += end - start;
35942@@ -1565,7 +1565,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
35943 const int read = bio_data_dir(bio) == READ;
35944 struct bio_map_data *bmd = bio->bi_private;
35945 int i;
35946- char *p = bmd->sgvecs[0].iov_base;
35947+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
35948
35949 bio_for_each_segment_all(bvec, bio, i) {
35950 char *addr = page_address(bvec->bv_page);
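
In both bio.c hunks the original guard only caught a wrapped end < start; the added end - start > INT_MAX - nr_pages test also rejects iovecs whose accumulated page counts would overflow the signed counter, and does so without ever computing the overflowing sum. A runnable illustration:

#include <stdio.h>
#include <limits.h>

static int add_pages_checked(int nr_pages, unsigned long start,
                             unsigned long end, int *out)
{
    if (end < start ||
        end - start > (unsigned long)(INT_MAX - nr_pages))
        return -1;                 /* would overflow: abort */
    *out = nr_pages + (int)(end - start);
    return 0;
}

int main(void)
{
    int total = INT_MAX - 2;       /* counter already near the limit */
    if (add_pages_checked(total, 0, 100, &total))
        printf("rejected: page count would overflow\n");
    return 0;
}
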
35951diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
35952index 0736729..2ec3b48 100644
35953--- a/block/blk-iopoll.c
35954+++ b/block/blk-iopoll.c
35955@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
35956 }
35957 EXPORT_SYMBOL(blk_iopoll_complete);
35958
35959-static void blk_iopoll_softirq(struct softirq_action *h)
35960+static __latent_entropy void blk_iopoll_softirq(void)
35961 {
35962 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
35963 int rearm = 0, budget = blk_iopoll_budget;
35964diff --git a/block/blk-map.c b/block/blk-map.c
35965index f890d43..97b0482 100644
35966--- a/block/blk-map.c
35967+++ b/block/blk-map.c
35968@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
35969 if (!len || !kbuf)
35970 return -EINVAL;
35971
35972- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
35973+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
35974 if (do_copy)
35975 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
35976 else
35977diff --git a/block/blk-softirq.c b/block/blk-softirq.c
35978index 53b1737..08177d2e 100644
35979--- a/block/blk-softirq.c
35980+++ b/block/blk-softirq.c
35981@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
35982 * Softirq action handler - move entries to local list and loop over them
35983 * while passing them to the queue registered handler.
35984 */
35985-static void blk_done_softirq(struct softirq_action *h)
35986+static __latent_entropy void blk_done_softirq(void)
35987 {
35988 struct list_head *cpu_list, local_list;
35989
35990diff --git a/block/bsg.c b/block/bsg.c
35991index 276e869..6fe4c61 100644
35992--- a/block/bsg.c
35993+++ b/block/bsg.c
35994@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
35995 struct sg_io_v4 *hdr, struct bsg_device *bd,
35996 fmode_t has_write_perm)
35997 {
35998+ unsigned char tmpcmd[sizeof(rq->__cmd)];
35999+ unsigned char *cmdptr;
36000+
36001 if (hdr->request_len > BLK_MAX_CDB) {
36002 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36003 if (!rq->cmd)
36004 return -ENOMEM;
36005- }
36006+ cmdptr = rq->cmd;
36007+ } else
36008+ cmdptr = tmpcmd;
36009
36010- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36011+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36012 hdr->request_len))
36013 return -EFAULT;
36014
36015+ if (cmdptr != rq->cmd)
36016+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36017+
36018 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36019 if (blk_verify_command(rq->cmd, has_write_perm))
36020 return -EPERM;
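
The bsg hunk introduces the stage-then-commit pattern that recurs in the scsi_ioctl.c hunks further down: user data is copy_from_user()'d into a stack scratch buffer first and only memcpy()'d into the live rq->cmd once the whole copy has succeeded, so a faulting partial copy can never leave a half-written SCSI command behind, and the copy target is a plain fixed-size stack object that PaX's USERCOPY checks can size-verify. A runnable miniature with a stand-in for copy_from_user():

#include <stdio.h>
#include <string.h>

#define CMD_MAX 16

/* stand-in for copy_from_user(): may fail partway through */
static int copy_from_user_sketch(void *dst, const void *src, size_t n,
                                 int simulate_fault)
{
    if (simulate_fault)
        return -1;
    memcpy(dst, src, n);
    return 0;
}

static int fill_cmd(unsigned char *cmd, const unsigned char *user,
                    size_t n, int fault)
{
    unsigned char tmpcmd[CMD_MAX];

    if (n > sizeof(tmpcmd) ||
        copy_from_user_sketch(tmpcmd, user, n, fault))
        return -1;             /* live buffer untouched on failure */
    memcpy(cmd, tmpcmd, n);    /* commit only after a full copy */
    return 0;
}

int main(void)
{
    unsigned char cmd[CMD_MAX] = "OLD";
    const unsigned char user[] = { 0x12, 0x34 };

    fill_cmd(cmd, user, sizeof(user), 1);
    printf("after faulting copy: %s\n", (char *)cmd);  /* still OLD */
    return 0;
}
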
36021diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36022index f678c73..f35aa18 100644
36023--- a/block/compat_ioctl.c
36024+++ b/block/compat_ioctl.c
36025@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36026 cgc = compat_alloc_user_space(sizeof(*cgc));
36027 cgc32 = compat_ptr(arg);
36028
36029- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36030+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36031 get_user(data, &cgc32->buffer) ||
36032 put_user(compat_ptr(data), &cgc->buffer) ||
36033 copy_in_user(&cgc->buflen, &cgc32->buflen,
36034@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36035 err |= __get_user(f->spec1, &uf->spec1);
36036 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36037 err |= __get_user(name, &uf->name);
36038- f->name = compat_ptr(name);
36039+ f->name = (void __force_kernel *)compat_ptr(name);
36040 if (err) {
36041 err = -EFAULT;
36042 goto out;
36043diff --git a/block/genhd.c b/block/genhd.c
36044index 0a536dc..b8f7aca 100644
36045--- a/block/genhd.c
36046+++ b/block/genhd.c
36047@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36048
36049 /*
36050 * Register device numbers dev..(dev+range-1)
36051- * range must be nonzero
36052+ * Noop if @range is zero.
36053 * The hash chain is sorted on range, so that subranges can override.
36054 */
36055 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36056 struct kobject *(*probe)(dev_t, int *, void *),
36057 int (*lock)(dev_t, void *), void *data)
36058 {
36059- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36060+ if (range)
36061+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36062 }
36063
36064 EXPORT_SYMBOL(blk_register_region);
36065
36066+/* undo blk_register_region(), noop if @range is zero */
36067 void blk_unregister_region(dev_t devt, unsigned long range)
36068 {
36069- kobj_unmap(bdev_map, devt, range);
36070+ if (range)
36071+ kobj_unmap(bdev_map, devt, range);
36072 }
36073
36074 EXPORT_SYMBOL(blk_unregister_region);
36075diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36076index 56d08fd..2e07090 100644
36077--- a/block/partitions/efi.c
36078+++ b/block/partitions/efi.c
36079@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36080 if (!gpt)
36081 return NULL;
36082
36083+ if (!le32_to_cpu(gpt->num_partition_entries))
36084+ return NULL;
36085+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36086+ if (!pte)
36087+ return NULL;
36088+
36089 count = le32_to_cpu(gpt->num_partition_entries) *
36090 le32_to_cpu(gpt->sizeof_partition_entry);
36091- if (!count)
36092- return NULL;
36093- pte = kmalloc(count, GFP_KERNEL);
36094- if (!pte)
36095- return NULL;
36096-
36097 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36098 (u8 *) pte, count) < count) {
36099 kfree(pte);
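
The GPT hunk replaces kmalloc(count), where count is the caller-computed product of two fields read from the on-disk header, with kcalloc(entries, entry_size): kcalloc checks the multiplication for overflow before allocating, and a zero entry count is now rejected up front. A runnable sketch of the failure mode, using calloc(), which performs the same product check in userspace (exact behaviour of the oversized request depends on the platform):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
    uint32_t nr = 0x20000001, sz = 0x80;   /* product wraps 32 bits */

    /* unchecked: the product wraps to 0x80 in 32-bit arithmetic,
     * so a kmalloc(count)-style call would under-allocate badly */
    printf("wrapped size: %u\n", (unsigned)(nr * sz));

    /* checked: calloc/kcalloc verify nr * sz before allocating */
    void *p = calloc(nr, sz);
    if (!p)
        printf("calloc rejected or could not satisfy the request\n");
    free(p);
    return 0;
}
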
36100diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36101index 28163fa..07190a06 100644
36102--- a/block/scsi_ioctl.c
36103+++ b/block/scsi_ioctl.c
36104@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36105 return put_user(0, p);
36106 }
36107
36108-static int sg_get_timeout(struct request_queue *q)
36109+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36110 {
36111 return jiffies_to_clock_t(q->sg_timeout);
36112 }
36113@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36114 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36115 struct sg_io_hdr *hdr, fmode_t mode)
36116 {
36117- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36118+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36119+ unsigned char *cmdptr;
36120+
36121+ if (rq->cmd != rq->__cmd)
36122+ cmdptr = rq->cmd;
36123+ else
36124+ cmdptr = tmpcmd;
36125+
36126+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36127 return -EFAULT;
36128+
36129+ if (cmdptr != rq->cmd)
36130+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36131+
36132 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36133 return -EPERM;
36134
36135@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36136 int err;
36137 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36138 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36139+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36140+ unsigned char *cmdptr;
36141
36142 if (!sic)
36143 return -EINVAL;
36144@@ -469,9 +483,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36145 */
36146 err = -EFAULT;
36147 rq->cmd_len = cmdlen;
36148- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36149+
36150+ if (rq->cmd != rq->__cmd)
36151+ cmdptr = rq->cmd;
36152+ else
36153+ cmdptr = tmpcmd;
36154+
36155+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36156 goto error;
36157
36158+ if (rq->cmd != cmdptr)
36159+ memcpy(rq->cmd, cmdptr, cmdlen);
36160+
36161 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36162 goto error;
36163
36164diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36165index 650afac1..f3307de 100644
36166--- a/crypto/cryptd.c
36167+++ b/crypto/cryptd.c
36168@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36169
36170 struct cryptd_blkcipher_request_ctx {
36171 crypto_completion_t complete;
36172-};
36173+} __no_const;
36174
36175 struct cryptd_hash_ctx {
36176 struct crypto_shash *child;
36177@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36178
36179 struct cryptd_aead_request_ctx {
36180 crypto_completion_t complete;
36181-};
36182+} __no_const;
36183
36184 static void cryptd_queue_worker(struct work_struct *work);
36185
36186diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36187index c305d41..a96de79 100644
36188--- a/crypto/pcrypt.c
36189+++ b/crypto/pcrypt.c
36190@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36191 int ret;
36192
36193 pinst->kobj.kset = pcrypt_kset;
36194- ret = kobject_add(&pinst->kobj, NULL, name);
36195+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36196 if (!ret)
36197 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36198
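The pcrypt change is a classic format-string fix: kobject_add() takes a printf-style format, so passing the instance name directly would let any '%' in it be parsed as a conversion; "%s" with the name as an argument is the safe form. A runnable demonstration of the underlying bug class:

#include <stdio.h>

int main(void)
{
    const char *name = "inst-100%safe";    /* contains a '%' */

    /* unsafe shape: printf(name) would try to interpret "%sa..."
     * as a conversion and read a bogus argument */
    printf("%s\n", name);                  /* safe: fixed format */
    return 0;
}
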
36199diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36200index 6921c7f..78e1af7 100644
36201--- a/drivers/acpi/acpica/hwxfsleep.c
36202+++ b/drivers/acpi/acpica/hwxfsleep.c
36203@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36204 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36205
36206 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36207- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36208- acpi_hw_extended_sleep},
36209- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36210- acpi_hw_extended_wake_prep},
36211- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36212+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36213+ .extended_function = acpi_hw_extended_sleep},
36214+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36215+ .extended_function = acpi_hw_extended_wake_prep},
36216+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36217+ .extended_function = acpi_hw_extended_wake}
36218 };
36219
36220 /*
36221diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36222index 16129c7..8b675cd 100644
36223--- a/drivers/acpi/apei/apei-internal.h
36224+++ b/drivers/acpi/apei/apei-internal.h
36225@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36226 struct apei_exec_ins_type {
36227 u32 flags;
36228 apei_exec_ins_func_t run;
36229-};
36230+} __do_const;
36231
36232 struct apei_exec_context {
36233 u32 ip;
36234diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36235index e82d097..0c855c1 100644
36236--- a/drivers/acpi/apei/ghes.c
36237+++ b/drivers/acpi/apei/ghes.c
36238@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36239 const struct acpi_hest_generic *generic,
36240 const struct acpi_hest_generic_status *estatus)
36241 {
36242- static atomic_t seqno;
36243+ static atomic_unchecked_t seqno;
36244 unsigned int curr_seqno;
36245 char pfx_seq[64];
36246
36247@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36248 else
36249 pfx = KERN_ERR;
36250 }
36251- curr_seqno = atomic_inc_return(&seqno);
36252+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36253 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36254 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36255 pfx_seq, generic->header.source_id);
36256diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36257index a83e3c6..c3d617f 100644
36258--- a/drivers/acpi/bgrt.c
36259+++ b/drivers/acpi/bgrt.c
36260@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36261 if (!bgrt_image)
36262 return -ENODEV;
36263
36264- bin_attr_image.private = bgrt_image;
36265- bin_attr_image.size = bgrt_image_size;
36266+ pax_open_kernel();
36267+ *(void **)&bin_attr_image.private = bgrt_image;
36268+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36269+ pax_close_kernel();
36270
36271 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36272 if (!bgrt_kobj)
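
bgrt_init() now patches the constified bin_attr_image through casts inside a pax_open_kernel()/pax_close_kernel() window; the same idiom appears below in libata-core.c and pata_arasan_cf.c. Once constification takes effect the attribute lives in read-only memory, so legitimate writes need a brief window with write protection lifted. A hedged kernel-context sketch of the x86 shape of that window, not a standalone program (the real PaX helpers also deal with preemption and differ in detail):

static inline unsigned long pax_open_kernel_sketch(void)
{
    unsigned long cr0 = read_cr0();   /* kernel helper */
    write_cr0(cr0 & ~X86_CR0_WP);     /* supervisor writes now ignore
                                       * read-only page-table bits */
    return cr0;
}

static inline void pax_close_kernel_sketch(unsigned long cr0)
{
    write_cr0(cr0);                   /* re-arm write protection */
}
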
36273diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36274index 9b693d5..8953d54 100644
36275--- a/drivers/acpi/blacklist.c
36276+++ b/drivers/acpi/blacklist.c
36277@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36278 u32 is_critical_error;
36279 };
36280
36281-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36282+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36283
36284 /*
36285 * POLICY: If *anything* doesn't work, put it on the blacklist.
36286@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36287 return 0;
36288 }
36289
36290-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36291+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36292 {
36293 .callback = dmi_disable_osi_vista,
36294 .ident = "Fujitsu Siemens",
36295diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36296index c68e724..e863008 100644
36297--- a/drivers/acpi/custom_method.c
36298+++ b/drivers/acpi/custom_method.c
36299@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36300 struct acpi_table_header table;
36301 acpi_status status;
36302
36303+#ifdef CONFIG_GRKERNSEC_KMEM
36304+ return -EPERM;
36305+#endif
36306+
36307 if (!(*ppos)) {
36308 /* parse the table header to get the table length */
36309 if (count <= sizeof(struct acpi_table_header))
36310diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36311index c0d44d3..5ad8f9a 100644
36312--- a/drivers/acpi/device_pm.c
36313+++ b/drivers/acpi/device_pm.c
36314@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36315
36316 #endif /* CONFIG_PM_SLEEP */
36317
36318+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36319+
36320 static struct dev_pm_domain acpi_general_pm_domain = {
36321 .ops = {
36322 #ifdef CONFIG_PM
36323@@ -1043,6 +1045,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36324 #endif
36325 #endif
36326 },
36327+ .detach = acpi_dev_pm_detach
36328 };
36329
36330 /**
36331@@ -1112,7 +1115,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36332 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36333 }
36334
36335- dev->pm_domain->detach = acpi_dev_pm_detach;
36336 return 0;
36337 }
36338 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36339diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36340index 87b704e..2d1d0c1 100644
36341--- a/drivers/acpi/processor_idle.c
36342+++ b/drivers/acpi/processor_idle.c
36343@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36344 {
36345 int i, count = CPUIDLE_DRIVER_STATE_START;
36346 struct acpi_processor_cx *cx;
36347- struct cpuidle_state *state;
36348+ cpuidle_state_no_const *state;
36349 struct cpuidle_driver *drv = &acpi_idle_driver;
36350
36351 if (!pr->flags.power_setup_done)
36352diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36353index 13e577c..cef11ee 100644
36354--- a/drivers/acpi/sysfs.c
36355+++ b/drivers/acpi/sysfs.c
36356@@ -423,11 +423,11 @@ static u32 num_counters;
36357 static struct attribute **all_attrs;
36358 static u32 acpi_gpe_count;
36359
36360-static struct attribute_group interrupt_stats_attr_group = {
36361+static attribute_group_no_const interrupt_stats_attr_group = {
36362 .name = "interrupts",
36363 };
36364
36365-static struct kobj_attribute *counter_attrs;
36366+static kobj_attribute_no_const *counter_attrs;
36367
36368 static void delete_gpe_attr_array(void)
36369 {
36370diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36371index 61a9c07..ea98fa1 100644
36372--- a/drivers/ata/libahci.c
36373+++ b/drivers/ata/libahci.c
36374@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36375 }
36376 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36377
36378-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36379+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36380 struct ata_taskfile *tf, int is_cmd, u16 flags,
36381 unsigned long timeout_msec)
36382 {
36383diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36384index d1a05f9..eb70e10 100644
36385--- a/drivers/ata/libata-core.c
36386+++ b/drivers/ata/libata-core.c
36387@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36388 static void ata_dev_xfermask(struct ata_device *dev);
36389 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36390
36391-atomic_t ata_print_id = ATOMIC_INIT(0);
36392+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36393
36394 struct ata_force_param {
36395 const char *name;
36396@@ -4831,7 +4831,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36397 struct ata_port *ap;
36398 unsigned int tag;
36399
36400- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36401+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36402 ap = qc->ap;
36403
36404 qc->flags = 0;
36405@@ -4847,7 +4847,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36406 struct ata_port *ap;
36407 struct ata_link *link;
36408
36409- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36410+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36411 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36412 ap = qc->ap;
36413 link = qc->dev->link;
36414@@ -5951,6 +5951,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36415 return;
36416
36417 spin_lock(&lock);
36418+ pax_open_kernel();
36419
36420 for (cur = ops->inherits; cur; cur = cur->inherits) {
36421 void **inherit = (void **)cur;
36422@@ -5964,8 +5965,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36423 if (IS_ERR(*pp))
36424 *pp = NULL;
36425
36426- ops->inherits = NULL;
36427+ *(struct ata_port_operations **)&ops->inherits = NULL;
36428
36429+ pax_close_kernel();
36430 spin_unlock(&lock);
36431 }
36432
36433@@ -6161,7 +6163,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36434
36435 /* give ports names and add SCSI hosts */
36436 for (i = 0; i < host->n_ports; i++) {
36437- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36438+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36439 host->ports[i]->local_port_no = i + 1;
36440 }
36441
36442diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36443index 6abd17a..9961bf7 100644
36444--- a/drivers/ata/libata-scsi.c
36445+++ b/drivers/ata/libata-scsi.c
36446@@ -4169,7 +4169,7 @@ int ata_sas_port_init(struct ata_port *ap)
36447
36448 if (rc)
36449 return rc;
36450- ap->print_id = atomic_inc_return(&ata_print_id);
36451+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36452 return 0;
36453 }
36454 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36455diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36456index 5f4e0cc..ff2c347 100644
36457--- a/drivers/ata/libata.h
36458+++ b/drivers/ata/libata.h
36459@@ -53,7 +53,7 @@ enum {
36460 ATA_DNXFER_QUIET = (1 << 31),
36461 };
36462
36463-extern atomic_t ata_print_id;
36464+extern atomic_unchecked_t ata_print_id;
36465 extern int atapi_passthru16;
36466 extern int libata_fua;
36467 extern int libata_noacpi;
36468diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36469index a9b0c82..207d97d 100644
36470--- a/drivers/ata/pata_arasan_cf.c
36471+++ b/drivers/ata/pata_arasan_cf.c
36472@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36473 /* Handle platform specific quirks */
36474 if (quirk) {
36475 if (quirk & CF_BROKEN_PIO) {
36476- ap->ops->set_piomode = NULL;
36477+ pax_open_kernel();
36478+ *(void **)&ap->ops->set_piomode = NULL;
36479+ pax_close_kernel();
36480 ap->pio_mask = 0;
36481 }
36482 if (quirk & CF_BROKEN_MWDMA)
36483diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36484index f9b983a..887b9d8 100644
36485--- a/drivers/atm/adummy.c
36486+++ b/drivers/atm/adummy.c
36487@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36488 vcc->pop(vcc, skb);
36489 else
36490 dev_kfree_skb_any(skb);
36491- atomic_inc(&vcc->stats->tx);
36492+ atomic_inc_unchecked(&vcc->stats->tx);
36493
36494 return 0;
36495 }
36496diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36497index f1a9198..f466a4a 100644
36498--- a/drivers/atm/ambassador.c
36499+++ b/drivers/atm/ambassador.c
36500@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36501 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36502
36503 // VC layer stats
36504- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36505+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36506
36507 // free the descriptor
36508 kfree (tx_descr);
36509@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36510 dump_skb ("<<<", vc, skb);
36511
36512 // VC layer stats
36513- atomic_inc(&atm_vcc->stats->rx);
36514+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36515 __net_timestamp(skb);
36516 // end of our responsibility
36517 atm_vcc->push (atm_vcc, skb);
36518@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36519 } else {
36520 PRINTK (KERN_INFO, "dropped over-size frame");
36521 // should we count this?
36522- atomic_inc(&atm_vcc->stats->rx_drop);
36523+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36524 }
36525
36526 } else {
36527@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36528 }
36529
36530 if (check_area (skb->data, skb->len)) {
36531- atomic_inc(&atm_vcc->stats->tx_err);
36532+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36533 return -ENOMEM; // ?
36534 }
36535
36536diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36537index 480fa6f..947067c 100644
36538--- a/drivers/atm/atmtcp.c
36539+++ b/drivers/atm/atmtcp.c
36540@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36541 if (vcc->pop) vcc->pop(vcc,skb);
36542 else dev_kfree_skb(skb);
36543 if (dev_data) return 0;
36544- atomic_inc(&vcc->stats->tx_err);
36545+ atomic_inc_unchecked(&vcc->stats->tx_err);
36546 return -ENOLINK;
36547 }
36548 size = skb->len+sizeof(struct atmtcp_hdr);
36549@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36550 if (!new_skb) {
36551 if (vcc->pop) vcc->pop(vcc,skb);
36552 else dev_kfree_skb(skb);
36553- atomic_inc(&vcc->stats->tx_err);
36554+ atomic_inc_unchecked(&vcc->stats->tx_err);
36555 return -ENOBUFS;
36556 }
36557 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36558@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36559 if (vcc->pop) vcc->pop(vcc,skb);
36560 else dev_kfree_skb(skb);
36561 out_vcc->push(out_vcc,new_skb);
36562- atomic_inc(&vcc->stats->tx);
36563- atomic_inc(&out_vcc->stats->rx);
36564+ atomic_inc_unchecked(&vcc->stats->tx);
36565+ atomic_inc_unchecked(&out_vcc->stats->rx);
36566 return 0;
36567 }
36568
36569@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36570 read_unlock(&vcc_sklist_lock);
36571 if (!out_vcc) {
36572 result = -EUNATCH;
36573- atomic_inc(&vcc->stats->tx_err);
36574+ atomic_inc_unchecked(&vcc->stats->tx_err);
36575 goto done;
36576 }
36577 skb_pull(skb,sizeof(struct atmtcp_hdr));
36578@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36579 __net_timestamp(new_skb);
36580 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36581 out_vcc->push(out_vcc,new_skb);
36582- atomic_inc(&vcc->stats->tx);
36583- atomic_inc(&out_vcc->stats->rx);
36584+ atomic_inc_unchecked(&vcc->stats->tx);
36585+ atomic_inc_unchecked(&out_vcc->stats->rx);
36586 done:
36587 if (vcc->pop) vcc->pop(vcc,skb);
36588 else dev_kfree_skb(skb);
36589diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36590index c7fab3e..68d0965 100644
36591--- a/drivers/atm/eni.c
36592+++ b/drivers/atm/eni.c
36593@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36594 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36595 vcc->dev->number);
36596 length = 0;
36597- atomic_inc(&vcc->stats->rx_err);
36598+ atomic_inc_unchecked(&vcc->stats->rx_err);
36599 }
36600 else {
36601 length = ATM_CELL_SIZE-1; /* no HEC */
36602@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36603 size);
36604 }
36605 eff = length = 0;
36606- atomic_inc(&vcc->stats->rx_err);
36607+ atomic_inc_unchecked(&vcc->stats->rx_err);
36608 }
36609 else {
36610 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36611@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36612 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36613 vcc->dev->number,vcc->vci,length,size << 2,descr);
36614 length = eff = 0;
36615- atomic_inc(&vcc->stats->rx_err);
36616+ atomic_inc_unchecked(&vcc->stats->rx_err);
36617 }
36618 }
36619 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36620@@ -770,7 +770,7 @@ rx_dequeued++;
36621 vcc->push(vcc,skb);
36622 pushed++;
36623 }
36624- atomic_inc(&vcc->stats->rx);
36625+ atomic_inc_unchecked(&vcc->stats->rx);
36626 }
36627 wake_up(&eni_dev->rx_wait);
36628 }
36629@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
36630 PCI_DMA_TODEVICE);
36631 if (vcc->pop) vcc->pop(vcc,skb);
36632 else dev_kfree_skb_irq(skb);
36633- atomic_inc(&vcc->stats->tx);
36634+ atomic_inc_unchecked(&vcc->stats->tx);
36635 wake_up(&eni_dev->tx_wait);
36636 dma_complete++;
36637 }
36638diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36639index 82f2ae0..f205c02 100644
36640--- a/drivers/atm/firestream.c
36641+++ b/drivers/atm/firestream.c
36642@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36643 }
36644 }
36645
36646- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36647+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36648
36649 fs_dprintk (FS_DEBUG_TXMEM, "i");
36650 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36651@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36652 #endif
36653 skb_put (skb, qe->p1 & 0xffff);
36654 ATM_SKB(skb)->vcc = atm_vcc;
36655- atomic_inc(&atm_vcc->stats->rx);
36656+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36657 __net_timestamp(skb);
36658 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36659 atm_vcc->push (atm_vcc, skb);
36660@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36661 kfree (pe);
36662 }
36663 if (atm_vcc)
36664- atomic_inc(&atm_vcc->stats->rx_drop);
36665+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36666 break;
36667 case 0x1f: /* Reassembly abort: no buffers. */
36668 /* Silently increment error counter. */
36669 if (atm_vcc)
36670- atomic_inc(&atm_vcc->stats->rx_drop);
36671+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36672 break;
36673 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
36674 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
36675diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
36676index d5d9eaf..65c0d53 100644
36677--- a/drivers/atm/fore200e.c
36678+++ b/drivers/atm/fore200e.c
36679@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
36680 #endif
36681 /* check error condition */
36682 if (*entry->status & STATUS_ERROR)
36683- atomic_inc(&vcc->stats->tx_err);
36684+ atomic_inc_unchecked(&vcc->stats->tx_err);
36685 else
36686- atomic_inc(&vcc->stats->tx);
36687+ atomic_inc_unchecked(&vcc->stats->tx);
36688 }
36689 }
36690
36691@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36692 if (skb == NULL) {
36693 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
36694
36695- atomic_inc(&vcc->stats->rx_drop);
36696+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36697 return -ENOMEM;
36698 }
36699
36700@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36701
36702 dev_kfree_skb_any(skb);
36703
36704- atomic_inc(&vcc->stats->rx_drop);
36705+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36706 return -ENOMEM;
36707 }
36708
36709 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36710
36711 vcc->push(vcc, skb);
36712- atomic_inc(&vcc->stats->rx);
36713+ atomic_inc_unchecked(&vcc->stats->rx);
36714
36715 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36716
36717@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
36718 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
36719 fore200e->atm_dev->number,
36720 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
36721- atomic_inc(&vcc->stats->rx_err);
36722+ atomic_inc_unchecked(&vcc->stats->rx_err);
36723 }
36724 }
36725
36726@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
36727 goto retry_here;
36728 }
36729
36730- atomic_inc(&vcc->stats->tx_err);
36731+ atomic_inc_unchecked(&vcc->stats->tx_err);
36732
36733 fore200e->tx_sat++;
36734 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
36735diff --git a/drivers/atm/he.c b/drivers/atm/he.c
36736index c39702b..785b73b 100644
36737--- a/drivers/atm/he.c
36738+++ b/drivers/atm/he.c
36739@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36740
36741 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
36742 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
36743- atomic_inc(&vcc->stats->rx_drop);
36744+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36745 goto return_host_buffers;
36746 }
36747
36748@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36749 RBRQ_LEN_ERR(he_dev->rbrq_head)
36750 ? "LEN_ERR" : "",
36751 vcc->vpi, vcc->vci);
36752- atomic_inc(&vcc->stats->rx_err);
36753+ atomic_inc_unchecked(&vcc->stats->rx_err);
36754 goto return_host_buffers;
36755 }
36756
36757@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36758 vcc->push(vcc, skb);
36759 spin_lock(&he_dev->global_lock);
36760
36761- atomic_inc(&vcc->stats->rx);
36762+ atomic_inc_unchecked(&vcc->stats->rx);
36763
36764 return_host_buffers:
36765 ++pdus_assembled;
36766@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
36767 tpd->vcc->pop(tpd->vcc, tpd->skb);
36768 else
36769 dev_kfree_skb_any(tpd->skb);
36770- atomic_inc(&tpd->vcc->stats->tx_err);
36771+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
36772 }
36773 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
36774 return;
36775@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36776 vcc->pop(vcc, skb);
36777 else
36778 dev_kfree_skb_any(skb);
36779- atomic_inc(&vcc->stats->tx_err);
36780+ atomic_inc_unchecked(&vcc->stats->tx_err);
36781 return -EINVAL;
36782 }
36783
36784@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36785 vcc->pop(vcc, skb);
36786 else
36787 dev_kfree_skb_any(skb);
36788- atomic_inc(&vcc->stats->tx_err);
36789+ atomic_inc_unchecked(&vcc->stats->tx_err);
36790 return -EINVAL;
36791 }
36792 #endif
36793@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36794 vcc->pop(vcc, skb);
36795 else
36796 dev_kfree_skb_any(skb);
36797- atomic_inc(&vcc->stats->tx_err);
36798+ atomic_inc_unchecked(&vcc->stats->tx_err);
36799 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36800 return -ENOMEM;
36801 }
36802@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36803 vcc->pop(vcc, skb);
36804 else
36805 dev_kfree_skb_any(skb);
36806- atomic_inc(&vcc->stats->tx_err);
36807+ atomic_inc_unchecked(&vcc->stats->tx_err);
36808 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36809 return -ENOMEM;
36810 }
36811@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36812 __enqueue_tpd(he_dev, tpd, cid);
36813 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36814
36815- atomic_inc(&vcc->stats->tx);
36816+ atomic_inc_unchecked(&vcc->stats->tx);
36817
36818 return 0;
36819 }
36820diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
36821index 1dc0519..1aadaf7 100644
36822--- a/drivers/atm/horizon.c
36823+++ b/drivers/atm/horizon.c
36824@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
36825 {
36826 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
36827 // VC layer stats
36828- atomic_inc(&vcc->stats->rx);
36829+ atomic_inc_unchecked(&vcc->stats->rx);
36830 __net_timestamp(skb);
36831 // end of our responsibility
36832 vcc->push (vcc, skb);
36833@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
36834 dev->tx_iovec = NULL;
36835
36836 // VC layer stats
36837- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36838+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36839
36840 // free the skb
36841 hrz_kfree_skb (skb);
36842diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
36843index 2b24ed0..b3d6acc 100644
36844--- a/drivers/atm/idt77252.c
36845+++ b/drivers/atm/idt77252.c
36846@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
36847 else
36848 dev_kfree_skb(skb);
36849
36850- atomic_inc(&vcc->stats->tx);
36851+ atomic_inc_unchecked(&vcc->stats->tx);
36852 }
36853
36854 atomic_dec(&scq->used);
36855@@ -1072,13 +1072,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36856 if ((sb = dev_alloc_skb(64)) == NULL) {
36857 printk("%s: Can't allocate buffers for aal0.\n",
36858 card->name);
36859- atomic_add(i, &vcc->stats->rx_drop);
36860+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
36861 break;
36862 }
36863 if (!atm_charge(vcc, sb->truesize)) {
36864 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
36865 card->name);
36866- atomic_add(i - 1, &vcc->stats->rx_drop);
36867+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
36868 dev_kfree_skb(sb);
36869 break;
36870 }
36871@@ -1095,7 +1095,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36872 ATM_SKB(sb)->vcc = vcc;
36873 __net_timestamp(sb);
36874 vcc->push(vcc, sb);
36875- atomic_inc(&vcc->stats->rx);
36876+ atomic_inc_unchecked(&vcc->stats->rx);
36877
36878 cell += ATM_CELL_PAYLOAD;
36879 }
36880@@ -1132,13 +1132,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36881 "(CDC: %08x)\n",
36882 card->name, len, rpp->len, readl(SAR_REG_CDC));
36883 recycle_rx_pool_skb(card, rpp);
36884- atomic_inc(&vcc->stats->rx_err);
36885+ atomic_inc_unchecked(&vcc->stats->rx_err);
36886 return;
36887 }
36888 if (stat & SAR_RSQE_CRC) {
36889 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
36890 recycle_rx_pool_skb(card, rpp);
36891- atomic_inc(&vcc->stats->rx_err);
36892+ atomic_inc_unchecked(&vcc->stats->rx_err);
36893 return;
36894 }
36895 if (skb_queue_len(&rpp->queue) > 1) {
36896@@ -1149,7 +1149,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36897 RXPRINTK("%s: Can't alloc RX skb.\n",
36898 card->name);
36899 recycle_rx_pool_skb(card, rpp);
36900- atomic_inc(&vcc->stats->rx_err);
36901+ atomic_inc_unchecked(&vcc->stats->rx_err);
36902 return;
36903 }
36904 if (!atm_charge(vcc, skb->truesize)) {
36905@@ -1168,7 +1168,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36906 __net_timestamp(skb);
36907
36908 vcc->push(vcc, skb);
36909- atomic_inc(&vcc->stats->rx);
36910+ atomic_inc_unchecked(&vcc->stats->rx);
36911
36912 return;
36913 }
36914@@ -1190,7 +1190,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36915 __net_timestamp(skb);
36916
36917 vcc->push(vcc, skb);
36918- atomic_inc(&vcc->stats->rx);
36919+ atomic_inc_unchecked(&vcc->stats->rx);
36920
36921 if (skb->truesize > SAR_FB_SIZE_3)
36922 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
36923@@ -1301,14 +1301,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
36924 if (vcc->qos.aal != ATM_AAL0) {
36925 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
36926 card->name, vpi, vci);
36927- atomic_inc(&vcc->stats->rx_drop);
36928+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36929 goto drop;
36930 }
36931
36932 if ((sb = dev_alloc_skb(64)) == NULL) {
36933 printk("%s: Can't allocate buffers for AAL0.\n",
36934 card->name);
36935- atomic_inc(&vcc->stats->rx_err);
36936+ atomic_inc_unchecked(&vcc->stats->rx_err);
36937 goto drop;
36938 }
36939
36940@@ -1327,7 +1327,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
36941 ATM_SKB(sb)->vcc = vcc;
36942 __net_timestamp(sb);
36943 vcc->push(vcc, sb);
36944- atomic_inc(&vcc->stats->rx);
36945+ atomic_inc_unchecked(&vcc->stats->rx);
36946
36947 drop:
36948 skb_pull(queue, 64);
36949@@ -1952,13 +1952,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36950
36951 if (vc == NULL) {
36952 printk("%s: NULL connection in send().\n", card->name);
36953- atomic_inc(&vcc->stats->tx_err);
36954+ atomic_inc_unchecked(&vcc->stats->tx_err);
36955 dev_kfree_skb(skb);
36956 return -EINVAL;
36957 }
36958 if (!test_bit(VCF_TX, &vc->flags)) {
36959 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
36960- atomic_inc(&vcc->stats->tx_err);
36961+ atomic_inc_unchecked(&vcc->stats->tx_err);
36962 dev_kfree_skb(skb);
36963 return -EINVAL;
36964 }
36965@@ -1970,14 +1970,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36966 break;
36967 default:
36968 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
36969- atomic_inc(&vcc->stats->tx_err);
36970+ atomic_inc_unchecked(&vcc->stats->tx_err);
36971 dev_kfree_skb(skb);
36972 return -EINVAL;
36973 }
36974
36975 if (skb_shinfo(skb)->nr_frags != 0) {
36976 printk("%s: No scatter-gather yet.\n", card->name);
36977- atomic_inc(&vcc->stats->tx_err);
36978+ atomic_inc_unchecked(&vcc->stats->tx_err);
36979 dev_kfree_skb(skb);
36980 return -EINVAL;
36981 }
36982@@ -1985,7 +1985,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36983
36984 err = queue_skb(card, vc, skb, oam);
36985 if (err) {
36986- atomic_inc(&vcc->stats->tx_err);
36987+ atomic_inc_unchecked(&vcc->stats->tx_err);
36988 dev_kfree_skb(skb);
36989 return err;
36990 }
36991@@ -2008,7 +2008,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
36992 skb = dev_alloc_skb(64);
36993 if (!skb) {
36994 printk("%s: Out of memory in send_oam().\n", card->name);
36995- atomic_inc(&vcc->stats->tx_err);
36996+ atomic_inc_unchecked(&vcc->stats->tx_err);
36997 return -ENOMEM;
36998 }
36999 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37000diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37001index 4217f29..88f547a 100644
37002--- a/drivers/atm/iphase.c
37003+++ b/drivers/atm/iphase.c
37004@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37005 status = (u_short) (buf_desc_ptr->desc_mode);
37006 if (status & (RX_CER | RX_PTE | RX_OFL))
37007 {
37008- atomic_inc(&vcc->stats->rx_err);
37009+ atomic_inc_unchecked(&vcc->stats->rx_err);
37010 IF_ERR(printk("IA: bad packet, dropping it");)
37011 if (status & RX_CER) {
37012 IF_ERR(printk(" cause: packet CRC error\n");)
37013@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37014 len = dma_addr - buf_addr;
37015 if (len > iadev->rx_buf_sz) {
37016 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37017- atomic_inc(&vcc->stats->rx_err);
37018+ atomic_inc_unchecked(&vcc->stats->rx_err);
37019 goto out_free_desc;
37020 }
37021
37022@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37023 ia_vcc = INPH_IA_VCC(vcc);
37024 if (ia_vcc == NULL)
37025 {
37026- atomic_inc(&vcc->stats->rx_err);
37027+ atomic_inc_unchecked(&vcc->stats->rx_err);
37028 atm_return(vcc, skb->truesize);
37029 dev_kfree_skb_any(skb);
37030 goto INCR_DLE;
37031@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37032 if ((length > iadev->rx_buf_sz) || (length >
37033 (skb->len - sizeof(struct cpcs_trailer))))
37034 {
37035- atomic_inc(&vcc->stats->rx_err);
37036+ atomic_inc_unchecked(&vcc->stats->rx_err);
37037 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37038 length, skb->len);)
37039 atm_return(vcc, skb->truesize);
37040@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37041
37042 IF_RX(printk("rx_dle_intr: skb push");)
37043 vcc->push(vcc,skb);
37044- atomic_inc(&vcc->stats->rx);
37045+ atomic_inc_unchecked(&vcc->stats->rx);
37046 iadev->rx_pkt_cnt++;
37047 }
37048 INCR_DLE:
37049@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37050 {
37051 struct k_sonet_stats *stats;
37052 stats = &PRIV(_ia_dev[board])->sonet_stats;
37053- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37054- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37055- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37056- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37057- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37058- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37059- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37060- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37061- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37062+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37063+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37064+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37065+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37066+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37067+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37068+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37069+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37070+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37071 }
37072 ia_cmds.status = 0;
37073 break;
37074@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37075 if ((desc == 0) || (desc > iadev->num_tx_desc))
37076 {
37077 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37078- atomic_inc(&vcc->stats->tx);
37079+ atomic_inc_unchecked(&vcc->stats->tx);
37080 if (vcc->pop)
37081 vcc->pop(vcc, skb);
37082 else
37083@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37084 ATM_DESC(skb) = vcc->vci;
37085 skb_queue_tail(&iadev->tx_dma_q, skb);
37086
37087- atomic_inc(&vcc->stats->tx);
37088+ atomic_inc_unchecked(&vcc->stats->tx);
37089 iadev->tx_pkt_cnt++;
37090 /* Increment transaction counter */
37091 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37092
37093 #if 0
37094 /* add flow control logic */
37095- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37096+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37097 if (iavcc->vc_desc_cnt > 10) {
37098 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37099 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37100diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37101index 93eaf8d..b4ca7da 100644
37102--- a/drivers/atm/lanai.c
37103+++ b/drivers/atm/lanai.c
37104@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37105 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37106 lanai_endtx(lanai, lvcc);
37107 lanai_free_skb(lvcc->tx.atmvcc, skb);
37108- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37109+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37110 }
37111
37112 /* Try to fill the buffer - don't call unless there is backlog */
37113@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37114 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37115 __net_timestamp(skb);
37116 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37117- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37118+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37119 out:
37120 lvcc->rx.buf.ptr = end;
37121 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37122@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37123 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37124 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37125 lanai->stats.service_rxnotaal5++;
37126- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37127+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37128 return 0;
37129 }
37130 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37131@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37132 int bytes;
37133 read_unlock(&vcc_sklist_lock);
37134 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37135- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37136+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37137 lvcc->stats.x.aal5.service_trash++;
37138 bytes = (SERVICE_GET_END(s) * 16) -
37139 (((unsigned long) lvcc->rx.buf.ptr) -
37140@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37141 }
37142 if (s & SERVICE_STREAM) {
37143 read_unlock(&vcc_sklist_lock);
37144- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37145+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37146 lvcc->stats.x.aal5.service_stream++;
37147 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37148 "PDU on VCI %d!\n", lanai->number, vci);
37149@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37150 return 0;
37151 }
37152 DPRINTK("got rx crc error on vci %d\n", vci);
37153- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37154+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37155 lvcc->stats.x.aal5.service_rxcrc++;
37156 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37157 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37158diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37159index 9988ac9..7c52585 100644
37160--- a/drivers/atm/nicstar.c
37161+++ b/drivers/atm/nicstar.c
37162@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37163 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37164 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37165 card->index);
37166- atomic_inc(&vcc->stats->tx_err);
37167+ atomic_inc_unchecked(&vcc->stats->tx_err);
37168 dev_kfree_skb_any(skb);
37169 return -EINVAL;
37170 }
37171@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37172 if (!vc->tx) {
37173 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37174 card->index);
37175- atomic_inc(&vcc->stats->tx_err);
37176+ atomic_inc_unchecked(&vcc->stats->tx_err);
37177 dev_kfree_skb_any(skb);
37178 return -EINVAL;
37179 }
37180@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37181 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37182 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37183 card->index);
37184- atomic_inc(&vcc->stats->tx_err);
37185+ atomic_inc_unchecked(&vcc->stats->tx_err);
37186 dev_kfree_skb_any(skb);
37187 return -EINVAL;
37188 }
37189
37190 if (skb_shinfo(skb)->nr_frags != 0) {
37191 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37192- atomic_inc(&vcc->stats->tx_err);
37193+ atomic_inc_unchecked(&vcc->stats->tx_err);
37194 dev_kfree_skb_any(skb);
37195 return -EINVAL;
37196 }
37197@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37198 }
37199
37200 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37201- atomic_inc(&vcc->stats->tx_err);
37202+ atomic_inc_unchecked(&vcc->stats->tx_err);
37203 dev_kfree_skb_any(skb);
37204 return -EIO;
37205 }
37206- atomic_inc(&vcc->stats->tx);
37207+ atomic_inc_unchecked(&vcc->stats->tx);
37208
37209 return 0;
37210 }
37211@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37212 printk
37213 ("nicstar%d: Can't allocate buffers for aal0.\n",
37214 card->index);
37215- atomic_add(i, &vcc->stats->rx_drop);
37216+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37217 break;
37218 }
37219 if (!atm_charge(vcc, sb->truesize)) {
37220 RXPRINTK
37221 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37222 card->index);
37223- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37224+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37225 dev_kfree_skb_any(sb);
37226 break;
37227 }
37228@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37229 ATM_SKB(sb)->vcc = vcc;
37230 __net_timestamp(sb);
37231 vcc->push(vcc, sb);
37232- atomic_inc(&vcc->stats->rx);
37233+ atomic_inc_unchecked(&vcc->stats->rx);
37234 cell += ATM_CELL_PAYLOAD;
37235 }
37236
37237@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37238 if (iovb == NULL) {
37239 printk("nicstar%d: Out of iovec buffers.\n",
37240 card->index);
37241- atomic_inc(&vcc->stats->rx_drop);
37242+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37243 recycle_rx_buf(card, skb);
37244 return;
37245 }
37246@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37247 small or large buffer itself. */
37248 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37249 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37250- atomic_inc(&vcc->stats->rx_err);
37251+ atomic_inc_unchecked(&vcc->stats->rx_err);
37252 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37253 NS_MAX_IOVECS);
37254 NS_PRV_IOVCNT(iovb) = 0;
37255@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37256 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37257 card->index);
37258 which_list(card, skb);
37259- atomic_inc(&vcc->stats->rx_err);
37260+ atomic_inc_unchecked(&vcc->stats->rx_err);
37261 recycle_rx_buf(card, skb);
37262 vc->rx_iov = NULL;
37263 recycle_iov_buf(card, iovb);
37264@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37265 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37266 card->index);
37267 which_list(card, skb);
37268- atomic_inc(&vcc->stats->rx_err);
37269+ atomic_inc_unchecked(&vcc->stats->rx_err);
37270 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37271 NS_PRV_IOVCNT(iovb));
37272 vc->rx_iov = NULL;
37273@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37274 printk(" - PDU size mismatch.\n");
37275 else
37276 printk(".\n");
37277- atomic_inc(&vcc->stats->rx_err);
37278+ atomic_inc_unchecked(&vcc->stats->rx_err);
37279 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37280 NS_PRV_IOVCNT(iovb));
37281 vc->rx_iov = NULL;
37282@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37283 /* skb points to a small buffer */
37284 if (!atm_charge(vcc, skb->truesize)) {
37285 push_rxbufs(card, skb);
37286- atomic_inc(&vcc->stats->rx_drop);
37287+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37288 } else {
37289 skb_put(skb, len);
37290 dequeue_sm_buf(card, skb);
37291@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37292 ATM_SKB(skb)->vcc = vcc;
37293 __net_timestamp(skb);
37294 vcc->push(vcc, skb);
37295- atomic_inc(&vcc->stats->rx);
37296+ atomic_inc_unchecked(&vcc->stats->rx);
37297 }
37298 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37299 struct sk_buff *sb;
37300@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37301 if (len <= NS_SMBUFSIZE) {
37302 if (!atm_charge(vcc, sb->truesize)) {
37303 push_rxbufs(card, sb);
37304- atomic_inc(&vcc->stats->rx_drop);
37305+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37306 } else {
37307 skb_put(sb, len);
37308 dequeue_sm_buf(card, sb);
37309@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37310 ATM_SKB(sb)->vcc = vcc;
37311 __net_timestamp(sb);
37312 vcc->push(vcc, sb);
37313- atomic_inc(&vcc->stats->rx);
37314+ atomic_inc_unchecked(&vcc->stats->rx);
37315 }
37316
37317 push_rxbufs(card, skb);
37318@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37319
37320 if (!atm_charge(vcc, skb->truesize)) {
37321 push_rxbufs(card, skb);
37322- atomic_inc(&vcc->stats->rx_drop);
37323+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37324 } else {
37325 dequeue_lg_buf(card, skb);
37326 #ifdef NS_USE_DESTRUCTORS
37327@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37328 ATM_SKB(skb)->vcc = vcc;
37329 __net_timestamp(skb);
37330 vcc->push(vcc, skb);
37331- atomic_inc(&vcc->stats->rx);
37332+ atomic_inc_unchecked(&vcc->stats->rx);
37333 }
37334
37335 push_rxbufs(card, sb);
37336@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37337 printk
37338 ("nicstar%d: Out of huge buffers.\n",
37339 card->index);
37340- atomic_inc(&vcc->stats->rx_drop);
37341+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37342 recycle_iovec_rx_bufs(card,
37343 (struct iovec *)
37344 iovb->data,
37345@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37346 card->hbpool.count++;
37347 } else
37348 dev_kfree_skb_any(hb);
37349- atomic_inc(&vcc->stats->rx_drop);
37350+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37351 } else {
37352 /* Copy the small buffer to the huge buffer */
37353 sb = (struct sk_buff *)iov->iov_base;
37354@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37355 #endif /* NS_USE_DESTRUCTORS */
37356 __net_timestamp(hb);
37357 vcc->push(vcc, hb);
37358- atomic_inc(&vcc->stats->rx);
37359+ atomic_inc_unchecked(&vcc->stats->rx);
37360 }
37361 }
37362
37363diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37364index 21b0bc6..b5f40ba 100644
37365--- a/drivers/atm/solos-pci.c
37366+++ b/drivers/atm/solos-pci.c
37367@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37368 }
37369 atm_charge(vcc, skb->truesize);
37370 vcc->push(vcc, skb);
37371- atomic_inc(&vcc->stats->rx);
37372+ atomic_inc_unchecked(&vcc->stats->rx);
37373 break;
37374
37375 case PKT_STATUS:
37376@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37377 vcc = SKB_CB(oldskb)->vcc;
37378
37379 if (vcc) {
37380- atomic_inc(&vcc->stats->tx);
37381+ atomic_inc_unchecked(&vcc->stats->tx);
37382 solos_pop(vcc, oldskb);
37383 } else {
37384 dev_kfree_skb_irq(oldskb);
37385diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37386index 0215934..ce9f5b1 100644
37387--- a/drivers/atm/suni.c
37388+++ b/drivers/atm/suni.c
37389@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37390
37391
37392 #define ADD_LIMITED(s,v) \
37393- atomic_add((v),&stats->s); \
37394- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37395+ atomic_add_unchecked((v),&stats->s); \
37396+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37397
37398
37399 static void suni_hz(unsigned long from_timer)
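
The ADD_LIMITED macros in suni.c (and again in uPD98402.c just below) deliberately let a SONET statistic wrap and then clamp it to INT_MAX once it is seen negative — saturation by design, which is exactly why only the primitive names change to the unchecked variants. The same saturate-on-wrap logic as standalone C follows; add_limited() is a name invented for this sketch, and the unsigned detour exists only because wrapping signed arithmetic is undefined in plain C (the kernel gets well-defined wrapping from its atomic primitives).

#include <limits.h>

static void add_limited(int *s, int v)
{
	/* Add in unsigned arithmetic so wraparound is well defined. */
	unsigned int u = (unsigned int)*s + (unsigned int)v;

	*s = (int)u;		/* implementation-defined narrowing; two's
				 * complement on every kernel-supported ABI */
	if (*s < 0)
		*s = INT_MAX;	/* saturate instead of reporting nonsense */
}

int main(void)
{
	int cells = INT_MAX;

	add_limited(&cells, 5);		/* would wrap; clamps instead */
	return cells == INT_MAX ? 0 : 1;
}
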
37400diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37401index 5120a96..e2572bd 100644
37402--- a/drivers/atm/uPD98402.c
37403+++ b/drivers/atm/uPD98402.c
37404@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37405 struct sonet_stats tmp;
37406 int error = 0;
37407
37408- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37409+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37410 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37411 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37412 if (zero && !error) {
37413@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37414
37415
37416 #define ADD_LIMITED(s,v) \
37417- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37418- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37419- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37420+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37421+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37422+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37423
37424
37425 static void stat_event(struct atm_dev *dev)
37426@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37427 if (reason & uPD98402_INT_PFM) stat_event(dev);
37428 if (reason & uPD98402_INT_PCO) {
37429 (void) GET(PCOCR); /* clear interrupt cause */
37430- atomic_add(GET(HECCT),
37431+ atomic_add_unchecked(GET(HECCT),
37432 &PRIV(dev)->sonet_stats.uncorr_hcs);
37433 }
37434 if ((reason & uPD98402_INT_RFO) &&
37435@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37436 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37437 uPD98402_INT_LOS),PIMR); /* enable them */
37438 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37439- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37440- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37441- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37442+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37443+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37444+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37445 return 0;
37446 }
37447
37448diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37449index 969c3c2..9b72956 100644
37450--- a/drivers/atm/zatm.c
37451+++ b/drivers/atm/zatm.c
37452@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37453 }
37454 if (!size) {
37455 dev_kfree_skb_irq(skb);
37456- if (vcc) atomic_inc(&vcc->stats->rx_err);
37457+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37458 continue;
37459 }
37460 if (!atm_charge(vcc,skb->truesize)) {
37461@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37462 skb->len = size;
37463 ATM_SKB(skb)->vcc = vcc;
37464 vcc->push(vcc,skb);
37465- atomic_inc(&vcc->stats->rx);
37466+ atomic_inc_unchecked(&vcc->stats->rx);
37467 }
37468 zout(pos & 0xffff,MTA(mbx));
37469 #if 0 /* probably a stupid idea */
37470@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37471 skb_queue_head(&zatm_vcc->backlog,skb);
37472 break;
37473 }
37474- atomic_inc(&vcc->stats->tx);
37475+ atomic_inc_unchecked(&vcc->stats->tx);
37476 wake_up(&zatm_vcc->tx_wait);
37477 }
37478
37479diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37480index 876bae5..8978785 100644
37481--- a/drivers/base/bus.c
37482+++ b/drivers/base/bus.c
37483@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37484 return -EINVAL;
37485
37486 mutex_lock(&subsys->p->mutex);
37487- list_add_tail(&sif->node, &subsys->p->interfaces);
37488+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37489 if (sif->add_dev) {
37490 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37491 while ((dev = subsys_dev_iter_next(&iter)))
37492@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37493 subsys = sif->subsys;
37494
37495 mutex_lock(&subsys->p->mutex);
37496- list_del_init(&sif->node);
37497+ pax_list_del_init((struct list_head *)&sif->node);
37498 if (sif->remove_dev) {
37499 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37500 while ((dev = subsys_dev_iter_next(&iter)))
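
In the bus.c hunk, struct subsys_interface is constified by the PaX plugin, so its embedded list_head can no longer be written through the ordinary list helpers; the pax_list_* variants perform the identical pointer surgery inside a brief write-enable window. A self-contained sketch of that shape: the pax_open_kernel()/pax_close_kernel() names are grsecurity's, but their bodies are stubbed here — in the real kernel they flip the CPU's write-protection state.

struct list_head { struct list_head *next, *prev; };

static void pax_open_kernel(void)  { /* stub: lift write protection */ }
static void pax_close_kernel(void) { /* stub: restore it            */ }

static void pax_list_add_tail(struct list_head *new, struct list_head *head)
{
	/* Ordinary tail insertion, bracketed by the open/close window. */
	pax_open_kernel();
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
	pax_close_kernel();
}

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */
	struct list_head node;

	pax_list_add_tail(&node, &head);
	return head.next == &node ? 0 : 1;
}
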
37501diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37502index 25798db..15f130e 100644
37503--- a/drivers/base/devtmpfs.c
37504+++ b/drivers/base/devtmpfs.c
37505@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37506 if (!thread)
37507 return 0;
37508
37509- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37510+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37511 if (err)
37512 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37513 else
37514@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37515 *err = sys_unshare(CLONE_NEWNS);
37516 if (*err)
37517 goto out;
37518- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37519+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37520 if (*err)
37521 goto out;
37522- sys_chdir("/.."); /* will traverse into overmounted root */
37523- sys_chroot(".");
37524+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37525+ sys_chroot((char __force_user *)".");
37526 complete(&setup_done);
37527 while (1) {
37528 spin_lock(&req_lock);
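
The devtmpfs casts address a different checker: the kernel is invoking sys_mount()/sys_chdir()/sys_chroot() on kernel-space strings from a kernel thread, and under the __user annotation scheme — enforced more strictly with PaX UDEREF — those parameters are typed as user pointers. __force_user marks each mismatch as intentional. Below is a compilable model of how the annotations work under sparse; example_mount() is a hypothetical stand-in for the syscall, and the attribute spellings follow the kernel's of this era.

#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user  __force __user

/* Hypothetical stand-in for a syscall taking a user pointer. */
static long example_mount(const char __user *dev_name)
{
	(void)dev_name;		/* noderef: must not be dereferenced here */
	return 0;
}

static long mount_from_kernel(void)
{
	/* "devtmpfs" lives in kernel space; the cast records that handing
	 * it to a user-pointer parameter is deliberate, so the checker
	 * stays quiet about this one audited call site. */
	return example_mount((const char __force_user *)"devtmpfs");
}

int main(void) { return (int)mount_from_kernel(); }
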
37529diff --git a/drivers/base/node.c b/drivers/base/node.c
37530index a3b82e9..f90a8ce 100644
37531--- a/drivers/base/node.c
37532+++ b/drivers/base/node.c
37533@@ -614,7 +614,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37534 struct node_attr {
37535 struct device_attribute attr;
37536 enum node_states state;
37537-};
37538+} __do_const;
37539
37540 static ssize_t show_node_state(struct device *dev,
37541 struct device_attribute *attr, char *buf)
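
struct node_attr mixes a function-pointer-bearing device_attribute with plain data, which makes it worth freezing; tagging the type __do_const tells the grsecurity constify GCC plugin to treat every instance as const and place it in read-only memory, taking the pointers off the attack surface. A stub showing the intent — the attribute only has effect when the plugin is active, and node_attr_like is a name invented for this sketch:

#ifdef CONSTIFY_PLUGIN
# define __do_const  __attribute__((do_const))
#else
# define __do_const	/* no-op without the plugin */
#endif

struct node_attr_like {
	long (*show)(char *buf);	/* function pointer worth protecting */
	int state;
} __do_const;
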
37542diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37543index 0d8780c..0b5df3f 100644
37544--- a/drivers/base/power/domain.c
37545+++ b/drivers/base/power/domain.c
37546@@ -1725,7 +1725,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37547 {
37548 struct cpuidle_driver *cpuidle_drv;
37549 struct gpd_cpuidle_data *cpuidle_data;
37550- struct cpuidle_state *idle_state;
37551+ cpuidle_state_no_const *idle_state;
37552 int ret = 0;
37553
37554 if (IS_ERR_OR_NULL(genpd) || state < 0)
37555@@ -1793,7 +1793,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37556 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37557 {
37558 struct gpd_cpuidle_data *cpuidle_data;
37559- struct cpuidle_state *idle_state;
37560+ cpuidle_state_no_const *idle_state;
37561 int ret = 0;
37562
37563 if (IS_ERR_OR_NULL(genpd))
37564@@ -2222,7 +2222,10 @@ int genpd_dev_pm_attach(struct device *dev)
37565 return ret;
37566 }
37567
37568- dev->pm_domain->detach = genpd_dev_pm_detach;
37569+ pax_open_kernel();
37570+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37571+ pax_close_kernel();
37572+
37573 pm_genpd_poweron(pd);
37574
37575 return 0;
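
The domain.c change shows the companion idiom to constification: when one field of a now-read-only structure genuinely must be written once — installing the detach callback — the store goes through a void ** cast inside a pax_open_kernel()/pax_close_kernel() window. The same idiom reduced to a compilable sketch; the _like type and stub names are invented, the open/close bodies are stubbed, and in the real kernel it is pax_open_kernel() that makes the page writable (here the object is ordinary memory, so only the type system is being talked past).

struct dev_pm_domain_like {
	void (*detach)(void *dev, int power_off);
};

static void pax_open_kernel(void)  { /* stub: lift write protection */ }
static void pax_close_kernel(void) { /* stub: restore it            */ }

static void genpd_detach_stub(void *dev, int power_off)
{
	(void)dev; (void)power_off;
}

static void install_detach(const struct dev_pm_domain_like *pd)
{
	/* The void ** detour sidesteps the (plugin-imposed) constness for
	 * this single audited store; storing a function pointer through a
	 * void * lvalue mirrors the kernel idiom used in the patch. */
	pax_open_kernel();
	*(void **)&pd->detach = genpd_detach_stub;
	pax_close_kernel();
}

int main(void)
{
	struct dev_pm_domain_like pd = { 0 };

	install_detach(&pd);
	pd.detach(0, 0);
	return 0;
}
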
37576diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37577index d2be3f9..0a3167a 100644
37578--- a/drivers/base/power/sysfs.c
37579+++ b/drivers/base/power/sysfs.c
37580@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37581 return -EIO;
37582 }
37583 }
37584- return sprintf(buf, p);
37585+ return sprintf(buf, "%s", p);
37586 }
37587
37588 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
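
The sysfs.c hunk is a plain format-string fix rather than a PaX transform: p points at variable status text, and passing variable data as the format argument lets any stray % specifier walk nonexistent varargs. A small userspace demonstration of the rule:

#include <stdio.h>

int main(void)
{
	char buf[64];
	const char *p = "100%s done";	/* attacker-influenced string */

	/* BAD:  sprintf(buf, p);  -- "%s" is interpreted and a bogus
	 * pointer is read off the (empty) argument list.               */
	snprintf(buf, sizeof(buf), "%s", p);	/* GOOD: p is data only */
	puts(buf);
	return 0;
}
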
37589diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37590index c2744b3..08fac19 100644
37591--- a/drivers/base/power/wakeup.c
37592+++ b/drivers/base/power/wakeup.c
37593@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37594 * They need to be modified together atomically, so it's better to use one
37595 * atomic variable to hold them both.
37596 */
37597-static atomic_t combined_event_count = ATOMIC_INIT(0);
37598+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37599
37600 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37601 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37602
37603 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37604 {
37605- unsigned int comb = atomic_read(&combined_event_count);
37606+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37607
37608 *cnt = (comb >> IN_PROGRESS_BITS);
37609 *inpr = comb & MAX_IN_PROGRESS;
37610@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37611 ws->start_prevent_time = ws->last_time;
37612
37613 /* Increment the counter of events in progress. */
37614- cec = atomic_inc_return(&combined_event_count);
37615+ cec = atomic_inc_return_unchecked(&combined_event_count);
37616
37617 trace_wakeup_source_activate(ws->name, cec);
37618 }
37619@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37620 * Increment the counter of registered wakeup events and decrement the
37621 * counter of wakeup events in progress simultaneously.
37622 */
37623- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37624+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37625 trace_wakeup_source_deactivate(ws->name, cec);
37626
37627 split_counters(&cnt, &inpr);
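
For context on the wakeup.c hunks: combined_event_count packs two counters into one atomic value — the low IN_PROGRESS_BITS half counts events in progress, the high half counts registered (completed) events — so both halves update in a single atomic operation. The high half is expected to wrap over time, which is why the whole counter moves to the unchecked atomic ops. A worked example of the packing arithmetic, using the same constants as the driver:

#include <assert.h>

#define IN_PROGRESS_BITS  (sizeof(int) * 4)	/* 16 where int is 32-bit */
#define MAX_IN_PROGRESS   ((1 << IN_PROGRESS_BITS) - 1)

static void split_counters(unsigned int comb, unsigned int *cnt,
			   unsigned int *inpr)
{
	*cnt  = comb >> IN_PROGRESS_BITS;	/* completed events   */
	*inpr = comb & MAX_IN_PROGRESS;		/* events in progress */
}

int main(void)
{
	unsigned int comb = 0, cnt, inpr;

	comb += 1;			/* wakeup_source_activate()       */
	comb += MAX_IN_PROGRESS;	/* deactivate: +1 carries into the
					 * high half, -1 from the low half */
	split_counters(comb, &cnt, &inpr);
	assert(cnt == 1 && inpr == 0);	/* one event seen, none pending   */
	return 0;
}
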
37628diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37629index 8d98a32..61d3165 100644
37630--- a/drivers/base/syscore.c
37631+++ b/drivers/base/syscore.c
37632@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37633 void register_syscore_ops(struct syscore_ops *ops)
37634 {
37635 mutex_lock(&syscore_ops_lock);
37636- list_add_tail(&ops->node, &syscore_ops_list);
37637+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37638 mutex_unlock(&syscore_ops_lock);
37639 }
37640 EXPORT_SYMBOL_GPL(register_syscore_ops);
37641@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37642 void unregister_syscore_ops(struct syscore_ops *ops)
37643 {
37644 mutex_lock(&syscore_ops_lock);
37645- list_del(&ops->node);
37646+ pax_list_del((struct list_head *)&ops->node);
37647 mutex_unlock(&syscore_ops_lock);
37648 }
37649 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37650diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37651index ff20f19..018f1da 100644
37652--- a/drivers/block/cciss.c
37653+++ b/drivers/block/cciss.c
37654@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
37655 while (!list_empty(&h->reqQ)) {
37656 c = list_entry(h->reqQ.next, CommandList_struct, list);
37657 /* can't do anything if fifo is full */
37658- if ((h->access.fifo_full(h))) {
37659+ if ((h->access->fifo_full(h))) {
37660 dev_warn(&h->pdev->dev, "fifo full\n");
37661 break;
37662 }
37663@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
37664 h->Qdepth--;
37665
37666 /* Tell the controller execute command */
37667- h->access.submit_command(h, c);
37668+ h->access->submit_command(h, c);
37669
37670 /* Put job onto the completed Q */
37671 addQ(&h->cmpQ, c);
37672@@ -3444,17 +3444,17 @@ startio:
37673
37674 static inline unsigned long get_next_completion(ctlr_info_t *h)
37675 {
37676- return h->access.command_completed(h);
37677+ return h->access->command_completed(h);
37678 }
37679
37680 static inline int interrupt_pending(ctlr_info_t *h)
37681 {
37682- return h->access.intr_pending(h);
37683+ return h->access->intr_pending(h);
37684 }
37685
37686 static inline long interrupt_not_for_us(ctlr_info_t *h)
37687 {
37688- return ((h->access.intr_pending(h) == 0) ||
37689+ return ((h->access->intr_pending(h) == 0) ||
37690 (h->interrupts_enabled == 0));
37691 }
37692
37693@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
37694 u32 a;
37695
37696 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37697- return h->access.command_completed(h);
37698+ return h->access->command_completed(h);
37699
37700 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
37701 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
37702@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
37703 trans_support & CFGTBL_Trans_use_short_tags);
37704
37705 /* Change the access methods to the performant access methods */
37706- h->access = SA5_performant_access;
37707+ h->access = &SA5_performant_access;
37708 h->transMethod = CFGTBL_Trans_Performant;
37709
37710 return;
37711@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
37712 if (prod_index < 0)
37713 return -ENODEV;
37714 h->product_name = products[prod_index].product_name;
37715- h->access = *(products[prod_index].access);
37716+ h->access = products[prod_index].access;
37717
37718 if (cciss_board_disabled(h)) {
37719 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
37720@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
37721 }
37722
37723 /* make sure the board interrupts are off */
37724- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37725+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37726 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
37727 if (rc)
37728 goto clean2;
37729@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
37730 * fake ones to scoop up any residual completions.
37731 */
37732 spin_lock_irqsave(&h->lock, flags);
37733- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37734+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37735 spin_unlock_irqrestore(&h->lock, flags);
37736 free_irq(h->intr[h->intr_mode], h);
37737 rc = cciss_request_irq(h, cciss_msix_discard_completions,
37738@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
37739 dev_info(&h->pdev->dev, "Board READY.\n");
37740 dev_info(&h->pdev->dev,
37741 "Waiting for stale completions to drain.\n");
37742- h->access.set_intr_mask(h, CCISS_INTR_ON);
37743+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37744 msleep(10000);
37745- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37746+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37747
37748 rc = controller_reset_failed(h->cfgtable);
37749 if (rc)
37750@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
37751 cciss_scsi_setup(h);
37752
37753 /* Turn the interrupts on so we can service requests */
37754- h->access.set_intr_mask(h, CCISS_INTR_ON);
37755+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37756
37757 /* Get the firmware version */
37758 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
37759@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
37760 kfree(flush_buf);
37761 if (return_code != IO_OK)
37762 dev_warn(&h->pdev->dev, "Error flushing cache\n");
37763- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37764+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37765 free_irq(h->intr[h->intr_mode], h);
37766 }
37767
37768diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
37769index 7fda30e..2f27946 100644
37770--- a/drivers/block/cciss.h
37771+++ b/drivers/block/cciss.h
37772@@ -101,7 +101,7 @@ struct ctlr_info
37773 /* information about each logical volume */
37774 drive_info_struct *drv[CISS_MAX_LUN];
37775
37776- struct access_method access;
37777+ struct access_method *access;
37778
37779 /* queue and queue Info */
37780 struct list_head reqQ;
37781@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
37782 }
37783
37784 static struct access_method SA5_access = {
37785- SA5_submit_command,
37786- SA5_intr_mask,
37787- SA5_fifo_full,
37788- SA5_intr_pending,
37789- SA5_completed,
37790+ .submit_command = SA5_submit_command,
37791+ .set_intr_mask = SA5_intr_mask,
37792+ .fifo_full = SA5_fifo_full,
37793+ .intr_pending = SA5_intr_pending,
37794+ .command_completed = SA5_completed,
37795 };
37796
37797 static struct access_method SA5B_access = {
37798- SA5_submit_command,
37799- SA5B_intr_mask,
37800- SA5_fifo_full,
37801- SA5B_intr_pending,
37802- SA5_completed,
37803+ .submit_command = SA5_submit_command,
37804+ .set_intr_mask = SA5B_intr_mask,
37805+ .fifo_full = SA5_fifo_full,
37806+ .intr_pending = SA5B_intr_pending,
37807+ .command_completed = SA5_completed,
37808 };
37809
37810 static struct access_method SA5_performant_access = {
37811- SA5_submit_command,
37812- SA5_performant_intr_mask,
37813- SA5_fifo_full,
37814- SA5_performant_intr_pending,
37815- SA5_performant_completed,
37816+ .submit_command = SA5_submit_command,
37817+ .set_intr_mask = SA5_performant_intr_mask,
37818+ .fifo_full = SA5_fifo_full,
37819+ .intr_pending = SA5_performant_intr_pending,
37820+ .command_completed = SA5_performant_completed,
37821 };
37822
37823 struct board_type {
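
The cciss.h hunk (together with the h->access. to h->access-> rewrites in cciss.c above) is the constification pattern for ops tables: instead of copying a struct access_method by value into each writable ctlr_info, every controller points at one shared table that the constify plugin can keep read-only, and the positional initializers become designated ones so each callback is bound to its slot by name. A condensed sketch of both halves — ctlr_info_like and the sa5_* stubs are invented for the sketch, and const below stands in for what the plugin enforces:

#include <stddef.h>

struct access_method {
	void (*submit_command)(void *h, void *c);
	unsigned long (*command_completed)(void *h);
};

static void sa5_submit(void *h, void *c) { (void)h; (void)c; }
static unsigned long sa5_completed(void *h) { (void)h; return 0; }

/* Designated initializers make the field/callback pairing explicit. */
static const struct access_method SA5_access = {
	.submit_command    = sa5_submit,
	.command_completed = sa5_completed,
};

struct ctlr_info_like {
	const struct access_method *access; /* was: struct access_method access */
};

static void bind_board(struct ctlr_info_like *h)
{
	h->access = &SA5_access;	/* was: h->access = *(...).access */
}

int main(void)
{
	struct ctlr_info_like h;

	bind_board(&h);
	h.access->submit_command(&h, NULL);
	return (int)h.access->command_completed(&h);
}

The smart1,2.h and cpqarray hunks later in this patch apply the identical rewrite to their own board families.
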
37824diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
37825index 2b94403..fd6ad1f 100644
37826--- a/drivers/block/cpqarray.c
37827+++ b/drivers/block/cpqarray.c
37828@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37829 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
37830 goto Enomem4;
37831 }
37832- hba[i]->access.set_intr_mask(hba[i], 0);
37833+ hba[i]->access->set_intr_mask(hba[i], 0);
37834 if (request_irq(hba[i]->intr, do_ida_intr,
37835 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
37836 {
37837@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37838 add_timer(&hba[i]->timer);
37839
37840 /* Enable IRQ now that spinlock and rate limit timer are set up */
37841- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37842+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37843
37844 for(j=0; j<NWD; j++) {
37845 struct gendisk *disk = ida_gendisk[i][j];
37846@@ -694,7 +694,7 @@ DBGINFO(
37847 for(i=0; i<NR_PRODUCTS; i++) {
37848 if (board_id == products[i].board_id) {
37849 c->product_name = products[i].product_name;
37850- c->access = *(products[i].access);
37851+ c->access = products[i].access;
37852 break;
37853 }
37854 }
37855@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
37856 hba[ctlr]->intr = intr;
37857 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
37858 hba[ctlr]->product_name = products[j].product_name;
37859- hba[ctlr]->access = *(products[j].access);
37860+ hba[ctlr]->access = products[j].access;
37861 hba[ctlr]->ctlr = ctlr;
37862 hba[ctlr]->board_id = board_id;
37863 hba[ctlr]->pci_dev = NULL; /* not PCI */
37864@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
37865
37866 while((c = h->reqQ) != NULL) {
37867 /* Can't do anything if we're busy */
37868- if (h->access.fifo_full(h) == 0)
37869+ if (h->access->fifo_full(h) == 0)
37870 return;
37871
37872 /* Get the first entry from the request Q */
37873@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
37874 h->Qdepth--;
37875
37876 /* Tell the controller to do our bidding */
37877- h->access.submit_command(h, c);
37878+ h->access->submit_command(h, c);
37879
37880 /* Get onto the completion Q */
37881 addQ(&h->cmpQ, c);
37882@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37883 unsigned long flags;
37884 __u32 a,a1;
37885
37886- istat = h->access.intr_pending(h);
37887+ istat = h->access->intr_pending(h);
37888 /* Is this interrupt for us? */
37889 if (istat == 0)
37890 return IRQ_NONE;
37891@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37892 */
37893 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
37894 if (istat & FIFO_NOT_EMPTY) {
37895- while((a = h->access.command_completed(h))) {
37896+ while((a = h->access->command_completed(h))) {
37897 a1 = a; a &= ~3;
37898 if ((c = h->cmpQ) == NULL)
37899 {
37900@@ -1448,11 +1448,11 @@ static int sendcmd(
37901 /*
37902 * Disable interrupt
37903 */
37904- info_p->access.set_intr_mask(info_p, 0);
37905+ info_p->access->set_intr_mask(info_p, 0);
37906 /* Make sure there is room in the command FIFO */
37907 /* Actually it should be completely empty at this time. */
37908 for (i = 200000; i > 0; i--) {
37909- temp = info_p->access.fifo_full(info_p);
37910+ temp = info_p->access->fifo_full(info_p);
37911 if (temp != 0) {
37912 break;
37913 }
37914@@ -1465,7 +1465,7 @@ DBG(
37915 /*
37916 * Send the cmd
37917 */
37918- info_p->access.submit_command(info_p, c);
37919+ info_p->access->submit_command(info_p, c);
37920 complete = pollcomplete(ctlr);
37921
37922 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
37923@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
37924 * we check the new geometry. Then turn interrupts back on when
37925 * we're done.
37926 */
37927- host->access.set_intr_mask(host, 0);
37928+ host->access->set_intr_mask(host, 0);
37929 getgeometry(ctlr);
37930- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
37931+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
37932
37933 for(i=0; i<NWD; i++) {
37934 struct gendisk *disk = ida_gendisk[ctlr][i];
37935@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
37936 /* Wait (up to 2 seconds) for a command to complete */
37937
37938 for (i = 200000; i > 0; i--) {
37939- done = hba[ctlr]->access.command_completed(hba[ctlr]);
37940+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
37941 if (done == 0) {
37942 udelay(10); /* a short fixed delay */
37943 } else
37944diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
37945index be73e9d..7fbf140 100644
37946--- a/drivers/block/cpqarray.h
37947+++ b/drivers/block/cpqarray.h
37948@@ -99,7 +99,7 @@ struct ctlr_info {
37949 drv_info_t drv[NWD];
37950 struct proc_dir_entry *proc;
37951
37952- struct access_method access;
37953+ struct access_method *access;
37954
37955 cmdlist_t *reqQ;
37956 cmdlist_t *cmpQ;
37957diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
37958index 434c77d..6d3219a 100644
37959--- a/drivers/block/drbd/drbd_bitmap.c
37960+++ b/drivers/block/drbd/drbd_bitmap.c
37961@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
37962 submit_bio(rw, bio);
37963 /* this should not count as user activity and cause the
37964 * resync to throttle -- see drbd_rs_should_slow_down(). */
37965- atomic_add(len >> 9, &device->rs_sect_ev);
37966+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
37967 }
37968 }
37969
37970diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
37971index b905e98..0812ed8 100644
37972--- a/drivers/block/drbd/drbd_int.h
37973+++ b/drivers/block/drbd/drbd_int.h
37974@@ -385,7 +385,7 @@ struct drbd_epoch {
37975 struct drbd_connection *connection;
37976 struct list_head list;
37977 unsigned int barrier_nr;
37978- atomic_t epoch_size; /* increased on every request added. */
37979+ atomic_unchecked_t epoch_size; /* increased on every request added. */
37980 atomic_t active; /* increased on every req. added, and dec on every finished. */
37981 unsigned long flags;
37982 };
37983@@ -946,7 +946,7 @@ struct drbd_device {
37984 unsigned int al_tr_number;
37985 int al_tr_cycle;
37986 wait_queue_head_t seq_wait;
37987- atomic_t packet_seq;
37988+ atomic_unchecked_t packet_seq;
37989 unsigned int peer_seq;
37990 spinlock_t peer_seq_lock;
37991 unsigned long comm_bm_set; /* communicated number of set bits. */
37992@@ -955,8 +955,8 @@ struct drbd_device {
37993 struct mutex own_state_mutex;
37994 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
37995 char congestion_reason; /* Why we where congested... */
37996- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
37997- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
37998+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
37999+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38000 int rs_last_sect_ev; /* counter to compare with */
38001 int rs_last_events; /* counter of read or write "events" (unit sectors)
38002 * on the lower level device when we last looked. */
38003diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38004index 1fc8342..7e7742b 100644
38005--- a/drivers/block/drbd/drbd_main.c
38006+++ b/drivers/block/drbd/drbd_main.c
38007@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38008 p->sector = sector;
38009 p->block_id = block_id;
38010 p->blksize = blksize;
38011- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38012+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38013 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38014 }
38015
38016@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38017 return -EIO;
38018 p->sector = cpu_to_be64(req->i.sector);
38019 p->block_id = (unsigned long)req;
38020- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38021+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38022 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38023 if (device->state.conn >= C_SYNC_SOURCE &&
38024 device->state.conn <= C_PAUSED_SYNC_T)
38025@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38026 atomic_set(&device->unacked_cnt, 0);
38027 atomic_set(&device->local_cnt, 0);
38028 atomic_set(&device->pp_in_use_by_net, 0);
38029- atomic_set(&device->rs_sect_in, 0);
38030- atomic_set(&device->rs_sect_ev, 0);
38031+ atomic_set_unchecked(&device->rs_sect_in, 0);
38032+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38033 atomic_set(&device->ap_in_flight, 0);
38034 atomic_set(&device->md_io.in_use, 0);
38035
38036@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38037 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38038 struct drbd_resource *resource = connection->resource;
38039
38040- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38041- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38042+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38043+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38044 kfree(connection->current_epoch);
38045
38046 idr_destroy(&connection->peer_devices);
38047diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38048index 74df8cf..e41fc24 100644
38049--- a/drivers/block/drbd/drbd_nl.c
38050+++ b/drivers/block/drbd/drbd_nl.c
38051@@ -3637,13 +3637,13 @@ finish:
38052
38053 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38054 {
38055- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38056+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38057 struct sk_buff *msg;
38058 struct drbd_genlmsghdr *d_out;
38059 unsigned seq;
38060 int err = -ENOMEM;
38061
38062- seq = atomic_inc_return(&drbd_genl_seq);
38063+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38064 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38065 if (!msg)
38066 goto failed;
38067diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38068index d169b4a..481463f 100644
38069--- a/drivers/block/drbd/drbd_receiver.c
38070+++ b/drivers/block/drbd/drbd_receiver.c
38071@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38072 struct drbd_device *device = peer_device->device;
38073 int err;
38074
38075- atomic_set(&device->packet_seq, 0);
38076+ atomic_set_unchecked(&device->packet_seq, 0);
38077 device->peer_seq = 0;
38078
38079 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38080@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38081 do {
38082 next_epoch = NULL;
38083
38084- epoch_size = atomic_read(&epoch->epoch_size);
38085+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38086
38087 switch (ev & ~EV_CLEANUP) {
38088 case EV_PUT:
38089@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38090 rv = FE_DESTROYED;
38091 } else {
38092 epoch->flags = 0;
38093- atomic_set(&epoch->epoch_size, 0);
38094+ atomic_set_unchecked(&epoch->epoch_size, 0);
38095 /* atomic_set(&epoch->active, 0); is already zero */
38096 if (rv == FE_STILL_LIVE)
38097 rv = FE_RECYCLED;
38098@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38099 conn_wait_active_ee_empty(connection);
38100 drbd_flush(connection);
38101
38102- if (atomic_read(&connection->current_epoch->epoch_size)) {
38103+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38104 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38105 if (epoch)
38106 break;
38107@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38108 }
38109
38110 epoch->flags = 0;
38111- atomic_set(&epoch->epoch_size, 0);
38112+ atomic_set_unchecked(&epoch->epoch_size, 0);
38113 atomic_set(&epoch->active, 0);
38114
38115 spin_lock(&connection->epoch_lock);
38116- if (atomic_read(&connection->current_epoch->epoch_size)) {
38117+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38118 list_add(&epoch->list, &connection->current_epoch->list);
38119 connection->current_epoch = epoch;
38120 connection->epochs++;
38121@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38122 list_add_tail(&peer_req->w.list, &device->sync_ee);
38123 spin_unlock_irq(&device->resource->req_lock);
38124
38125- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38126+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38127 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38128 return 0;
38129
38130@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38131 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38132 }
38133
38134- atomic_add(pi->size >> 9, &device->rs_sect_in);
38135+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38136
38137 return err;
38138 }
38139@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38140
38141 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38142 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38143- atomic_inc(&connection->current_epoch->epoch_size);
38144+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38145 err2 = drbd_drain_block(peer_device, pi->size);
38146 if (!err)
38147 err = err2;
38148@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38149
38150 spin_lock(&connection->epoch_lock);
38151 peer_req->epoch = connection->current_epoch;
38152- atomic_inc(&peer_req->epoch->epoch_size);
38153+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38154 atomic_inc(&peer_req->epoch->active);
38155 spin_unlock(&connection->epoch_lock);
38156
38157@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38158
38159 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38160 (int)part_stat_read(&disk->part0, sectors[1]) -
38161- atomic_read(&device->rs_sect_ev);
38162+ atomic_read_unchecked(&device->rs_sect_ev);
38163
38164 if (atomic_read(&device->ap_actlog_cnt)
38165 || curr_events - device->rs_last_events > 64) {
38166@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38167 device->use_csums = true;
38168 } else if (pi->cmd == P_OV_REPLY) {
38169 /* track progress, we may need to throttle */
38170- atomic_add(size >> 9, &device->rs_sect_in);
38171+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38172 peer_req->w.cb = w_e_end_ov_reply;
38173 dec_rs_pending(device);
38174 /* drbd_rs_begin_io done when we sent this request,
38175@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38176 goto out_free_e;
38177
38178 submit_for_resync:
38179- atomic_add(size >> 9, &device->rs_sect_ev);
38180+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38181
38182 submit:
38183 update_receiver_timing_details(connection, drbd_submit_peer_request);
38184@@ -4564,7 +4564,7 @@ struct data_cmd {
38185 int expect_payload;
38186 size_t pkt_size;
38187 int (*fn)(struct drbd_connection *, struct packet_info *);
38188-};
38189+} __do_const;
38190
38191 static struct data_cmd drbd_cmd_handler[] = {
38192 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38193@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38194 if (!list_empty(&connection->current_epoch->list))
38195 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38196 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38197- atomic_set(&connection->current_epoch->epoch_size, 0);
38198+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38199 connection->send.seen_any_write_yet = false;
38200
38201 drbd_info(connection, "Connection closed\n");
38202@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38203 put_ldev(device);
38204 }
38205 dec_rs_pending(device);
38206- atomic_add(blksize >> 9, &device->rs_sect_in);
38207+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38208
38209 return 0;
38210 }
38211@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38212 struct asender_cmd {
38213 size_t pkt_size;
38214 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38215-};
38216+} __do_const;
38217
38218 static struct asender_cmd asender_tbl[] = {
38219 [P_PING] = { 0, got_Ping },
38220diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38221index d0fae55..4469096 100644
38222--- a/drivers/block/drbd/drbd_worker.c
38223+++ b/drivers/block/drbd/drbd_worker.c
38224@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38225 list_add_tail(&peer_req->w.list, &device->read_ee);
38226 spin_unlock_irq(&device->resource->req_lock);
38227
38228- atomic_add(size >> 9, &device->rs_sect_ev);
38229+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38230 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38231 return 0;
38232
38233@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38234 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38235 int number, mxb;
38236
38237- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38238+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38239 device->rs_in_flight -= sect_in;
38240
38241 rcu_read_lock();
38242@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38243 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38244 struct fifo_buffer *plan;
38245
38246- atomic_set(&device->rs_sect_in, 0);
38247- atomic_set(&device->rs_sect_ev, 0);
38248+ atomic_set_unchecked(&device->rs_sect_in, 0);
38249+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38250 device->rs_in_flight = 0;
38251 device->rs_last_events =
38252 (int)part_stat_read(&disk->part0, sectors[0]) +
38253diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38254index 6cb1beb..bf490f7 100644
38255--- a/drivers/block/loop.c
38256+++ b/drivers/block/loop.c
38257@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
38258
38259 file_start_write(file);
38260 set_fs(get_ds());
38261- bw = file->f_op->write(file, buf, len, &pos);
38262+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38263 set_fs(old_fs);
38264 file_end_write(file);
38265 if (likely(bw == len))
38266diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
38267index d826bf3..8eb406c 100644
38268--- a/drivers/block/nvme-core.c
38269+++ b/drivers/block/nvme-core.c
38270@@ -76,7 +76,6 @@ static LIST_HEAD(dev_list);
38271 static struct task_struct *nvme_thread;
38272 static struct workqueue_struct *nvme_workq;
38273 static wait_queue_head_t nvme_kthread_wait;
38274-static struct notifier_block nvme_nb;
38275
38276 static void nvme_reset_failed_dev(struct work_struct *ws);
38277 static int nvme_process_cq(struct nvme_queue *nvmeq);
38278@@ -2955,7 +2954,6 @@ static int __init nvme_init(void)
38279 static void __exit nvme_exit(void)
38280 {
38281 pci_unregister_driver(&nvme_driver);
38282- unregister_hotcpu_notifier(&nvme_nb);
38283 unregister_blkdev(nvme_major, "nvme");
38284 destroy_workqueue(nvme_workq);
38285 BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
38286diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38287index 09e628da..7607aaa 100644
38288--- a/drivers/block/pktcdvd.c
38289+++ b/drivers/block/pktcdvd.c
38290@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38291
38292 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38293 {
38294- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38295+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38296 }
38297
38298 /*
38299@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38300 return -EROFS;
38301 }
38302 pd->settings.fp = ti.fp;
38303- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38304+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38305
38306 if (ti.nwa_v) {
38307 pd->nwa = be32_to_cpu(ti.next_writable);
38308diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38309index 8a86b62..f54c87e 100644
38310--- a/drivers/block/rbd.c
38311+++ b/drivers/block/rbd.c
38312@@ -63,7 +63,7 @@
38313 * If the counter is already at its maximum value returns
38314 * -EINVAL without updating it.
38315 */
38316-static int atomic_inc_return_safe(atomic_t *v)
38317+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38318 {
38319 unsigned int counter;
38320
38321diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38322index e5565fb..71be10b4 100644
38323--- a/drivers/block/smart1,2.h
38324+++ b/drivers/block/smart1,2.h
38325@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38326 }
38327
38328 static struct access_method smart4_access = {
38329- smart4_submit_command,
38330- smart4_intr_mask,
38331- smart4_fifo_full,
38332- smart4_intr_pending,
38333- smart4_completed,
38334+ .submit_command = smart4_submit_command,
38335+ .set_intr_mask = smart4_intr_mask,
38336+ .fifo_full = smart4_fifo_full,
38337+ .intr_pending = smart4_intr_pending,
38338+ .command_completed = smart4_completed,
38339 };
38340
38341 /*
38342@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38343 }
38344
38345 static struct access_method smart2_access = {
38346- smart2_submit_command,
38347- smart2_intr_mask,
38348- smart2_fifo_full,
38349- smart2_intr_pending,
38350- smart2_completed,
38351+ .submit_command = smart2_submit_command,
38352+ .set_intr_mask = smart2_intr_mask,
38353+ .fifo_full = smart2_fifo_full,
38354+ .intr_pending = smart2_intr_pending,
38355+ .command_completed = smart2_completed,
38356 };
38357
38358 /*
38359@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38360 }
38361
38362 static struct access_method smart2e_access = {
38363- smart2e_submit_command,
38364- smart2e_intr_mask,
38365- smart2e_fifo_full,
38366- smart2e_intr_pending,
38367- smart2e_completed,
38368+ .submit_command = smart2e_submit_command,
38369+ .set_intr_mask = smart2e_intr_mask,
38370+ .fifo_full = smart2e_fifo_full,
38371+ .intr_pending = smart2e_intr_pending,
38372+ .command_completed = smart2e_completed,
38373 };
38374
38375 /*
38376@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38377 }
38378
38379 static struct access_method smart1_access = {
38380- smart1_submit_command,
38381- smart1_intr_mask,
38382- smart1_fifo_full,
38383- smart1_intr_pending,
38384- smart1_completed,
38385+ .submit_command = smart1_submit_command,
38386+ .set_intr_mask = smart1_intr_mask,
38387+ .fifo_full = smart1_fifo_full,
38388+ .intr_pending = smart1_intr_pending,
38389+ .command_completed = smart1_completed,
38390 };
38391diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38392index 55c135b..9f8d60c 100644
38393--- a/drivers/bluetooth/btwilink.c
38394+++ b/drivers/bluetooth/btwilink.c
38395@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38396
38397 static int bt_ti_probe(struct platform_device *pdev)
38398 {
38399- static struct ti_st *hst;
38400+ struct ti_st *hst;
38401 struct hci_dev *hdev;
38402 int err;
38403
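
The btwilink change is an ordinary bug fix riding along in the patch: hst was declared static inside the probe function, so the pointer was a single slot shared by every probed adapter and clobbered on a second probe; dropping static makes it a normal per-call local. In miniature (ti_st_like is a name invented for this sketch):

struct ti_st_like { int id; };

static int probe(struct ti_st_like *dev)
{
	struct ti_st_like *hst;	/* was: static struct ti_st_like *hst; */

	hst = dev;	/* automatic storage: each bound device gets its
			 * own pointer; with "static" this was one shared
			 * slot, silently overwritten by a second probe. */
	return hst->id;
}

int main(void)
{
	struct ti_st_like a = { 1 }, b = { 2 };

	return probe(&a) + probe(&b) == 3 ? 0 : 1;
}
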
38404diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38405index 5d28a45..a538f90 100644
38406--- a/drivers/cdrom/cdrom.c
38407+++ b/drivers/cdrom/cdrom.c
38408@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38409 ENSURE(reset, CDC_RESET);
38410 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38411 cdi->mc_flags = 0;
38412- cdo->n_minors = 0;
38413 cdi->options = CDO_USE_FFLAGS;
38414
38415 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38416@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38417 else
38418 cdi->cdda_method = CDDA_OLD;
38419
38420- if (!cdo->generic_packet)
38421- cdo->generic_packet = cdrom_dummy_generic_packet;
38422+ if (!cdo->generic_packet) {
38423+ pax_open_kernel();
38424+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38425+ pax_close_kernel();
38426+ }
38427
38428 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38429 mutex_lock(&cdrom_mutex);
38430@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38431 if (cdi->exit)
38432 cdi->exit(cdi);
38433
38434- cdi->ops->n_minors--;
38435 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38436 }
38437
38438@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38439 */
38440 nr = nframes;
38441 do {
38442- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38443+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38444 if (cgc.buffer)
38445 break;
38446
38447@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38448 struct cdrom_device_info *cdi;
38449 int ret;
38450
38451- ret = scnprintf(info + *pos, max_size - *pos, header);
38452+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38453 if (!ret)
38454 return 1;
38455
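
Besides the constified ops write and the zeroed allocation, the cdrom_print_info() hunk is a classic format-string fix: header was passed as the format argument, so any '%' in it would be parsed as a conversion specifier. Routing it through "%s" treats it purely as data. In miniature:

    #include <stdio.h>

    int main(void)
    {
        const char *header = "load: 100%d";  /* merely unlucky text suffices */

        /* printf(header);      undefined behaviour: "%d" reads a bogus vararg */
        printf("%s\n", header); /* safe: header is data, not a format string */
        return 0;
    }
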
38456diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38457index 584bc31..e64a12c 100644
38458--- a/drivers/cdrom/gdrom.c
38459+++ b/drivers/cdrom/gdrom.c
38460@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38461 .audio_ioctl = gdrom_audio_ioctl,
38462 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38463 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38464- .n_minors = 1,
38465 };
38466
38467 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38468diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38469index efefd12..4f1d494 100644
38470--- a/drivers/char/Kconfig
38471+++ b/drivers/char/Kconfig
38472@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
38473
38474 config DEVKMEM
38475 bool "/dev/kmem virtual device support"
38476- default y
38477+ default n
38478+ depends on !GRKERNSEC_KMEM
38479 help
38480 Say Y here if you want to support the /dev/kmem device. The
38481 /dev/kmem device is rarely used, but can be used for certain
38482@@ -577,6 +578,7 @@ config DEVPORT
38483 bool
38484 depends on !M68K
38485 depends on ISA || PCI
38486+ depends on !GRKERNSEC_KMEM
38487 default y
38488
38489 source "drivers/s390/char/Kconfig"
38490diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38491index a48e05b..6bac831 100644
38492--- a/drivers/char/agp/compat_ioctl.c
38493+++ b/drivers/char/agp/compat_ioctl.c
38494@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38495 return -ENOMEM;
38496 }
38497
38498- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38499+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38500 sizeof(*usegment) * ureserve.seg_count)) {
38501 kfree(usegment);
38502 kfree(ksegment);
38503diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38504index 09f17eb..8531d2f 100644
38505--- a/drivers/char/agp/frontend.c
38506+++ b/drivers/char/agp/frontend.c
38507@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38508 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38509 return -EFAULT;
38510
38511- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38512+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38513 return -EFAULT;
38514
38515 client = agp_find_client_by_pid(reserve.pid);
38516@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38517 if (segment == NULL)
38518 return -ENOMEM;
38519
38520- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38521+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38522 sizeof(struct agp_segment) * reserve.seg_count)) {
38523 kfree(segment);
38524 return -EFAULT;
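
The frontend.c hunk tightens the guard on seg_count before the copy_from_user() multiplication, bounding it by the larger agp_segment_priv (presumably to also cover the priv-array allocations derived from the same count). The underlying pattern is rejecting any count whose product with the element size could wrap. A self-contained version of that check:

    #include <stdint.h>
    #include <stdio.h>

    /* Reject count before computing count * size, mirroring the
     * "seg_count >= ~0U / sizeof(...)" guard in the hunk above. */
    static int count_is_sane(uint32_t count, uint32_t size)
    {
        return size != 0 && count < UINT32_MAX / size;
    }

    int main(void)
    {
        printf("%d\n", count_is_sane(100, 24));         /* 1: fits */
        printf("%d\n", count_is_sane(0x20000000, 24));  /* 0: would wrap */
        return 0;
    }
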
38525diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38526index 4f94375..413694e 100644
38527--- a/drivers/char/genrtc.c
38528+++ b/drivers/char/genrtc.c
38529@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38530 switch (cmd) {
38531
38532 case RTC_PLL_GET:
38533+ memset(&pll, 0, sizeof(pll));
38534 if (get_rtc_pll(&pll))
38535 return -EINVAL;
38536 else
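
The genrtc hunk zeroes the PLL structure before get_rtc_pll() fills it; the structure is later copied to userspace, and any field or padding byte the driver does not write would otherwise leak stale kernel stack contents. The pattern in miniature (a cut-down stand-in for the real structure):

    #include <stdio.h>
    #include <string.h>

    struct pll_info { int ctrl; int value; int max; };  /* stand-in */

    static void fill_some_fields(struct pll_info *pll)
    {
        pll->value = 42;            /* ctrl and max are never written */
    }

    int main(void)
    {
        struct pll_info pll;
        memset(&pll, 0, sizeof(pll));   /* without this, ctrl/max/padding
                                           hold whatever was on the stack */
        fill_some_fields(&pll);
        printf("%d %d %d\n", pll.ctrl, pll.value, pll.max);
        return 0;
    }
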
38537diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38538index d5d4cd8..22d561d 100644
38539--- a/drivers/char/hpet.c
38540+++ b/drivers/char/hpet.c
38541@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38542 }
38543
38544 static int
38545-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38546+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38547 struct hpet_info *info)
38548 {
38549 struct hpet_timer __iomem *timer;
38550diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38551index 6b65fa4..8ebbc99 100644
38552--- a/drivers/char/ipmi/ipmi_msghandler.c
38553+++ b/drivers/char/ipmi/ipmi_msghandler.c
38554@@ -436,7 +436,7 @@ struct ipmi_smi {
38555 struct proc_dir_entry *proc_dir;
38556 char proc_dir_name[10];
38557
38558- atomic_t stats[IPMI_NUM_STATS];
38559+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38560
38561 /*
38562 * run_to_completion duplicate of smb_info, smi_info
38563@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38564 static DEFINE_MUTEX(smi_watchers_mutex);
38565
38566 #define ipmi_inc_stat(intf, stat) \
38567- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38568+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38569 #define ipmi_get_stat(intf, stat) \
38570- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38571+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38572
38573 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38574 "ACPI", "SMBIOS", "PCI",
38575@@ -2837,7 +2837,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38576 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38577 init_waitqueue_head(&intf->waitq);
38578 for (i = 0; i < IPMI_NUM_STATS; i++)
38579- atomic_set(&intf->stats[i], 0);
38580+ atomic_set_unchecked(&intf->stats[i], 0);
38581
38582 intf->proc_dir = NULL;
38583
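
atomic_unchecked_t is the grsecurity/PaX escape hatch from REFCOUNT overflow detection: protected atomic_t counters trap when they wrap, which is the right behavior for reference counts but not for statistics like these IPMI counters, which may legitimately count forever. A userspace sketch of the two behaviors (uses a GCC/Clang builtin; names are illustrative):

    #include <stdio.h>

    /* "Checked" increment: refuse to wrap, as a protected refcount would. */
    static int checked_inc(unsigned int *v)
    {
        unsigned int next;
        if (__builtin_add_overflow(*v, 1u, &next))
            return -1;              /* the kernel would log and refuse */
        *v = next;
        return 0;
    }

    int main(void)
    {
        unsigned int stat = 0xFFFFFFFFu;

        if (checked_inc(&stat) < 0)
            puts("checked counter: wrap refused");
        stat++;                     /* unchecked: wraps to 0, fine for stats */
        printf("unchecked counter after wrap: %u\n", stat);
        return 0;
    }
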
38584diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38585index 967b73a..946e94c 100644
38586--- a/drivers/char/ipmi/ipmi_si_intf.c
38587+++ b/drivers/char/ipmi/ipmi_si_intf.c
38588@@ -284,7 +284,7 @@ struct smi_info {
38589 unsigned char slave_addr;
38590
38591 /* Counters and things for the proc filesystem. */
38592- atomic_t stats[SI_NUM_STATS];
38593+ atomic_unchecked_t stats[SI_NUM_STATS];
38594
38595 struct task_struct *thread;
38596
38597@@ -293,9 +293,9 @@ struct smi_info {
38598 };
38599
38600 #define smi_inc_stat(smi, stat) \
38601- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38602+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38603 #define smi_get_stat(smi, stat) \
38604- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38605+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38606
38607 #define SI_MAX_PARMS 4
38608
38609@@ -3412,7 +3412,7 @@ static int try_smi_init(struct smi_info *new_smi)
38610 atomic_set(&new_smi->req_events, 0);
38611 new_smi->run_to_completion = false;
38612 for (i = 0; i < SI_NUM_STATS; i++)
38613- atomic_set(&new_smi->stats[i], 0);
38614+ atomic_set_unchecked(&new_smi->stats[i], 0);
38615
38616 new_smi->interrupt_disabled = true;
38617 atomic_set(&new_smi->need_watch, 0);
38618diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38619index 4c58333..d5cca27 100644
38620--- a/drivers/char/mem.c
38621+++ b/drivers/char/mem.c
38622@@ -18,6 +18,7 @@
38623 #include <linux/raw.h>
38624 #include <linux/tty.h>
38625 #include <linux/capability.h>
38626+#include <linux/security.h>
38627 #include <linux/ptrace.h>
38628 #include <linux/device.h>
38629 #include <linux/highmem.h>
38630@@ -36,6 +37,10 @@
38631
38632 #define DEVPORT_MINOR 4
38633
38634+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38635+extern const struct file_operations grsec_fops;
38636+#endif
38637+
38638 static inline unsigned long size_inside_page(unsigned long start,
38639 unsigned long size)
38640 {
38641@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38642
38643 while (cursor < to) {
38644 if (!devmem_is_allowed(pfn)) {
38645+#ifdef CONFIG_GRKERNSEC_KMEM
38646+ gr_handle_mem_readwrite(from, to);
38647+#else
38648 printk(KERN_INFO
38649 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38650 current->comm, from, to);
38651+#endif
38652 return 0;
38653 }
38654 cursor += PAGE_SIZE;
38655@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38656 }
38657 return 1;
38658 }
38659+#elif defined(CONFIG_GRKERNSEC_KMEM)
38660+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38661+{
38662+ return 0;
38663+}
38664 #else
38665 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38666 {
38667@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38668 #endif
38669
38670 while (count > 0) {
38671- unsigned long remaining;
38672+ unsigned long remaining = 0;
38673+ char *temp;
38674
38675 sz = size_inside_page(p, count);
38676
38677@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38678 if (!ptr)
38679 return -EFAULT;
38680
38681- remaining = copy_to_user(buf, ptr, sz);
38682+#ifdef CONFIG_PAX_USERCOPY
38683+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38684+ if (!temp) {
38685+ unxlate_dev_mem_ptr(p, ptr);
38686+ return -ENOMEM;
38687+ }
38688+ remaining = probe_kernel_read(temp, ptr, sz);
38689+#else
38690+ temp = ptr;
38691+#endif
38692+
38693+ if (!remaining)
38694+ remaining = copy_to_user(buf, temp, sz);
38695+
38696+#ifdef CONFIG_PAX_USERCOPY
38697+ kfree(temp);
38698+#endif
38699+
38700 unxlate_dev_mem_ptr(p, ptr);
38701 if (remaining)
38702 return -EFAULT;
38703@@ -372,9 +404,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38704 size_t count, loff_t *ppos)
38705 {
38706 unsigned long p = *ppos;
38707- ssize_t low_count, read, sz;
38708+ ssize_t low_count, read, sz, err = 0;
38709 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
38710- int err = 0;
38711
38712 read = 0;
38713 if (p < (unsigned long) high_memory) {
38714@@ -396,6 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38715 }
38716 #endif
38717 while (low_count > 0) {
38718+ char *temp;
38719+
38720 sz = size_inside_page(p, low_count);
38721
38722 /*
38723@@ -405,7 +438,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38724 */
38725 kbuf = xlate_dev_kmem_ptr((void *)p);
38726
38727- if (copy_to_user(buf, kbuf, sz))
38728+#ifdef CONFIG_PAX_USERCOPY
38729+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38730+ if (!temp)
38731+ return -ENOMEM;
38732+ err = probe_kernel_read(temp, kbuf, sz);
38733+#else
38734+ temp = kbuf;
38735+#endif
38736+
38737+ if (!err)
38738+ err = copy_to_user(buf, temp, sz);
38739+
38740+#ifdef CONFIG_PAX_USERCOPY
38741+ kfree(temp);
38742+#endif
38743+
38744+ if (err)
38745 return -EFAULT;
38746 buf += sz;
38747 p += sz;
38748@@ -800,6 +849,9 @@ static const struct memdev {
38749 #ifdef CONFIG_PRINTK
38750 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
38751 #endif
38752+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38753+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
38754+#endif
38755 };
38756
38757 static int memory_open(struct inode *inode, struct file *filp)
38758@@ -871,7 +923,7 @@ static int __init chr_dev_init(void)
38759 continue;
38760
38761 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
38762- NULL, devlist[minor].name);
38763+ NULL, "%s", devlist[minor].name);
38764 }
38765
38766 return tty_init();
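
The read_mem()/read_kmem() hunks share one pattern: under CONFIG_PAX_USERCOPY the data is first staged into a freshly allocated kernel buffer with probe_kernel_read(), and only that buffer is handed to copy_to_user(). The usercopy checker can then validate a heap object of known size instead of an arbitrary /dev/mem pointer. A userspace analogue of the staging step:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stage the data into a buffer of known provenance before the copy. */
    static int copy_via_bounce(char *dst, const char *src, size_t sz)
    {
        char *temp = malloc(sz);
        if (!temp)
            return -1;
        memcpy(temp, src, sz);   /* stands in for probe_kernel_read() */
        memcpy(dst, temp, sz);   /* stands in for copy_to_user() */
        free(temp);
        return 0;
    }

    int main(void)
    {
        char out[16];
        if (copy_via_bounce(out, "hello", 6) == 0)
            puts(out);
        return 0;
    }
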
38767diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
38768index 9df78e2..01ba9ae 100644
38769--- a/drivers/char/nvram.c
38770+++ b/drivers/char/nvram.c
38771@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
38772
38773 spin_unlock_irq(&rtc_lock);
38774
38775- if (copy_to_user(buf, contents, tmp - contents))
38776+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
38777 return -EFAULT;
38778
38779 *ppos = i;
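
The nvram read path gains a belt-and-braces clamp: even if tmp somehow advanced past the on-stack contents buffer, the length handed to copy_to_user() can no longer exceed sizeof(contents). A compact version of the guard:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char contents[16];
        char out[16];
        char *tmp;
        size_t len;

        memset(contents, 'x', sizeof(contents));
        tmp = contents + 12;                 /* however far the fill loop got */
        len = (size_t)(tmp - contents);

        if (len > sizeof(contents))          /* the added guard */
            return 1;
        memcpy(out, contents, len);          /* stands in for copy_to_user() */
        printf("copied %zu bytes\n", len);
        return 0;
    }
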
38780diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
38781index 0ea9986..e7b07e4 100644
38782--- a/drivers/char/pcmcia/synclink_cs.c
38783+++ b/drivers/char/pcmcia/synclink_cs.c
38784@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38785
38786 if (debug_level >= DEBUG_LEVEL_INFO)
38787 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
38788- __FILE__, __LINE__, info->device_name, port->count);
38789+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
38790
38791 if (tty_port_close_start(port, tty, filp) == 0)
38792 goto cleanup;
38793@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38794 cleanup:
38795 if (debug_level >= DEBUG_LEVEL_INFO)
38796 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
38797- tty->driver->name, port->count);
38798+ tty->driver->name, atomic_read(&port->count));
38799 }
38800
38801 /* Wait until the transmitter is empty.
38802@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38803
38804 if (debug_level >= DEBUG_LEVEL_INFO)
38805 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
38806- __FILE__, __LINE__, tty->driver->name, port->count);
38807+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
38808
38809 /* If port is closing, signal caller to try again */
38810 if (port->flags & ASYNC_CLOSING){
38811@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38812 goto cleanup;
38813 }
38814 spin_lock(&port->lock);
38815- port->count++;
38816+ atomic_inc(&port->count);
38817 spin_unlock(&port->lock);
38818 spin_unlock_irqrestore(&info->netlock, flags);
38819
38820- if (port->count == 1) {
38821+ if (atomic_read(&port->count) == 1) {
38822 /* 1st open on this device, init hardware */
38823 retval = startup(info, tty);
38824 if (retval < 0)
38825@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38826 unsigned short new_crctype;
38827
38828 /* return error if TTY interface open */
38829- if (info->port.count)
38830+ if (atomic_read(&info->port.count))
38831 return -EBUSY;
38832
38833 switch (encoding)
38834@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
38835
38836 /* arbitrate between network and tty opens */
38837 spin_lock_irqsave(&info->netlock, flags);
38838- if (info->port.count != 0 || info->netcount != 0) {
38839+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38840 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
38841 spin_unlock_irqrestore(&info->netlock, flags);
38842 return -EBUSY;
38843@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38844 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
38845
38846 /* return error if TTY interface open */
38847- if (info->port.count)
38848+ if (atomic_read(&info->port.count))
38849 return -EBUSY;
38850
38851 if (cmd != SIOCWANDEV)
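
The synclink_cs hunks convert accesses to port->count into atomic operations, so concurrent readers such as the HDLC paths see a coherent value even when sampled outside the port lock. The same shape in C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
        atomic_int count = 0;

        atomic_fetch_add(&count, 1);         /* was: port->count++ */
        if (atomic_load(&count) == 1)        /* was: if (port->count == 1) */
            puts("first open: init hardware");
        atomic_fetch_sub(&count, 1);
        printf("count=%d\n", atomic_load(&count));
        return 0;
    }
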
38852diff --git a/drivers/char/random.c b/drivers/char/random.c
38853index 9cd6968..6416f00 100644
38854--- a/drivers/char/random.c
38855+++ b/drivers/char/random.c
38856@@ -289,9 +289,6 @@
38857 /*
38858 * To allow fractional bits to be tracked, the entropy_count field is
38859 * denominated in units of 1/8th bits.
38860- *
38861- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
38862- * credit_entropy_bits() needs to be 64 bits wide.
38863 */
38864 #define ENTROPY_SHIFT 3
38865 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
38866@@ -439,9 +436,9 @@ struct entropy_store {
38867 };
38868
38869 static void push_to_pool(struct work_struct *work);
38870-static __u32 input_pool_data[INPUT_POOL_WORDS];
38871-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
38872-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
38873+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
38874+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
38875+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
38876
38877 static struct entropy_store input_pool = {
38878 .poolinfo = &poolinfo_table[0],
38879@@ -635,7 +632,7 @@ retry:
38880 /* The +2 corresponds to the /4 in the denominator */
38881
38882 do {
38883- unsigned int anfrac = min(pnfrac, pool_size/2);
38884+ u64 anfrac = min(pnfrac, pool_size/2);
38885 unsigned int add =
38886 ((pool_size - entropy_count)*anfrac*3) >> s;
38887
38888@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
38889
38890 extract_buf(r, tmp);
38891 i = min_t(int, nbytes, EXTRACT_SIZE);
38892- if (copy_to_user(buf, tmp, i)) {
38893+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
38894 ret = -EFAULT;
38895 break;
38896 }
38897@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
38898 static int proc_do_uuid(struct ctl_table *table, int write,
38899 void __user *buffer, size_t *lenp, loff_t *ppos)
38900 {
38901- struct ctl_table fake_table;
38902+ ctl_table_no_const fake_table;
38903 unsigned char buf[64], tmp_uuid[16], *uuid;
38904
38905 uuid = table->data;
38906@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
38907 static int proc_do_entropy(struct ctl_table *table, int write,
38908 void __user *buffer, size_t *lenp, loff_t *ppos)
38909 {
38910- struct ctl_table fake_table;
38911+ ctl_table_no_const fake_table;
38912 int entropy_count;
38913
38914 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
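
In credit_entropy_bits(), the removed comment documented a hidden constraint: with 32-bit arithmetic, 2*(ENTROPY_SHIFT + log2(poolbits)) had to stay at or below 31 or the product (pool_size - entropy_count) * anfrac * 3 could wrap. Declaring anfrac as u64 promotes the whole expression to 64 bits, so the constraint, and the comment, can go. The failure mode, exaggerated with a larger pool than the driver actually uses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t pool_size = 1u << 17;       /* illustrative, larger than real */
        uint32_t entropy_count = 0;
        uint32_t anfrac32 = pool_size / 2;
        uint64_t anfrac64 = pool_size / 2;

        /* 2^17 * 2^16 * 3 = 3 * 2^33: wraps to exactly 0 in 32 bits. */
        printf("32-bit product: %u\n",
               (unsigned)((pool_size - entropy_count) * anfrac32 * 3));
        printf("64-bit product: %llu\n",
               (unsigned long long)((pool_size - entropy_count) * anfrac64 * 3));
        return 0;
    }
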
38915diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
38916index e496dae..b793e7d 100644
38917--- a/drivers/char/sonypi.c
38918+++ b/drivers/char/sonypi.c
38919@@ -54,6 +54,7 @@
38920
38921 #include <asm/uaccess.h>
38922 #include <asm/io.h>
38923+#include <asm/local.h>
38924
38925 #include <linux/sonypi.h>
38926
38927@@ -490,7 +491,7 @@ static struct sonypi_device {
38928 spinlock_t fifo_lock;
38929 wait_queue_head_t fifo_proc_list;
38930 struct fasync_struct *fifo_async;
38931- int open_count;
38932+ local_t open_count;
38933 int model;
38934 struct input_dev *input_jog_dev;
38935 struct input_dev *input_key_dev;
38936@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
38937 static int sonypi_misc_release(struct inode *inode, struct file *file)
38938 {
38939 mutex_lock(&sonypi_device.lock);
38940- sonypi_device.open_count--;
38941+ local_dec(&sonypi_device.open_count);
38942 mutex_unlock(&sonypi_device.lock);
38943 return 0;
38944 }
38945@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
38946 {
38947 mutex_lock(&sonypi_device.lock);
38948 /* Flush input queue on first open */
38949- if (!sonypi_device.open_count)
38950+ if (!local_read(&sonypi_device.open_count))
38951 kfifo_reset(&sonypi_device.fifo);
38952- sonypi_device.open_count++;
38953+ local_inc(&sonypi_device.open_count);
38954 mutex_unlock(&sonypi_device.lock);
38955
38956 return 0;
38957diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
38958index 565a947..dcdc06e 100644
38959--- a/drivers/char/tpm/tpm_acpi.c
38960+++ b/drivers/char/tpm/tpm_acpi.c
38961@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
38962 virt = acpi_os_map_iomem(start, len);
38963 if (!virt) {
38964 kfree(log->bios_event_log);
38965+ log->bios_event_log = NULL;
38966 printk("%s: ERROR - Unable to map memory\n", __func__);
38967 return -EIO;
38968 }
38969
38970- memcpy_fromio(log->bios_event_log, virt, len);
38971+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
38972
38973 acpi_os_unmap_iomem(virt, len);
38974 return 0;
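
The tpm_acpi hunk also nulls log->bios_event_log right after freeing it. The pointer lives in a longer-lived structure, so leaving it dangling invites a later double free or use-after-free; kfree(NULL), like free(NULL), is a harmless no-op. In miniature:

    #include <stdlib.h>

    struct bios_log { char *event_log; };    /* stand-in */

    static void release_log(struct bios_log *log)
    {
        free(log->event_log);
        log->event_log = NULL;   /* a later error path freeing it again
                                    now hits a harmless free(NULL) */
    }

    int main(void)
    {
        struct bios_log log = { malloc(64) };
        release_log(&log);
        release_log(&log);       /* would be a double free without the NULL */
        return 0;
    }
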
38975diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
38976index 3a56a13..f8cbd25 100644
38977--- a/drivers/char/tpm/tpm_eventlog.c
38978+++ b/drivers/char/tpm/tpm_eventlog.c
38979@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
38980 event = addr;
38981
38982 if ((event->event_type == 0 && event->event_size == 0) ||
38983- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
38984+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
38985 return NULL;
38986
38987 return addr;
38988@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
38989 return NULL;
38990
38991 if ((event->event_type == 0 && event->event_size == 0) ||
38992- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
38993+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
38994 return NULL;
38995
38996 (*pos)++;
38997@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
38998 int i;
38999
39000 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39001- seq_putc(m, data[i]);
39002+ if (!seq_putc(m, data[i]))
39003+ return -EFAULT;
39004
39005 return 0;
39006 }
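
Both tpm_eventlog checks are rewritten from "base + header + event_size >= limit" to "event_size >= limit - base - header". With an attacker-influenced event_size, the addition on the left can wrap past limit and slip through the old test; on the right, every subtraction stays within the already-validated range. A self-contained version (the two preliminary guards are added here so the demo stands alone; the hunk itself relies on earlier validation):

    #include <stdint.h>
    #include <stdio.h>

    static int event_fits(uintptr_t base, size_t hdr, size_t len,
                          uintptr_t limit)
    {
        if (base > limit || hdr > limit - base)
            return 0;
        return len < limit - base - hdr;   /* no wrap possible here */
    }

    int main(void)
    {
        printf("%d\n", event_fits(0x1000, 32, 64, 0x2000));       /* 1: fits */
        printf("%d\n", event_fits(0x1000, 32, SIZE_MAX, 0x2000)); /* 0: no  */
        return 0;
    }
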
39007diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39008index de03df9..0a309a9 100644
39009--- a/drivers/char/virtio_console.c
39010+++ b/drivers/char/virtio_console.c
39011@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39012 if (to_user) {
39013 ssize_t ret;
39014
39015- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39016+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39017 if (ret)
39018 return -EFAULT;
39019 } else {
39020@@ -788,7 +788,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39021 if (!port_has_data(port) && !port->host_connected)
39022 return 0;
39023
39024- return fill_readbuf(port, ubuf, count, true);
39025+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39026 }
39027
39028 static int wait_port_writable(struct port *port, bool nonblock)
39029diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39030index 4386697..754ceca 100644
39031--- a/drivers/clk/clk-composite.c
39032+++ b/drivers/clk/clk-composite.c
39033@@ -192,7 +192,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39034 struct clk *clk;
39035 struct clk_init_data init;
39036 struct clk_composite *composite;
39037- struct clk_ops *clk_composite_ops;
39038+ clk_ops_no_const *clk_composite_ops;
39039
39040 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39041 if (!composite) {
39042diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39043index dd3a78c..386d49c 100644
39044--- a/drivers/clk/socfpga/clk-gate.c
39045+++ b/drivers/clk/socfpga/clk-gate.c
39046@@ -22,6 +22,7 @@
39047 #include <linux/mfd/syscon.h>
39048 #include <linux/of.h>
39049 #include <linux/regmap.h>
39050+#include <asm/pgtable.h>
39051
39052 #include "clk.h"
39053
39054@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39055 return 0;
39056 }
39057
39058-static struct clk_ops gateclk_ops = {
39059+static clk_ops_no_const gateclk_ops __read_only = {
39060 .prepare = socfpga_clk_prepare,
39061 .recalc_rate = socfpga_clk_recalc_rate,
39062 .get_parent = socfpga_clk_get_parent,
39063@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39064 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39065 socfpga_clk->hw.bit_idx = clk_gate[1];
39066
39067- gateclk_ops.enable = clk_gate_ops.enable;
39068- gateclk_ops.disable = clk_gate_ops.disable;
39069+ pax_open_kernel();
39070+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39071+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39072+ pax_close_kernel();
39073 }
39074
39075 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
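
gateclk_ops is now __read_only, so the two late bindings of .enable/.disable must go through pax_open_kernel()/pax_close_kernel(), which briefly lift write protection on otherwise read-only kernel data. A userspace analogue is an mprotect() window around a write to a read-only page (illustrative, Linux/POSIX only):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;
        strcpy(ops, ".enable = NULL");
        mprotect(ops, pagesz, PROT_READ);      /* steady state: read-only */

        mprotect(ops, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
        strcpy(ops, ".enable = clk_gate_enable");
        mprotect(ops, pagesz, PROT_READ);              /* pax_close_kernel() */

        puts(ops);
        return 0;
    }
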
39076diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39077index de6da95..c98278b 100644
39078--- a/drivers/clk/socfpga/clk-pll.c
39079+++ b/drivers/clk/socfpga/clk-pll.c
39080@@ -21,6 +21,7 @@
39081 #include <linux/io.h>
39082 #include <linux/of.h>
39083 #include <linux/of_address.h>
39084+#include <asm/pgtable.h>
39085
39086 #include "clk.h"
39087
39088@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39089 CLK_MGR_PLL_CLK_SRC_MASK;
39090 }
39091
39092-static struct clk_ops clk_pll_ops = {
39093+static clk_ops_no_const clk_pll_ops __read_only = {
39094 .recalc_rate = clk_pll_recalc_rate,
39095 .get_parent = clk_pll_get_parent,
39096 };
39097@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39098 pll_clk->hw.hw.init = &init;
39099
39100 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39101- clk_pll_ops.enable = clk_gate_ops.enable;
39102- clk_pll_ops.disable = clk_gate_ops.disable;
39103+ pax_open_kernel();
39104+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39105+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39106+ pax_close_kernel();
39107
39108 clk = clk_register(NULL, &pll_clk->hw.hw);
39109 if (WARN_ON(IS_ERR(clk))) {
39110diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39111index b0c18ed..1713a80 100644
39112--- a/drivers/cpufreq/acpi-cpufreq.c
39113+++ b/drivers/cpufreq/acpi-cpufreq.c
39114@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39115 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39116 per_cpu(acfreq_data, cpu) = data;
39117
39118- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39119- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39120+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39121+ pax_open_kernel();
39122+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39123+ pax_close_kernel();
39124+ }
39125
39126 result = acpi_processor_register_performance(data->acpi_data, cpu);
39127 if (result)
39128@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39129 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39130 break;
39131 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39132- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39133+ pax_open_kernel();
39134+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39135+ pax_close_kernel();
39136 break;
39137 default:
39138 break;
39139@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39140 if (!msrs)
39141 return;
39142
39143- acpi_cpufreq_driver.boost_supported = true;
39144- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39145+ pax_open_kernel();
39146+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39147+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39148+ pax_close_kernel();
39149
39150 cpu_notifier_register_begin();
39151
39152diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39153index fde97d6..3631eca 100644
39154--- a/drivers/cpufreq/cpufreq-dt.c
39155+++ b/drivers/cpufreq/cpufreq-dt.c
39156@@ -393,7 +393,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39157 if (!IS_ERR(cpu_reg))
39158 regulator_put(cpu_reg);
39159
39160- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39161+ pax_open_kernel();
39162+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39163+ pax_close_kernel();
39164
39165 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39166 if (ret)
39167diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39168index 7030c40..3a97de6 100644
39169--- a/drivers/cpufreq/cpufreq.c
39170+++ b/drivers/cpufreq/cpufreq.c
39171@@ -2135,7 +2135,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39172 }
39173
39174 mutex_lock(&cpufreq_governor_mutex);
39175- list_del(&governor->governor_list);
39176+ pax_list_del(&governor->governor_list);
39177 mutex_unlock(&cpufreq_governor_mutex);
39178 return;
39179 }
39180@@ -2351,7 +2351,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39181 return NOTIFY_OK;
39182 }
39183
39184-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39185+static struct notifier_block cpufreq_cpu_notifier = {
39186 .notifier_call = cpufreq_cpu_callback,
39187 };
39188
39189@@ -2391,13 +2391,17 @@ int cpufreq_boost_trigger_state(int state)
39190 return 0;
39191
39192 write_lock_irqsave(&cpufreq_driver_lock, flags);
39193- cpufreq_driver->boost_enabled = state;
39194+ pax_open_kernel();
39195+ *(bool *)&cpufreq_driver->boost_enabled = state;
39196+ pax_close_kernel();
39197 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39198
39199 ret = cpufreq_driver->set_boost(state);
39200 if (ret) {
39201 write_lock_irqsave(&cpufreq_driver_lock, flags);
39202- cpufreq_driver->boost_enabled = !state;
39203+ pax_open_kernel();
39204+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39205+ pax_close_kernel();
39206 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39207
39208 pr_err("%s: Cannot %s BOOST\n",
39209@@ -2454,8 +2458,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39210
39211 pr_debug("trying to register driver %s\n", driver_data->name);
39212
39213- if (driver_data->setpolicy)
39214- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39215+ if (driver_data->setpolicy) {
39216+ pax_open_kernel();
39217+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39218+ pax_close_kernel();
39219+ }
39220
39221 write_lock_irqsave(&cpufreq_driver_lock, flags);
39222 if (cpufreq_driver) {
39223@@ -2470,8 +2477,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39224 * Check if driver provides function to enable boost -
39225 * if not, use cpufreq_boost_set_sw as default
39226 */
39227- if (!cpufreq_driver->set_boost)
39228- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39229+ if (!cpufreq_driver->set_boost) {
39230+ pax_open_kernel();
39231+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39232+ pax_close_kernel();
39233+ }
39234
39235 ret = cpufreq_sysfs_create_file(&boost.attr);
39236 if (ret) {
39237diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39238index 1b44496..b80ff5e 100644
39239--- a/drivers/cpufreq/cpufreq_governor.c
39240+++ b/drivers/cpufreq/cpufreq_governor.c
39241@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39242 struct dbs_data *dbs_data;
39243 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39244 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39245- struct od_ops *od_ops = NULL;
39246+ const struct od_ops *od_ops = NULL;
39247 struct od_dbs_tuners *od_tuners = NULL;
39248 struct cs_dbs_tuners *cs_tuners = NULL;
39249 struct cpu_dbs_common_info *cpu_cdbs;
39250@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39251
39252 if ((cdata->governor == GOV_CONSERVATIVE) &&
39253 (!policy->governor->initialized)) {
39254- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39255+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39256
39257 cpufreq_register_notifier(cs_ops->notifier_block,
39258 CPUFREQ_TRANSITION_NOTIFIER);
39259@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39260
39261 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39262 (policy->governor->initialized == 1)) {
39263- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39264+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39265
39266 cpufreq_unregister_notifier(cs_ops->notifier_block,
39267 CPUFREQ_TRANSITION_NOTIFIER);
39268diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39269index cc401d1..8197340 100644
39270--- a/drivers/cpufreq/cpufreq_governor.h
39271+++ b/drivers/cpufreq/cpufreq_governor.h
39272@@ -212,7 +212,7 @@ struct common_dbs_data {
39273 void (*exit)(struct dbs_data *dbs_data);
39274
39275 /* Governor specific ops, see below */
39276- void *gov_ops;
39277+ const void *gov_ops;
39278 };
39279
39280 /* Governor Per policy data */
39281@@ -232,7 +232,7 @@ struct od_ops {
39282 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39283 unsigned int freq_next, unsigned int relation);
39284 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39285-};
39286+} __no_const;
39287
39288 struct cs_ops {
39289 struct notifier_block *notifier_block;
39290diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39291index ad3f38f..8f086cd 100644
39292--- a/drivers/cpufreq/cpufreq_ondemand.c
39293+++ b/drivers/cpufreq/cpufreq_ondemand.c
39294@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39295
39296 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39297
39298-static struct od_ops od_ops = {
39299+static struct od_ops od_ops __read_only = {
39300 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39301 .powersave_bias_target = generic_powersave_bias_target,
39302 .freq_increase = dbs_freq_increase,
39303@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39304 (struct cpufreq_policy *, unsigned int, unsigned int),
39305 unsigned int powersave_bias)
39306 {
39307- od_ops.powersave_bias_target = f;
39308+ pax_open_kernel();
39309+ *(void **)&od_ops.powersave_bias_target = f;
39310+ pax_close_kernel();
39311 od_set_powersave_bias(powersave_bias);
39312 }
39313 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39314
39315 void od_unregister_powersave_bias_handler(void)
39316 {
39317- od_ops.powersave_bias_target = generic_powersave_bias_target;
39318+ pax_open_kernel();
39319+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39320+ pax_close_kernel();
39321 od_set_powersave_bias(0);
39322 }
39323 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39324diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39325index 742eefb..e2fcfc8 100644
39326--- a/drivers/cpufreq/intel_pstate.c
39327+++ b/drivers/cpufreq/intel_pstate.c
39328@@ -133,10 +133,10 @@ struct pstate_funcs {
39329 struct cpu_defaults {
39330 struct pstate_adjust_policy pid_policy;
39331 struct pstate_funcs funcs;
39332-};
39333+} __do_const;
39334
39335 static struct pstate_adjust_policy pid_params;
39336-static struct pstate_funcs pstate_funcs;
39337+static struct pstate_funcs *pstate_funcs;
39338 static int hwp_active;
39339
39340 struct perf_limits {
39341@@ -653,18 +653,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39342
39343 cpu->pstate.current_pstate = pstate;
39344
39345- pstate_funcs.set(cpu, pstate);
39346+ pstate_funcs->set(cpu, pstate);
39347 }
39348
39349 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39350 {
39351- cpu->pstate.min_pstate = pstate_funcs.get_min();
39352- cpu->pstate.max_pstate = pstate_funcs.get_max();
39353- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39354- cpu->pstate.scaling = pstate_funcs.get_scaling();
39355+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39356+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39357+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39358+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39359
39360- if (pstate_funcs.get_vid)
39361- pstate_funcs.get_vid(cpu);
39362+ if (pstate_funcs->get_vid)
39363+ pstate_funcs->get_vid(cpu);
39364 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39365 }
39366
39367@@ -988,9 +988,9 @@ static int intel_pstate_msrs_not_valid(void)
39368 rdmsrl(MSR_IA32_APERF, aperf);
39369 rdmsrl(MSR_IA32_MPERF, mperf);
39370
39371- if (!pstate_funcs.get_max() ||
39372- !pstate_funcs.get_min() ||
39373- !pstate_funcs.get_turbo())
39374+ if (!pstate_funcs->get_max() ||
39375+ !pstate_funcs->get_min() ||
39376+ !pstate_funcs->get_turbo())
39377 return -ENODEV;
39378
39379 rdmsrl(MSR_IA32_APERF, tmp);
39380@@ -1004,7 +1004,7 @@ static int intel_pstate_msrs_not_valid(void)
39381 return 0;
39382 }
39383
39384-static void copy_pid_params(struct pstate_adjust_policy *policy)
39385+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39386 {
39387 pid_params.sample_rate_ms = policy->sample_rate_ms;
39388 pid_params.p_gain_pct = policy->p_gain_pct;
39389@@ -1016,12 +1016,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39390
39391 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39392 {
39393- pstate_funcs.get_max = funcs->get_max;
39394- pstate_funcs.get_min = funcs->get_min;
39395- pstate_funcs.get_turbo = funcs->get_turbo;
39396- pstate_funcs.get_scaling = funcs->get_scaling;
39397- pstate_funcs.set = funcs->set;
39398- pstate_funcs.get_vid = funcs->get_vid;
39399+ pstate_funcs = funcs;
39400 }
39401
39402 #if IS_ENABLED(CONFIG_ACPI)
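
The intel_pstate change replaces a writable static copy of pstate_funcs, filled field by field in copy_cpu_funcs(), with a single pointer to the CPU-specific table (the tables themselves become __do_const). One pointer assignment replaces six copies, and the function-pointer payload is no longer writable data. Sketched in plain C (const is used here to make the point; names are illustrative):

    #include <stdio.h>

    struct pstate_funcs { int (*get_max)(void); };

    static int core_get_max(void) { return 35; }

    static const struct pstate_funcs core_funcs = {
        .get_max = core_get_max,
    };

    /* Before: a writable static copy (one more target to corrupt).
     * After: one pointer to the const table, picked at init time. */
    static const struct pstate_funcs *pstate_funcs_p;

    int main(void)
    {
        pstate_funcs_p = &core_funcs;        /* copy_cpu_funcs() equivalent */
        printf("max pstate: %d\n", pstate_funcs_p->get_max());
        return 0;
    }
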
39403diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39404index 529cfd9..0e28fff 100644
39405--- a/drivers/cpufreq/p4-clockmod.c
39406+++ b/drivers/cpufreq/p4-clockmod.c
39407@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39408 case 0x0F: /* Core Duo */
39409 case 0x16: /* Celeron Core */
39410 case 0x1C: /* Atom */
39411- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39412+ pax_open_kernel();
39413+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39414+ pax_close_kernel();
39415 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39416 case 0x0D: /* Pentium M (Dothan) */
39417- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39418+ pax_open_kernel();
39419+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39420+ pax_close_kernel();
39421 /* fall through */
39422 case 0x09: /* Pentium M (Banias) */
39423 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39424@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39425
39426 /* on P-4s, the TSC runs with constant frequency independent whether
39427 * throttling is active or not. */
39428- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39429+ pax_open_kernel();
39430+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39431+ pax_close_kernel();
39432
39433 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39434 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39435diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39436index 9bb42ba..b01b4a2 100644
39437--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39438+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39439@@ -18,14 +18,12 @@
39440 #include <asm/head.h>
39441 #include <asm/timer.h>
39442
39443-static struct cpufreq_driver *cpufreq_us3_driver;
39444-
39445 struct us3_freq_percpu_info {
39446 struct cpufreq_frequency_table table[4];
39447 };
39448
39449 /* Indexed by cpu number. */
39450-static struct us3_freq_percpu_info *us3_freq_table;
39451+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39452
39453 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39454 * in the Safari config register.
39455@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39456
39457 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39458 {
39459- if (cpufreq_us3_driver)
39460- us3_freq_target(policy, 0);
39461+ us3_freq_target(policy, 0);
39462
39463 return 0;
39464 }
39465
39466+static int __init us3_freq_init(void);
39467+static void __exit us3_freq_exit(void);
39468+
39469+static struct cpufreq_driver cpufreq_us3_driver = {
39470+ .init = us3_freq_cpu_init,
39471+ .verify = cpufreq_generic_frequency_table_verify,
39472+ .target_index = us3_freq_target,
39473+ .get = us3_freq_get,
39474+ .exit = us3_freq_cpu_exit,
39475+ .name = "UltraSPARC-III",
39476+
39477+};
39478+
39479 static int __init us3_freq_init(void)
39480 {
39481 unsigned long manuf, impl, ver;
39482- int ret;
39483
39484 if (tlb_type != cheetah && tlb_type != cheetah_plus)
39485 return -ENODEV;
39486@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
39487 (impl == CHEETAH_IMPL ||
39488 impl == CHEETAH_PLUS_IMPL ||
39489 impl == JAGUAR_IMPL ||
39490- impl == PANTHER_IMPL)) {
39491- struct cpufreq_driver *driver;
39492-
39493- ret = -ENOMEM;
39494- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39495- if (!driver)
39496- goto err_out;
39497-
39498- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39499- GFP_KERNEL);
39500- if (!us3_freq_table)
39501- goto err_out;
39502-
39503- driver->init = us3_freq_cpu_init;
39504- driver->verify = cpufreq_generic_frequency_table_verify;
39505- driver->target_index = us3_freq_target;
39506- driver->get = us3_freq_get;
39507- driver->exit = us3_freq_cpu_exit;
39508- strcpy(driver->name, "UltraSPARC-III");
39509-
39510- cpufreq_us3_driver = driver;
39511- ret = cpufreq_register_driver(driver);
39512- if (ret)
39513- goto err_out;
39514-
39515- return 0;
39516-
39517-err_out:
39518- if (driver) {
39519- kfree(driver);
39520- cpufreq_us3_driver = NULL;
39521- }
39522- kfree(us3_freq_table);
39523- us3_freq_table = NULL;
39524- return ret;
39525- }
39526+ impl == PANTHER_IMPL))
39527+ return cpufreq_register_driver(&cpufreq_us3_driver);
39528
39529 return -ENODEV;
39530 }
39531
39532 static void __exit us3_freq_exit(void)
39533 {
39534- if (cpufreq_us3_driver) {
39535- cpufreq_unregister_driver(cpufreq_us3_driver);
39536- kfree(cpufreq_us3_driver);
39537- cpufreq_us3_driver = NULL;
39538- kfree(us3_freq_table);
39539- us3_freq_table = NULL;
39540- }
39541+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39542 }
39543
39544 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
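
The sparc-us3 rewrite is larger but points the same direction: instead of kzalloc()ing a cpufreq_driver at init time and filling it in, with the matching error unwinding and NULL guards in the exit paths, the driver and its per-cpu table are defined statically. Init shrinks to a capability check plus cpufreq_register_driver(), and nothing is left to deallocate. In miniature (hypothetical types):

    #include <stdio.h>

    struct driver { const char *name; int (*init)(void); };

    static int us3_init(void) { return 0; }

    /* Statically initialized: no allocation, no partially-built error
     * paths, and the object can live in read-only data. */
    static struct driver cpufreq_us3_driver = {
        .name = "UltraSPARC-III",
        .init = us3_init,
    };

    int main(void)
    {
        printf("register %s -> %d\n", cpufreq_us3_driver.name,
               cpufreq_us3_driver.init());
        return 0;
    }
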
39545diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39546index 7d4a315..21bb886 100644
39547--- a/drivers/cpufreq/speedstep-centrino.c
39548+++ b/drivers/cpufreq/speedstep-centrino.c
39549@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39550 !cpu_has(cpu, X86_FEATURE_EST))
39551 return -ENODEV;
39552
39553- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39554- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39555+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39556+ pax_open_kernel();
39557+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39558+ pax_close_kernel();
39559+ }
39560
39561 if (policy->cpu != 0)
39562 return -ENODEV;
39563diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39564index 2697e87..c32476c 100644
39565--- a/drivers/cpuidle/driver.c
39566+++ b/drivers/cpuidle/driver.c
39567@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
39568
39569 static void poll_idle_init(struct cpuidle_driver *drv)
39570 {
39571- struct cpuidle_state *state = &drv->states[0];
39572+ cpuidle_state_no_const *state = &drv->states[0];
39573
39574 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39575 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39576diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39577index fb9f511..213e6cc 100644
39578--- a/drivers/cpuidle/governor.c
39579+++ b/drivers/cpuidle/governor.c
39580@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39581 mutex_lock(&cpuidle_lock);
39582 if (__cpuidle_find_governor(gov->name) == NULL) {
39583 ret = 0;
39584- list_add_tail(&gov->governor_list, &cpuidle_governors);
39585+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39586 if (!cpuidle_curr_governor ||
39587 cpuidle_curr_governor->rating < gov->rating)
39588 cpuidle_switch_governor(gov);
39589diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39590index 97c5903..023ad23 100644
39591--- a/drivers/cpuidle/sysfs.c
39592+++ b/drivers/cpuidle/sysfs.c
39593@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39594 NULL
39595 };
39596
39597-static struct attribute_group cpuidle_attr_group = {
39598+static attribute_group_no_const cpuidle_attr_group = {
39599 .attrs = cpuidle_default_attrs,
39600 .name = "cpuidle",
39601 };
39602diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39603index 8d2a772..33826c9 100644
39604--- a/drivers/crypto/hifn_795x.c
39605+++ b/drivers/crypto/hifn_795x.c
39606@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39607 MODULE_PARM_DESC(hifn_pll_ref,
39608 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39609
39610-static atomic_t hifn_dev_number;
39611+static atomic_unchecked_t hifn_dev_number;
39612
39613 #define ACRYPTO_OP_DECRYPT 0
39614 #define ACRYPTO_OP_ENCRYPT 1
39615@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39616 goto err_out_disable_pci_device;
39617
39618 snprintf(name, sizeof(name), "hifn%d",
39619- atomic_inc_return(&hifn_dev_number)-1);
39620+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39621
39622 err = pci_request_regions(pdev, name);
39623 if (err)
39624diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39625index 30b538d8..1610d75 100644
39626--- a/drivers/devfreq/devfreq.c
39627+++ b/drivers/devfreq/devfreq.c
39628@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39629 goto err_out;
39630 }
39631
39632- list_add(&governor->node, &devfreq_governor_list);
39633+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39634
39635 list_for_each_entry(devfreq, &devfreq_list, node) {
39636 int ret = 0;
39637@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39638 }
39639 }
39640
39641- list_del(&governor->node);
39642+ pax_list_del((struct list_head *)&governor->node);
39643 err_out:
39644 mutex_unlock(&devfreq_list_lock);
39645
39646diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
39647index 3a2adb1..b3be9a3 100644
39648--- a/drivers/dma/sh/shdma-base.c
39649+++ b/drivers/dma/sh/shdma-base.c
39650@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
39651 schan->slave_id = -EINVAL;
39652 }
39653
39654- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
39655- sdev->desc_size, GFP_KERNEL);
39656+ schan->desc = kcalloc(sdev->desc_size,
39657+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
39658 if (!schan->desc) {
39659 ret = -ENOMEM;
39660 goto edescalloc;
39661diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39662index aec8a84..7b45a1f 100644
39663--- a/drivers/dma/sh/shdmac.c
39664+++ b/drivers/dma/sh/shdmac.c
39665@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39666 return ret;
39667 }
39668
39669-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39670+static struct notifier_block sh_dmae_nmi_notifier = {
39671 .notifier_call = sh_dmae_nmi_handler,
39672
39673 /* Run before NMI debug handler and KGDB */
39674diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
39675index 592af5f..bb1d583 100644
39676--- a/drivers/edac/edac_device.c
39677+++ b/drivers/edac/edac_device.c
39678@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
39679 */
39680 int edac_device_alloc_index(void)
39681 {
39682- static atomic_t device_indexes = ATOMIC_INIT(0);
39683+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
39684
39685- return atomic_inc_return(&device_indexes) - 1;
39686+ return atomic_inc_return_unchecked(&device_indexes) - 1;
39687 }
39688 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
39689
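
edac_device_alloc_index() (and edac_pci_alloc_index() below) hand out unique indices with a single fetch-and-increment, no lock required; the grsecurity change merely moves them to the unchecked counter type, since an index generator is permitted to wrap. The idiom in C11:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int device_indexes;    /* zero-initialized */

    /* atomic_inc_return(&v) - 1 in the kernel == fetch-then-add here. */
    static int alloc_index(void)
    {
        return atomic_fetch_add(&device_indexes, 1);
    }

    int main(void)
    {
        int a = alloc_index(), b = alloc_index(), c = alloc_index();
        printf("%d %d %d\n", a, b, c);   /* 0 1 2: every caller distinct */
        return 0;
    }
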
39690diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
39691index 670d282..6675f4d 100644
39692--- a/drivers/edac/edac_mc_sysfs.c
39693+++ b/drivers/edac/edac_mc_sysfs.c
39694@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
39695 struct dev_ch_attribute {
39696 struct device_attribute attr;
39697 int channel;
39698-};
39699+} __do_const;
39700
39701 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
39702 struct dev_ch_attribute dev_attr_legacy_##_name = \
39703@@ -1011,14 +1011,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
39704 }
39705
39706 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
39707+ pax_open_kernel();
39708 if (mci->get_sdram_scrub_rate) {
39709- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39710- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39711+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39712+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39713 }
39714 if (mci->set_sdram_scrub_rate) {
39715- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39716- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39717+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39718+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39719 }
39720+ pax_close_kernel();
39721 err = device_create_file(&mci->dev,
39722 &dev_attr_sdram_scrub_rate);
39723 if (err) {
39724diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
39725index 2cf44b4d..6dd2dc7 100644
39726--- a/drivers/edac/edac_pci.c
39727+++ b/drivers/edac/edac_pci.c
39728@@ -29,7 +29,7 @@
39729
39730 static DEFINE_MUTEX(edac_pci_ctls_mutex);
39731 static LIST_HEAD(edac_pci_list);
39732-static atomic_t pci_indexes = ATOMIC_INIT(0);
39733+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
39734
39735 /*
39736 * edac_pci_alloc_ctl_info
39737@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
39738 */
39739 int edac_pci_alloc_index(void)
39740 {
39741- return atomic_inc_return(&pci_indexes) - 1;
39742+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
39743 }
39744 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
39745
39746diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
39747index 24d877f..4e30133 100644
39748--- a/drivers/edac/edac_pci_sysfs.c
39749+++ b/drivers/edac/edac_pci_sysfs.c
39750@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
39751 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
39752 static int edac_pci_poll_msec = 1000; /* one second workq period */
39753
39754-static atomic_t pci_parity_count = ATOMIC_INIT(0);
39755-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
39756+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
39757+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
39758
39759 static struct kobject *edac_pci_top_main_kobj;
39760 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
39761@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
39762 void *value;
39763 ssize_t(*show) (void *, char *);
39764 ssize_t(*store) (void *, const char *, size_t);
39765-};
39766+} __do_const;
39767
39768 /* Set of show/store abstract level functions for PCI Parity object */
39769 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
39770@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39771 edac_printk(KERN_CRIT, EDAC_PCI,
39772 "Signaled System Error on %s\n",
39773 pci_name(dev));
39774- atomic_inc(&pci_nonparity_count);
39775+ atomic_inc_unchecked(&pci_nonparity_count);
39776 }
39777
39778 if (status & (PCI_STATUS_PARITY)) {
39779@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39780 "Master Data Parity Error on %s\n",
39781 pci_name(dev));
39782
39783- atomic_inc(&pci_parity_count);
39784+ atomic_inc_unchecked(&pci_parity_count);
39785 }
39786
39787 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39788@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39789 "Detected Parity Error on %s\n",
39790 pci_name(dev));
39791
39792- atomic_inc(&pci_parity_count);
39793+ atomic_inc_unchecked(&pci_parity_count);
39794 }
39795 }
39796
39797@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39798 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
39799 "Signaled System Error on %s\n",
39800 pci_name(dev));
39801- atomic_inc(&pci_nonparity_count);
39802+ atomic_inc_unchecked(&pci_nonparity_count);
39803 }
39804
39805 if (status & (PCI_STATUS_PARITY)) {
39806@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39807 "Master Data Parity Error on "
39808 "%s\n", pci_name(dev));
39809
39810- atomic_inc(&pci_parity_count);
39811+ atomic_inc_unchecked(&pci_parity_count);
39812 }
39813
39814 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39815@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39816 "Detected Parity Error on %s\n",
39817 pci_name(dev));
39818
39819- atomic_inc(&pci_parity_count);
39820+ atomic_inc_unchecked(&pci_parity_count);
39821 }
39822 }
39823 }
39824@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
39825 if (!check_pci_errors)
39826 return;
39827
39828- before_count = atomic_read(&pci_parity_count);
39829+ before_count = atomic_read_unchecked(&pci_parity_count);
39830
39831 /* scan all PCI devices looking for a Parity Error on devices and
39832 * bridges.
39833@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
39834 /* Only if operator has selected panic on PCI Error */
39835 if (edac_pci_get_panic_on_pe()) {
39836 /* If the count is different 'after' from 'before' */
39837- if (before_count != atomic_read(&pci_parity_count))
39838+ if (before_count != atomic_read_unchecked(&pci_parity_count))
39839 panic("EDAC: PCI Parity Error");
39840 }
39841 }
39842diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
39843index c2359a1..8bd119d 100644
39844--- a/drivers/edac/mce_amd.h
39845+++ b/drivers/edac/mce_amd.h
39846@@ -74,7 +74,7 @@ struct amd_decoder_ops {
39847 bool (*mc0_mce)(u16, u8);
39848 bool (*mc1_mce)(u16, u8);
39849 bool (*mc2_mce)(u16, u8);
39850-};
39851+} __no_const;
39852
39853 void amd_report_gart_errors(bool);
39854 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
39855diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
39856index 57ea7f4..af06b76 100644
39857--- a/drivers/firewire/core-card.c
39858+++ b/drivers/firewire/core-card.c
39859@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
39860 const struct fw_card_driver *driver,
39861 struct device *device)
39862 {
39863- static atomic_t index = ATOMIC_INIT(-1);
39864+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
39865
39866- card->index = atomic_inc_return(&index);
39867+ card->index = atomic_inc_return_unchecked(&index);
39868 card->driver = driver;
39869 card->device = device;
39870 card->current_tlabel = 0;
39871@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
39872
39873 void fw_core_remove_card(struct fw_card *card)
39874 {
39875- struct fw_card_driver dummy_driver = dummy_driver_template;
39876+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
39877
39878 card->driver->update_phy_reg(card, 4,
39879 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
39880diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
39881index f9e3aee..269dbdb 100644
39882--- a/drivers/firewire/core-device.c
39883+++ b/drivers/firewire/core-device.c
39884@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
39885 struct config_rom_attribute {
39886 struct device_attribute attr;
39887 u32 key;
39888-};
39889+} __do_const;
39890
39891 static ssize_t show_immediate(struct device *dev,
39892 struct device_attribute *dattr, char *buf)
39893diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
39894index eb6935c..3cc2bfa 100644
39895--- a/drivers/firewire/core-transaction.c
39896+++ b/drivers/firewire/core-transaction.c
39897@@ -38,6 +38,7 @@
39898 #include <linux/timer.h>
39899 #include <linux/types.h>
39900 #include <linux/workqueue.h>
39901+#include <linux/sched.h>
39902
39903 #include <asm/byteorder.h>
39904
39905diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
39906index e1480ff6..1a429bd 100644
39907--- a/drivers/firewire/core.h
39908+++ b/drivers/firewire/core.h
39909@@ -111,6 +111,7 @@ struct fw_card_driver {
39910
39911 int (*stop_iso)(struct fw_iso_context *ctx);
39912 };
39913+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
39914
39915 void fw_card_initialize(struct fw_card *card,
39916 const struct fw_card_driver *driver, struct device *device);
39917diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
39918index aff9018..fc87ded 100644
39919--- a/drivers/firewire/ohci.c
39920+++ b/drivers/firewire/ohci.c
39921@@ -2054,10 +2054,12 @@ static void bus_reset_work(struct work_struct *work)
39922 be32_to_cpu(ohci->next_header));
39923 }
39924
39925+#ifndef CONFIG_GRKERNSEC
39926 if (param_remote_dma) {
39927 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
39928 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
39929 }
39930+#endif
39931
39932 spin_unlock_irq(&ohci->lock);
39933
39934@@ -2589,8 +2591,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
39935 unsigned long flags;
39936 int n, ret = 0;
39937
39938+#ifndef CONFIG_GRKERNSEC
39939 if (param_remote_dma)
39940 return 0;
39941+#endif
39942
39943 /*
39944 * FIXME: Make sure this bitmask is cleared when we clear the busReset
39945diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
39946index 94a58a0..f5eba42 100644
39947--- a/drivers/firmware/dmi-id.c
39948+++ b/drivers/firmware/dmi-id.c
39949@@ -16,7 +16,7 @@
39950 struct dmi_device_attribute{
39951 struct device_attribute dev_attr;
39952 int field;
39953-};
39954+} __do_const;
39955 #define to_dmi_dev_attr(_dev_attr) \
39956 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
39957
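
__do_const is the inverse of __no_const: it tells the constify plugin to force every instance of the struct into read-only memory even though the struct embeds a function-pointer-bearing member (the device_attribute). Without the plugin, a build can approximate the effect by declaring the instances const by hand; a sketch with stand-in types:

struct device_attribute_sketch {
        long (*show)(char *buf);
};

struct dmi_device_attribute_sketch {
        struct device_attribute_sketch dev_attr;
        int field;
};      /* carries __do_const in the patch */

static long show_vendor(char *buf)
{
        buf[0] = '\0';
        return 0;
}

/* every instance const: a kernel write primitive cannot retarget ->show */
static const struct dmi_device_attribute_sketch sys_vendor_attr = {
        { show_vendor }, 0
};

int main(void)
{
        char buf[8];

        return (int)sys_vendor_attr.dev_attr.show(buf);
}
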
39958diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
39959index c5f7b4e..74bc7c9 100644
39960--- a/drivers/firmware/dmi_scan.c
39961+++ b/drivers/firmware/dmi_scan.c
39962@@ -900,7 +900,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
39963 if (buf == NULL)
39964 return -1;
39965
39966- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
39967+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
39968
39969 dmi_unmap(buf);
39970 return 0;
39971diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
39972index 4fd9961..52d60ce 100644
39973--- a/drivers/firmware/efi/cper.c
39974+++ b/drivers/firmware/efi/cper.c
39975@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
39976 */
39977 u64 cper_next_record_id(void)
39978 {
39979- static atomic64_t seq;
39980+ static atomic64_unchecked_t seq;
39981
39982- if (!atomic64_read(&seq))
39983- atomic64_set(&seq, ((u64)get_seconds()) << 32);
39984+ if (!atomic64_read_unchecked(&seq))
39985+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
39986
39987- return atomic64_inc_return(&seq);
39988+ return atomic64_inc_return_unchecked(&seq);
39989 }
39990 EXPORT_SYMBOL_GPL(cper_next_record_id);
39991
39992diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
39993index 9035c1b..aff45f8 100644
39994--- a/drivers/firmware/efi/efi.c
39995+++ b/drivers/firmware/efi/efi.c
39996@@ -151,14 +151,16 @@ static struct attribute_group efi_subsys_attr_group = {
39997 };
39998
39999 static struct efivars generic_efivars;
40000-static struct efivar_operations generic_ops;
40001+static efivar_operations_no_const generic_ops __read_only;
40002
40003 static int generic_ops_register(void)
40004 {
40005- generic_ops.get_variable = efi.get_variable;
40006- generic_ops.set_variable = efi.set_variable;
40007- generic_ops.get_next_variable = efi.get_next_variable;
40008- generic_ops.query_variable_store = efi_query_variable_store;
40009+ pax_open_kernel();
40010+ *(void **)&generic_ops.get_variable = efi.get_variable;
40011+ *(void **)&generic_ops.set_variable = efi.set_variable;
40012+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40013+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40014+ pax_close_kernel();
40015
40016 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40017 }
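
generic_ops is declared __read_only above, so its pointers live in memory the kernel normally cannot write; pax_open_kernel()/pax_close_kernel() briefly lift that protection (on x86, by toggling CR0.WP) so the one-time initialization can go through, and the *(void **)& casts write through the now-effectively-const fields without compiler complaints. A userspace approximation in which mprotect() plays the role of the open/close pair (assumes 4 KiB pages; the real primitives do not use mprotect):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static struct efivar_ops_sketch {
        void (*get_variable)(void);
} generic_ops __attribute__((aligned(4096)));    /* plays "__read_only" */

static void efi_get_variable_impl(void) { puts("get_variable called"); }

int main(void)
{
        size_t pg = (size_t)sysconf(_SC_PAGESIZE);

        mprotect(&generic_ops, pg, PROT_READ | PROT_WRITE);  /* "open"  */
        *(void **)&generic_ops.get_variable = (void *)efi_get_variable_impl;
        mprotect(&generic_ops, pg, PROT_READ);               /* "close" */

        generic_ops.get_variable();     /* reads through a read-only page */
        return 0;
}
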
40018diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40019index f256ecd..387dcb1 100644
40020--- a/drivers/firmware/efi/efivars.c
40021+++ b/drivers/firmware/efi/efivars.c
40022@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40023 static int
40024 create_efivars_bin_attributes(void)
40025 {
40026- struct bin_attribute *attr;
40027+ bin_attribute_no_const *attr;
40028 int error;
40029
40030 /* new_var */
40031diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40032index 2f569aa..c95f4fb 100644
40033--- a/drivers/firmware/google/memconsole.c
40034+++ b/drivers/firmware/google/memconsole.c
40035@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40036 if (!found_memconsole())
40037 return -ENODEV;
40038
40039- memconsole_bin_attr.size = memconsole_length;
40040+ pax_open_kernel();
40041+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40042+ pax_close_kernel();
40043+
40044 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40045 }
40046
40047diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40048index 3cfcfc6..09d6f117 100644
40049--- a/drivers/gpio/gpio-em.c
40050+++ b/drivers/gpio/gpio-em.c
40051@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40052 struct em_gio_priv *p;
40053 struct resource *io[2], *irq[2];
40054 struct gpio_chip *gpio_chip;
40055- struct irq_chip *irq_chip;
40056+ irq_chip_no_const *irq_chip;
40057 const char *name = dev_name(&pdev->dev);
40058 int ret;
40059
40060diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40061index 7818cd1..1be40e5 100644
40062--- a/drivers/gpio/gpio-ich.c
40063+++ b/drivers/gpio/gpio-ich.c
40064@@ -94,7 +94,7 @@ struct ichx_desc {
40065 * this option allows driver caching written output values
40066 */
40067 bool use_outlvl_cache;
40068-};
40069+} __do_const;
40070
40071 static struct {
40072 spinlock_t lock;
40073diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40074index f476ae2..05e1bdd 100644
40075--- a/drivers/gpio/gpio-omap.c
40076+++ b/drivers/gpio/gpio-omap.c
40077@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40078 const struct omap_gpio_platform_data *pdata;
40079 struct resource *res;
40080 struct gpio_bank *bank;
40081- struct irq_chip *irqc;
40082+ irq_chip_no_const *irqc;
40083 int ret;
40084
40085 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40086diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40087index 584484e..e26ebd6 100644
40088--- a/drivers/gpio/gpio-rcar.c
40089+++ b/drivers/gpio/gpio-rcar.c
40090@@ -366,7 +366,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40091 struct gpio_rcar_priv *p;
40092 struct resource *io, *irq;
40093 struct gpio_chip *gpio_chip;
40094- struct irq_chip *irq_chip;
40095+ irq_chip_no_const *irq_chip;
40096 struct device *dev = &pdev->dev;
40097 const char *name = dev_name(dev);
40098 int ret;
40099diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40100index c1caa45..f0f97d2 100644
40101--- a/drivers/gpio/gpio-vr41xx.c
40102+++ b/drivers/gpio/gpio-vr41xx.c
40103@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40104 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40105 maskl, pendl, maskh, pendh);
40106
40107- atomic_inc(&irq_err_count);
40108+ atomic_inc_unchecked(&irq_err_count);
40109
40110 return -EINVAL;
40111 }
40112diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40113index 568aa2b..d1204d8 100644
40114--- a/drivers/gpio/gpiolib.c
40115+++ b/drivers/gpio/gpiolib.c
40116@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40117 }
40118
40119 if (gpiochip->irqchip) {
40120- gpiochip->irqchip->irq_request_resources = NULL;
40121- gpiochip->irqchip->irq_release_resources = NULL;
40122+ pax_open_kernel();
40123+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40124+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40125+ pax_close_kernel();
40126 gpiochip->irqchip = NULL;
40127 }
40128 }
40129@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40130 gpiochip->irqchip = NULL;
40131 return -EINVAL;
40132 }
40133- irqchip->irq_request_resources = gpiochip_irq_reqres;
40134- irqchip->irq_release_resources = gpiochip_irq_relres;
40135+
40136+ pax_open_kernel();
40137+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40138+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40139+ pax_close_kernel();
40140
40141 /*
40142 * Prepare the mapping since the irqchip shall be orthogonal to
40143diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40144index 5213da4..7ef736e 100644
40145--- a/drivers/gpu/drm/drm_crtc.c
40146+++ b/drivers/gpu/drm/drm_crtc.c
40147@@ -3961,7 +3961,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40148 goto done;
40149 }
40150
40151- if (copy_to_user(&enum_ptr[copied].name,
40152+ if (copy_to_user(enum_ptr[copied].name,
40153 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40154 ret = -EFAULT;
40155 goto done;
40156diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40157index 4f41377..ee33f40 100644
40158--- a/drivers/gpu/drm/drm_drv.c
40159+++ b/drivers/gpu/drm/drm_drv.c
40160@@ -444,7 +444,7 @@ void drm_unplug_dev(struct drm_device *dev)
40161
40162 drm_device_set_unplugged(dev);
40163
40164- if (dev->open_count == 0) {
40165+ if (local_read(&dev->open_count) == 0) {
40166 drm_put_dev(dev);
40167 }
40168 mutex_unlock(&drm_global_mutex);
40169diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40170index 0b9514b..6acd174 100644
40171--- a/drivers/gpu/drm/drm_fops.c
40172+++ b/drivers/gpu/drm/drm_fops.c
40173@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40174 return PTR_ERR(minor);
40175
40176 dev = minor->dev;
40177- if (!dev->open_count++)
40178+ if (local_inc_return(&dev->open_count) == 1)
40179 need_setup = 1;
40180
40181 /* share address_space across all char-devs of a single device */
40182@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40183 return 0;
40184
40185 err_undo:
40186- dev->open_count--;
40187+ local_dec(&dev->open_count);
40188 drm_minor_release(minor);
40189 return retcode;
40190 }
40191@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40192
40193 mutex_lock(&drm_global_mutex);
40194
40195- DRM_DEBUG("open_count = %d\n", dev->open_count);
40196+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40197
40198 mutex_lock(&dev->struct_mutex);
40199 list_del(&file_priv->lhead);
40200@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40201 * Begin inline drm_release
40202 */
40203
40204- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40205+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40206 task_pid_nr(current),
40207 (long)old_encode_dev(file_priv->minor->kdev->devt),
40208- dev->open_count);
40209+ local_read(&dev->open_count));
40210
40211 /* Release any auth tokens that might point to this file_priv,
40212 (do that under the drm_global_mutex) */
40213@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
40214 * End inline drm_release
40215 */
40216
40217- if (!--dev->open_count) {
40218+ if (local_dec_and_test(&dev->open_count)) {
40219 retcode = drm_lastclose(dev);
40220 if (drm_device_is_unplugged(dev))
40221 drm_put_dev(dev);
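
Throughout the DRM hunks, dev->open_count changes from a plain int (guarded, in principle, by drm_global_mutex) to a local_t, with every access routed through local_read()/local_inc_return()/local_dec_and_test(); that keeps the count coherent even on paths that touch it without the mutex, and it is why the DRM_DEBUG format strings switch from %d to %ld. A rough model of the API using GCC atomics (the kernel's local_t is cheaper, since it only needs atomicity against the owning CPU):

#include <stdio.h>

typedef struct { volatile long v; } local_t;     /* model, not the real one */

static inline long local_inc_return(local_t *l)
{
        return __atomic_add_fetch(&l->v, 1, __ATOMIC_RELAXED);
}

static inline int local_dec_and_test(local_t *l)
{
        return __atomic_sub_fetch(&l->v, 1, __ATOMIC_RELAXED) == 0;
}

static inline long local_read(local_t *l)
{
        return __atomic_load_n(&l->v, __ATOMIC_RELAXED);
}

int main(void)
{
        local_t open_count = { 0 };

        if (local_inc_return(&open_count) == 1)
                puts("first opener: run device setup");
        printf("open_count = %ld\n", local_read(&open_count));
        if (local_dec_and_test(&open_count))
                puts("last close: run lastclose");
        return 0;
}
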
40222diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40223index 3d2e91c..d31c4c9 100644
40224--- a/drivers/gpu/drm/drm_global.c
40225+++ b/drivers/gpu/drm/drm_global.c
40226@@ -36,7 +36,7 @@
40227 struct drm_global_item {
40228 struct mutex mutex;
40229 void *object;
40230- int refcount;
40231+ atomic_t refcount;
40232 };
40233
40234 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40235@@ -49,7 +49,7 @@ void drm_global_init(void)
40236 struct drm_global_item *item = &glob[i];
40237 mutex_init(&item->mutex);
40238 item->object = NULL;
40239- item->refcount = 0;
40240+ atomic_set(&item->refcount, 0);
40241 }
40242 }
40243
40244@@ -59,7 +59,7 @@ void drm_global_release(void)
40245 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40246 struct drm_global_item *item = &glob[i];
40247 BUG_ON(item->object != NULL);
40248- BUG_ON(item->refcount != 0);
40249+ BUG_ON(atomic_read(&item->refcount) != 0);
40250 }
40251 }
40252
40253@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40254 struct drm_global_item *item = &glob[ref->global_type];
40255
40256 mutex_lock(&item->mutex);
40257- if (item->refcount == 0) {
40258+ if (atomic_read(&item->refcount) == 0) {
40259 item->object = kzalloc(ref->size, GFP_KERNEL);
40260 if (unlikely(item->object == NULL)) {
40261 ret = -ENOMEM;
40262@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40263 goto out_err;
40264
40265 }
40266- ++item->refcount;
40267+ atomic_inc(&item->refcount);
40268 ref->object = item->object;
40269 mutex_unlock(&item->mutex);
40270 return 0;
40271@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
40272 struct drm_global_item *item = &glob[ref->global_type];
40273
40274 mutex_lock(&item->mutex);
40275- BUG_ON(item->refcount == 0);
40276+ BUG_ON(atomic_read(&item->refcount) == 0);
40277 BUG_ON(ref->object != item->object);
40278- if (--item->refcount == 0) {
40279+ if (atomic_dec_and_test(&item->refcount)) {
40280 ref->release(ref);
40281 item->object = NULL;
40282 }
40283diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
40284index 51efebd..2b70935 100644
40285--- a/drivers/gpu/drm/drm_info.c
40286+++ b/drivers/gpu/drm/drm_info.c
40287@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
40288 struct drm_local_map *map;
40289 struct drm_map_list *r_list;
40290
40291- /* Hardcoded from _DRM_FRAME_BUFFER,
40292- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
40293- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
40294- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
40295+ static const char * const types[] = {
40296+ [_DRM_FRAME_BUFFER] = "FB",
40297+ [_DRM_REGISTERS] = "REG",
40298+ [_DRM_SHM] = "SHM",
40299+ [_DRM_AGP] = "AGP",
40300+ [_DRM_SCATTER_GATHER] = "SG",
40301+ [_DRM_CONSISTENT] = "PCI"};
40302 const char *type;
40303 int i;
40304
40305@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
40306 map = r_list->map;
40307 if (!map)
40308 continue;
40309- if (map->type < 0 || map->type > 5)
40310+ if (map->type >= ARRAY_SIZE(types))
40311 type = "??";
40312 else
40313 type = types[map->type];
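
Two fixes travel together in the drm_info.c hunk: the map-type name table becomes a designated-initializer array indexed by the _DRM_* constants, so the strings can never drift out of order with the enum, and the hard-coded "> 5" bounds check becomes ">= ARRAY_SIZE(types)", which tracks the table automatically. A self-contained sketch with stand-in enum values:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* stand-in constants; the real ones are the _DRM_* map types */
enum map_type { FB_T, REG_T, SHM_T, AGP_T, SG_T, PCI_T };

static const char * const types[] = {
        [FB_T]  = "FB",
        [REG_T] = "REG",
        [SHM_T] = "SHM",
        [AGP_T] = "AGP",
        [SG_T]  = "SG",
        [PCI_T] = "PCI",
};

static const char *type_name(unsigned int t)
{
        /* the bound follows the table instead of a hard-coded 5 */
        return t >= ARRAY_SIZE(types) ? "??" : types[t];
}

int main(void)
{
        printf("%s %s\n", type_name(SHM_T), type_name(42));
        return 0;
}
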
40314diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
40315index 2f4c4343..dd12cd2 100644
40316--- a/drivers/gpu/drm/drm_ioc32.c
40317+++ b/drivers/gpu/drm/drm_ioc32.c
40318@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
40319 request = compat_alloc_user_space(nbytes);
40320 if (!access_ok(VERIFY_WRITE, request, nbytes))
40321 return -EFAULT;
40322- list = (struct drm_buf_desc *) (request + 1);
40323+ list = (struct drm_buf_desc __user *) (request + 1);
40324
40325 if (__put_user(count, &request->count)
40326 || __put_user(list, &request->list))
40327@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
40328 request = compat_alloc_user_space(nbytes);
40329 if (!access_ok(VERIFY_WRITE, request, nbytes))
40330 return -EFAULT;
40331- list = (struct drm_buf_pub *) (request + 1);
40332+ list = (struct drm_buf_pub __user *) (request + 1);
40333
40334 if (__put_user(count, &request->count)
40335 || __put_user(list, &request->list))
40336@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
40337 return 0;
40338 }
40339
40340-drm_ioctl_compat_t *drm_compat_ioctls[] = {
40341+drm_ioctl_compat_t drm_compat_ioctls[] = {
40342 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
40343 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
40344 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
40345@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
40346 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40347 {
40348 unsigned int nr = DRM_IOCTL_NR(cmd);
40349- drm_ioctl_compat_t *fn;
40350 int ret;
40351
40352 /* Assume that ioctls without an explicit compat routine will just
40353@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40354 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
40355 return drm_ioctl(filp, cmd, arg);
40356
40357- fn = drm_compat_ioctls[nr];
40358-
40359- if (fn != NULL)
40360- ret = (*fn) (filp, cmd, arg);
40361+ if (drm_compat_ioctls[nr] != NULL)
40362+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
40363 else
40364 ret = drm_ioctl(filp, cmd, arg);
40365
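
In the drm_ioc32.c hunk above, drm_compat_ioctls[] stops being an array of pointers to drm_ioctl_compat_t and becomes an array of the typedef directly — which only works because elsewhere the patch treats drm_ioctl_compat_t as (or redefines it to be) a function-pointer typedef. The payoff is a flat pointer table the constify plugin can lock into .rodata, and the throwaway fn temporary disappears. A stripped-down sketch of the dispatch shape, with assumed names:

#include <stdio.h>

typedef long (*ioctl_compat_t)(unsigned int cmd, unsigned long arg);

static long compat_version(unsigned int cmd, unsigned long arg)
{
        (void)arg;
        return (long)cmd;
}

static ioctl_compat_t compat_ioctls[] = {
        [0] = compat_version,
        /* unhandled slots stay NULL */
};

static long dispatch(unsigned int nr, unsigned long arg)
{
        if (nr < sizeof(compat_ioctls) / sizeof(compat_ioctls[0]) &&
            compat_ioctls[nr] != NULL)
                return (*compat_ioctls[nr])(nr, arg);
        return -1;      /* fall back to the native ioctl path */
}

int main(void)
{
        printf("%ld %ld\n", dispatch(0, 0), dispatch(7, 0));
        return 0;
}
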
40366diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
40367index 00587a1..57a65ca 100644
40368--- a/drivers/gpu/drm/drm_ioctl.c
40369+++ b/drivers/gpu/drm/drm_ioctl.c
40370@@ -642,7 +642,7 @@ long drm_ioctl(struct file *filp,
40371 struct drm_file *file_priv = filp->private_data;
40372 struct drm_device *dev;
40373 const struct drm_ioctl_desc *ioctl = NULL;
40374- drm_ioctl_t *func;
40375+ drm_ioctl_no_const_t func;
40376 unsigned int nr = DRM_IOCTL_NR(cmd);
40377 int retcode = -EINVAL;
40378 char stack_kdata[128];
40379diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
40380index 93ec5dc..82acbaf 100644
40381--- a/drivers/gpu/drm/i810/i810_drv.h
40382+++ b/drivers/gpu/drm/i810/i810_drv.h
40383@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
40384 int page_flipping;
40385
40386 wait_queue_head_t irq_queue;
40387- atomic_t irq_received;
40388- atomic_t irq_emitted;
40389+ atomic_unchecked_t irq_received;
40390+ atomic_unchecked_t irq_emitted;
40391
40392 int front_offset;
40393 } drm_i810_private_t;
40394diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
40395index ecee3bc..ad5ae67 100644
40396--- a/drivers/gpu/drm/i915/i915_dma.c
40397+++ b/drivers/gpu/drm/i915/i915_dma.c
40398@@ -356,7 +356,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
40399 * locking inversion with the driver load path. And the access here is
40400 * completely racy anyway. So don't bother with locking for now.
40401 */
40402- return dev->open_count == 0;
40403+ return local_read(&dev->open_count) == 0;
40404 }
40405
40406 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
40407diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40408index 1173831..7dfb389 100644
40409--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40410+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40411@@ -863,12 +863,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
40412 static int
40413 validate_exec_list(struct drm_device *dev,
40414 struct drm_i915_gem_exec_object2 *exec,
40415- int count)
40416+ unsigned int count)
40417 {
40418 unsigned relocs_total = 0;
40419 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
40420 unsigned invalid_flags;
40421- int i;
40422+ unsigned int i;
40423
40424 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
40425 if (USES_FULL_PPGTT(dev))
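
validate_exec_list() takes its element count straight from a userspace-controlled execbuffer structure; widening count and the loop index from int to unsigned int removes the case where a negative count skips the validation loop entirely while later code treats the same bits as a huge unsigned size. The mismatch in miniature:

#include <stdio.h>

int main(void)
{
        int count = -1;                 /* userspace-supplied, signed */

        /* signed validation loop: 0 < -1 is false, the body never runs */
        printf("entries validated: %d\n", 0 < count ? count : 0);
        /* the same value where sizes are unsigned: */
        printf("as a size: %u elements\n", (unsigned int)count);
        return 0;
}
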
40426diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
40427index 176de63..1ef9ac7 100644
40428--- a/drivers/gpu/drm/i915/i915_ioc32.c
40429+++ b/drivers/gpu/drm/i915/i915_ioc32.c
40430@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
40431 (unsigned long)request);
40432 }
40433
40434-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40435+static drm_ioctl_compat_t i915_compat_ioctls[] = {
40436 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
40437 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
40438 [DRM_I915_GETPARAM] = compat_i915_getparam,
40439@@ -201,18 +201,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40440 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40441 {
40442 unsigned int nr = DRM_IOCTL_NR(cmd);
40443- drm_ioctl_compat_t *fn = NULL;
40444 int ret;
40445
40446 if (nr < DRM_COMMAND_BASE)
40447 return drm_compat_ioctl(filp, cmd, arg);
40448
40449- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
40450- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40451-
40452- if (fn != NULL)
40453+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
40454+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40455 ret = (*fn) (filp, cmd, arg);
40456- else
40457+ } else
40458 ret = drm_ioctl(filp, cmd, arg);
40459
40460 return ret;
40461diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
40462index e7a16f1..e0d82e8 100644
40463--- a/drivers/gpu/drm/i915/intel_display.c
40464+++ b/drivers/gpu/drm/i915/intel_display.c
40465@@ -12935,13 +12935,13 @@ struct intel_quirk {
40466 int subsystem_vendor;
40467 int subsystem_device;
40468 void (*hook)(struct drm_device *dev);
40469-};
40470+} __do_const;
40471
40472 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
40473 struct intel_dmi_quirk {
40474 void (*hook)(struct drm_device *dev);
40475 const struct dmi_system_id (*dmi_id_list)[];
40476-};
40477+} __do_const;
40478
40479 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40480 {
40481@@ -12949,18 +12949,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40482 return 1;
40483 }
40484
40485-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40486+static const struct dmi_system_id intel_dmi_quirks_table[] = {
40487 {
40488- .dmi_id_list = &(const struct dmi_system_id[]) {
40489- {
40490- .callback = intel_dmi_reverse_brightness,
40491- .ident = "NCR Corporation",
40492- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40493- DMI_MATCH(DMI_PRODUCT_NAME, ""),
40494- },
40495- },
40496- { } /* terminating entry */
40497+ .callback = intel_dmi_reverse_brightness,
40498+ .ident = "NCR Corporation",
40499+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40500+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
40501 },
40502+ },
40503+ { } /* terminating entry */
40504+};
40505+
40506+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40507+ {
40508+ .dmi_id_list = &intel_dmi_quirks_table,
40509 .hook = quirk_invert_brightness,
40510 },
40511 };
40512diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
40513index b250130..98df2a4 100644
40514--- a/drivers/gpu/drm/imx/imx-drm-core.c
40515+++ b/drivers/gpu/drm/imx/imx-drm-core.c
40516@@ -356,7 +356,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
40517 if (imxdrm->pipes >= MAX_CRTC)
40518 return -EINVAL;
40519
40520- if (imxdrm->drm->open_count)
40521+ if (local_read(&imxdrm->drm->open_count))
40522 return -EBUSY;
40523
40524 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
40525diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
40526index b4a2014..219ab78 100644
40527--- a/drivers/gpu/drm/mga/mga_drv.h
40528+++ b/drivers/gpu/drm/mga/mga_drv.h
40529@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
40530 u32 clear_cmd;
40531 u32 maccess;
40532
40533- atomic_t vbl_received; /**< Number of vblanks received. */
40534+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
40535 wait_queue_head_t fence_queue;
40536- atomic_t last_fence_retired;
40537+ atomic_unchecked_t last_fence_retired;
40538 u32 next_fence_to_post;
40539
40540 unsigned int fb_cpp;
40541diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
40542index 729bfd5..ead8823 100644
40543--- a/drivers/gpu/drm/mga/mga_ioc32.c
40544+++ b/drivers/gpu/drm/mga/mga_ioc32.c
40545@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
40546 return 0;
40547 }
40548
40549-drm_ioctl_compat_t *mga_compat_ioctls[] = {
40550+drm_ioctl_compat_t mga_compat_ioctls[] = {
40551 [DRM_MGA_INIT] = compat_mga_init,
40552 [DRM_MGA_GETPARAM] = compat_mga_getparam,
40553 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
40554@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
40555 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40556 {
40557 unsigned int nr = DRM_IOCTL_NR(cmd);
40558- drm_ioctl_compat_t *fn = NULL;
40559 int ret;
40560
40561 if (nr < DRM_COMMAND_BASE)
40562 return drm_compat_ioctl(filp, cmd, arg);
40563
40564- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
40565- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40566-
40567- if (fn != NULL)
40568+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
40569+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40570 ret = (*fn) (filp, cmd, arg);
40571- else
40572+ } else
40573 ret = drm_ioctl(filp, cmd, arg);
40574
40575 return ret;
40576diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
40577index 1b071b8..de8601a 100644
40578--- a/drivers/gpu/drm/mga/mga_irq.c
40579+++ b/drivers/gpu/drm/mga/mga_irq.c
40580@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
40581 if (crtc != 0)
40582 return 0;
40583
40584- return atomic_read(&dev_priv->vbl_received);
40585+ return atomic_read_unchecked(&dev_priv->vbl_received);
40586 }
40587
40588
40589@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40590 /* VBLANK interrupt */
40591 if (status & MGA_VLINEPEN) {
40592 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
40593- atomic_inc(&dev_priv->vbl_received);
40594+ atomic_inc_unchecked(&dev_priv->vbl_received);
40595 drm_handle_vblank(dev, 0);
40596 handled = 1;
40597 }
40598@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40599 if ((prim_start & ~0x03) != (prim_end & ~0x03))
40600 MGA_WRITE(MGA_PRIMEND, prim_end);
40601
40602- atomic_inc(&dev_priv->last_fence_retired);
40603+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
40604 wake_up(&dev_priv->fence_queue);
40605 handled = 1;
40606 }
40607@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
40608 * using fences.
40609 */
40610 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
40611- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
40612+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
40613 - *sequence) <= (1 << 23)));
40614
40615 *sequence = cur_fence;
40616diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
40617index 7df6acc..84bbe52 100644
40618--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
40619+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
40620@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
40621 struct bit_table {
40622 const char id;
40623 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
40624-};
40625+} __no_const;
40626
40627 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
40628
40629diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
40630index 8ae36f2..1147a30 100644
40631--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
40632+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
40633@@ -121,7 +121,6 @@ struct nouveau_drm {
40634 struct drm_global_reference mem_global_ref;
40635 struct ttm_bo_global_ref bo_global_ref;
40636 struct ttm_bo_device bdev;
40637- atomic_t validate_sequence;
40638 int (*move)(struct nouveau_channel *,
40639 struct ttm_buffer_object *,
40640 struct ttm_mem_reg *, struct ttm_mem_reg *);
40641diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40642index 462679a..88e32a7 100644
40643--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40644+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40645@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
40646 unsigned long arg)
40647 {
40648 unsigned int nr = DRM_IOCTL_NR(cmd);
40649- drm_ioctl_compat_t *fn = NULL;
40650+ drm_ioctl_compat_t fn = NULL;
40651 int ret;
40652
40653 if (nr < DRM_COMMAND_BASE)
40654diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40655index 3d1cfcb..0542700 100644
40656--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
40657+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40658@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40659 }
40660
40661 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
40662- nouveau_vram_manager_init,
40663- nouveau_vram_manager_fini,
40664- nouveau_vram_manager_new,
40665- nouveau_vram_manager_del,
40666- nouveau_vram_manager_debug
40667+ .init = nouveau_vram_manager_init,
40668+ .takedown = nouveau_vram_manager_fini,
40669+ .get_node = nouveau_vram_manager_new,
40670+ .put_node = nouveau_vram_manager_del,
40671+ .debug = nouveau_vram_manager_debug
40672 };
40673
40674 static int
40675@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40676 }
40677
40678 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
40679- nouveau_gart_manager_init,
40680- nouveau_gart_manager_fini,
40681- nouveau_gart_manager_new,
40682- nouveau_gart_manager_del,
40683- nouveau_gart_manager_debug
40684+ .init = nouveau_gart_manager_init,
40685+ .takedown = nouveau_gart_manager_fini,
40686+ .get_node = nouveau_gart_manager_new,
40687+ .put_node = nouveau_gart_manager_del,
40688+ .debug = nouveau_gart_manager_debug
40689 };
40690
40691 /*XXX*/
40692@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40693 }
40694
40695 const struct ttm_mem_type_manager_func nv04_gart_manager = {
40696- nv04_gart_manager_init,
40697- nv04_gart_manager_fini,
40698- nv04_gart_manager_new,
40699- nv04_gart_manager_del,
40700- nv04_gart_manager_debug
40701+ .init = nv04_gart_manager_init,
40702+ .takedown = nv04_gart_manager_fini,
40703+ .get_node = nv04_gart_manager_new,
40704+ .put_node = nv04_gart_manager_del,
40705+ .debug = nv04_gart_manager_debug
40706 };
40707
40708 int
40709diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
40710index c7592ec..dd45ebc 100644
40711--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
40712+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
40713@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
40714 * locking inversion with the driver load path. And the access here is
40715 * completely racy anyway. So don't bother with locking for now.
40716 */
40717- return dev->open_count == 0;
40718+ return local_read(&dev->open_count) == 0;
40719 }
40720
40721 static const struct vga_switcheroo_client_ops
40722diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
40723index 9782364..89bd954 100644
40724--- a/drivers/gpu/drm/qxl/qxl_cmd.c
40725+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
40726@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
40727 int ret;
40728
40729 mutex_lock(&qdev->async_io_mutex);
40730- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40731+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40732 if (qdev->last_sent_io_cmd > irq_num) {
40733 if (intr)
40734 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40735- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40736+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40737 else
40738 ret = wait_event_timeout(qdev->io_cmd_event,
40739- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40740+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40741 /* 0 is timeout, just bail the "hw" has gone away */
40742 if (ret <= 0)
40743 goto out;
40744- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40745+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40746 }
40747 outb(val, addr);
40748 qdev->last_sent_io_cmd = irq_num + 1;
40749 if (intr)
40750 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40751- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40752+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40753 else
40754 ret = wait_event_timeout(qdev->io_cmd_event,
40755- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40756+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40757 out:
40758 if (ret > 0)
40759 ret = 0;
40760diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
40761index 6911b8c..89d6867 100644
40762--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
40763+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
40764@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
40765 struct drm_info_node *node = (struct drm_info_node *) m->private;
40766 struct qxl_device *qdev = node->minor->dev->dev_private;
40767
40768- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
40769- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
40770- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
40771- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
40772+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
40773+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
40774+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
40775+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
40776 seq_printf(m, "%d\n", qdev->irq_received_error);
40777 return 0;
40778 }
40779diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
40780index 7c6cafe..460f542 100644
40781--- a/drivers/gpu/drm/qxl/qxl_drv.h
40782+++ b/drivers/gpu/drm/qxl/qxl_drv.h
40783@@ -290,10 +290,10 @@ struct qxl_device {
40784 unsigned int last_sent_io_cmd;
40785
40786 /* interrupt handling */
40787- atomic_t irq_received;
40788- atomic_t irq_received_display;
40789- atomic_t irq_received_cursor;
40790- atomic_t irq_received_io_cmd;
40791+ atomic_unchecked_t irq_received;
40792+ atomic_unchecked_t irq_received_display;
40793+ atomic_unchecked_t irq_received_cursor;
40794+ atomic_unchecked_t irq_received_io_cmd;
40795 unsigned irq_received_error;
40796 wait_queue_head_t display_event;
40797 wait_queue_head_t cursor_event;
40798diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
40799index b110883..dd06418 100644
40800--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
40801+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
40802@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40803
40804 /* TODO copy slow path code from i915 */
40805 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
40806- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
40807+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
40808
40809 {
40810 struct qxl_drawable *draw = fb_cmd;
40811@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40812 struct drm_qxl_reloc reloc;
40813
40814 if (copy_from_user(&reloc,
40815- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
40816+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
40817 sizeof(reloc))) {
40818 ret = -EFAULT;
40819 goto out_free_bos;
40820@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
40821
40822 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
40823
40824- struct drm_qxl_command *commands =
40825- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
40826+ struct drm_qxl_command __user *commands =
40827+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
40828
40829- if (copy_from_user(&user_cmd, &commands[cmd_num],
40830+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
40831 sizeof(user_cmd)))
40832 return -EFAULT;
40833
40834diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
40835index 0bf1e20..42a7310 100644
40836--- a/drivers/gpu/drm/qxl/qxl_irq.c
40837+++ b/drivers/gpu/drm/qxl/qxl_irq.c
40838@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
40839 if (!pending)
40840 return IRQ_NONE;
40841
40842- atomic_inc(&qdev->irq_received);
40843+ atomic_inc_unchecked(&qdev->irq_received);
40844
40845 if (pending & QXL_INTERRUPT_DISPLAY) {
40846- atomic_inc(&qdev->irq_received_display);
40847+ atomic_inc_unchecked(&qdev->irq_received_display);
40848 wake_up_all(&qdev->display_event);
40849 qxl_queue_garbage_collect(qdev, false);
40850 }
40851 if (pending & QXL_INTERRUPT_CURSOR) {
40852- atomic_inc(&qdev->irq_received_cursor);
40853+ atomic_inc_unchecked(&qdev->irq_received_cursor);
40854 wake_up_all(&qdev->cursor_event);
40855 }
40856 if (pending & QXL_INTERRUPT_IO_CMD) {
40857- atomic_inc(&qdev->irq_received_io_cmd);
40858+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
40859 wake_up_all(&qdev->io_cmd_event);
40860 }
40861 if (pending & QXL_INTERRUPT_ERROR) {
40862@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
40863 init_waitqueue_head(&qdev->io_cmd_event);
40864 INIT_WORK(&qdev->client_monitors_config_work,
40865 qxl_client_monitors_config_work_func);
40866- atomic_set(&qdev->irq_received, 0);
40867- atomic_set(&qdev->irq_received_display, 0);
40868- atomic_set(&qdev->irq_received_cursor, 0);
40869- atomic_set(&qdev->irq_received_io_cmd, 0);
40870+ atomic_set_unchecked(&qdev->irq_received, 0);
40871+ atomic_set_unchecked(&qdev->irq_received_display, 0);
40872+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
40873+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
40874 qdev->irq_received_error = 0;
40875 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
40876 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
40877diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
40878index 0cbc4c9..0e46686 100644
40879--- a/drivers/gpu/drm/qxl/qxl_ttm.c
40880+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
40881@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
40882 }
40883 }
40884
40885-static struct vm_operations_struct qxl_ttm_vm_ops;
40886+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
40887 static const struct vm_operations_struct *ttm_vm_ops;
40888
40889 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40890@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
40891 return r;
40892 if (unlikely(ttm_vm_ops == NULL)) {
40893 ttm_vm_ops = vma->vm_ops;
40894+ pax_open_kernel();
40895 qxl_ttm_vm_ops = *ttm_vm_ops;
40896 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
40897+ pax_close_kernel();
40898 }
40899 vma->vm_ops = &qxl_ttm_vm_ops;
40900 return 0;
40901@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
40902 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
40903 {
40904 #if defined(CONFIG_DEBUG_FS)
40905- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
40906- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
40907- unsigned i;
40908+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
40909+ {
40910+ .name = "qxl_mem_mm",
40911+ .show = &qxl_mm_dump_table,
40912+ },
40913+ {
40914+ .name = "qxl_surf_mm",
40915+ .show = &qxl_mm_dump_table,
40916+ }
40917+ };
40918
40919- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
40920- if (i == 0)
40921- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
40922- else
40923- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
40924- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
40925- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
40926- qxl_mem_types_list[i].driver_features = 0;
40927- if (i == 0)
40928- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
40929- else
40930- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
40931+ pax_open_kernel();
40932+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
40933+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
40934+ pax_close_kernel();
40935
40936- }
40937- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
40938+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
40939 #else
40940 return 0;
40941 #endif
40942diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
40943index 2c45ac9..5d740f8 100644
40944--- a/drivers/gpu/drm/r128/r128_cce.c
40945+++ b/drivers/gpu/drm/r128/r128_cce.c
40946@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
40947
40948 /* GH: Simple idle check.
40949 */
40950- atomic_set(&dev_priv->idle_count, 0);
40951+ atomic_set_unchecked(&dev_priv->idle_count, 0);
40952
40953 /* We don't support anything other than bus-mastering ring mode,
40954 * but the ring can be in either AGP or PCI space for the ring
40955diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
40956index 723e5d6..102dbaf 100644
40957--- a/drivers/gpu/drm/r128/r128_drv.h
40958+++ b/drivers/gpu/drm/r128/r128_drv.h
40959@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
40960 int is_pci;
40961 unsigned long cce_buffers_offset;
40962
40963- atomic_t idle_count;
40964+ atomic_unchecked_t idle_count;
40965
40966 int page_flipping;
40967 int current_page;
40968 u32 crtc_offset;
40969 u32 crtc_offset_cntl;
40970
40971- atomic_t vbl_received;
40972+ atomic_unchecked_t vbl_received;
40973
40974 u32 color_fmt;
40975 unsigned int front_offset;
40976diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
40977index 663f38c..c689495 100644
40978--- a/drivers/gpu/drm/r128/r128_ioc32.c
40979+++ b/drivers/gpu/drm/r128/r128_ioc32.c
40980@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
40981 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
40982 }
40983
40984-drm_ioctl_compat_t *r128_compat_ioctls[] = {
40985+drm_ioctl_compat_t r128_compat_ioctls[] = {
40986 [DRM_R128_INIT] = compat_r128_init,
40987 [DRM_R128_DEPTH] = compat_r128_depth,
40988 [DRM_R128_STIPPLE] = compat_r128_stipple,
40989@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
40990 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40991 {
40992 unsigned int nr = DRM_IOCTL_NR(cmd);
40993- drm_ioctl_compat_t *fn = NULL;
40994 int ret;
40995
40996 if (nr < DRM_COMMAND_BASE)
40997 return drm_compat_ioctl(filp, cmd, arg);
40998
40999- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41000- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41001-
41002- if (fn != NULL)
41003+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
41004+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41005 ret = (*fn) (filp, cmd, arg);
41006- else
41007+ } else
41008 ret = drm_ioctl(filp, cmd, arg);
41009
41010 return ret;
41011diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41012index c2ae496..30b5993 100644
41013--- a/drivers/gpu/drm/r128/r128_irq.c
41014+++ b/drivers/gpu/drm/r128/r128_irq.c
41015@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41016 if (crtc != 0)
41017 return 0;
41018
41019- return atomic_read(&dev_priv->vbl_received);
41020+ return atomic_read_unchecked(&dev_priv->vbl_received);
41021 }
41022
41023 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41024@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41025 /* VBLANK interrupt */
41026 if (status & R128_CRTC_VBLANK_INT) {
41027 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41028- atomic_inc(&dev_priv->vbl_received);
41029+ atomic_inc_unchecked(&dev_priv->vbl_received);
41030 drm_handle_vblank(dev, 0);
41031 return IRQ_HANDLED;
41032 }
41033diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41034index 8fd2d9f..18c9660 100644
41035--- a/drivers/gpu/drm/r128/r128_state.c
41036+++ b/drivers/gpu/drm/r128/r128_state.c
41037@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41038
41039 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41040 {
41041- if (atomic_read(&dev_priv->idle_count) == 0)
41042+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41043 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41044 else
41045- atomic_set(&dev_priv->idle_count, 0);
41046+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41047 }
41048
41049 #endif
41050diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41051index b928c17..e5d9400 100644
41052--- a/drivers/gpu/drm/radeon/mkregtable.c
41053+++ b/drivers/gpu/drm/radeon/mkregtable.c
41054@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41055 regex_t mask_rex;
41056 regmatch_t match[4];
41057 char buf[1024];
41058- size_t end;
41059+ long end;
41060 int len;
41061 int done = 0;
41062 int r;
41063 unsigned o;
41064 struct offset *offset;
41065 char last_reg_s[10];
41066- int last_reg;
41067+ unsigned long last_reg;
41068
41069 if (regcomp
41070 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41071diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41072index bd7519f..e1c2cd95 100644
41073--- a/drivers/gpu/drm/radeon/radeon_device.c
41074+++ b/drivers/gpu/drm/radeon/radeon_device.c
41075@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41076 * locking inversion with the driver load path. And the access here is
41077 * completely racy anyway. So don't bother with locking for now.
41078 */
41079- return dev->open_count == 0;
41080+ return local_read(&dev->open_count) == 0;
41081 }
41082
41083 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
41084diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41085index 46bd393..6ae4719 100644
41086--- a/drivers/gpu/drm/radeon/radeon_drv.h
41087+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41088@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41089
41090 /* SW interrupt */
41091 wait_queue_head_t swi_queue;
41092- atomic_t swi_emitted;
41093+ atomic_unchecked_t swi_emitted;
41094 int vblank_crtc;
41095 uint32_t irq_enable_reg;
41096 uint32_t r500_disp_irq_reg;
41097diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41098index 0b98ea1..0881827 100644
41099--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41100+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41101@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41102 request = compat_alloc_user_space(sizeof(*request));
41103 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41104 || __put_user(req32.param, &request->param)
41105- || __put_user((void __user *)(unsigned long)req32.value,
41106+ || __put_user((unsigned long)req32.value,
41107 &request->value))
41108 return -EFAULT;
41109
41110@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41111 #define compat_radeon_cp_setparam NULL
41112 #endif /* X86_64 || IA64 */
41113
41114-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41115+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41116 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41117 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41118 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41119@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41120 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41121 {
41122 unsigned int nr = DRM_IOCTL_NR(cmd);
41123- drm_ioctl_compat_t *fn = NULL;
41124 int ret;
41125
41126 if (nr < DRM_COMMAND_BASE)
41127 return drm_compat_ioctl(filp, cmd, arg);
41128
41129- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
41130- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41131-
41132- if (fn != NULL)
41133+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
41134+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41135 ret = (*fn) (filp, cmd, arg);
41136- else
41137+ } else
41138 ret = drm_ioctl(filp, cmd, arg);
41139
41140 return ret;
41141diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41142index 244b19b..c19226d 100644
41143--- a/drivers/gpu/drm/radeon/radeon_irq.c
41144+++ b/drivers/gpu/drm/radeon/radeon_irq.c
41145@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
41146 unsigned int ret;
41147 RING_LOCALS;
41148
41149- atomic_inc(&dev_priv->swi_emitted);
41150- ret = atomic_read(&dev_priv->swi_emitted);
41151+ atomic_inc_unchecked(&dev_priv->swi_emitted);
41152+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
41153
41154 BEGIN_RING(4);
41155 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
41156@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
41157 drm_radeon_private_t *dev_priv =
41158 (drm_radeon_private_t *) dev->dev_private;
41159
41160- atomic_set(&dev_priv->swi_emitted, 0);
41161+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
41162 init_waitqueue_head(&dev_priv->swi_queue);
41163
41164 dev->max_vblank_count = 0x001fffff;
41165diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
41166index 15aee72..cda326e 100644
41167--- a/drivers/gpu/drm/radeon/radeon_state.c
41168+++ b/drivers/gpu/drm/radeon/radeon_state.c
41169@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
41170 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
41171 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
41172
41173- if (copy_from_user(&depth_boxes, clear->depth_boxes,
41174+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
41175 sarea_priv->nbox * sizeof(depth_boxes[0])))
41176 return -EFAULT;
41177
41178@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
41179 {
41180 drm_radeon_private_t *dev_priv = dev->dev_private;
41181 drm_radeon_getparam_t *param = data;
41182- int value;
41183+ int value = 0;
41184
41185 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
41186
41187diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
41188index d02aa1d..ca19e2c 100644
41189--- a/drivers/gpu/drm/radeon/radeon_ttm.c
41190+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
41191@@ -959,7 +959,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
41192 man->size = size >> PAGE_SHIFT;
41193 }
41194
41195-static struct vm_operations_struct radeon_ttm_vm_ops;
41196+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
41197 static const struct vm_operations_struct *ttm_vm_ops = NULL;
41198
41199 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41200@@ -1000,8 +1000,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
41201 }
41202 if (unlikely(ttm_vm_ops == NULL)) {
41203 ttm_vm_ops = vma->vm_ops;
41204+ pax_open_kernel();
41205 radeon_ttm_vm_ops = *ttm_vm_ops;
41206 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
41207+ pax_close_kernel();
41208 }
41209 vma->vm_ops = &radeon_ttm_vm_ops;
41210 return 0;
41211diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41212index 978993f..e36e50e 100644
41213--- a/drivers/gpu/drm/tegra/dc.c
41214+++ b/drivers/gpu/drm/tegra/dc.c
41215@@ -1416,7 +1416,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41216 }
41217
41218 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41219- dc->debugfs_files[i].data = dc;
41220+ *(void **)&dc->debugfs_files[i].data = dc;
41221
41222 err = drm_debugfs_create_files(dc->debugfs_files,
41223 ARRAY_SIZE(debugfs_files),
41224diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
41225index 33f67fd..55ee9761 100644
41226--- a/drivers/gpu/drm/tegra/dsi.c
41227+++ b/drivers/gpu/drm/tegra/dsi.c
41228@@ -39,7 +39,7 @@ struct tegra_dsi {
41229 struct clk *clk_lp;
41230 struct clk *clk;
41231
41232- struct drm_info_list *debugfs_files;
41233+ drm_info_list_no_const *debugfs_files;
41234 struct drm_minor *minor;
41235 struct dentry *debugfs;
41236
41237diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41238index ffe2654..03c7b1c 100644
41239--- a/drivers/gpu/drm/tegra/hdmi.c
41240+++ b/drivers/gpu/drm/tegra/hdmi.c
41241@@ -60,7 +60,7 @@ struct tegra_hdmi {
41242 bool stereo;
41243 bool dvi;
41244
41245- struct drm_info_list *debugfs_files;
41246+ drm_info_list_no_const *debugfs_files;
41247 struct drm_minor *minor;
41248 struct dentry *debugfs;
41249 };
41250diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41251index aa0bd054..aea6a01 100644
41252--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41253+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41254@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41255 }
41256
41257 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41258- ttm_bo_man_init,
41259- ttm_bo_man_takedown,
41260- ttm_bo_man_get_node,
41261- ttm_bo_man_put_node,
41262- ttm_bo_man_debug
41263+ .init = ttm_bo_man_init,
41264+ .takedown = ttm_bo_man_takedown,
41265+ .get_node = ttm_bo_man_get_node,
41266+ .put_node = ttm_bo_man_put_node,
41267+ .debug = ttm_bo_man_debug
41268 };
41269 EXPORT_SYMBOL(ttm_bo_manager_func);
41270diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41271index a1803fb..c53f6b0 100644
41272--- a/drivers/gpu/drm/ttm/ttm_memory.c
41273+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41274@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41275 zone->glob = glob;
41276 glob->zone_kernel = zone;
41277 ret = kobject_init_and_add(
41278- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41279+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41280 if (unlikely(ret != 0)) {
41281 kobject_put(&zone->kobj);
41282 return ret;
41283@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41284 zone->glob = glob;
41285 glob->zone_dma32 = zone;
41286 ret = kobject_init_and_add(
41287- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41288+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41289 if (unlikely(ret != 0)) {
41290 kobject_put(&zone->kobj);
41291 return ret;
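
The kobject_init_and_add() calls in ttm_memory.c passed zone->name where a printf-style format string is expected; if a name ever contained a conversion specifier, the call would walk varargs it was never given. Routing the name through a literal "%s" is the standard fix. The same bug class in miniature:

#include <stdio.h>

int main(void)
{
        const char *zone_name = "zone %x %x";   /* hostile-looking name */

        /* printf(zone_name); -- would consume nonexistent varargs */
        printf("%s\n", zone_name);              /* prints the name verbatim */
        return 0;
}
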
41292diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41293index 025c429..314062f 100644
41294--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41295+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41296@@ -54,7 +54,7 @@
41297
41298 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41299 #define SMALL_ALLOCATION 16
41300-#define FREE_ALL_PAGES (~0U)
41301+#define FREE_ALL_PAGES (~0UL)
41302 /* times are in msecs */
41303 #define PAGE_FREE_INTERVAL 1000
41304
41305@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
41306 * @free_all: If set to true will free all pages in pool
41307 * @use_static: Safe to use static buffer
41308 **/
41309-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
41310+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
41311 bool use_static)
41312 {
41313 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41314 unsigned long irq_flags;
41315 struct page *p;
41316 struct page **pages_to_free;
41317- unsigned freed_pages = 0,
41318- npages_to_free = nr_free;
41319+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41320
41321 if (NUM_PAGES_TO_ALLOC < nr_free)
41322 npages_to_free = NUM_PAGES_TO_ALLOC;
41323@@ -371,7 +370,8 @@ restart:
41324 __list_del(&p->lru, &pool->list);
41325
41326 ttm_pool_update_free_locked(pool, freed_pages);
41327- nr_free -= freed_pages;
41328+ if (likely(nr_free != FREE_ALL_PAGES))
41329+ nr_free -= freed_pages;
41330 }
41331
41332 spin_unlock_irqrestore(&pool->lock, irq_flags);
41333@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41334 unsigned i;
41335 unsigned pool_offset;
41336 struct ttm_page_pool *pool;
41337- int shrink_pages = sc->nr_to_scan;
41338+ unsigned long shrink_pages = sc->nr_to_scan;
41339 unsigned long freed = 0;
41340
41341 if (!mutex_trylock(&lock))
41342@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41343 pool_offset = ++start_pool % NUM_POOLS;
41344 /* select start pool in round robin fashion */
41345 for (i = 0; i < NUM_POOLS; ++i) {
41346- unsigned nr_free = shrink_pages;
41347+ unsigned long nr_free = shrink_pages;
41348 if (shrink_pages == 0)
41349 break;
41350 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
41351@@ -673,7 +673,7 @@ out:
41352 }
41353
41354 /* Put all pages in pages list to correct pool to wait for reuse */
41355-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
41356+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
41357 enum ttm_caching_state cstate)
41358 {
41359 unsigned long irq_flags;
41360@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
41361 struct list_head plist;
41362 struct page *p = NULL;
41363 gfp_t gfp_flags = GFP_USER;
41364- unsigned count;
41365+ unsigned long count;
41366 int r;
41367
41368 /* set zero flag for page allocation if required */
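
The ttm_page_alloc.c hunks above widen the page counters from unsigned int to unsigned long and stop decrementing the FREE_ALL_PAGES sentinel. shrink_control::nr_to_scan is an unsigned long, so the old truncation could lose bits on 64-bit, and subtracting freed_pages from the sentinel would silently turn "free everything" into a finite, underflowed count. A compilable sketch of the sentinel pattern, with a hypothetical pool reduced to a bare page count:

    #include <stdio.h>

    #define FREE_ALL_PAGES (~0UL)

    /* Batches of 16 mimic NUM_PAGES_TO_ALLOC; the pool is just a counter here. */
    static unsigned long pool_free(unsigned long pool_pages, unsigned long nr_free)
    {
        while (pool_pages && nr_free) {
            unsigned long batch = nr_free < 16 ? nr_free : 16;

            if (batch > pool_pages)
                batch = pool_pages;
            pool_pages -= batch;
            if (nr_free != FREE_ALL_PAGES)  /* never decay the sentinel */
                nr_free -= batch;
        }
        return nr_free; /* what the caller still wanted freed */
    }

    int main(void)
    {
        printf("%lu\n", pool_free(100, 10));             /* 0: request satisfied */
        printf("%lu\n", pool_free(100, FREE_ALL_PAGES)); /* sentinel survives the drain */
        return 0;
    }
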
41369diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41370index 01e1d27..aaa018a 100644
41371--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41372+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41373@@ -56,7 +56,7 @@
41374
41375 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41376 #define SMALL_ALLOCATION 4
41377-#define FREE_ALL_PAGES (~0U)
41378+#define FREE_ALL_PAGES (~0UL)
41379 /* times are in msecs */
41380 #define IS_UNDEFINED (0)
41381 #define IS_WC (1<<1)
41382@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
41383 * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool
41384 * @use_static: Safe to use static buffer
41385 **/
41386-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41387+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
41388 bool use_static)
41389 {
41390 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41391@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41392 struct dma_page *dma_p, *tmp;
41393 struct page **pages_to_free;
41394 struct list_head d_pages;
41395- unsigned freed_pages = 0,
41396- npages_to_free = nr_free;
41397+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41398
41399 if (NUM_PAGES_TO_ALLOC < nr_free)
41400 npages_to_free = NUM_PAGES_TO_ALLOC;
41401@@ -499,7 +498,8 @@ restart:
41402 /* remove range of pages from the pool */
41403 if (freed_pages) {
41404 ttm_pool_update_free_locked(pool, freed_pages);
41405- nr_free -= freed_pages;
41406+ if (likely(nr_free != FREE_ALL_PAGES))
41407+ nr_free -= freed_pages;
41408 }
41409
41410 spin_unlock_irqrestore(&pool->lock, irq_flags);
41411@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
41412 struct dma_page *d_page, *next;
41413 enum pool_type type;
41414 bool is_cached = false;
41415- unsigned count = 0, i, npages = 0;
41416+ unsigned long count = 0, i, npages = 0;
41417 unsigned long irq_flags;
41418
41419 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
41420@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41421 static unsigned start_pool;
41422 unsigned idx = 0;
41423 unsigned pool_offset;
41424- unsigned shrink_pages = sc->nr_to_scan;
41425+ unsigned long shrink_pages = sc->nr_to_scan;
41426 struct device_pools *p;
41427 unsigned long freed = 0;
41428
41429@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41430 goto out;
41431 pool_offset = ++start_pool % _manager->npools;
41432 list_for_each_entry(p, &_manager->pools, pools) {
41433- unsigned nr_free;
41434+ unsigned long nr_free;
41435
41436 if (!p->dev)
41437 continue;
41438@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41439 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
41440 freed += nr_free - shrink_pages;
41441
41442- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
41443+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
41444 p->pool->dev_name, p->pool->name, current->pid,
41445 nr_free, shrink_pages);
41446 }
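
With nr_free and shrink_pages widened to unsigned long, the pr_debug() specifiers in the last hunk have to change from %d to %lu in lockstep, or the varargs would be misread on 64-bit targets. The rule in one kernel-style fragment (same variable names as above):

    unsigned long nr_free = 32, shrink_pages = 8;

    pr_debug("Asked to shrink %lu, have %lu more to go\n", nr_free, shrink_pages);
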
41447diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
41448index 8cbcb45..a4d9cf7 100644
41449--- a/drivers/gpu/drm/udl/udl_fb.c
41450+++ b/drivers/gpu/drm/udl/udl_fb.c
41451@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
41452 fb_deferred_io_cleanup(info);
41453 kfree(info->fbdefio);
41454 info->fbdefio = NULL;
41455- info->fbops->fb_mmap = udl_fb_mmap;
41456 }
41457
41458 pr_warn("released /dev/fb%d user=%d count=%d\n",
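
The deleted udl_fb.c line re-assigned a method through info->fbops at release time. Elsewhere this patch constifies such ops tables, so a runtime store through them would fault; the assignment was also redundant, since fb_mmap is not changed on this path. A hedged sketch of the resulting invariant (field names mirror the fbdev API; the full callback list is elided):

    static const struct fb_ops udlfb_ops = {
        .owner   = THIS_MODULE,
        .fb_mmap = udl_fb_mmap,  /* fixed at build time; never reassigned */
        /* ... remaining callbacks ... */
    };
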
41459diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
41460index ef8c500..01030c8 100644
41461--- a/drivers/gpu/drm/via/via_drv.h
41462+++ b/drivers/gpu/drm/via/via_drv.h
41463@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
41464 typedef uint32_t maskarray_t[5];
41465
41466 typedef struct drm_via_irq {
41467- atomic_t irq_received;
41468+ atomic_unchecked_t irq_received;
41469 uint32_t pending_mask;
41470 uint32_t enable_mask;
41471 wait_queue_head_t irq_queue;
41472@@ -77,7 +77,7 @@ typedef struct drm_via_private {
41473 struct timeval last_vblank;
41474 int last_vblank_valid;
41475 unsigned usec_per_vblank;
41476- atomic_t vbl_received;
41477+ atomic_unchecked_t vbl_received;
41478 drm_via_state_t hc_state;
41479 char pci_buf[VIA_PCI_BUF_SIZE];
41480 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
41481diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
41482index 1319433..a993b0c 100644
41483--- a/drivers/gpu/drm/via/via_irq.c
41484+++ b/drivers/gpu/drm/via/via_irq.c
41485@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
41486 if (crtc != 0)
41487 return 0;
41488
41489- return atomic_read(&dev_priv->vbl_received);
41490+ return atomic_read_unchecked(&dev_priv->vbl_received);
41491 }
41492
41493 irqreturn_t via_driver_irq_handler(int irq, void *arg)
41494@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41495
41496 status = VIA_READ(VIA_REG_INTERRUPT);
41497 if (status & VIA_IRQ_VBLANK_PENDING) {
41498- atomic_inc(&dev_priv->vbl_received);
41499- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
41500+ atomic_inc_unchecked(&dev_priv->vbl_received);
41501+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
41502 do_gettimeofday(&cur_vblank);
41503 if (dev_priv->last_vblank_valid) {
41504 dev_priv->usec_per_vblank =
41505@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41506 dev_priv->last_vblank = cur_vblank;
41507 dev_priv->last_vblank_valid = 1;
41508 }
41509- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
41510+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
41511 DRM_DEBUG("US per vblank is: %u\n",
41512 dev_priv->usec_per_vblank);
41513 }
41514@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41515
41516 for (i = 0; i < dev_priv->num_irqs; ++i) {
41517 if (status & cur_irq->pending_mask) {
41518- atomic_inc(&cur_irq->irq_received);
41519+ atomic_inc_unchecked(&cur_irq->irq_received);
41520 wake_up(&cur_irq->irq_queue);
41521 handled = 1;
41522 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
41523@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
41524 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41525 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
41526 masks[irq][4]));
41527- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
41528+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
41529 } else {
41530 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41531 (((cur_irq_sequence =
41532- atomic_read(&cur_irq->irq_received)) -
41533+ atomic_read_unchecked(&cur_irq->irq_received)) -
41534 *sequence) <= (1 << 23)));
41535 }
41536 *sequence = cur_irq_sequence;
41537@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
41538 }
41539
41540 for (i = 0; i < dev_priv->num_irqs; ++i) {
41541- atomic_set(&cur_irq->irq_received, 0);
41542+ atomic_set_unchecked(&cur_irq->irq_received, 0);
41543 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
41544 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
41545 init_waitqueue_head(&cur_irq->irq_queue);
41546@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
41547 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
41548 case VIA_IRQ_RELATIVE:
41549 irqwait->request.sequence +=
41550- atomic_read(&cur_irq->irq_received);
41551+ atomic_read_unchecked(&cur_irq->irq_received);
41552 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
41553 case VIA_IRQ_ABSOLUTE:
41554 break;
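
The via hunks are the first of many in this section that convert atomic_t counters to atomic_unchecked_t. PaX's REFCOUNT hardening traps when a regular atomic_t overflows, to block reference-count-overflow exploits; counters that are pure statistics or sequence numbers (vblank and IRQ counts here) may legitimately wrap and must opt out via the _unchecked variants. Roughly, the unchecked type mirrors atomic_t minus the overflow trap; a simplified sketch, since the real patch implements this per-architecture in asm:

    typedef struct {
        int counter;
    } atomic_unchecked_t;

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
        return *(volatile const int *)&v->counter;
    }

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __sync_fetch_and_add(&v->counter, 1); /* stand-in for the arch asm; no trap */
    }

    static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
    {
        v->counter = i;
    }
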
41555diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41556index d26a6da..5fa41ed 100644
41557--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41558+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41559@@ -447,7 +447,7 @@ struct vmw_private {
41560 * Fencing and IRQs.
41561 */
41562
41563- atomic_t marker_seq;
41564+ atomic_unchecked_t marker_seq;
41565 wait_queue_head_t fence_queue;
41566 wait_queue_head_t fifo_queue;
41567 spinlock_t waiter_lock;
41568diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41569index 39f2b03..d1b0a64 100644
41570--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41571+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41572@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
41573 (unsigned int) min,
41574 (unsigned int) fifo->capabilities);
41575
41576- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41577+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41578 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
41579 vmw_marker_queue_init(&fifo->marker_queue);
41580 return vmw_fifo_send_fence(dev_priv, &dummy);
41581@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
41582 if (reserveable)
41583 iowrite32(bytes, fifo_mem +
41584 SVGA_FIFO_RESERVED);
41585- return fifo_mem + (next_cmd >> 2);
41586+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
41587 } else {
41588 need_bounce = true;
41589 }
41590@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41591
41592 fm = vmw_fifo_reserve(dev_priv, bytes);
41593 if (unlikely(fm == NULL)) {
41594- *seqno = atomic_read(&dev_priv->marker_seq);
41595+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41596 ret = -ENOMEM;
41597 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
41598 false, 3*HZ);
41599@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41600 }
41601
41602 do {
41603- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
41604+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
41605 } while (*seqno == 0);
41606
41607 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
41608diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41609index 170b61b..fec7348 100644
41610--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41611+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41612@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
41613 }
41614
41615 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
41616- vmw_gmrid_man_init,
41617- vmw_gmrid_man_takedown,
41618- vmw_gmrid_man_get_node,
41619- vmw_gmrid_man_put_node,
41620- vmw_gmrid_man_debug
41621+ .init = vmw_gmrid_man_init,
41622+ .takedown = vmw_gmrid_man_takedown,
41623+ .get_node = vmw_gmrid_man_get_node,
41624+ .put_node = vmw_gmrid_man_put_node,
41625+ .debug = vmw_gmrid_man_debug
41626 };
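
The vmwgfx_gmrid_manager.c hunk converts a positional initializer list to designated initializers. Binding by field name instead of position keeps the table correct when structure layouts change, e.g. under grsecurity's structure-layout randomization, and it also survives upstream reordering of the ops struct. A plain-C illustration:

    struct ops {
        int  (*init)(void);
        void (*takedown)(void);
    };

    static int  demo_init(void)     { return 0; }
    static void demo_takedown(void) { }

    /* Bound by name: correct even if 'init' and 'takedown' ever swap places. */
    static const struct ops demo_ops = {
        .init     = demo_init,
        .takedown = demo_takedown,
    };
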
41627diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41628index 69c8ce2..cacb0ab 100644
41629--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41630+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41631@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
41632 int ret;
41633
41634 num_clips = arg->num_clips;
41635- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41636+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41637
41638 if (unlikely(num_clips == 0))
41639 return 0;
41640@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
41641 int ret;
41642
41643 num_clips = arg->num_clips;
41644- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41645+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41646
41647 if (unlikely(num_clips == 0))
41648 return 0;
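
Adding __user to the casts above restores sparse's address-space checking: clips_ptr arrives from userspace as a 64-bit integer, and once the pointer is annotated, any direct dereference (instead of copy_from_user()) is reported by sparse. The annotation is free at runtime, as this sketch of its usual definition shows:

    #ifdef __CHECKER__                     /* set when sparse runs */
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user                        /* no-op for the real compiler */
    #endif

    struct drm_vmw_rect;                   /* opaque for this sketch */

    static inline struct drm_vmw_rect __user *clips_from_u64(unsigned long long p)
    {
        /* double cast: u64 -> pointer-sized integer -> annotated pointer */
        return (struct drm_vmw_rect __user *)(unsigned long)p;
    }
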
41649diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41650index 9fe9827..0aa2fc0 100644
41651--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41652+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41653@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
41654 * emitted. Then the fence is stale and signaled.
41655 */
41656
41657- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
41658+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
41659 > VMW_FENCE_WRAP);
41660
41661 return ret;
41662@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
41663
41664 if (fifo_idle)
41665 down_read(&fifo_state->rwsem);
41666- signal_seq = atomic_read(&dev_priv->marker_seq);
41667+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
41668 ret = 0;
41669
41670 for (;;) {
41671diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41672index efd1ffd..0ae13ca 100644
41673--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41674+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41675@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
41676 while (!vmw_lag_lt(queue, us)) {
41677 spin_lock(&queue->lock);
41678 if (list_empty(&queue->head))
41679- seqno = atomic_read(&dev_priv->marker_seq);
41680+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41681 else {
41682 marker = list_first_entry(&queue->head,
41683 struct vmw_marker, head);
41684diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
41685index 37ac7b5..d52a5c9 100644
41686--- a/drivers/gpu/vga/vga_switcheroo.c
41687+++ b/drivers/gpu/vga/vga_switcheroo.c
41688@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
41689
41690 /* this version is for the case where the power switch is separate
41691 to the device being powered down. */
41692-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
41693+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
41694 {
41695 /* copy over all the bus versions */
41696 if (dev->bus && dev->bus->pm) {
41697@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
41698 return ret;
41699 }
41700
41701-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
41702+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
41703 {
41704 /* copy over all the bus versions */
41705 if (dev->bus && dev->bus->pm) {
41706diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
41707index 8b63879..a5a5e72 100644
41708--- a/drivers/hid/hid-core.c
41709+++ b/drivers/hid/hid-core.c
41710@@ -2508,7 +2508,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
41711
41712 int hid_add_device(struct hid_device *hdev)
41713 {
41714- static atomic_t id = ATOMIC_INIT(0);
41715+ static atomic_unchecked_t id = ATOMIC_INIT(0);
41716 int ret;
41717
41718 if (WARN_ON(hdev->status & HID_STAT_ADDED))
41719@@ -2551,7 +2551,7 @@ int hid_add_device(struct hid_device *hdev)
41720 /* XXX hack - is there any cleaner solution, once the driver core
41721 * is converted to allow more than 20 bytes as the device name? */
41722 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
41723- hdev->vendor, hdev->product, atomic_inc_return(&id));
41724+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
41725
41726 hid_debug_register(hdev, dev_name(&hdev->dev));
41727 ret = device_add(&hdev->dev);
41728diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
41729index 5bc6d80..e47b55a 100644
41730--- a/drivers/hid/hid-logitech-dj.c
41731+++ b/drivers/hid/hid-logitech-dj.c
41732@@ -853,6 +853,12 @@ static int logi_dj_dj_event(struct hid_device *hdev,
41733 * case we forward it to the correct hid device (via hid_input_report()
41734 * ) and return 1 so hid-core does not do anything else with it.
41735 */
41736+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41737+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
41738+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
41739+ __func__, dj_report->device_index);
41740+ return false;
41741+ }
41742
41743 if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41744 (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
41745diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
41746index c13fb5b..55a3802 100644
41747--- a/drivers/hid/hid-wiimote-debug.c
41748+++ b/drivers/hid/hid-wiimote-debug.c
41749@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
41750 else if (size == 0)
41751 return -EIO;
41752
41753- if (copy_to_user(u, buf, size))
41754+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
41755 return -EFAULT;
41756
41757 *off += size;
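
The guard added to wiidebug_eeprom_read() refuses reads larger than the on-stack buffer before copy_to_user() runs, closing a potential kernel-stack infoleak if size could ever exceed sizeof(buf). The shape of the fix in self-contained C, with memcpy standing in for copy_to_user:

    #include <stdio.h>
    #include <string.h>

    static long bounded_read(char *dst, size_t size)
    {
        char buf[16] = "sensor data";

        if (size > sizeof(buf))   /* the added clamp: never copy past the buffer */
            return -1;            /* the kernel code returns -EFAULT */
        memcpy(dst, buf, size);
        return (long)size;
    }

    int main(void)
    {
        char out[64];

        printf("%ld\n", bounded_read(out, 11));  /* ok */
        printf("%ld\n", bounded_read(out, 40));  /* rejected: would leak stack bytes */
        return 0;
    }
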
41758diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
41759index 433f72a..2926005 100644
41760--- a/drivers/hv/channel.c
41761+++ b/drivers/hv/channel.c
41762@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
41763 unsigned long flags;
41764 int ret = 0;
41765
41766- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
41767- atomic_inc(&vmbus_connection.next_gpadl_handle);
41768+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
41769+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
41770
41771 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
41772 if (ret)
41773diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
41774index 3e4235c..877d0e5 100644
41775--- a/drivers/hv/hv.c
41776+++ b/drivers/hv/hv.c
41777@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
41778 u64 output_address = (output) ? virt_to_phys(output) : 0;
41779 u32 output_address_hi = output_address >> 32;
41780 u32 output_address_lo = output_address & 0xFFFFFFFF;
41781- void *hypercall_page = hv_context.hypercall_page;
41782+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
41783
41784 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
41785 "=a"(hv_status_lo) : "d" (control_hi),
41786@@ -156,7 +156,7 @@ int hv_init(void)
41787 /* See if the hypercall page is already set */
41788 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
41789
41790- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
41791+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
41792
41793 if (!virtaddr)
41794 goto cleanup;
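
Two W^X-flavoured changes in the hv.c hunks: the hypercall page is now mapped PAGE_KERNEL_RX (readable and executable, never writable) instead of PAGE_KERNEL_EXEC, which works because the page is populated through the HV_X64_MSR_HYPERCALL handshake rather than by kernel stores; and ktva_ktla() is a PaX/KERNEXEC address-translation helper, defined elsewhere in this patch, that selects the correct alias of the page for the indirect call when kernel text and data are mapped at different addresses. The allocation change in isolation (kernel-style fragment):

    /* read + execute, never writable: the hypervisor fills the page in */
    virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
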
41795diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
41796index b958ded..b2452bb 100644
41797--- a/drivers/hv/hv_balloon.c
41798+++ b/drivers/hv/hv_balloon.c
41799@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
41800
41801 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
41802 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
41803-static atomic_t trans_id = ATOMIC_INIT(0);
41804+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
41805
41806 static int dm_ring_size = (5 * PAGE_SIZE);
41807
41808@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
41809 pr_info("Memory hot add failed\n");
41810
41811 dm->state = DM_INITIALIZED;
41812- resp.hdr.trans_id = atomic_inc_return(&trans_id);
41813+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41814 vmbus_sendpacket(dm->dev->channel, &resp,
41815 sizeof(struct dm_hot_add_response),
41816 (unsigned long)NULL,
41817@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
41818 memset(&status, 0, sizeof(struct dm_status));
41819 status.hdr.type = DM_STATUS_REPORT;
41820 status.hdr.size = sizeof(struct dm_status);
41821- status.hdr.trans_id = atomic_inc_return(&trans_id);
41822+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41823
41824 /*
41825 * The host expects the guest to report free memory.
41826@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
41827 * send the status. This can happen if we were interrupted
41828 * after we picked our transaction ID.
41829 */
41830- if (status.hdr.trans_id != atomic_read(&trans_id))
41831+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
41832 return;
41833
41834 /*
41835@@ -1133,7 +1133,7 @@ static void balloon_up(struct work_struct *dummy)
41836 */
41837
41838 do {
41839- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
41840+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41841 ret = vmbus_sendpacket(dm_device.dev->channel,
41842 bl_resp,
41843 bl_resp->hdr.size,
41844@@ -1179,7 +1179,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
41845
41846 memset(&resp, 0, sizeof(struct dm_unballoon_response));
41847 resp.hdr.type = DM_UNBALLOON_RESPONSE;
41848- resp.hdr.trans_id = atomic_inc_return(&trans_id);
41849+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41850 resp.hdr.size = sizeof(struct dm_unballoon_response);
41851
41852 vmbus_sendpacket(dm_device.dev->channel, &resp,
41853@@ -1243,7 +1243,7 @@ static void version_resp(struct hv_dynmem_device *dm,
41854 memset(&version_req, 0, sizeof(struct dm_version_request));
41855 version_req.hdr.type = DM_VERSION_REQUEST;
41856 version_req.hdr.size = sizeof(struct dm_version_request);
41857- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
41858+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41859 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
41860 version_req.is_last_attempt = 1;
41861
41862@@ -1413,7 +1413,7 @@ static int balloon_probe(struct hv_device *dev,
41863 memset(&version_req, 0, sizeof(struct dm_version_request));
41864 version_req.hdr.type = DM_VERSION_REQUEST;
41865 version_req.hdr.size = sizeof(struct dm_version_request);
41866- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
41867+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41868 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
41869 version_req.is_last_attempt = 0;
41870
41871@@ -1444,7 +1444,7 @@ static int balloon_probe(struct hv_device *dev,
41872 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
41873 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
41874 cap_msg.hdr.size = sizeof(struct dm_capabilities);
41875- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
41876+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41877
41878 cap_msg.caps.cap_bits.balloon = 1;
41879 cap_msg.caps.cap_bits.hot_add = 1;
41880diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
41881index c386d8d..d6004c4 100644
41882--- a/drivers/hv/hyperv_vmbus.h
41883+++ b/drivers/hv/hyperv_vmbus.h
41884@@ -611,7 +611,7 @@ enum vmbus_connect_state {
41885 struct vmbus_connection {
41886 enum vmbus_connect_state conn_state;
41887
41888- atomic_t next_gpadl_handle;
41889+ atomic_unchecked_t next_gpadl_handle;
41890
41891 /*
41892 * Represents channel interrupts. Each bit position represents a
41893diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
41894index 4d6b269..2e23b86 100644
41895--- a/drivers/hv/vmbus_drv.c
41896+++ b/drivers/hv/vmbus_drv.c
41897@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
41898 {
41899 int ret = 0;
41900
41901- static atomic_t device_num = ATOMIC_INIT(0);
41902+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
41903
41904 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
41905- atomic_inc_return(&device_num));
41906+ atomic_inc_return_unchecked(&device_num));
41907
41908 child_device_obj->device.bus = &hv_bus;
41909 child_device_obj->device.parent = &hv_acpi_dev->dev;
41910diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
41911index 579bdf9..75118b5 100644
41912--- a/drivers/hwmon/acpi_power_meter.c
41913+++ b/drivers/hwmon/acpi_power_meter.c
41914@@ -116,7 +116,7 @@ struct sensor_template {
41915 struct device_attribute *devattr,
41916 const char *buf, size_t count);
41917 int index;
41918-};
41919+} __do_const;
41920
41921 /* Averaging interval */
41922 static int update_avg_interval(struct acpi_power_meter_resource *resource)
41923@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
41924 struct sensor_template *attrs)
41925 {
41926 struct device *dev = &resource->acpi_dev->dev;
41927- struct sensor_device_attribute *sensors =
41928+ sensor_device_attribute_no_const *sensors =
41929 &resource->sensors[resource->num_sensors];
41930 int res = 0;
41931
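
This acpi_power_meter hunk is the first of a long run of hwmon changes following one pattern. grsecurity's constify GCC plugin makes structures consisting of function pointers read-only after init; __do_const forces that treatment onto a struct (like sensor_template above) that mixes in data fields, while the *_no_const typedefs give the few call sites that must fill attributes in at runtime a mutable alias of the otherwise-constified type. A sketch of the pairing (the attribute spellings follow the plugin support headers, to the best of this editor's knowledge):

    #ifdef CONSTIFY_PLUGIN
    # define __do_const __attribute__((do_const))   /* force constification */
    # define __no_const __attribute__((no_const))   /* opt a typedef back out */
    #else
    # define __do_const
    # define __no_const
    #endif

    struct sensor_template {
        const char *label;
        int index;
    } __do_const;                 /* instances become read-only after init */

    struct device_attribute;      /* opaque here */
    typedef struct device_attribute __no_const device_attribute_no_const;
    /* runtime-built attributes go through the mutable alias instead */
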
41932diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
41933index 0af63da..05a183a 100644
41934--- a/drivers/hwmon/applesmc.c
41935+++ b/drivers/hwmon/applesmc.c
41936@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
41937 {
41938 struct applesmc_node_group *grp;
41939 struct applesmc_dev_attr *node;
41940- struct attribute *attr;
41941+ attribute_no_const *attr;
41942 int ret, i;
41943
41944 for (grp = groups; grp->format; grp++) {
41945diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
41946index cccef87..06ce8ec 100644
41947--- a/drivers/hwmon/asus_atk0110.c
41948+++ b/drivers/hwmon/asus_atk0110.c
41949@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
41950 struct atk_sensor_data {
41951 struct list_head list;
41952 struct atk_data *data;
41953- struct device_attribute label_attr;
41954- struct device_attribute input_attr;
41955- struct device_attribute limit1_attr;
41956- struct device_attribute limit2_attr;
41957+ device_attribute_no_const label_attr;
41958+ device_attribute_no_const input_attr;
41959+ device_attribute_no_const limit1_attr;
41960+ device_attribute_no_const limit2_attr;
41961 char label_attr_name[ATTR_NAME_SIZE];
41962 char input_attr_name[ATTR_NAME_SIZE];
41963 char limit1_attr_name[ATTR_NAME_SIZE];
41964@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
41965 static struct device_attribute atk_name_attr =
41966 __ATTR(name, 0444, atk_name_show, NULL);
41967
41968-static void atk_init_attribute(struct device_attribute *attr, char *name,
41969+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
41970 sysfs_show_func show)
41971 {
41972 sysfs_attr_init(&attr->attr);
41973diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
41974index 5b7fec8..05c957a 100644
41975--- a/drivers/hwmon/coretemp.c
41976+++ b/drivers/hwmon/coretemp.c
41977@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
41978 return NOTIFY_OK;
41979 }
41980
41981-static struct notifier_block coretemp_cpu_notifier __refdata = {
41982+static struct notifier_block coretemp_cpu_notifier = {
41983 .notifier_call = coretemp_cpu_callback,
41984 };
41985
41986diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
41987index 7a8a6fb..015c1fd 100644
41988--- a/drivers/hwmon/ibmaem.c
41989+++ b/drivers/hwmon/ibmaem.c
41990@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
41991 struct aem_rw_sensor_template *rw)
41992 {
41993 struct device *dev = &data->pdev->dev;
41994- struct sensor_device_attribute *sensors = data->sensors;
41995+ sensor_device_attribute_no_const *sensors = data->sensors;
41996 int err;
41997
41998 /* Set up read-only sensors */
41999diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42000index 17ae2eb..21b71dd 100644
42001--- a/drivers/hwmon/iio_hwmon.c
42002+++ b/drivers/hwmon/iio_hwmon.c
42003@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42004 {
42005 struct device *dev = &pdev->dev;
42006 struct iio_hwmon_state *st;
42007- struct sensor_device_attribute *a;
42008+ sensor_device_attribute_no_const *a;
42009 int ret, i;
42010 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42011 enum iio_chan_type type;
42012diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42013index f3830db..9f4d6d5 100644
42014--- a/drivers/hwmon/nct6683.c
42015+++ b/drivers/hwmon/nct6683.c
42016@@ -397,11 +397,11 @@ static struct attribute_group *
42017 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42018 int repeat)
42019 {
42020- struct sensor_device_attribute_2 *a2;
42021- struct sensor_device_attribute *a;
42022+ sensor_device_attribute_2_no_const *a2;
42023+ sensor_device_attribute_no_const *a;
42024 struct sensor_device_template **t;
42025 struct sensor_device_attr_u *su;
42026- struct attribute_group *group;
42027+ attribute_group_no_const *group;
42028 struct attribute **attrs;
42029 int i, j, count;
42030
42031diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42032index 1be4117..88ae1e1 100644
42033--- a/drivers/hwmon/nct6775.c
42034+++ b/drivers/hwmon/nct6775.c
42035@@ -952,10 +952,10 @@ static struct attribute_group *
42036 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42037 int repeat)
42038 {
42039- struct attribute_group *group;
42040+ attribute_group_no_const *group;
42041 struct sensor_device_attr_u *su;
42042- struct sensor_device_attribute *a;
42043- struct sensor_device_attribute_2 *a2;
42044+ sensor_device_attribute_no_const *a;
42045+ sensor_device_attribute_2_no_const *a2;
42046 struct attribute **attrs;
42047 struct sensor_device_template **t;
42048 int i, count;
42049diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42050index f2e47c7..45d7941 100644
42051--- a/drivers/hwmon/pmbus/pmbus_core.c
42052+++ b/drivers/hwmon/pmbus/pmbus_core.c
42053@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42054 return 0;
42055 }
42056
42057-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42058+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42059 const char *name,
42060 umode_t mode,
42061 ssize_t (*show)(struct device *dev,
42062@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42063 dev_attr->store = store;
42064 }
42065
42066-static void pmbus_attr_init(struct sensor_device_attribute *a,
42067+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42068 const char *name,
42069 umode_t mode,
42070 ssize_t (*show)(struct device *dev,
42071@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42072 u16 reg, u8 mask)
42073 {
42074 struct pmbus_boolean *boolean;
42075- struct sensor_device_attribute *a;
42076+ sensor_device_attribute_no_const *a;
42077
42078 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42079 if (!boolean)
42080@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42081 bool update, bool readonly)
42082 {
42083 struct pmbus_sensor *sensor;
42084- struct device_attribute *a;
42085+ device_attribute_no_const *a;
42086
42087 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42088 if (!sensor)
42089@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42090 const char *lstring, int index)
42091 {
42092 struct pmbus_label *label;
42093- struct device_attribute *a;
42094+ device_attribute_no_const *a;
42095
42096 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42097 if (!label)
42098diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42099index d4f0935..7420593 100644
42100--- a/drivers/hwmon/sht15.c
42101+++ b/drivers/hwmon/sht15.c
42102@@ -169,7 +169,7 @@ struct sht15_data {
42103 int supply_uv;
42104 bool supply_uv_valid;
42105 struct work_struct update_supply_work;
42106- atomic_t interrupt_handled;
42107+ atomic_unchecked_t interrupt_handled;
42108 };
42109
42110 /**
42111@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42112 ret = gpio_direction_input(data->pdata->gpio_data);
42113 if (ret)
42114 return ret;
42115- atomic_set(&data->interrupt_handled, 0);
42116+ atomic_set_unchecked(&data->interrupt_handled, 0);
42117
42118 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42119 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42120 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42121 /* Only relevant if the interrupt hasn't occurred. */
42122- if (!atomic_read(&data->interrupt_handled))
42123+ if (!atomic_read_unchecked(&data->interrupt_handled))
42124 schedule_work(&data->read_work);
42125 }
42126 ret = wait_event_timeout(data->wait_queue,
42127@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42128
42129 /* First disable the interrupt */
42130 disable_irq_nosync(irq);
42131- atomic_inc(&data->interrupt_handled);
42132+ atomic_inc_unchecked(&data->interrupt_handled);
42133 /* Then schedule a reading work struct */
42134 if (data->state != SHT15_READING_NOTHING)
42135 schedule_work(&data->read_work);
42136@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42137 * If not, then start the interrupt again - take care here, as it could
42138 * have gone low in the meantime, so verify it hasn't!
42139 */
42140- atomic_set(&data->interrupt_handled, 0);
42141+ atomic_set_unchecked(&data->interrupt_handled, 0);
42142 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42143 /* If still not occurred or another handler was scheduled */
42144 if (gpio_get_value(data->pdata->gpio_data)
42145- || atomic_read(&data->interrupt_handled))
42146+ || atomic_read_unchecked(&data->interrupt_handled))
42147 return;
42148 }
42149
42150diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42151index ac91c07..8e69663 100644
42152--- a/drivers/hwmon/via-cputemp.c
42153+++ b/drivers/hwmon/via-cputemp.c
42154@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42155 return NOTIFY_OK;
42156 }
42157
42158-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42159+static struct notifier_block via_cputemp_cpu_notifier = {
42160 .notifier_call = via_cputemp_cpu_callback,
42161 };
42162
42163diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42164index 65e3240..e6c511d 100644
42165--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42166+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42167@@ -39,7 +39,7 @@
42168 extern struct i2c_adapter amd756_smbus;
42169
42170 static struct i2c_adapter *s4882_adapter;
42171-static struct i2c_algorithm *s4882_algo;
42172+static i2c_algorithm_no_const *s4882_algo;
42173
42174 /* Wrapper access functions for multiplexed SMBus */
42175 static DEFINE_MUTEX(amd756_lock);
42176diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42177index b19a310..d6eece0 100644
42178--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42179+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42180@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42181 /* usb layer */
42182
42183 /* Send command to device, and get response. */
42184-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42185+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42186 {
42187 int ret = 0;
42188 int actual;
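
__intentional_overflow(-1), added to diolan_usb_transfer() above, is an annotation consumed by grsecurity's size_overflow GCC plugin, which instruments integer expressions feeding size calculations and kills the task if they wrap. Marking a function this way tells the plugin that its arithmetic or return value may legitimately overflow and should not be instrumented. The marker is a no-op without the plugin; a sketch with a hypothetical example function:

    #ifdef SIZE_OVERFLOW_PLUGIN
    # define __intentional_overflow(...) \
        __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    # define __intentional_overflow(...)
    #endif

    /* hypothetical: wraparound of the doubled length is handled by the caller */
    static int __intentional_overflow(-1) doubled_len(int len)
    {
        return len * 2;
    }
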
42189diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42190index 88eda09..cf40434 100644
42191--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42192+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42193@@ -37,7 +37,7 @@
42194 extern struct i2c_adapter *nforce2_smbus;
42195
42196 static struct i2c_adapter *s4985_adapter;
42197-static struct i2c_algorithm *s4985_algo;
42198+static i2c_algorithm_no_const *s4985_algo;
42199
42200 /* Wrapper access functions for multiplexed SMBus */
42201 static DEFINE_MUTEX(nforce2_lock);
42202diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42203index 71c7a39..71dd3e0 100644
42204--- a/drivers/i2c/i2c-dev.c
42205+++ b/drivers/i2c/i2c-dev.c
42206@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42207 break;
42208 }
42209
42210- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
42211+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
42212 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
42213 if (IS_ERR(rdwr_pa[i].buf)) {
42214 res = PTR_ERR(rdwr_pa[i].buf);
42215diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
42216index 0b510ba..4fbb5085 100644
42217--- a/drivers/ide/ide-cd.c
42218+++ b/drivers/ide/ide-cd.c
42219@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
42220 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
42221 if ((unsigned long)buf & alignment
42222 || blk_rq_bytes(rq) & q->dma_pad_mask
42223- || object_is_on_stack(buf))
42224+ || object_starts_on_stack(buf))
42225 drive->dma = 0;
42226 }
42227 }
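
ide-cd refuses DMA when the request buffer sits on the kernel stack; object_starts_on_stack() is grsecurity's variant of that check, paired with its other stack-object helpers, and both reduce to a range test against the task's stack. A userspace-flavoured analogue of the test:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Does obj point inside [stack_base, stack_base + stack_size)? */
    static bool starts_on_stack(const void *stack_base, size_t stack_size,
                                const void *obj)
    {
        uintptr_t base = (uintptr_t)stack_base;
        uintptr_t p    = (uintptr_t)obj;

        return p >= base && p - base < stack_size;
    }
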
42228diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
42229index af3e76d..96dfe5e 100644
42230--- a/drivers/iio/industrialio-core.c
42231+++ b/drivers/iio/industrialio-core.c
42232@@ -555,7 +555,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
42233 }
42234
42235 static
42236-int __iio_device_attr_init(struct device_attribute *dev_attr,
42237+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
42238 const char *postfix,
42239 struct iio_chan_spec const *chan,
42240 ssize_t (*readfunc)(struct device *dev,
42241diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
42242index e28a494..f7c2671 100644
42243--- a/drivers/infiniband/core/cm.c
42244+++ b/drivers/infiniband/core/cm.c
42245@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
42246
42247 struct cm_counter_group {
42248 struct kobject obj;
42249- atomic_long_t counter[CM_ATTR_COUNT];
42250+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
42251 };
42252
42253 struct cm_counter_attribute {
42254@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
42255 struct ib_mad_send_buf *msg = NULL;
42256 int ret;
42257
42258- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42259+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42260 counter[CM_REQ_COUNTER]);
42261
42262 /* Quick state check to discard duplicate REQs. */
42263@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
42264 if (!cm_id_priv)
42265 return;
42266
42267- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42268+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42269 counter[CM_REP_COUNTER]);
42270 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
42271 if (ret)
42272@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
42273 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
42274 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
42275 spin_unlock_irq(&cm_id_priv->lock);
42276- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42277+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42278 counter[CM_RTU_COUNTER]);
42279 goto out;
42280 }
42281@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
42282 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
42283 dreq_msg->local_comm_id);
42284 if (!cm_id_priv) {
42285- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42286+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42287 counter[CM_DREQ_COUNTER]);
42288 cm_issue_drep(work->port, work->mad_recv_wc);
42289 return -EINVAL;
42290@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
42291 case IB_CM_MRA_REP_RCVD:
42292 break;
42293 case IB_CM_TIMEWAIT:
42294- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42295+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42296 counter[CM_DREQ_COUNTER]);
42297 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42298 goto unlock;
42299@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
42300 cm_free_msg(msg);
42301 goto deref;
42302 case IB_CM_DREQ_RCVD:
42303- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42304+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42305 counter[CM_DREQ_COUNTER]);
42306 goto unlock;
42307 default:
42308@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
42309 ib_modify_mad(cm_id_priv->av.port->mad_agent,
42310 cm_id_priv->msg, timeout)) {
42311 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
42312- atomic_long_inc(&work->port->
42313+ atomic_long_inc_unchecked(&work->port->
42314 counter_group[CM_RECV_DUPLICATES].
42315 counter[CM_MRA_COUNTER]);
42316 goto out;
42317@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
42318 break;
42319 case IB_CM_MRA_REQ_RCVD:
42320 case IB_CM_MRA_REP_RCVD:
42321- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42322+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42323 counter[CM_MRA_COUNTER]);
42324 /* fall through */
42325 default:
42326@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
42327 case IB_CM_LAP_IDLE:
42328 break;
42329 case IB_CM_MRA_LAP_SENT:
42330- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42331+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42332 counter[CM_LAP_COUNTER]);
42333 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42334 goto unlock;
42335@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
42336 cm_free_msg(msg);
42337 goto deref;
42338 case IB_CM_LAP_RCVD:
42339- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42340+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42341 counter[CM_LAP_COUNTER]);
42342 goto unlock;
42343 default:
42344@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
42345 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
42346 if (cur_cm_id_priv) {
42347 spin_unlock_irq(&cm.lock);
42348- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42349+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42350 counter[CM_SIDR_REQ_COUNTER]);
42351 goto out; /* Duplicate message. */
42352 }
42353@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
42354 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
42355 msg->retries = 1;
42356
42357- atomic_long_add(1 + msg->retries,
42358+ atomic_long_add_unchecked(1 + msg->retries,
42359 &port->counter_group[CM_XMIT].counter[attr_index]);
42360 if (msg->retries)
42361- atomic_long_add(msg->retries,
42362+ atomic_long_add_unchecked(msg->retries,
42363 &port->counter_group[CM_XMIT_RETRIES].
42364 counter[attr_index]);
42365
42366@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
42367 }
42368
42369 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
42370- atomic_long_inc(&port->counter_group[CM_RECV].
42371+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
42372 counter[attr_id - CM_ATTR_ID_OFFSET]);
42373
42374 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
42375@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
42376 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
42377
42378 return sprintf(buf, "%ld\n",
42379- atomic_long_read(&group->counter[cm_attr->index]));
42380+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
42381 }
42382
42383 static const struct sysfs_ops cm_counter_ops = {
42384diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
42385index 9f5ad7c..588cd84 100644
42386--- a/drivers/infiniband/core/fmr_pool.c
42387+++ b/drivers/infiniband/core/fmr_pool.c
42388@@ -98,8 +98,8 @@ struct ib_fmr_pool {
42389
42390 struct task_struct *thread;
42391
42392- atomic_t req_ser;
42393- atomic_t flush_ser;
42394+ atomic_unchecked_t req_ser;
42395+ atomic_unchecked_t flush_ser;
42396
42397 wait_queue_head_t force_wait;
42398 };
42399@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42400 struct ib_fmr_pool *pool = pool_ptr;
42401
42402 do {
42403- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
42404+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
42405 ib_fmr_batch_release(pool);
42406
42407- atomic_inc(&pool->flush_ser);
42408+ atomic_inc_unchecked(&pool->flush_ser);
42409 wake_up_interruptible(&pool->force_wait);
42410
42411 if (pool->flush_function)
42412@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42413 }
42414
42415 set_current_state(TASK_INTERRUPTIBLE);
42416- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
42417+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
42418 !kthread_should_stop())
42419 schedule();
42420 __set_current_state(TASK_RUNNING);
42421@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
42422 pool->dirty_watermark = params->dirty_watermark;
42423 pool->dirty_len = 0;
42424 spin_lock_init(&pool->pool_lock);
42425- atomic_set(&pool->req_ser, 0);
42426- atomic_set(&pool->flush_ser, 0);
42427+ atomic_set_unchecked(&pool->req_ser, 0);
42428+ atomic_set_unchecked(&pool->flush_ser, 0);
42429 init_waitqueue_head(&pool->force_wait);
42430
42431 pool->thread = kthread_run(ib_fmr_cleanup_thread,
42432@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
42433 }
42434 spin_unlock_irq(&pool->pool_lock);
42435
42436- serial = atomic_inc_return(&pool->req_ser);
42437+ serial = atomic_inc_return_unchecked(&pool->req_ser);
42438 wake_up_process(pool->thread);
42439
42440 if (wait_event_interruptible(pool->force_wait,
42441- atomic_read(&pool->flush_ser) - serial >= 0))
42442+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
42443 return -EINTR;
42444
42445 return 0;
42446@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
42447 } else {
42448 list_add_tail(&fmr->list, &pool->dirty_list);
42449 if (++pool->dirty_len >= pool->dirty_watermark) {
42450- atomic_inc(&pool->req_ser);
42451+ atomic_inc_unchecked(&pool->req_ser);
42452 wake_up_process(pool->thread);
42453 }
42454 }
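
req_ser and flush_ser above are classic sequence counters compared via signed subtraction, which only works because unsigned wraparound is well-defined; that is exactly why they become _unchecked under REFCOUNT hardening rather than trapping on overflow. The idiom, runnable:

    #include <stdio.h>

    /* time_after()-style ordering: correct across counter wraparound */
    static int seq_before(unsigned int a, unsigned int b)
    {
        return (int)(a - b) < 0;
    }

    int main(void)
    {
        printf("%d\n", seq_before(0xfffffffeu, 2u)); /* 1: just wrapped, still "before" */
        printf("%d\n", seq_before(7u, 3u));          /* 0 */
        return 0;
    }
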
42455diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
42456index cb43c22..2e12dd7 100644
42457--- a/drivers/infiniband/hw/cxgb4/mem.c
42458+++ b/drivers/infiniband/hw/cxgb4/mem.c
42459@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42460 int err;
42461 struct fw_ri_tpte tpt;
42462 u32 stag_idx;
42463- static atomic_t key;
42464+ static atomic_unchecked_t key;
42465
42466 if (c4iw_fatal_error(rdev))
42467 return -EIO;
42468@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42469 if (rdev->stats.stag.cur > rdev->stats.stag.max)
42470 rdev->stats.stag.max = rdev->stats.stag.cur;
42471 mutex_unlock(&rdev->stats.lock);
42472- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
42473+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
42474 }
42475 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
42476 __func__, stag_state, type, pdid, stag_idx);
42477diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
42478index 79b3dbc..96e5fcc 100644
42479--- a/drivers/infiniband/hw/ipath/ipath_rc.c
42480+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
42481@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42482 struct ib_atomic_eth *ateth;
42483 struct ipath_ack_entry *e;
42484 u64 vaddr;
42485- atomic64_t *maddr;
42486+ atomic64_unchecked_t *maddr;
42487 u64 sdata;
42488 u32 rkey;
42489 u8 next;
42490@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42491 IB_ACCESS_REMOTE_ATOMIC)))
42492 goto nack_acc_unlck;
42493 /* Perform atomic OP and save result. */
42494- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42495+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42496 sdata = be64_to_cpu(ateth->swap_data);
42497 e = &qp->s_ack_queue[qp->r_head_ack_queue];
42498 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
42499- (u64) atomic64_add_return(sdata, maddr) - sdata :
42500+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42501 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42502 be64_to_cpu(ateth->compare_data),
42503 sdata);
42504diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
42505index 1f95bba..9530f87 100644
42506--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
42507+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
42508@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
42509 unsigned long flags;
42510 struct ib_wc wc;
42511 u64 sdata;
42512- atomic64_t *maddr;
42513+ atomic64_unchecked_t *maddr;
42514 enum ib_wc_status send_status;
42515
42516 /*
42517@@ -382,11 +382,11 @@ again:
42518 IB_ACCESS_REMOTE_ATOMIC)))
42519 goto acc_err;
42520 /* Perform atomic OP and save result. */
42521- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42522+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42523 sdata = wqe->wr.wr.atomic.compare_add;
42524 *(u64 *) sqp->s_sge.sge.vaddr =
42525 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
42526- (u64) atomic64_add_return(sdata, maddr) - sdata :
42527+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42528 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42529 sdata, wqe->wr.wr.atomic.swap);
42530 goto send_comp;
42531diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
42532index 82a7dd8..8fb6ba6 100644
42533--- a/drivers/infiniband/hw/mlx4/mad.c
42534+++ b/drivers/infiniband/hw/mlx4/mad.c
42535@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
42536
42537 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
42538 {
42539- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
42540+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
42541 cpu_to_be64(0xff00000000000000LL);
42542 }
42543
42544diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
42545index ed327e6..ca1739e0 100644
42546--- a/drivers/infiniband/hw/mlx4/mcg.c
42547+++ b/drivers/infiniband/hw/mlx4/mcg.c
42548@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
42549 {
42550 char name[20];
42551
42552- atomic_set(&ctx->tid, 0);
42553+ atomic_set_unchecked(&ctx->tid, 0);
42554 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
42555 ctx->mcg_wq = create_singlethread_workqueue(name);
42556 if (!ctx->mcg_wq)
42557diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42558index 6eb743f..a7b0f6d 100644
42559--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
42560+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42561@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
42562 struct list_head mcg_mgid0_list;
42563 struct workqueue_struct *mcg_wq;
42564 struct mlx4_ib_demux_pv_ctx **tun;
42565- atomic_t tid;
42566+ atomic_unchecked_t tid;
42567 int flushing; /* flushing the work queue */
42568 };
42569
42570diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
42571index 9d3e5c1..6f166df 100644
42572--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
42573+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
42574@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
42575 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
42576 }
42577
42578-int mthca_QUERY_FW(struct mthca_dev *dev)
42579+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
42580 {
42581 struct mthca_mailbox *mailbox;
42582 u32 *outbox;
42583@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42584 CMD_TIME_CLASS_B);
42585 }
42586
42587-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42588+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42589 int num_mtt)
42590 {
42591 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
42592@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
42593 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
42594 }
42595
42596-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42597+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42598 int eq_num)
42599 {
42600 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
42601@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
42602 CMD_TIME_CLASS_B);
42603 }
42604
42605-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42606+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42607 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
42608 void *in_mad, void *response_mad)
42609 {
42610diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
42611index ded76c1..0cf0a08 100644
42612--- a/drivers/infiniband/hw/mthca/mthca_main.c
42613+++ b/drivers/infiniband/hw/mthca/mthca_main.c
42614@@ -692,7 +692,7 @@ err_close:
42615 return err;
42616 }
42617
42618-static int mthca_setup_hca(struct mthca_dev *dev)
42619+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
42620 {
42621 int err;
42622
42623diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
42624index ed9a989..6aa5dc2 100644
42625--- a/drivers/infiniband/hw/mthca/mthca_mr.c
42626+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
42627@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
42628 * through the bitmaps)
42629 */
42630
42631-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42632+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42633 {
42634 int o;
42635 int m;
42636@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
42637 return key;
42638 }
42639
42640-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42641+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42642 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
42643 {
42644 struct mthca_mailbox *mailbox;
42645@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
42646 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
42647 }
42648
42649-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42650+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42651 u64 *buffer_list, int buffer_size_shift,
42652 int list_len, u64 iova, u64 total_size,
42653 u32 access, struct mthca_mr *mr)
42654diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
42655index 415f8e1..e34214e 100644
42656--- a/drivers/infiniband/hw/mthca/mthca_provider.c
42657+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
42658@@ -764,7 +764,7 @@ unlock:
42659 return 0;
42660 }
42661
42662-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42663+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42664 {
42665 struct mthca_dev *dev = to_mdev(ibcq->device);
42666 struct mthca_cq *cq = to_mcq(ibcq);
42667diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
42668index 3b2a6dc..bce26ff 100644
42669--- a/drivers/infiniband/hw/nes/nes.c
42670+++ b/drivers/infiniband/hw/nes/nes.c
42671@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
42672 LIST_HEAD(nes_adapter_list);
42673 static LIST_HEAD(nes_dev_list);
42674
42675-atomic_t qps_destroyed;
42676+atomic_unchecked_t qps_destroyed;
42677
42678 static unsigned int ee_flsh_adapter;
42679 static unsigned int sysfs_nonidx_addr;
42680@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
42681 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
42682 struct nes_adapter *nesadapter = nesdev->nesadapter;
42683
42684- atomic_inc(&qps_destroyed);
42685+ atomic_inc_unchecked(&qps_destroyed);
42686
42687 /* Free the control structures */
42688
42689diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
42690index bd9d132..70d84f4 100644
42691--- a/drivers/infiniband/hw/nes/nes.h
42692+++ b/drivers/infiniband/hw/nes/nes.h
42693@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
42694 extern unsigned int wqm_quanta;
42695 extern struct list_head nes_adapter_list;
42696
42697-extern atomic_t cm_connects;
42698-extern atomic_t cm_accepts;
42699-extern atomic_t cm_disconnects;
42700-extern atomic_t cm_closes;
42701-extern atomic_t cm_connecteds;
42702-extern atomic_t cm_connect_reqs;
42703-extern atomic_t cm_rejects;
42704-extern atomic_t mod_qp_timouts;
42705-extern atomic_t qps_created;
42706-extern atomic_t qps_destroyed;
42707-extern atomic_t sw_qps_destroyed;
42708+extern atomic_unchecked_t cm_connects;
42709+extern atomic_unchecked_t cm_accepts;
42710+extern atomic_unchecked_t cm_disconnects;
42711+extern atomic_unchecked_t cm_closes;
42712+extern atomic_unchecked_t cm_connecteds;
42713+extern atomic_unchecked_t cm_connect_reqs;
42714+extern atomic_unchecked_t cm_rejects;
42715+extern atomic_unchecked_t mod_qp_timouts;
42716+extern atomic_unchecked_t qps_created;
42717+extern atomic_unchecked_t qps_destroyed;
42718+extern atomic_unchecked_t sw_qps_destroyed;
42719 extern u32 mh_detected;
42720 extern u32 mh_pauses_sent;
42721 extern u32 cm_packets_sent;
42722@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
42723 extern u32 cm_packets_received;
42724 extern u32 cm_packets_dropped;
42725 extern u32 cm_packets_retrans;
42726-extern atomic_t cm_listens_created;
42727-extern atomic_t cm_listens_destroyed;
42728+extern atomic_unchecked_t cm_listens_created;
42729+extern atomic_unchecked_t cm_listens_destroyed;
42730 extern u32 cm_backlog_drops;
42731-extern atomic_t cm_loopbacks;
42732-extern atomic_t cm_nodes_created;
42733-extern atomic_t cm_nodes_destroyed;
42734-extern atomic_t cm_accel_dropped_pkts;
42735-extern atomic_t cm_resets_recvd;
42736-extern atomic_t pau_qps_created;
42737-extern atomic_t pau_qps_destroyed;
42738+extern atomic_unchecked_t cm_loopbacks;
42739+extern atomic_unchecked_t cm_nodes_created;
42740+extern atomic_unchecked_t cm_nodes_destroyed;
42741+extern atomic_unchecked_t cm_accel_dropped_pkts;
42742+extern atomic_unchecked_t cm_resets_recvd;
42743+extern atomic_unchecked_t pau_qps_created;
42744+extern atomic_unchecked_t pau_qps_destroyed;
42745
42746 extern u32 int_mod_timer_init;
42747 extern u32 int_mod_cq_depth_256;
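Under PAX_REFCOUNT, ordinary atomic_t operations trap on signed overflow to defeat reference-count overflow exploits. The nes counters above are pure statistics, where wrapping is harmless, so they are converted to atomic_unchecked_t to opt out of that instrumentation. Roughly what the unchecked type and helpers look like on x86 — a sketch only; the authoritative definitions are in this patch's arch headers:

    typedef struct {
            int counter;
    } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            /* plain locked increment: no overflow-trap sequence appended,
             * unlike the REFCOUNT-instrumented atomic_inc() */
            asm volatile(LOCK_PREFIX "incl %0" : "+m" (v->counter));
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
            return (*(const volatile int *)&v->counter);
    }
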
42748diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
42749index 6f09a72..cf4399d 100644
42750--- a/drivers/infiniband/hw/nes/nes_cm.c
42751+++ b/drivers/infiniband/hw/nes/nes_cm.c
42752@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
42753 u32 cm_packets_retrans;
42754 u32 cm_packets_created;
42755 u32 cm_packets_received;
42756-atomic_t cm_listens_created;
42757-atomic_t cm_listens_destroyed;
42758+atomic_unchecked_t cm_listens_created;
42759+atomic_unchecked_t cm_listens_destroyed;
42760 u32 cm_backlog_drops;
42761-atomic_t cm_loopbacks;
42762-atomic_t cm_nodes_created;
42763-atomic_t cm_nodes_destroyed;
42764-atomic_t cm_accel_dropped_pkts;
42765-atomic_t cm_resets_recvd;
42766+atomic_unchecked_t cm_loopbacks;
42767+atomic_unchecked_t cm_nodes_created;
42768+atomic_unchecked_t cm_nodes_destroyed;
42769+atomic_unchecked_t cm_accel_dropped_pkts;
42770+atomic_unchecked_t cm_resets_recvd;
42771
42772 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
42773 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
42774@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
42775 /* instance of function pointers for client API */
42776 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
42777 static struct nes_cm_ops nes_cm_api = {
42778- mini_cm_accelerated,
42779- mini_cm_listen,
42780- mini_cm_del_listen,
42781- mini_cm_connect,
42782- mini_cm_close,
42783- mini_cm_accept,
42784- mini_cm_reject,
42785- mini_cm_recv_pkt,
42786- mini_cm_dealloc_core,
42787- mini_cm_get,
42788- mini_cm_set
42789+ .accelerated = mini_cm_accelerated,
42790+ .listen = mini_cm_listen,
42791+ .stop_listener = mini_cm_del_listen,
42792+ .connect = mini_cm_connect,
42793+ .close = mini_cm_close,
42794+ .accept = mini_cm_accept,
42795+ .reject = mini_cm_reject,
42796+ .recv_pkt = mini_cm_recv_pkt,
42797+ .destroy_cm_core = mini_cm_dealloc_core,
42798+ .get = mini_cm_get,
42799+ .set = mini_cm_set
42800 };
42801
42802 static struct nes_cm_core *g_cm_core;
42803
42804-atomic_t cm_connects;
42805-atomic_t cm_accepts;
42806-atomic_t cm_disconnects;
42807-atomic_t cm_closes;
42808-atomic_t cm_connecteds;
42809-atomic_t cm_connect_reqs;
42810-atomic_t cm_rejects;
42811+atomic_unchecked_t cm_connects;
42812+atomic_unchecked_t cm_accepts;
42813+atomic_unchecked_t cm_disconnects;
42814+atomic_unchecked_t cm_closes;
42815+atomic_unchecked_t cm_connecteds;
42816+atomic_unchecked_t cm_connect_reqs;
42817+atomic_unchecked_t cm_rejects;
42818
42819 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
42820 {
42821@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
42822 kfree(listener);
42823 listener = NULL;
42824 ret = 0;
42825- atomic_inc(&cm_listens_destroyed);
42826+ atomic_inc_unchecked(&cm_listens_destroyed);
42827 } else {
42828 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
42829 }
42830@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
42831 cm_node->rem_mac);
42832
42833 add_hte_node(cm_core, cm_node);
42834- atomic_inc(&cm_nodes_created);
42835+ atomic_inc_unchecked(&cm_nodes_created);
42836
42837 return cm_node;
42838 }
42839@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
42840 }
42841
42842 atomic_dec(&cm_core->node_cnt);
42843- atomic_inc(&cm_nodes_destroyed);
42844+ atomic_inc_unchecked(&cm_nodes_destroyed);
42845 nesqp = cm_node->nesqp;
42846 if (nesqp) {
42847 nesqp->cm_node = NULL;
42848@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
42849
42850 static void drop_packet(struct sk_buff *skb)
42851 {
42852- atomic_inc(&cm_accel_dropped_pkts);
42853+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
42854 dev_kfree_skb_any(skb);
42855 }
42856
42857@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
42858 {
42859
42860 int reset = 0; /* whether to send reset in case of err.. */
42861- atomic_inc(&cm_resets_recvd);
42862+ atomic_inc_unchecked(&cm_resets_recvd);
42863 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
42864 " refcnt=%d\n", cm_node, cm_node->state,
42865 atomic_read(&cm_node->ref_count));
42866@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
42867 rem_ref_cm_node(cm_node->cm_core, cm_node);
42868 return NULL;
42869 }
42870- atomic_inc(&cm_loopbacks);
42871+ atomic_inc_unchecked(&cm_loopbacks);
42872 loopbackremotenode->loopbackpartner = cm_node;
42873 loopbackremotenode->tcp_cntxt.rcv_wscale =
42874 NES_CM_DEFAULT_RCV_WND_SCALE;
42875@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
42876 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
42877 else {
42878 rem_ref_cm_node(cm_core, cm_node);
42879- atomic_inc(&cm_accel_dropped_pkts);
42880+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
42881 dev_kfree_skb_any(skb);
42882 }
42883 break;
42884@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42885
42886 if ((cm_id) && (cm_id->event_handler)) {
42887 if (issue_disconn) {
42888- atomic_inc(&cm_disconnects);
42889+ atomic_inc_unchecked(&cm_disconnects);
42890 cm_event.event = IW_CM_EVENT_DISCONNECT;
42891 cm_event.status = disconn_status;
42892 cm_event.local_addr = cm_id->local_addr;
42893@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42894 }
42895
42896 if (issue_close) {
42897- atomic_inc(&cm_closes);
42898+ atomic_inc_unchecked(&cm_closes);
42899 nes_disconnect(nesqp, 1);
42900
42901 cm_id->provider_data = nesqp;
42902@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
42903
42904 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
42905 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
42906- atomic_inc(&cm_accepts);
42907+ atomic_inc_unchecked(&cm_accepts);
42908
42909 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
42910 netdev_refcnt_read(nesvnic->netdev));
42911@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
42912 struct nes_cm_core *cm_core;
42913 u8 *start_buff;
42914
42915- atomic_inc(&cm_rejects);
42916+ atomic_inc_unchecked(&cm_rejects);
42917 cm_node = (struct nes_cm_node *)cm_id->provider_data;
42918 loopback = cm_node->loopbackpartner;
42919 cm_core = cm_node->cm_core;
42920@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
42921 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
42922 ntohs(laddr->sin_port));
42923
42924- atomic_inc(&cm_connects);
42925+ atomic_inc_unchecked(&cm_connects);
42926 nesqp->active_conn = 1;
42927
42928 /* cache the cm_id in the qp */
42929@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
42930 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
42931 return err;
42932 }
42933- atomic_inc(&cm_listens_created);
42934+ atomic_inc_unchecked(&cm_listens_created);
42935 }
42936
42937 cm_id->add_ref(cm_id);
42938@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
42939
42940 if (nesqp->destroyed)
42941 return;
42942- atomic_inc(&cm_connecteds);
42943+ atomic_inc_unchecked(&cm_connecteds);
42944 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
42945 " local port 0x%04X. jiffies = %lu.\n",
42946 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
42947@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
42948
42949 cm_id->add_ref(cm_id);
42950 ret = cm_id->event_handler(cm_id, &cm_event);
42951- atomic_inc(&cm_closes);
42952+ atomic_inc_unchecked(&cm_closes);
42953 cm_event.event = IW_CM_EVENT_CLOSE;
42954 cm_event.status = 0;
42955 cm_event.provider_data = cm_id->provider_data;
42956@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
42957 return;
42958 cm_id = cm_node->cm_id;
42959
42960- atomic_inc(&cm_connect_reqs);
42961+ atomic_inc_unchecked(&cm_connect_reqs);
42962 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
42963 cm_node, cm_id, jiffies);
42964
42965@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
42966 return;
42967 cm_id = cm_node->cm_id;
42968
42969- atomic_inc(&cm_connect_reqs);
42970+ atomic_inc_unchecked(&cm_connect_reqs);
42971 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
42972 cm_node, cm_id, jiffies);
42973
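Besides the counter conversions, the nes_cm_api hunk rewrites a positional struct initializer as designated initializers. Binding each handler to a named member rather than a position keeps such ops tables correct if the struct layout ever changes, and is a prerequisite for the layout-randomizing plugin this patch carries. In miniature, with hypothetical names:

    struct example_ops {
            int (*open)(void);
            int (*close)(void);
    };

    static int ex_open(void)  { return 0; }
    static int ex_close(void) { return 0; }

    /* positional: silently mispairs handlers if a member is inserted
     * or reordered */
    static struct example_ops fragile = { ex_open, ex_close };

    /* designated: order-independent, robust to layout changes */
    static struct example_ops robust = {
            .open  = ex_open,
            .close = ex_close,
    };

The same transformation is applied below to the gigaset, isdn_concap and x25iface ops tables.
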
42974diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
42975index 4166452..fc952c3 100644
42976--- a/drivers/infiniband/hw/nes/nes_mgt.c
42977+++ b/drivers/infiniband/hw/nes/nes_mgt.c
42978@@ -40,8 +40,8 @@
42979 #include "nes.h"
42980 #include "nes_mgt.h"
42981
42982-atomic_t pau_qps_created;
42983-atomic_t pau_qps_destroyed;
42984+atomic_unchecked_t pau_qps_created;
42985+atomic_unchecked_t pau_qps_destroyed;
42986
42987 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
42988 {
42989@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
42990 {
42991 struct sk_buff *skb;
42992 unsigned long flags;
42993- atomic_inc(&pau_qps_destroyed);
42994+ atomic_inc_unchecked(&pau_qps_destroyed);
42995
42996 /* Free packets that have not yet been forwarded */
42997 /* Lock is acquired by skb_dequeue when removing the skb */
42998@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
42999 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43000 skb_queue_head_init(&nesqp->pau_list);
43001 spin_lock_init(&nesqp->pau_lock);
43002- atomic_inc(&pau_qps_created);
43003+ atomic_inc_unchecked(&pau_qps_created);
43004 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43005 }
43006
43007diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43008index 49eb511..a774366 100644
43009--- a/drivers/infiniband/hw/nes/nes_nic.c
43010+++ b/drivers/infiniband/hw/nes/nes_nic.c
43011@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43012 target_stat_values[++index] = mh_detected;
43013 target_stat_values[++index] = mh_pauses_sent;
43014 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43015- target_stat_values[++index] = atomic_read(&cm_connects);
43016- target_stat_values[++index] = atomic_read(&cm_accepts);
43017- target_stat_values[++index] = atomic_read(&cm_disconnects);
43018- target_stat_values[++index] = atomic_read(&cm_connecteds);
43019- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43020- target_stat_values[++index] = atomic_read(&cm_rejects);
43021- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43022- target_stat_values[++index] = atomic_read(&qps_created);
43023- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43024- target_stat_values[++index] = atomic_read(&qps_destroyed);
43025- target_stat_values[++index] = atomic_read(&cm_closes);
43026+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43027+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43028+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43029+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43030+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43031+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43032+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43033+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43034+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43035+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43036+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43037 target_stat_values[++index] = cm_packets_sent;
43038 target_stat_values[++index] = cm_packets_bounced;
43039 target_stat_values[++index] = cm_packets_created;
43040 target_stat_values[++index] = cm_packets_received;
43041 target_stat_values[++index] = cm_packets_dropped;
43042 target_stat_values[++index] = cm_packets_retrans;
43043- target_stat_values[++index] = atomic_read(&cm_listens_created);
43044- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43045+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43046+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43047 target_stat_values[++index] = cm_backlog_drops;
43048- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43049- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43050- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43051- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43052- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43053+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43054+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43055+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43056+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43057+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43058 target_stat_values[++index] = nesadapter->free_4kpbl;
43059 target_stat_values[++index] = nesadapter->free_256pbl;
43060 target_stat_values[++index] = int_mod_timer_init;
43061 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43062 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43063 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43064- target_stat_values[++index] = atomic_read(&pau_qps_created);
43065- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43066+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43067+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43068 }
43069
43070 /**
43071diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43072index c0d0296..3185f57 100644
43073--- a/drivers/infiniband/hw/nes/nes_verbs.c
43074+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43075@@ -46,9 +46,9 @@
43076
43077 #include <rdma/ib_umem.h>
43078
43079-atomic_t mod_qp_timouts;
43080-atomic_t qps_created;
43081-atomic_t sw_qps_destroyed;
43082+atomic_unchecked_t mod_qp_timouts;
43083+atomic_unchecked_t qps_created;
43084+atomic_unchecked_t sw_qps_destroyed;
43085
43086 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43087
43088@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43089 if (init_attr->create_flags)
43090 return ERR_PTR(-EINVAL);
43091
43092- atomic_inc(&qps_created);
43093+ atomic_inc_unchecked(&qps_created);
43094 switch (init_attr->qp_type) {
43095 case IB_QPT_RC:
43096 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43097@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43098 struct iw_cm_event cm_event;
43099 int ret = 0;
43100
43101- atomic_inc(&sw_qps_destroyed);
43102+ atomic_inc_unchecked(&sw_qps_destroyed);
43103 nesqp->destroyed = 1;
43104
43105 /* Blow away the connection if it exists. */
43106diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43107index c00ae09..04e91be 100644
43108--- a/drivers/infiniband/hw/qib/qib.h
43109+++ b/drivers/infiniband/hw/qib/qib.h
43110@@ -52,6 +52,7 @@
43111 #include <linux/kref.h>
43112 #include <linux/sched.h>
43113 #include <linux/kthread.h>
43114+#include <linux/slab.h>
43115
43116 #include "qib_common.h"
43117 #include "qib_verbs.h"
43118diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43119index cdc7df4..a2fdfdb 100644
43120--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43121+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43122@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
43123 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
43124 }
43125
43126-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
43127+static struct rtnl_link_ops ipoib_link_ops = {
43128 .kind = "ipoib",
43129 .maxtype = IFLA_IPOIB_MAX,
43130 .policy = ipoib_policy,
43131diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43132index e853a21..56fc5a8 100644
43133--- a/drivers/input/gameport/gameport.c
43134+++ b/drivers/input/gameport/gameport.c
43135@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43136 */
43137 static void gameport_init_port(struct gameport *gameport)
43138 {
43139- static atomic_t gameport_no = ATOMIC_INIT(-1);
43140+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
43141
43142 __module_get(THIS_MODULE);
43143
43144 mutex_init(&gameport->drv_mutex);
43145 device_initialize(&gameport->dev);
43146 dev_set_name(&gameport->dev, "gameport%lu",
43147- (unsigned long)atomic_inc_return(&gameport_no));
43148+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
43149 gameport->dev.bus = &gameport_bus;
43150 gameport->dev.release = gameport_release_port;
43151 if (gameport->parent)
43152diff --git a/drivers/input/input.c b/drivers/input/input.c
43153index 213e3a1..4fea837 100644
43154--- a/drivers/input/input.c
43155+++ b/drivers/input/input.c
43156@@ -1775,7 +1775,7 @@ EXPORT_SYMBOL_GPL(input_class);
43157 */
43158 struct input_dev *input_allocate_device(void)
43159 {
43160- static atomic_t input_no = ATOMIC_INIT(-1);
43161+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
43162 struct input_dev *dev;
43163
43164 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43165@@ -1790,7 +1790,7 @@ struct input_dev *input_allocate_device(void)
43166 INIT_LIST_HEAD(&dev->node);
43167
43168 dev_set_name(&dev->dev, "input%lu",
43169- (unsigned long)atomic_inc_return(&input_no));
43170+ (unsigned long)atomic_inc_return_unchecked(&input_no));
43171
43172 __module_get(THIS_MODULE);
43173 }
43174diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43175index 4a95b22..874c182 100644
43176--- a/drivers/input/joystick/sidewinder.c
43177+++ b/drivers/input/joystick/sidewinder.c
43178@@ -30,6 +30,7 @@
43179 #include <linux/kernel.h>
43180 #include <linux/module.h>
43181 #include <linux/slab.h>
43182+#include <linux/sched.h>
43183 #include <linux/input.h>
43184 #include <linux/gameport.h>
43185 #include <linux/jiffies.h>
43186diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43187index 3aa2f3f..53c00ea 100644
43188--- a/drivers/input/joystick/xpad.c
43189+++ b/drivers/input/joystick/xpad.c
43190@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43191
43192 static int xpad_led_probe(struct usb_xpad *xpad)
43193 {
43194- static atomic_t led_seq = ATOMIC_INIT(-1);
43195+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
43196 unsigned long led_no;
43197 struct xpad_led *led;
43198 struct led_classdev *led_cdev;
43199@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43200 if (!led)
43201 return -ENOMEM;
43202
43203- led_no = atomic_inc_return(&led_seq);
43204+ led_no = atomic_inc_return_unchecked(&led_seq);
43205
43206 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
43207 led->xpad = xpad;
43208diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
43209index ac1fa5f..5f7502c 100644
43210--- a/drivers/input/misc/ims-pcu.c
43211+++ b/drivers/input/misc/ims-pcu.c
43212@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
43213
43214 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43215 {
43216- static atomic_t device_no = ATOMIC_INIT(-1);
43217+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
43218
43219 const struct ims_pcu_device_info *info;
43220 int error;
43221@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43222 }
43223
43224 /* Device appears to be operable, complete initialization */
43225- pcu->device_no = atomic_inc_return(&device_no);
43226+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
43227
43228 /*
43229 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
43230diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
43231index f4cf664..3204fda 100644
43232--- a/drivers/input/mouse/psmouse.h
43233+++ b/drivers/input/mouse/psmouse.h
43234@@ -117,7 +117,7 @@ struct psmouse_attribute {
43235 ssize_t (*set)(struct psmouse *psmouse, void *data,
43236 const char *buf, size_t count);
43237 bool protect;
43238-};
43239+} __do_const;
43240 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
43241
43242 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
43243diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
43244index b604564..3f14ae4 100644
43245--- a/drivers/input/mousedev.c
43246+++ b/drivers/input/mousedev.c
43247@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
43248
43249 spin_unlock_irq(&client->packet_lock);
43250
43251- if (copy_to_user(buffer, data, count))
43252+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
43253 return -EFAULT;
43254
43255 return count;
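The mousedev_read() change above validates the user-supplied count against the fixed kernel buffer before copy_to_user(); an oversized request would otherwise read past the packet buffer and leak adjacent kernel memory. The pattern in isolation (function name and buffer size hypothetical):

    static ssize_t bounded_read(char __user *buffer, size_t count)
    {
            u8 data[8] = { 0 };     /* hypothetical fixed packet buffer */

            /* check before copying: count is caller/attacker controlled */
            if (count > sizeof(data))
                    return -EFAULT;
            if (copy_to_user(buffer, data, count))
                    return -EFAULT;
            return count;
    }
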
43256diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
43257index a05a517..323a2fd 100644
43258--- a/drivers/input/serio/serio.c
43259+++ b/drivers/input/serio/serio.c
43260@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
43261 */
43262 static void serio_init_port(struct serio *serio)
43263 {
43264- static atomic_t serio_no = ATOMIC_INIT(-1);
43265+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
43266
43267 __module_get(THIS_MODULE);
43268
43269@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
43270 mutex_init(&serio->drv_mutex);
43271 device_initialize(&serio->dev);
43272 dev_set_name(&serio->dev, "serio%lu",
43273- (unsigned long)atomic_inc_return(&serio_no));
43274+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
43275 serio->dev.bus = &serio_bus;
43276 serio->dev.release = serio_release_port;
43277 serio->dev.groups = serio_device_attr_groups;
43278diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
43279index 71ef5d6..93380a9 100644
43280--- a/drivers/input/serio/serio_raw.c
43281+++ b/drivers/input/serio/serio_raw.c
43282@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
43283
43284 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43285 {
43286- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
43287+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
43288 struct serio_raw *serio_raw;
43289 int err;
43290
43291@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43292 }
43293
43294 snprintf(serio_raw->name, sizeof(serio_raw->name),
43295- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
43296+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
43297 kref_init(&serio_raw->kref);
43298 INIT_LIST_HEAD(&serio_raw->client_list);
43299 init_waitqueue_head(&serio_raw->wait);
43300diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
43301index 9802485..2e9941d 100644
43302--- a/drivers/iommu/amd_iommu.c
43303+++ b/drivers/iommu/amd_iommu.c
43304@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
43305
43306 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
43307 {
43308+ phys_addr_t physaddr;
43309 WARN_ON(address & 0x7ULL);
43310
43311 memset(cmd, 0, sizeof(*cmd));
43312- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
43313- cmd->data[1] = upper_32_bits(__pa(address));
43314+
43315+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
43316+ if (object_starts_on_stack((void *)address)) {
43317+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
43318+ physaddr = __pa((u64)adjbuf);
43319+ } else
43320+#endif
43321+ physaddr = __pa(address);
43322+
43323+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
43324+ cmd->data[1] = upper_32_bits(physaddr);
43325 cmd->data[2] = 1;
43326 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
43327 }
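build_completion_wait() passes the address of an on-stack completion semaphore through __pa(). With GRKERNSEC_KSTACKOVERFLOW the task stack is vmalloc-backed, where __pa() yields garbage, so the hunk above first rebases stack addresses onto the stack's lowmem alias. The translation step on its own (the helper name is hypothetical; object_starts_on_stack(), current->stack and current->lowmem_stack are the names the hunk itself relies on):

    static phys_addr_t stack_safe_pa(const void *addr)
    {
    #ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
            /* vmalloc'ed stack: rebase onto the physically contiguous alias */
            if (object_starts_on_stack(addr))
                    addr = addr - current->stack + current->lowmem_stack;
    #endif
            return __pa(addr);
    }
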
43328diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
43329index 6cd47b7..264d14a 100644
43330--- a/drivers/iommu/arm-smmu.c
43331+++ b/drivers/iommu/arm-smmu.c
43332@@ -968,7 +968,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
43333 cfg->irptndx = cfg->cbndx;
43334 }
43335
43336- ACCESS_ONCE(smmu_domain->smmu) = smmu;
43337+ ACCESS_ONCE_RW(smmu_domain->smmu) = smmu;
43338 arm_smmu_init_context_bank(smmu_domain);
43339 spin_unlock_irqrestore(&smmu_domain->lock, flags);
43340
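The smmu assignment uses ACCESS_ONCE_RW() because this patch redefines ACCESS_ONCE() with a const-qualified cast, so stray writes through it become compile errors; intentional writes must say so explicitly. The pair, as assumed from the patch's compiler.h convention:

    #define ACCESS_ONCE(x)      (*(const volatile typeof(x) *)&(x)) /* read-only */
    #define ACCESS_ONCE_RW(x)   (*(volatile typeof(x) *)&(x))       /* writable  */
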
43341diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
43342index f7718d7..3ef740b 100644
43343--- a/drivers/iommu/iommu.c
43344+++ b/drivers/iommu/iommu.c
43345@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
43346 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
43347 {
43348 int err;
43349- struct notifier_block *nb;
43350+ notifier_block_no_const *nb;
43351 struct iommu_callback_data cb = {
43352 .ops = ops,
43353 };
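notifier_block_no_const is the escape hatch for this patch's constify plugin: ops-like structs are made const by default, and a _no_const typedef names the mutable variant for the few objects that genuinely need runtime writes, such as the stack-local notifier above. The convention, as it also appears for the irq chips below (the __no_const attribute name is taken on trust from the patch's own style):

    typedef struct notifier_block __no_const notifier_block_no_const;
    typedef struct irq_chip __no_const irq_chip_no_const;
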
43354diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
43355index 89c4846..1de796f 100644
43356--- a/drivers/iommu/irq_remapping.c
43357+++ b/drivers/iommu/irq_remapping.c
43358@@ -353,7 +353,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
43359 void panic_if_irq_remap(const char *msg)
43360 {
43361 if (irq_remapping_enabled)
43362- panic(msg);
43363+ panic("%s", msg);
43364 }
43365
43366 static void ir_ack_apic_edge(struct irq_data *data)
43367@@ -374,10 +374,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
43368
43369 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
43370 {
43371- chip->irq_print_chip = ir_print_prefix;
43372- chip->irq_ack = ir_ack_apic_edge;
43373- chip->irq_eoi = ir_ack_apic_level;
43374- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43375+ pax_open_kernel();
43376+ *(void **)&chip->irq_print_chip = ir_print_prefix;
43377+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
43378+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
43379+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43380+ pax_close_kernel();
43381 }
43382
43383 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
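Two distinct hardenings sit in the irq_remapping hunks: panic("%s", msg) keeps a caller-supplied string from being parsed as a format string, and the pax_open_kernel()/pax_close_kernel() bracket lets init code write an irq_chip that KERNEXEC otherwise keeps read-only (the *(void **)& casts launder the const so the stores compile). On x86 the bracket essentially toggles CR0.WP; a stripped-down sketch, omitting the preemption and barrier handling the real helpers need:

    static inline unsigned long pax_open_kernel(void)
    {
            unsigned long cr0 = read_cr0();
            write_cr0(cr0 & ~X86_CR0_WP);   /* allow writes to RO pages */
            return cr0;
    }

    static inline void pax_close_kernel(void)
    {
            write_cr0(read_cr0() | X86_CR0_WP);     /* restore protection */
    }
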
43384diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
43385index d617ee5..df8be8b 100644
43386--- a/drivers/irqchip/irq-gic.c
43387+++ b/drivers/irqchip/irq-gic.c
43388@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
43389 * Supported arch specific GIC irq extension.
43390 * Default make them NULL.
43391 */
43392-struct irq_chip gic_arch_extn = {
43393+irq_chip_no_const gic_arch_extn = {
43394 .irq_eoi = NULL,
43395 .irq_mask = NULL,
43396 .irq_unmask = NULL,
43397@@ -311,7 +311,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
43398 chained_irq_exit(chip, desc);
43399 }
43400
43401-static struct irq_chip gic_chip = {
43402+static irq_chip_no_const gic_chip __read_only = {
43403 .name = "GIC",
43404 .irq_mask = gic_mask_irq,
43405 .irq_unmask = gic_unmask_irq,
43406diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
43407index 078cac5..fb0f846 100644
43408--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
43409+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
43410@@ -353,7 +353,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
43411 struct intc_irqpin_iomem *i;
43412 struct resource *io[INTC_IRQPIN_REG_NR];
43413 struct resource *irq;
43414- struct irq_chip *irq_chip;
43415+ irq_chip_no_const *irq_chip;
43416 void (*enable_fn)(struct irq_data *d);
43417 void (*disable_fn)(struct irq_data *d);
43418 const char *name = dev_name(dev);
43419diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
43420index 384e6ed..7a771b2 100644
43421--- a/drivers/irqchip/irq-renesas-irqc.c
43422+++ b/drivers/irqchip/irq-renesas-irqc.c
43423@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
43424 struct irqc_priv *p;
43425 struct resource *io;
43426 struct resource *irq;
43427- struct irq_chip *irq_chip;
43428+ irq_chip_no_const *irq_chip;
43429 const char *name = dev_name(&pdev->dev);
43430 int ret;
43431 int k;
43432diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
43433index 6a2df32..dc962f1 100644
43434--- a/drivers/isdn/capi/capi.c
43435+++ b/drivers/isdn/capi/capi.c
43436@@ -81,8 +81,8 @@ struct capiminor {
43437
43438 struct capi20_appl *ap;
43439 u32 ncci;
43440- atomic_t datahandle;
43441- atomic_t msgid;
43442+ atomic_unchecked_t datahandle;
43443+ atomic_unchecked_t msgid;
43444
43445 struct tty_port port;
43446 int ttyinstop;
43447@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
43448 capimsg_setu16(s, 2, mp->ap->applid);
43449 capimsg_setu8 (s, 4, CAPI_DATA_B3);
43450 capimsg_setu8 (s, 5, CAPI_RESP);
43451- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
43452+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
43453 capimsg_setu32(s, 8, mp->ncci);
43454 capimsg_setu16(s, 12, datahandle);
43455 }
43456@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
43457 mp->outbytes -= len;
43458 spin_unlock_bh(&mp->outlock);
43459
43460- datahandle = atomic_inc_return(&mp->datahandle);
43461+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
43462 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
43463 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43464 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43465 capimsg_setu16(skb->data, 2, mp->ap->applid);
43466 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
43467 capimsg_setu8 (skb->data, 5, CAPI_REQ);
43468- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
43469+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
43470 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
43471 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
43472 capimsg_setu16(skb->data, 16, len); /* Data length */
43473diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
43474index aecec6d..11e13c5 100644
43475--- a/drivers/isdn/gigaset/bas-gigaset.c
43476+++ b/drivers/isdn/gigaset/bas-gigaset.c
43477@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
43478
43479
43480 static const struct gigaset_ops gigops = {
43481- gigaset_write_cmd,
43482- gigaset_write_room,
43483- gigaset_chars_in_buffer,
43484- gigaset_brkchars,
43485- gigaset_init_bchannel,
43486- gigaset_close_bchannel,
43487- gigaset_initbcshw,
43488- gigaset_freebcshw,
43489- gigaset_reinitbcshw,
43490- gigaset_initcshw,
43491- gigaset_freecshw,
43492- gigaset_set_modem_ctrl,
43493- gigaset_baud_rate,
43494- gigaset_set_line_ctrl,
43495- gigaset_isoc_send_skb,
43496- gigaset_isoc_input,
43497+ .write_cmd = gigaset_write_cmd,
43498+ .write_room = gigaset_write_room,
43499+ .chars_in_buffer = gigaset_chars_in_buffer,
43500+ .brkchars = gigaset_brkchars,
43501+ .init_bchannel = gigaset_init_bchannel,
43502+ .close_bchannel = gigaset_close_bchannel,
43503+ .initbcshw = gigaset_initbcshw,
43504+ .freebcshw = gigaset_freebcshw,
43505+ .reinitbcshw = gigaset_reinitbcshw,
43506+ .initcshw = gigaset_initcshw,
43507+ .freecshw = gigaset_freecshw,
43508+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43509+ .baud_rate = gigaset_baud_rate,
43510+ .set_line_ctrl = gigaset_set_line_ctrl,
43511+ .send_skb = gigaset_isoc_send_skb,
43512+ .handle_input = gigaset_isoc_input,
43513 };
43514
43515 /* bas_gigaset_init
43516diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
43517index 600c79b..3752bab 100644
43518--- a/drivers/isdn/gigaset/interface.c
43519+++ b/drivers/isdn/gigaset/interface.c
43520@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
43521 }
43522 tty->driver_data = cs;
43523
43524- ++cs->port.count;
43525+ atomic_inc(&cs->port.count);
43526
43527- if (cs->port.count == 1) {
43528+ if (atomic_read(&cs->port.count) == 1) {
43529 tty_port_tty_set(&cs->port, tty);
43530 cs->port.low_latency = 1;
43531 }
43532@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
43533
43534 if (!cs->connected)
43535 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
43536- else if (!cs->port.count)
43537+ else if (!atomic_read(&cs->port.count))
43538 dev_warn(cs->dev, "%s: device not opened\n", __func__);
43539- else if (!--cs->port.count)
43540+ else if (!atomic_dec_return(&cs->port.count))
43541 tty_port_tty_set(&cs->port, NULL);
43542
43543 mutex_unlock(&cs->mutex);
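The gigaset if_open()/if_close() counter is converted to atomic operations because port.count gates first-open setup and last-close teardown; a racy nonatomic ++/-- there is effectively a reference-count bug. The resulting open/close idiom, as used in the hunk:

    /* open(): the first opener performs setup */
    atomic_inc(&cs->port.count);
    if (atomic_read(&cs->port.count) == 1)
            tty_port_tty_set(&cs->port, tty);

    /* close(): the last closer performs teardown */
    if (!atomic_dec_return(&cs->port.count))
            tty_port_tty_set(&cs->port, NULL);

The isdn_tty hunks further down apply the same conversion to the i4l modem ports.
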
43544diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
43545index 8c91fd5..14f13ce 100644
43546--- a/drivers/isdn/gigaset/ser-gigaset.c
43547+++ b/drivers/isdn/gigaset/ser-gigaset.c
43548@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
43549 }
43550
43551 static const struct gigaset_ops ops = {
43552- gigaset_write_cmd,
43553- gigaset_write_room,
43554- gigaset_chars_in_buffer,
43555- gigaset_brkchars,
43556- gigaset_init_bchannel,
43557- gigaset_close_bchannel,
43558- gigaset_initbcshw,
43559- gigaset_freebcshw,
43560- gigaset_reinitbcshw,
43561- gigaset_initcshw,
43562- gigaset_freecshw,
43563- gigaset_set_modem_ctrl,
43564- gigaset_baud_rate,
43565- gigaset_set_line_ctrl,
43566- gigaset_m10x_send_skb, /* asyncdata.c */
43567- gigaset_m10x_input, /* asyncdata.c */
43568+ .write_cmd = gigaset_write_cmd,
43569+ .write_room = gigaset_write_room,
43570+ .chars_in_buffer = gigaset_chars_in_buffer,
43571+ .brkchars = gigaset_brkchars,
43572+ .init_bchannel = gigaset_init_bchannel,
43573+ .close_bchannel = gigaset_close_bchannel,
43574+ .initbcshw = gigaset_initbcshw,
43575+ .freebcshw = gigaset_freebcshw,
43576+ .reinitbcshw = gigaset_reinitbcshw,
43577+ .initcshw = gigaset_initcshw,
43578+ .freecshw = gigaset_freecshw,
43579+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43580+ .baud_rate = gigaset_baud_rate,
43581+ .set_line_ctrl = gigaset_set_line_ctrl,
43582+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
43583+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
43584 };
43585
43586
43587diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
43588index 5f306e2..5342f88 100644
43589--- a/drivers/isdn/gigaset/usb-gigaset.c
43590+++ b/drivers/isdn/gigaset/usb-gigaset.c
43591@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
43592 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
43593 memcpy(cs->hw.usb->bchars, buf, 6);
43594 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
43595- 0, 0, &buf, 6, 2000);
43596+ 0, 0, buf, 6, 2000);
43597 }
43598
43599 static void gigaset_freebcshw(struct bc_state *bcs)
43600@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
43601 }
43602
43603 static const struct gigaset_ops ops = {
43604- gigaset_write_cmd,
43605- gigaset_write_room,
43606- gigaset_chars_in_buffer,
43607- gigaset_brkchars,
43608- gigaset_init_bchannel,
43609- gigaset_close_bchannel,
43610- gigaset_initbcshw,
43611- gigaset_freebcshw,
43612- gigaset_reinitbcshw,
43613- gigaset_initcshw,
43614- gigaset_freecshw,
43615- gigaset_set_modem_ctrl,
43616- gigaset_baud_rate,
43617- gigaset_set_line_ctrl,
43618- gigaset_m10x_send_skb,
43619- gigaset_m10x_input,
43620+ .write_cmd = gigaset_write_cmd,
43621+ .write_room = gigaset_write_room,
43622+ .chars_in_buffer = gigaset_chars_in_buffer,
43623+ .brkchars = gigaset_brkchars,
43624+ .init_bchannel = gigaset_init_bchannel,
43625+ .close_bchannel = gigaset_close_bchannel,
43626+ .initbcshw = gigaset_initbcshw,
43627+ .freebcshw = gigaset_freebcshw,
43628+ .reinitbcshw = gigaset_reinitbcshw,
43629+ .initcshw = gigaset_initcshw,
43630+ .freecshw = gigaset_freecshw,
43631+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43632+ .baud_rate = gigaset_baud_rate,
43633+ .set_line_ctrl = gigaset_set_line_ctrl,
43634+ .send_skb = gigaset_m10x_send_skb,
43635+ .handle_input = gigaset_m10x_input,
43636 };
43637
43638 /*
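Buried among the usb-gigaset constifications is a straight bug fix: in gigaset_brkchars(), buf is an array parameter and therefore already a pointer (const unsigned char *); passing &buf handed usb_control_msg() the address of the pointer variable itself, shipping six bytes of stack instead of the break characters. In miniature (usb_send() is a hypothetical stand-in for usb_control_msg()):

    extern int usb_send(const void *data, int len);     /* hypothetical stand-in */

    static void send_six(const unsigned char buf[6])
    {
            /* here buf has type "const unsigned char *" */
            usb_send(buf, 6);       /* correct: the six payload bytes */
            /* usb_send(&buf, 6);      wrong: &buf is "const unsigned char **",
             *                         i.e. the pointer's own storage */
    }
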
43639diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
43640index 4d9b195..455075c 100644
43641--- a/drivers/isdn/hardware/avm/b1.c
43642+++ b/drivers/isdn/hardware/avm/b1.c
43643@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
43644 }
43645 if (left) {
43646 if (t4file->user) {
43647- if (copy_from_user(buf, dp, left))
43648+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43649 return -EFAULT;
43650 } else {
43651 memcpy(buf, dp, left);
43652@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
43653 }
43654 if (left) {
43655 if (config->user) {
43656- if (copy_from_user(buf, dp, left))
43657+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43658 return -EFAULT;
43659 } else {
43660 memcpy(buf, dp, left);
43661diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
43662index 9b856e1..fa03c92 100644
43663--- a/drivers/isdn/i4l/isdn_common.c
43664+++ b/drivers/isdn/i4l/isdn_common.c
43665@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
43666 } else
43667 return -EINVAL;
43668 case IIOCDBGVAR:
43669+ if (!capable(CAP_SYS_RAWIO))
43670+ return -EPERM;
43671 if (arg) {
43672 if (copy_to_user(argp, &dev, sizeof(ulong)))
43673 return -EFAULT;
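IIOCDBGVAR hands userspace the kernel address of the global dev structure, which is a ready-made KASLR infoleak, so the hunk gates the ioctl behind CAP_SYS_RAWIO. Shape of the check (simplified; the surrounding ioctl plumbing is elided):

    case IIOCDBGVAR:
            if (!capable(CAP_SYS_RAWIO))    /* leaks a kernel pointer:
                                             * privileged callers only */
                    return -EPERM;
            if (copy_to_user(argp, &dev, sizeof(ulong)))
                    return -EFAULT;
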
43674diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
43675index 91d5730..336523e 100644
43676--- a/drivers/isdn/i4l/isdn_concap.c
43677+++ b/drivers/isdn/i4l/isdn_concap.c
43678@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
43679 }
43680
43681 struct concap_device_ops isdn_concap_reliable_dl_dops = {
43682- &isdn_concap_dl_data_req,
43683- &isdn_concap_dl_connect_req,
43684- &isdn_concap_dl_disconn_req
43685+ .data_req = &isdn_concap_dl_data_req,
43686+ .connect_req = &isdn_concap_dl_connect_req,
43687+ .disconn_req = &isdn_concap_dl_disconn_req
43688 };
43689
43690 /* The following should better go into a dedicated source file such that
43691diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
43692index bc91261..2ef7e36 100644
43693--- a/drivers/isdn/i4l/isdn_tty.c
43694+++ b/drivers/isdn/i4l/isdn_tty.c
43695@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
43696
43697 #ifdef ISDN_DEBUG_MODEM_OPEN
43698 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
43699- port->count);
43700+ atomic_read(&port->count));
43701 #endif
43702- port->count++;
43703+ atomic_inc(&port->count);
43704 port->tty = tty;
43705 /*
43706 * Start up serial port
43707@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43708 #endif
43709 return;
43710 }
43711- if ((tty->count == 1) && (port->count != 1)) {
43712+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
43713 /*
43714 * Uh, oh. tty->count is 1, which means that the tty
43715 * structure will be freed. Info->count should always
43716@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43717 * serial port won't be shutdown.
43718 */
43719 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
43720- "info->count is %d\n", port->count);
43721- port->count = 1;
43722+ "info->count is %d\n", atomic_read(&port->count));
43723+ atomic_set(&port->count, 1);
43724 }
43725- if (--port->count < 0) {
43726+ if (atomic_dec_return(&port->count) < 0) {
43727 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
43728- info->line, port->count);
43729- port->count = 0;
43730+ info->line, atomic_read(&port->count));
43731+ atomic_set(&port->count, 0);
43732 }
43733- if (port->count) {
43734+ if (atomic_read(&port->count)) {
43735 #ifdef ISDN_DEBUG_MODEM_OPEN
43736 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
43737 #endif
43738@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
43739 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
43740 return;
43741 isdn_tty_shutdown(info);
43742- port->count = 0;
43743+ atomic_set(&port->count, 0);
43744 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43745 port->tty = NULL;
43746 wake_up_interruptible(&port->open_wait);
43747@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
43748 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
43749 modem_info *info = &dev->mdm.info[i];
43750
43751- if (info->port.count == 0)
43752+ if (atomic_read(&info->port.count) == 0)
43753 continue;
43754 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
43755 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
43756diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
43757index e2d4e58..40cd045 100644
43758--- a/drivers/isdn/i4l/isdn_x25iface.c
43759+++ b/drivers/isdn/i4l/isdn_x25iface.c
43760@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
43761
43762
43763 static struct concap_proto_ops ix25_pops = {
43764- &isdn_x25iface_proto_new,
43765- &isdn_x25iface_proto_del,
43766- &isdn_x25iface_proto_restart,
43767- &isdn_x25iface_proto_close,
43768- &isdn_x25iface_xmit,
43769- &isdn_x25iface_receive,
43770- &isdn_x25iface_connect_ind,
43771- &isdn_x25iface_disconn_ind
43772+ .proto_new = &isdn_x25iface_proto_new,
43773+ .proto_del = &isdn_x25iface_proto_del,
43774+ .restart = &isdn_x25iface_proto_restart,
43775+ .close = &isdn_x25iface_proto_close,
43776+ .encap_and_xmit = &isdn_x25iface_xmit,
43777+ .data_ind = &isdn_x25iface_receive,
43778+ .connect_ind = &isdn_x25iface_connect_ind,
43779+ .disconn_ind = &isdn_x25iface_disconn_ind
43780 };
43781
43782 /* error message helper function */
43783diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
43784index 6a7447c..cae33fe 100644
43785--- a/drivers/isdn/icn/icn.c
43786+++ b/drivers/isdn/icn/icn.c
43787@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
43788 if (count > len)
43789 count = len;
43790 if (user) {
43791- if (copy_from_user(msg, buf, count))
43792+ if (count > sizeof msg || copy_from_user(msg, buf, count))
43793 return -EFAULT;
43794 } else
43795 memcpy(msg, buf, count);
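icn_writecmd() above — like b1_load_t4file() and b1_load_config() earlier — gets the write-side twin of the mousedev fix: the length is now checked before copy_from_user() into a fixed driver buffer, turning what was a user-controlled kernel buffer overflow into a clean -EFAULT. Reduced to the essentials (names and buffer size hypothetical):

    extern void consume(const u8 *buf, size_t len);     /* hypothetical consumer */

    static int load_chunk(const u_char __user *src, size_t left)
    {
            u8 buf[256];    /* hypothetical fixed staging buffer */

            /* refuse before copying: "left" originates in userspace */
            if (left > sizeof(buf) || copy_from_user(buf, src, left))
                    return -EFAULT;
            consume(buf, left);
            return 0;
    }
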
43796diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
43797index 87f7dff..7300125 100644
43798--- a/drivers/isdn/mISDN/dsp_cmx.c
43799+++ b/drivers/isdn/mISDN/dsp_cmx.c
43800@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
43801 static u16 dsp_count; /* last sample count */
43802 static int dsp_count_valid; /* if we have last sample count */
43803
43804-void
43805+void __intentional_overflow(-1)
43806 dsp_cmx_send(void *arg)
43807 {
43808 struct dsp_conf *conf;
43809diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
43810index 0f9ed1e..2715d6f 100644
43811--- a/drivers/leds/leds-clevo-mail.c
43812+++ b/drivers/leds/leds-clevo-mail.c
43813@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
43814 * detected as working, but in reality it is not) as low as
43815 * possible.
43816 */
43817-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
43818+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
43819 {
43820 .callback = clevo_mail_led_dmi_callback,
43821 .ident = "Clevo D410J",
43822diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
43823index 046cb70..6b20d39 100644
43824--- a/drivers/leds/leds-ss4200.c
43825+++ b/drivers/leds/leds-ss4200.c
43826@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
43827 * detected as working, but in reality it is not) as low as
43828 * possible.
43829 */
43830-static struct dmi_system_id nas_led_whitelist[] __initdata = {
43831+static struct dmi_system_id nas_led_whitelist[] __initconst = {
43832 {
43833 .callback = ss4200_led_dmi_callback,
43834 .ident = "Intel SS4200-E",
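The two LED drivers flip their DMI match tables from __initdata to __initconst: the tables are only ever read, so they belong in the read-only init section (mainline usage typically pairs __initconst with a const qualifier on the object, as sketched below). A hypothetical table in that form:

    static const struct dmi_system_id example_whitelist[] __initconst = {
            {
                    .callback = example_dmi_cb,     /* hypothetical callback */
                    .ident = "Example NAS",
                    .matches = {
                            DMI_MATCH(DMI_PRODUCT_NAME, "Example"),
                    },
            },
            { }     /* zeroed terminator entry */
    };
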
43835diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
43836index 6590558..a74c5dd 100644
43837--- a/drivers/lguest/core.c
43838+++ b/drivers/lguest/core.c
43839@@ -96,9 +96,17 @@ static __init int map_switcher(void)
43840 * The end address needs +1 because __get_vm_area allocates an
43841 * extra guard page, so we need space for that.
43842 */
43843+
43844+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
43845+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
43846+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
43847+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
43848+#else
43849 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
43850 VM_ALLOC, switcher_addr, switcher_addr
43851 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
43852+#endif
43853+
43854 if (!switcher_vma) {
43855 err = -ENOMEM;
43856 printk("lguest: could not map switcher pages high\n");
43857@@ -121,7 +129,7 @@ static __init int map_switcher(void)
43858 * Now the Switcher is mapped at the right address, we can't fail!
43859 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
43860 */
43861- memcpy(switcher_vma->addr, start_switcher_text,
43862+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
43863 end_switcher_text - start_switcher_text);
43864
43865 printk(KERN_INFO "lguest: mapped switcher at %p\n",
43866diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
43867index e8b55c3..3514c37 100644
43868--- a/drivers/lguest/page_tables.c
43869+++ b/drivers/lguest/page_tables.c
43870@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
43871 /*:*/
43872
43873 #ifdef CONFIG_X86_PAE
43874-static void release_pmd(pmd_t *spmd)
43875+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
43876 {
43877 /* If the entry's not present, there's nothing to release. */
43878 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
43879diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
43880index 922a1ac..9dd0c2a 100644
43881--- a/drivers/lguest/x86/core.c
43882+++ b/drivers/lguest/x86/core.c
43883@@ -59,7 +59,7 @@ static struct {
43884 /* Offset from where switcher.S was compiled to where we've copied it */
43885 static unsigned long switcher_offset(void)
43886 {
43887- return switcher_addr - (unsigned long)start_switcher_text;
43888+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
43889 }
43890
43891 /* This cpu's struct lguest_pages (after the Switcher text page) */
43892@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
43893 * These copies are pretty cheap, so we do them unconditionally: */
43894 /* Save the current Host top-level page directory.
43895 */
43896+
43897+#ifdef CONFIG_PAX_PER_CPU_PGD
43898+ pages->state.host_cr3 = read_cr3();
43899+#else
43900 pages->state.host_cr3 = __pa(current->mm->pgd);
43901+#endif
43902+
43903 /*
43904 * Set up the Guest's page tables to see this CPU's pages (and no
43905 * other CPU's pages).
43906@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
43907 * compiled-in switcher code and the high-mapped copy we just made.
43908 */
43909 for (i = 0; i < IDT_ENTRIES; i++)
43910- default_idt_entries[i] += switcher_offset();
43911+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
43912
43913 /*
43914 * Set up the Switcher's per-cpu areas.
43915@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
43916 * it will be undisturbed when we switch. To change %cs and jump we
43917 * need this structure to feed to Intel's "lcall" instruction.
43918 */
43919- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
43920+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
43921 lguest_entry.segment = LGUEST_CS;
43922
43923 /*
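The lguest changes repeatedly wrap text-section symbols in ktla_ktva(). Under KERNEXEC on i386 the kernel's code is mapped at a different virtual address from its link-time (linear) address, so symbols like start_switcher_text must be translated before being used as ordinary data pointers; without KERNEXEC the macro is a no-op. Schematically — the offset symbol's name is assumed, not taken from this excerpt:

    #ifdef CONFIG_PAX_KERNEXEC
    /* kernel text linear address -> kernel text virtual address */
    #define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)
    #else
    #define ktla_ktva(addr) (addr)
    #endif
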
43924diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
43925index 40634b0..4f5855e 100644
43926--- a/drivers/lguest/x86/switcher_32.S
43927+++ b/drivers/lguest/x86/switcher_32.S
43928@@ -87,6 +87,7 @@
43929 #include <asm/page.h>
43930 #include <asm/segment.h>
43931 #include <asm/lguest.h>
43932+#include <asm/processor-flags.h>
43933
43934 // We mark the start of the code to copy
43935 // It's placed in .text tho it's never run here
43936@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
43937 // Changes type when we load it: damn Intel!
43938 // For after we switch over our page tables
43939 // That entry will be read-only: we'd crash.
43940+
43941+#ifdef CONFIG_PAX_KERNEXEC
43942+ mov %cr0, %edx
43943+ xor $X86_CR0_WP, %edx
43944+ mov %edx, %cr0
43945+#endif
43946+
43947 movl $(GDT_ENTRY_TSS*8), %edx
43948 ltr %dx
43949
43950@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
43951 // Let's clear it again for our return.
43952 // The GDT descriptor of the Host
43953 // Points to the table after two "size" bytes
43954- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
43955+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
43956 // Clear "used" from type field (byte 5, bit 2)
43957- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
43958+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
43959+
43960+#ifdef CONFIG_PAX_KERNEXEC
43961+ mov %cr0, %eax
43962+ xor $X86_CR0_WP, %eax
43963+ mov %eax, %cr0
43964+#endif
43965
43966 // Once our page table's switched, the Guest is live!
43967 // The Host fades as we run this final step.
43968@@ -295,13 +309,12 @@ deliver_to_host:
43969 // I consulted gcc, and it gave
43970 // These instructions, which I gladly credit:
43971 leal (%edx,%ebx,8), %eax
43972- movzwl (%eax),%edx
43973- movl 4(%eax), %eax
43974- xorw %ax, %ax
43975- orl %eax, %edx
43976+ movl 4(%eax), %edx
43977+ movw (%eax), %dx
43978 // Now the address of the handler's in %edx
43979 // We call it now: its "iret" drops us home.
43980- jmp *%edx
43981+ ljmp $__KERNEL_CS, $1f
43982+1: jmp *%edx
43983
43984 // Every interrupt can come to us here
43985 // But we must truly tell each apart.
43986diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
43987index a08e3ee..df8ade2 100644
43988--- a/drivers/md/bcache/closure.h
43989+++ b/drivers/md/bcache/closure.h
43990@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
43991 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
43992 struct workqueue_struct *wq)
43993 {
43994- BUG_ON(object_is_on_stack(cl));
43995+ BUG_ON(object_starts_on_stack(cl));
43996 closure_set_ip(cl);
43997 cl->fn = fn;
43998 cl->wq = wq;
43999diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
44000index 1695ee5..89f18ab 100644
44001--- a/drivers/md/bitmap.c
44002+++ b/drivers/md/bitmap.c
44003@@ -1784,7 +1784,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
44004 chunk_kb ? "KB" : "B");
44005 if (bitmap->storage.file) {
44006 seq_printf(seq, ", file: ");
44007- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
44008+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
44009 }
44010
44011 seq_printf(seq, "\n");
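The bitmap_status() tweak widens seq_path()'s escape set (its third argument lists the characters to escape in the printed pathname) to include the backslash. Without it, a bitmap file whose name embeds backslash sequences could forge unescaped structure in the seq_file output that userspace parsers would misread:

    /* escape space, tab, newline and backslash in the emitted path */
    seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
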
44012diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
44013index 73f791b..8c5d3ac 100644
44014--- a/drivers/md/dm-ioctl.c
44015+++ b/drivers/md/dm-ioctl.c
44016@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
44017 cmd == DM_LIST_VERSIONS_CMD)
44018 return 0;
44019
44020- if ((cmd == DM_DEV_CREATE_CMD)) {
44021+ if (cmd == DM_DEV_CREATE_CMD) {
44022 if (!*param->name) {
44023 DMWARN("name not supplied when creating device");
44024 return -EINVAL;
44025diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
44026index 7dfdb5c..4caada6 100644
44027--- a/drivers/md/dm-raid1.c
44028+++ b/drivers/md/dm-raid1.c
44029@@ -40,7 +40,7 @@ enum dm_raid1_error {
44030
44031 struct mirror {
44032 struct mirror_set *ms;
44033- atomic_t error_count;
44034+ atomic_unchecked_t error_count;
44035 unsigned long error_type;
44036 struct dm_dev *dev;
44037 sector_t offset;
44038@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
44039 struct mirror *m;
44040
44041 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
44042- if (!atomic_read(&m->error_count))
44043+ if (!atomic_read_unchecked(&m->error_count))
44044 return m;
44045
44046 return NULL;
44047@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
44048 * simple way to tell if a device has encountered
44049 * errors.
44050 */
44051- atomic_inc(&m->error_count);
44052+ atomic_inc_unchecked(&m->error_count);
44053
44054 if (test_and_set_bit(error_type, &m->error_type))
44055 return;
44056@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
44057 struct mirror *m = get_default_mirror(ms);
44058
44059 do {
44060- if (likely(!atomic_read(&m->error_count)))
44061+ if (likely(!atomic_read_unchecked(&m->error_count)))
44062 return m;
44063
44064 if (m-- == ms->mirror)
44065@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
44066 {
44067 struct mirror *default_mirror = get_default_mirror(m->ms);
44068
44069- return !atomic_read(&default_mirror->error_count);
44070+ return !atomic_read_unchecked(&default_mirror->error_count);
44071 }
44072
44073 static int mirror_available(struct mirror_set *ms, struct bio *bio)
44074@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
44075 */
44076 if (likely(region_in_sync(ms, region, 1)))
44077 m = choose_mirror(ms, bio->bi_iter.bi_sector);
44078- else if (m && atomic_read(&m->error_count))
44079+ else if (m && atomic_read_unchecked(&m->error_count))
44080 m = NULL;
44081
44082 if (likely(m))
44083@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
44084 }
44085
44086 ms->mirror[mirror].ms = ms;
44087- atomic_set(&(ms->mirror[mirror].error_count), 0);
44088+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
44089 ms->mirror[mirror].error_type = 0;
44090 ms->mirror[mirror].offset = offset;
44091
44092@@ -1342,7 +1342,7 @@ static void mirror_resume(struct dm_target *ti)
44093 */
44094 static char device_status_char(struct mirror *m)
44095 {
44096- if (!atomic_read(&(m->error_count)))
44097+ if (!atomic_read_unchecked(&(m->error_count)))
44098 return 'A';
44099
44100 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
44101diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
44102index f478a4c..4b8e5ef 100644
44103--- a/drivers/md/dm-stats.c
44104+++ b/drivers/md/dm-stats.c
44105@@ -382,7 +382,7 @@ do_sync_free:
44106 synchronize_rcu_expedited();
44107 dm_stat_free(&s->rcu_head);
44108 } else {
44109- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
44110+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
44111 call_rcu(&s->rcu_head, dm_stat_free);
44112 }
44113 return 0;
44114@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
44115 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
44116 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
44117 ));
44118- ACCESS_ONCE(last->last_sector) = end_sector;
44119- ACCESS_ONCE(last->last_rw) = bi_rw;
44120+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
44121+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
44122 }
44123
44124 rcu_read_lock();
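
The ACCESS_ONCE changes in dm-stats.c follow from the same constification work: in this patch ACCESS_ONCE() reads through a const-qualified volatile pointer, so assigning to an ACCESS_ONCE() lvalue no longer compiles, and stores must use ACCESS_ONCE_RW(). Modeled on the patch's compiler.h (paraphrased, not quoted verbatim):

    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x)) /* loads only   */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))       /* loads/stores */

That is why every store site above flips to the RW form while the ACCESS_ONCE(last->last_rw) load a few lines earlier stays untouched.
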
44125diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
44126index f8b37d4..5c5cafd 100644
44127--- a/drivers/md/dm-stripe.c
44128+++ b/drivers/md/dm-stripe.c
44129@@ -21,7 +21,7 @@ struct stripe {
44130 struct dm_dev *dev;
44131 sector_t physical_start;
44132
44133- atomic_t error_count;
44134+ atomic_unchecked_t error_count;
44135 };
44136
44137 struct stripe_c {
44138@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
44139 kfree(sc);
44140 return r;
44141 }
44142- atomic_set(&(sc->stripe[i].error_count), 0);
44143+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
44144 }
44145
44146 ti->private = sc;
44147@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
44148 DMEMIT("%d ", sc->stripes);
44149 for (i = 0; i < sc->stripes; i++) {
44150 DMEMIT("%s ", sc->stripe[i].dev->name);
44151- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
44152+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
44153 'D' : 'A';
44154 }
44155 buffer[i] = '\0';
44156@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
44157 */
44158 for (i = 0; i < sc->stripes; i++)
44159 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
44160- atomic_inc(&(sc->stripe[i].error_count));
44161- if (atomic_read(&(sc->stripe[i].error_count)) <
44162+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
44163+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
44164 DM_IO_ERROR_THRESHOLD)
44165 schedule_work(&sc->trigger_event);
44166 }
44167diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
44168index 3afae9e..4e1c954 100644
44169--- a/drivers/md/dm-table.c
44170+++ b/drivers/md/dm-table.c
44171@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
44172 if (!dev_size)
44173 return 0;
44174
44175- if ((start >= dev_size) || (start + len > dev_size)) {
44176+ if ((start >= dev_size) || (len > dev_size - start)) {
44177 DMWARN("%s: %s too small for target: "
44178 "start=%llu, len=%llu, dev_size=%llu",
44179 dm_device_name(ti->table->md), bdevname(bdev, b),
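
The dm-table.c hunk is an arithmetic fix rather than an annotation: with an attacker-influenced len, start + len can wrap around and compare as small, so an out-of-range area passes validation. The rewritten test len > dev_size - start cannot wrap, since the first clause already guarantees start < dev_size. A standalone demonstration (plain C, with sector_t reduced to uint64_t):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    static int invalid_old(sector_t start, sector_t len, sector_t dev_size)
    {
        return (start >= dev_size) || (start + len > dev_size); /* sum can wrap */
    }

    static int invalid_new(sector_t start, sector_t len, sector_t dev_size)
    {
        return (start >= dev_size) || (len > dev_size - start); /* no overflow  */
    }

    int main(void)
    {
        sector_t start = 1, len = UINT64_MAX, dev_size = 1000;

        /* start + len wraps to 0, so the old check wrongly accepts it. */
        printf("old says invalid: %d\n", invalid_old(start, len, dev_size)); /* 0 */
        printf("new says invalid: %d\n", invalid_new(start, len, dev_size)); /* 1 */
        return 0;
    }
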
44180diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
44181index 43adbb8..7b34305 100644
44182--- a/drivers/md/dm-thin-metadata.c
44183+++ b/drivers/md/dm-thin-metadata.c
44184@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44185 {
44186 pmd->info.tm = pmd->tm;
44187 pmd->info.levels = 2;
44188- pmd->info.value_type.context = pmd->data_sm;
44189+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44190 pmd->info.value_type.size = sizeof(__le64);
44191 pmd->info.value_type.inc = data_block_inc;
44192 pmd->info.value_type.dec = data_block_dec;
44193@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44194
44195 pmd->bl_info.tm = pmd->tm;
44196 pmd->bl_info.levels = 1;
44197- pmd->bl_info.value_type.context = pmd->data_sm;
44198+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44199 pmd->bl_info.value_type.size = sizeof(__le64);
44200 pmd->bl_info.value_type.inc = data_block_inc;
44201 pmd->bl_info.value_type.dec = data_block_dec;
44202diff --git a/drivers/md/dm.c b/drivers/md/dm.c
44203index 2caf5b3..104f98f 100644
44204--- a/drivers/md/dm.c
44205+++ b/drivers/md/dm.c
44206@@ -185,9 +185,9 @@ struct mapped_device {
44207 /*
44208 * Event handling.
44209 */
44210- atomic_t event_nr;
44211+ atomic_unchecked_t event_nr;
44212 wait_queue_head_t eventq;
44213- atomic_t uevent_seq;
44214+ atomic_unchecked_t uevent_seq;
44215 struct list_head uevent_list;
44216 spinlock_t uevent_lock; /* Protect access to uevent_list */
44217
44218@@ -2070,8 +2070,8 @@ static struct mapped_device *alloc_dev(int minor)
44219 spin_lock_init(&md->deferred_lock);
44220 atomic_set(&md->holders, 1);
44221 atomic_set(&md->open_count, 0);
44222- atomic_set(&md->event_nr, 0);
44223- atomic_set(&md->uevent_seq, 0);
44224+ atomic_set_unchecked(&md->event_nr, 0);
44225+ atomic_set_unchecked(&md->uevent_seq, 0);
44226 INIT_LIST_HEAD(&md->uevent_list);
44227 INIT_LIST_HEAD(&md->table_devices);
44228 spin_lock_init(&md->uevent_lock);
44229@@ -2227,7 +2227,7 @@ static void event_callback(void *context)
44230
44231 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
44232
44233- atomic_inc(&md->event_nr);
44234+ atomic_inc_unchecked(&md->event_nr);
44235 wake_up(&md->eventq);
44236 }
44237
44238@@ -3041,18 +3041,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
44239
44240 uint32_t dm_next_uevent_seq(struct mapped_device *md)
44241 {
44242- return atomic_add_return(1, &md->uevent_seq);
44243+ return atomic_add_return_unchecked(1, &md->uevent_seq);
44244 }
44245
44246 uint32_t dm_get_event_nr(struct mapped_device *md)
44247 {
44248- return atomic_read(&md->event_nr);
44249+ return atomic_read_unchecked(&md->event_nr);
44250 }
44251
44252 int dm_wait_event(struct mapped_device *md, int event_nr)
44253 {
44254 return wait_event_interruptible(md->eventq,
44255- (event_nr != atomic_read(&md->event_nr)));
44256+ (event_nr != atomic_read_unchecked(&md->event_nr)));
44257 }
44258
44259 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
44260diff --git a/drivers/md/md.c b/drivers/md/md.c
44261index 709755f..5bc3fa4 100644
44262--- a/drivers/md/md.c
44263+++ b/drivers/md/md.c
44264@@ -190,10 +190,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
44265 * start build, activate spare
44266 */
44267 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
44268-static atomic_t md_event_count;
44269+static atomic_unchecked_t md_event_count;
44270 void md_new_event(struct mddev *mddev)
44271 {
44272- atomic_inc(&md_event_count);
44273+ atomic_inc_unchecked(&md_event_count);
44274 wake_up(&md_event_waiters);
44275 }
44276 EXPORT_SYMBOL_GPL(md_new_event);
44277@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
44278 */
44279 static void md_new_event_inintr(struct mddev *mddev)
44280 {
44281- atomic_inc(&md_event_count);
44282+ atomic_inc_unchecked(&md_event_count);
44283 wake_up(&md_event_waiters);
44284 }
44285
44286@@ -1422,7 +1422,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
44287 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
44288 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
44289 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
44290- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44291+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44292
44293 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
44294 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
44295@@ -1673,7 +1673,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
44296 else
44297 sb->resync_offset = cpu_to_le64(0);
44298
44299- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
44300+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
44301
44302 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
44303 sb->size = cpu_to_le64(mddev->dev_sectors);
44304@@ -2543,7 +2543,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
44305 static ssize_t
44306 errors_show(struct md_rdev *rdev, char *page)
44307 {
44308- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
44309+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
44310 }
44311
44312 static ssize_t
44313@@ -2552,7 +2552,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
44314 char *e;
44315 unsigned long n = simple_strtoul(buf, &e, 10);
44316 if (*buf && (*e == 0 || *e == '\n')) {
44317- atomic_set(&rdev->corrected_errors, n);
44318+ atomic_set_unchecked(&rdev->corrected_errors, n);
44319 return len;
44320 }
44321 return -EINVAL;
44322@@ -2997,8 +2997,8 @@ int md_rdev_init(struct md_rdev *rdev)
44323 rdev->sb_loaded = 0;
44324 rdev->bb_page = NULL;
44325 atomic_set(&rdev->nr_pending, 0);
44326- atomic_set(&rdev->read_errors, 0);
44327- atomic_set(&rdev->corrected_errors, 0);
44328+ atomic_set_unchecked(&rdev->read_errors, 0);
44329+ atomic_set_unchecked(&rdev->corrected_errors, 0);
44330
44331 INIT_LIST_HEAD(&rdev->same_set);
44332 init_waitqueue_head(&rdev->blocked_wait);
44333@@ -6865,7 +6865,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
44334
44335 spin_unlock(&pers_lock);
44336 seq_printf(seq, "\n");
44337- seq->poll_event = atomic_read(&md_event_count);
44338+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44339 return 0;
44340 }
44341 if (v == (void*)2) {
44342@@ -6968,7 +6968,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
44343 return error;
44344
44345 seq = file->private_data;
44346- seq->poll_event = atomic_read(&md_event_count);
44347+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44348 return error;
44349 }
44350
44351@@ -6985,7 +6985,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
44352 /* always allow read */
44353 mask = POLLIN | POLLRDNORM;
44354
44355- if (seq->poll_event != atomic_read(&md_event_count))
44356+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
44357 mask |= POLLERR | POLLPRI;
44358 return mask;
44359 }
44360@@ -7032,7 +7032,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
44361 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
44362 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
44363 (int)part_stat_read(&disk->part0, sectors[1]) -
44364- atomic_read(&disk->sync_io);
44365+ atomic_read_unchecked(&disk->sync_io);
44366 /* sync IO will cause sync_io to increase before the disk_stats
44367 * as sync_io is counted when a request starts, and
44368 * disk_stats is counted when it completes.
44369diff --git a/drivers/md/md.h b/drivers/md/md.h
44370index 03cec5b..0a658c1 100644
44371--- a/drivers/md/md.h
44372+++ b/drivers/md/md.h
44373@@ -94,13 +94,13 @@ struct md_rdev {
44374 * only maintained for arrays that
44375 * support hot removal
44376 */
44377- atomic_t read_errors; /* number of consecutive read errors that
44378+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
44379 * we have tried to ignore.
44380 */
44381 struct timespec last_read_error; /* monotonic time since our
44382 * last read error
44383 */
44384- atomic_t corrected_errors; /* number of corrected read errors,
44385+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
44386 * for reporting to userspace and storing
44387 * in superblock.
44388 */
44389@@ -448,7 +448,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
44390
44391 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
44392 {
44393- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44394+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44395 }
44396
44397 struct md_personality
44398diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
44399index e8a9042..35bd145 100644
44400--- a/drivers/md/persistent-data/dm-space-map-metadata.c
44401+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
44402@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
44403 * Flick into a mode where all blocks get allocated in the new area.
44404 */
44405 smm->begin = old_len;
44406- memcpy(sm, &bootstrap_ops, sizeof(*sm));
44407+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
44408
44409 /*
44410 * Extend.
44411@@ -714,7 +714,7 @@ out:
44412 /*
44413 * Switch back to normal behaviour.
44414 */
44415- memcpy(sm, &ops, sizeof(*sm));
44416+ memcpy((void *)sm, &ops, sizeof(*sm));
44417 return r;
44418 }
44419
44420diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
44421index 3e6d115..ffecdeb 100644
44422--- a/drivers/md/persistent-data/dm-space-map.h
44423+++ b/drivers/md/persistent-data/dm-space-map.h
44424@@ -71,6 +71,7 @@ struct dm_space_map {
44425 dm_sm_threshold_fn fn,
44426 void *context);
44427 };
44428+typedef struct dm_space_map __no_const dm_space_map_no_const;
44429
44430 /*----------------------------------------------------------------*/
44431
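The dm_space_map_no_const typedef serves the constify gcc plugin that ships with this patch: structures made up of function pointers (dm_space_map here, the file_operations in dvbdev.c below, the dib3000/dib7000p/dib8000/af9033 frontend ops below) are turned const at compile time so the tables can sit in read-only memory. A type that legitimately must be rewritten at runtime, as sm_metadata_extend() does above when it memcpy()s bootstrap_ops over the live table, is opted out with __no_const; the (void *)sm casts above discard the const qualifier for exactly that write, and the (dm_space_map_no_const *) casts in dm-thin-metadata.c earlier let the context pointer be stored. A sketch of the mechanism (illustrative, not the plugin source):

    /* When the plugin is active, __no_const exempts an ops-style struct
     * from automatic constification. */
    #ifdef CONSTIFY_PLUGIN
    #define __no_const __attribute__((no_const))
    #else
    #define __no_const
    #endif

    struct ops         { int (*f)(void); };            /* constified by the plugin */
    struct mutable_ops { int (*f)(void); } __no_const; /* stays writable           */
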
44432diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
44433index 2f2f38f..f6a8ebe 100644
44434--- a/drivers/md/raid1.c
44435+++ b/drivers/md/raid1.c
44436@@ -1932,7 +1932,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
44437 if (r1_sync_page_io(rdev, sect, s,
44438 bio->bi_io_vec[idx].bv_page,
44439 READ) != 0)
44440- atomic_add(s, &rdev->corrected_errors);
44441+ atomic_add_unchecked(s, &rdev->corrected_errors);
44442 }
44443 sectors -= s;
44444 sect += s;
44445@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
44446 !test_bit(Faulty, &rdev->flags)) {
44447 if (r1_sync_page_io(rdev, sect, s,
44448 conf->tmppage, READ)) {
44449- atomic_add(s, &rdev->corrected_errors);
44450+ atomic_add_unchecked(s, &rdev->corrected_errors);
44451 printk(KERN_INFO
44452 "md/raid1:%s: read error corrected "
44453 "(%d sectors at %llu on %s)\n",
44454diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
44455index 32e282f..5cec803 100644
44456--- a/drivers/md/raid10.c
44457+++ b/drivers/md/raid10.c
44458@@ -1944,7 +1944,7 @@ static void end_sync_read(struct bio *bio, int error)
44459 /* The write handler will notice the lack of
44460 * R10BIO_Uptodate and record any errors etc
44461 */
44462- atomic_add(r10_bio->sectors,
44463+ atomic_add_unchecked(r10_bio->sectors,
44464 &conf->mirrors[d].rdev->corrected_errors);
44465
44466 /* for reconstruct, we always reschedule after a read.
44467@@ -2301,7 +2301,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44468 {
44469 struct timespec cur_time_mon;
44470 unsigned long hours_since_last;
44471- unsigned int read_errors = atomic_read(&rdev->read_errors);
44472+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
44473
44474 ktime_get_ts(&cur_time_mon);
44475
44476@@ -2323,9 +2323,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44477 * overflowing the shift of read_errors by hours_since_last.
44478 */
44479 if (hours_since_last >= 8 * sizeof(read_errors))
44480- atomic_set(&rdev->read_errors, 0);
44481+ atomic_set_unchecked(&rdev->read_errors, 0);
44482 else
44483- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
44484+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
44485 }
44486
44487 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
44488@@ -2379,8 +2379,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44489 return;
44490
44491 check_decay_read_errors(mddev, rdev);
44492- atomic_inc(&rdev->read_errors);
44493- if (atomic_read(&rdev->read_errors) > max_read_errors) {
44494+ atomic_inc_unchecked(&rdev->read_errors);
44495+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
44496 char b[BDEVNAME_SIZE];
44497 bdevname(rdev->bdev, b);
44498
44499@@ -2388,7 +2388,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44500 "md/raid10:%s: %s: Raid device exceeded "
44501 "read_error threshold [cur %d:max %d]\n",
44502 mdname(mddev), b,
44503- atomic_read(&rdev->read_errors), max_read_errors);
44504+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
44505 printk(KERN_NOTICE
44506 "md/raid10:%s: %s: Failing raid device\n",
44507 mdname(mddev), b);
44508@@ -2543,7 +2543,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44509 sect +
44510 choose_data_offset(r10_bio, rdev)),
44511 bdevname(rdev->bdev, b));
44512- atomic_add(s, &rdev->corrected_errors);
44513+ atomic_add_unchecked(s, &rdev->corrected_errors);
44514 }
44515
44516 rdev_dec_pending(rdev, mddev);
44517diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
44518index 8577cc7..e80e05d 100644
44519--- a/drivers/md/raid5.c
44520+++ b/drivers/md/raid5.c
44521@@ -1730,6 +1730,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
44522 return 1;
44523 }
44524
44525+#ifdef CONFIG_GRKERNSEC_HIDESYM
44526+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
44527+#endif
44528+
44529 static int grow_stripes(struct r5conf *conf, int num)
44530 {
44531 struct kmem_cache *sc;
44532@@ -1741,7 +1745,11 @@ static int grow_stripes(struct r5conf *conf, int num)
44533 "raid%d-%s", conf->level, mdname(conf->mddev));
44534 else
44535 sprintf(conf->cache_name[0],
44536+#ifdef CONFIG_GRKERNSEC_HIDESYM
44537+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
44538+#else
44539 "raid%d-%p", conf->level, conf->mddev);
44540+#endif
44541 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
44542
44543 conf->active_name = 0;
44544@@ -2017,21 +2025,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
44545 mdname(conf->mddev), STRIPE_SECTORS,
44546 (unsigned long long)s,
44547 bdevname(rdev->bdev, b));
44548- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
44549+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
44550 clear_bit(R5_ReadError, &sh->dev[i].flags);
44551 clear_bit(R5_ReWrite, &sh->dev[i].flags);
44552 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
44553 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
44554
44555- if (atomic_read(&rdev->read_errors))
44556- atomic_set(&rdev->read_errors, 0);
44557+ if (atomic_read_unchecked(&rdev->read_errors))
44558+ atomic_set_unchecked(&rdev->read_errors, 0);
44559 } else {
44560 const char *bdn = bdevname(rdev->bdev, b);
44561 int retry = 0;
44562 int set_bad = 0;
44563
44564 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
44565- atomic_inc(&rdev->read_errors);
44566+ atomic_inc_unchecked(&rdev->read_errors);
44567 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
44568 printk_ratelimited(
44569 KERN_WARNING
44570@@ -2059,7 +2067,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
44571 mdname(conf->mddev),
44572 (unsigned long long)s,
44573 bdn);
44574- } else if (atomic_read(&rdev->read_errors)
44575+ } else if (atomic_read_unchecked(&rdev->read_errors)
44576 > conf->max_nr_stripes)
44577 printk(KERN_WARNING
44578 "md/raid:%s: Too many read errors, failing device %s.\n",
44579diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
44580index 983db75..ef9248c 100644
44581--- a/drivers/media/dvb-core/dvbdev.c
44582+++ b/drivers/media/dvb-core/dvbdev.c
44583@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
44584 const struct dvb_device *template, void *priv, int type)
44585 {
44586 struct dvb_device *dvbdev;
44587- struct file_operations *dvbdevfops;
44588+ file_operations_no_const *dvbdevfops;
44589 struct device *clsdev;
44590 int minor;
44591 int id;
44592diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
44593index 6ad22b6..6e90e2a 100644
44594--- a/drivers/media/dvb-frontends/af9033.h
44595+++ b/drivers/media/dvb-frontends/af9033.h
44596@@ -96,6 +96,6 @@ struct af9033_ops {
44597 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
44598 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
44599 int onoff);
44600-};
44601+} __no_const;
44602
44603 #endif /* AF9033_H */
44604diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
44605index 9b6c3bb..baeb5c7 100644
44606--- a/drivers/media/dvb-frontends/dib3000.h
44607+++ b/drivers/media/dvb-frontends/dib3000.h
44608@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
44609 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
44610 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
44611 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
44612-};
44613+} __no_const;
44614
44615 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
44616 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
44617diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
44618index 1fea0e9..321ce8f 100644
44619--- a/drivers/media/dvb-frontends/dib7000p.h
44620+++ b/drivers/media/dvb-frontends/dib7000p.h
44621@@ -64,7 +64,7 @@ struct dib7000p_ops {
44622 int (*get_adc_power)(struct dvb_frontend *fe);
44623 int (*slave_reset)(struct dvb_frontend *fe);
44624 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
44625-};
44626+} __no_const;
44627
44628 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
44629 void *dib7000p_attach(struct dib7000p_ops *ops);
44630diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
44631index 84cc103..5780c54 100644
44632--- a/drivers/media/dvb-frontends/dib8000.h
44633+++ b/drivers/media/dvb-frontends/dib8000.h
44634@@ -61,7 +61,7 @@ struct dib8000_ops {
44635 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
44636 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
44637 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
44638-};
44639+} __no_const;
44640
44641 #if IS_ENABLED(CONFIG_DVB_DIB8000)
44642 void *dib8000_attach(struct dib8000_ops *ops);
44643diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
44644index 860c98fc..497fa25 100644
44645--- a/drivers/media/pci/cx88/cx88-video.c
44646+++ b/drivers/media/pci/cx88/cx88-video.c
44647@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
44648
44649 /* ------------------------------------------------------------------ */
44650
44651-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44652-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44653-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44654+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44655+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44656+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44657
44658 module_param_array(video_nr, int, NULL, 0444);
44659 module_param_array(vbi_nr, int, NULL, 0444);
44660diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
44661index 802642d..5534900 100644
44662--- a/drivers/media/pci/ivtv/ivtv-driver.c
44663+++ b/drivers/media/pci/ivtv/ivtv-driver.c
44664@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
44665 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
44666
44667 /* ivtv instance counter */
44668-static atomic_t ivtv_instance = ATOMIC_INIT(0);
44669+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
44670
44671 /* Parameter declarations */
44672 static int cardtype[IVTV_MAX_CARDS];
44673diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
44674index 8cbe6b4..ea3601c 100644
44675--- a/drivers/media/pci/solo6x10/solo6x10-core.c
44676+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
44677@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
44678
44679 static int solo_sysfs_init(struct solo_dev *solo_dev)
44680 {
44681- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
44682+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
44683 struct device *dev = &solo_dev->dev;
44684 const char *driver;
44685 int i;
44686diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
44687index c7141f2..5301fec 100644
44688--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
44689+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
44690@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
44691
44692 int solo_g723_init(struct solo_dev *solo_dev)
44693 {
44694- static struct snd_device_ops ops = { NULL };
44695+ static struct snd_device_ops ops = { };
44696 struct snd_card *card;
44697 struct snd_kcontrol_new kctl;
44698 char name[32];
44699diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44700index 8c84846..27b4f83 100644
44701--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
44702+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44703@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
44704
44705 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
44706 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
44707- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
44708+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
44709 if (p2m_id < 0)
44710 p2m_id = -p2m_id;
44711 }
44712diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
44713index bd8edfa..e82ed85 100644
44714--- a/drivers/media/pci/solo6x10/solo6x10.h
44715+++ b/drivers/media/pci/solo6x10/solo6x10.h
44716@@ -220,7 +220,7 @@ struct solo_dev {
44717
44718 /* P2M DMA Engine */
44719 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
44720- atomic_t p2m_count;
44721+ atomic_unchecked_t p2m_count;
44722 int p2m_jiffies;
44723 unsigned int p2m_timeouts;
44724
44725diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
44726index c135165..dc69499 100644
44727--- a/drivers/media/pci/tw68/tw68-core.c
44728+++ b/drivers/media/pci/tw68/tw68-core.c
44729@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
44730 module_param_array(card, int, NULL, 0444);
44731 MODULE_PARM_DESC(card, "card type");
44732
44733-static atomic_t tw68_instance = ATOMIC_INIT(0);
44734+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
44735
44736 /* ------------------------------------------------------------------ */
44737
44738diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
44739index ba2d8f9..1566684 100644
44740--- a/drivers/media/platform/omap/omap_vout.c
44741+++ b/drivers/media/platform/omap/omap_vout.c
44742@@ -63,7 +63,6 @@ enum omap_vout_channels {
44743 OMAP_VIDEO2,
44744 };
44745
44746-static struct videobuf_queue_ops video_vbq_ops;
44747 /* Variables configurable through module params*/
44748 static u32 video1_numbuffers = 3;
44749 static u32 video2_numbuffers = 3;
44750@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
44751 {
44752 struct videobuf_queue *q;
44753 struct omap_vout_device *vout = NULL;
44754+ static struct videobuf_queue_ops video_vbq_ops = {
44755+ .buf_setup = omap_vout_buffer_setup,
44756+ .buf_prepare = omap_vout_buffer_prepare,
44757+ .buf_release = omap_vout_buffer_release,
44758+ .buf_queue = omap_vout_buffer_queue,
44759+ };
44760
44761 vout = video_drvdata(file);
44762 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
44763@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
44764 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
44765
44766 q = &vout->vbq;
44767- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
44768- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
44769- video_vbq_ops.buf_release = omap_vout_buffer_release;
44770- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
44771 spin_lock_init(&vout->vbq_lock);
44772
44773 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
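
The omap_vout change removes the only reason video_vbq_ops had to stay writable: instead of a file-scope struct whose members are assigned on every open(), the ops table becomes a function-local static with a designated initializer, fully populated at compile time and therefore eligible for read-only placement by the constify plugin. Reduced to its shape (all names here are illustrative):

    struct vbq_ops { int (*buf_setup)(void); };

    static int my_buf_setup(void) { return 0; }

    /* Before: writable file-scope table, patched on every open(). */
    static struct vbq_ops global_ops;
    static void open_before(void) { global_ops.buf_setup = my_buf_setup; }

    /* After: initialized once at compile time; never stored to again,
     * so the table can live in read-only memory. */
    static const struct vbq_ops *open_after(void)
    {
        static const struct vbq_ops ops = { .buf_setup = my_buf_setup };
        return &ops;
    }
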
44774diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
44775index fb2acc5..a2fcbdc4 100644
44776--- a/drivers/media/platform/s5p-tv/mixer.h
44777+++ b/drivers/media/platform/s5p-tv/mixer.h
44778@@ -156,7 +156,7 @@ struct mxr_layer {
44779 /** layer index (unique identifier) */
44780 int idx;
44781 /** callbacks for layer methods */
44782- struct mxr_layer_ops ops;
44783+ struct mxr_layer_ops *ops;
44784 /** format array */
44785 const struct mxr_format **fmt_array;
44786 /** size of format array */
44787diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44788index 74344c7..a39e70e 100644
44789--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44790+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44791@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
44792 {
44793 struct mxr_layer *layer;
44794 int ret;
44795- struct mxr_layer_ops ops = {
44796+ static struct mxr_layer_ops ops = {
44797 .release = mxr_graph_layer_release,
44798 .buffer_set = mxr_graph_buffer_set,
44799 .stream_set = mxr_graph_stream_set,
44800diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
44801index b713403..53cb5ad 100644
44802--- a/drivers/media/platform/s5p-tv/mixer_reg.c
44803+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
44804@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
44805 layer->update_buf = next;
44806 }
44807
44808- layer->ops.buffer_set(layer, layer->update_buf);
44809+ layer->ops->buffer_set(layer, layer->update_buf);
44810
44811 if (done && done != layer->shadow_buf)
44812 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
44813diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
44814index b4d2696..91df48e 100644
44815--- a/drivers/media/platform/s5p-tv/mixer_video.c
44816+++ b/drivers/media/platform/s5p-tv/mixer_video.c
44817@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
44818 layer->geo.src.height = layer->geo.src.full_height;
44819
44820 mxr_geometry_dump(mdev, &layer->geo);
44821- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44822+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44823 mxr_geometry_dump(mdev, &layer->geo);
44824 }
44825
44826@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
44827 layer->geo.dst.full_width = mbus_fmt.width;
44828 layer->geo.dst.full_height = mbus_fmt.height;
44829 layer->geo.dst.field = mbus_fmt.field;
44830- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44831+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44832
44833 mxr_geometry_dump(mdev, &layer->geo);
44834 }
44835@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
44836 /* set source size to highest accepted value */
44837 geo->src.full_width = max(geo->dst.full_width, pix->width);
44838 geo->src.full_height = max(geo->dst.full_height, pix->height);
44839- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44840+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44841 mxr_geometry_dump(mdev, &layer->geo);
44842 /* set cropping to total visible screen */
44843 geo->src.width = pix->width;
44844@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
44845 geo->src.x_offset = 0;
44846 geo->src.y_offset = 0;
44847 /* assure consistency of geometry */
44848- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44849+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44850 mxr_geometry_dump(mdev, &layer->geo);
44851 /* set full size to lowest possible value */
44852 geo->src.full_width = 0;
44853 geo->src.full_height = 0;
44854- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44855+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44856 mxr_geometry_dump(mdev, &layer->geo);
44857
44858 /* returning results */
44859@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
44860 target->width = s->r.width;
44861 target->height = s->r.height;
44862
44863- layer->ops.fix_geometry(layer, stage, s->flags);
44864+ layer->ops->fix_geometry(layer, stage, s->flags);
44865
44866 /* retrieve update selection rectangle */
44867 res.left = target->x_offset;
44868@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
44869 mxr_output_get(mdev);
44870
44871 mxr_layer_update_output(layer);
44872- layer->ops.format_set(layer);
44873+ layer->ops->format_set(layer);
44874 /* enabling layer in hardware */
44875 spin_lock_irqsave(&layer->enq_slock, flags);
44876 layer->state = MXR_LAYER_STREAMING;
44877 spin_unlock_irqrestore(&layer->enq_slock, flags);
44878
44879- layer->ops.stream_set(layer, MXR_ENABLE);
44880+ layer->ops->stream_set(layer, MXR_ENABLE);
44881 mxr_streamer_get(mdev);
44882
44883 return 0;
44884@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
44885 spin_unlock_irqrestore(&layer->enq_slock, flags);
44886
44887 /* disabling layer in hardware */
44888- layer->ops.stream_set(layer, MXR_DISABLE);
44889+ layer->ops->stream_set(layer, MXR_DISABLE);
44890 /* remove one streamer */
44891 mxr_streamer_put(mdev);
44892 /* allow changes in output configuration */
44893@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
44894
44895 void mxr_layer_release(struct mxr_layer *layer)
44896 {
44897- if (layer->ops.release)
44898- layer->ops.release(layer);
44899+ if (layer->ops->release)
44900+ layer->ops->release(layer);
44901 }
44902
44903 void mxr_base_layer_release(struct mxr_layer *layer)
44904@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
44905
44906 layer->mdev = mdev;
44907 layer->idx = idx;
44908- layer->ops = *ops;
44909+ layer->ops = ops;
44910
44911 spin_lock_init(&layer->enq_slock);
44912 INIT_LIST_HEAD(&layer->enq_list);
44913diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
44914index c9388c4..ce71ece 100644
44915--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
44916+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
44917@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
44918 {
44919 struct mxr_layer *layer;
44920 int ret;
44921- struct mxr_layer_ops ops = {
44922+ static struct mxr_layer_ops ops = {
44923 .release = mxr_vp_layer_release,
44924 .buffer_set = mxr_vp_buffer_set,
44925 .stream_set = mxr_vp_stream_set,
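
The s5p-tv mixer is the same idea taken one step further: mxr_layer previously embedded a by-value copy of mxr_layer_ops (layer->ops = *ops), which forces a writable duplicate inside every layer object. Storing a pointer to one static table instead lets all layers share a single, potentially read-only instance; the cost is that every call site changes from layer->ops.f(...) to layer->ops->f(...), which is what the bulk of these hunks does. In miniature:

    struct layer_ops { void (*stream_set)(int on); };

    struct layer_before { struct layer_ops ops;        /* writable copy per layer */ };
    struct layer_after  { const struct layer_ops *ops; /* one shared r/o table    */ };
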
44926diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
44927index 82affae..42833ec 100644
44928--- a/drivers/media/radio/radio-cadet.c
44929+++ b/drivers/media/radio/radio-cadet.c
44930@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
44931 unsigned char readbuf[RDS_BUFFER];
44932 int i = 0;
44933
44934+ if (count > RDS_BUFFER)
44935+ return -EFAULT;
44936 mutex_lock(&dev->lock);
44937 if (dev->rdsstat == 0)
44938 cadet_start_rds(dev);
44939@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
44940 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
44941 mutex_unlock(&dev->lock);
44942
44943- if (i && copy_to_user(data, readbuf, i))
44944- return -EFAULT;
44945+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
44946+ i = -EFAULT;
44947+
44948 return i;
44949 }
44950
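The radio-cadet change guards a read() path where the user-supplied count previously flowed toward a fixed 256-byte on-stack buffer: an oversized request is now rejected up front, the index is re-checked against sizeof(readbuf) before the copy-out, and a failed copy is folded into the return value instead of taking a separate early return. The essential shape (a hypothetical reduction; memcpy stands in for the locked ring drain and for copy_to_user(), -14 for -EFAULT):

    #include <string.h>

    #define RDS_BUFFER 256

    static long rds_read_sketch(char *dst, size_t count,
                                const char *ring, size_t avail)
    {
        char readbuf[RDS_BUFFER];
        size_t i;

        if (count > RDS_BUFFER)
            return -14;                     /* reject oversized requests up front */

        i = count < avail ? count : avail;  /* drain at most what is buffered */
        memcpy(readbuf, ring, i);

        if (i > sizeof(readbuf))            /* defensive re-check, as in the patch */
            return -14;
        memcpy(dst, readbuf, i);            /* copy_to_user() in the driver */
        return (long)i;
    }
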
44951diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
44952index 5236035..c622c74 100644
44953--- a/drivers/media/radio/radio-maxiradio.c
44954+++ b/drivers/media/radio/radio-maxiradio.c
44955@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
44956 /* TEA5757 pin mappings */
44957 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
44958
44959-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
44960+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
44961
44962 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
44963 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
44964diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
44965index 050b3bb..79f62b9 100644
44966--- a/drivers/media/radio/radio-shark.c
44967+++ b/drivers/media/radio/radio-shark.c
44968@@ -79,7 +79,7 @@ struct shark_device {
44969 u32 last_val;
44970 };
44971
44972-static atomic_t shark_instance = ATOMIC_INIT(0);
44973+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
44974
44975 static void shark_write_val(struct snd_tea575x *tea, u32 val)
44976 {
44977diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
44978index 8654e0d..0608a64 100644
44979--- a/drivers/media/radio/radio-shark2.c
44980+++ b/drivers/media/radio/radio-shark2.c
44981@@ -74,7 +74,7 @@ struct shark_device {
44982 u8 *transfer_buffer;
44983 };
44984
44985-static atomic_t shark_instance = ATOMIC_INIT(0);
44986+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
44987
44988 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
44989 {
44990diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
44991index dccf586..d5db411 100644
44992--- a/drivers/media/radio/radio-si476x.c
44993+++ b/drivers/media/radio/radio-si476x.c
44994@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
44995 struct si476x_radio *radio;
44996 struct v4l2_ctrl *ctrl;
44997
44998- static atomic_t instance = ATOMIC_INIT(0);
44999+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
45000
45001 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
45002 if (!radio)
45003diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
45004index 704397f..4d05977 100644
45005--- a/drivers/media/radio/wl128x/fmdrv_common.c
45006+++ b/drivers/media/radio/wl128x/fmdrv_common.c
45007@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
45008 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
45009
45010 /* Radio Nr */
45011-static u32 radio_nr = -1;
45012+static int radio_nr = -1;
45013 module_param(radio_nr, int, 0444);
45014 MODULE_PARM_DESC(radio_nr, "Radio Nr");
45015
45016diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45017index 9fd1527..8927230 100644
45018--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
45019+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45020@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
45021
45022 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
45023 {
45024- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
45025- char result[64];
45026- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
45027- sizeof(result), 0);
45028+ char *buf;
45029+ char *result;
45030+ int retval;
45031+
45032+ buf = kmalloc(2, GFP_KERNEL);
45033+ if (buf == NULL)
45034+ return -ENOMEM;
45035+ result = kmalloc(64, GFP_KERNEL);
45036+ if (result == NULL) {
45037+ kfree(buf);
45038+ return -ENOMEM;
45039+ }
45040+
45041+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
45042+ buf[1] = enable ? 1 : 0;
45043+
45044+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
45045+
45046+ kfree(buf);
45047+ kfree(result);
45048+ return retval;
45049 }
45050
45051 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
45052 {
45053- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
45054- char state[3];
45055- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
45056+ char *buf;
45057+ char *state;
45058+ int retval;
45059+
45060+ buf = kmalloc(2, GFP_KERNEL);
45061+ if (buf == NULL)
45062+ return -ENOMEM;
45063+ state = kmalloc(3, GFP_KERNEL);
45064+ if (state == NULL) {
45065+ kfree(buf);
45066+ return -ENOMEM;
45067+ }
45068+
45069+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
45070+	buf[1] = enable ? 0 : 1;
45071+
45072+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
45073+
45074+ kfree(buf);
45075+ kfree(state);
45076+ return retval;
45077 }
45078
45079 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45080 {
45081- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
45082- char state[3];
45083+ char *query;
45084+ char *state;
45085 int ret;
45086+ query = kmalloc(1, GFP_KERNEL);
45087+ if (query == NULL)
45088+ return -ENOMEM;
45089+ state = kmalloc(3, GFP_KERNEL);
45090+ if (state == NULL) {
45091+ kfree(query);
45092+ return -ENOMEM;
45093+ }
45094+
45095+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
45096
45097 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
45098
45099- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
45100- sizeof(state), 0);
45101+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
45102 if (ret < 0) {
45103 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
45104 "state info\n");
45105@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45106
45107 /* Copy this pointer as we are gonna need it in the release phase */
45108 cinergyt2_usb_device = adap->dev;
45109-
45110+ kfree(query);
45111+ kfree(state);
45112 return 0;
45113 }
45114
45115@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
45116 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45117 {
45118 struct cinergyt2_state *st = d->priv;
45119- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
45120+ u8 *key, *cmd;
45121 int i;
45122
45123+ cmd = kmalloc(1, GFP_KERNEL);
45124+ if (cmd == NULL)
45125+		return -ENOMEM;
45126+ key = kzalloc(5, GFP_KERNEL);
45127+ if (key == NULL) {
45128+ kfree(cmd);
45129+		return -ENOMEM;
45130+ }
45131+
45132+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
45133+
45134 *state = REMOTE_NO_KEY_PRESSED;
45135
45136- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
45137+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
45138 if (key[4] == 0xff) {
45139 /* key repeat */
45140 st->rc_counter++;
45141@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45142 *event = d->last_event;
45143 deb_rc("repeat key, event %x\n",
45144 *event);
45145- return 0;
45146+ goto out;
45147 }
45148 }
45149 deb_rc("repeated key (non repeatable)\n");
45150 }
45151- return 0;
45152+ goto out;
45153 }
45154
45155 /* hack to pass checksum on the custom field */
45156@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45157
45158 deb_rc("key: %*ph\n", 5, key);
45159 }
45160+out:
45161+ kfree(cmd);
45162+ kfree(key);
45163 return 0;
45164 }
45165
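The cinergyT2 conversions (and the dvb-usb firmware loader and technisat changes below) all target the same defect class: these command/result buffers are handed to dvb_usb_generic_rw() and usb_bulk_msg(), i.e. they become USB transfer buffers, and transfer buffers must be DMA-capable. Stack memory is not a safe source for that, and it fails outright once kernel stacks are vmalloc-backed, as with this patch's KSTACKOVERFLOW feature. Each function therefore moves its buffers to kmalloc()/kfree() and turns early returns into goto out paths that release them. The recurring shape, with userspace stand-ins (usb_xfer and send_cmd_sketch are hypothetical names) so the sketch compiles on its own:

    #include <stdlib.h>

    typedef unsigned char u8;

    /* Stand-ins for kernel/driver API, only to make this self-contained. */
    #define GFP_KERNEL 0
    static void *kmalloc(size_t n, int f) { (void)f; return malloc(n); }
    static void  kfree(void *p)           { free(p); }
    static int   usb_xfer(u8 *cmd, size_t clen, u8 *res, size_t rlen)
    { (void)cmd; (void)clen; (void)res; (void)rlen; return 0; }

    static int send_cmd_sketch(u8 opcode)
    {
        u8 *cmd, *result;
        int ret;

        cmd = kmalloc(1, GFP_KERNEL);       /* heap, hence DMA-safe in-kernel */
        if (cmd == NULL)
            return -12;                     /* -ENOMEM */
        result = kmalloc(64, GFP_KERNEL);
        if (result == NULL) {
            kfree(cmd);
            return -12;
        }

        cmd[0] = opcode;
        ret = usb_xfer(cmd, 1, result, 64);
        if (ret < 0)
            goto out;                       /* no early return that leaks */

        /* ... consume result ... */
    out:
        kfree(cmd);
        kfree(result);
        return ret;
    }
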
45166diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45167index c890fe4..f9b2ae6 100644
45168--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45169+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45170@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
45171 fe_status_t *status)
45172 {
45173 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45174- struct dvbt_get_status_msg result;
45175- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45176+ struct dvbt_get_status_msg *result;
45177+ u8 *cmd;
45178 int ret;
45179
45180- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
45181- sizeof(result), 0);
45182+ cmd = kmalloc(1, GFP_KERNEL);
45183+ if (cmd == NULL)
45184+ return -ENOMEM;
45185+ result = kmalloc(sizeof(*result), GFP_KERNEL);
45186+ if (result == NULL) {
45187+ kfree(cmd);
45188+ return -ENOMEM;
45189+ }
45190+
45191+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45192+
45193+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
45194+ sizeof(*result), 0);
45195 if (ret < 0)
45196- return ret;
45197+ goto out;
45198
45199 *status = 0;
45200
45201- if (0xffff - le16_to_cpu(result.gain) > 30)
45202+ if (0xffff - le16_to_cpu(result->gain) > 30)
45203 *status |= FE_HAS_SIGNAL;
45204- if (result.lock_bits & (1 << 6))
45205+ if (result->lock_bits & (1 << 6))
45206 *status |= FE_HAS_LOCK;
45207- if (result.lock_bits & (1 << 5))
45208+ if (result->lock_bits & (1 << 5))
45209 *status |= FE_HAS_SYNC;
45210- if (result.lock_bits & (1 << 4))
45211+ if (result->lock_bits & (1 << 4))
45212 *status |= FE_HAS_CARRIER;
45213- if (result.lock_bits & (1 << 1))
45214+ if (result->lock_bits & (1 << 1))
45215 *status |= FE_HAS_VITERBI;
45216
45217 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
45218 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
45219 *status &= ~FE_HAS_LOCK;
45220
45221- return 0;
45222+out:
45223+ kfree(cmd);
45224+ kfree(result);
45225+ return ret;
45226 }
45227
45228 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
45229 {
45230 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45231- struct dvbt_get_status_msg status;
45232- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45233+ struct dvbt_get_status_msg *status;
45234+ char *cmd;
45235 int ret;
45236
45237- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45238- sizeof(status), 0);
45239+ cmd = kmalloc(1, GFP_KERNEL);
45240+ if (cmd == NULL)
45241+ return -ENOMEM;
45242+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45243+ if (status == NULL) {
45244+ kfree(cmd);
45245+ return -ENOMEM;
45246+ }
45247+
45248+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45249+
45250+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45251+ sizeof(*status), 0);
45252 if (ret < 0)
45253- return ret;
45254+ goto out;
45255
45256- *ber = le32_to_cpu(status.viterbi_error_rate);
45257+ *ber = le32_to_cpu(status->viterbi_error_rate);
45258+out:
45259+ kfree(cmd);
45260+ kfree(status);
45261 return 0;
45262 }
45263
45264 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
45265 {
45266 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45267- struct dvbt_get_status_msg status;
45268- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45269+ struct dvbt_get_status_msg *status;
45270+ u8 *cmd;
45271 int ret;
45272
45273- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
45274- sizeof(status), 0);
45275+ cmd = kmalloc(1, GFP_KERNEL);
45276+ if (cmd == NULL)
45277+ return -ENOMEM;
45278+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45279+ if (status == NULL) {
45280+ kfree(cmd);
45281+ return -ENOMEM;
45282+ }
45283+
45284+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45285+
45286+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
45287+ sizeof(*status), 0);
45288 if (ret < 0) {
45289 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
45290 ret);
45291- return ret;
45292+ goto out;
45293 }
45294- *unc = le32_to_cpu(status.uncorrected_block_count);
45295- return 0;
45296+ *unc = le32_to_cpu(status->uncorrected_block_count);
45297+
45298+out:
45299+ kfree(cmd);
45300+ kfree(status);
45301+ return ret;
45302 }
45303
45304 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
45305 u16 *strength)
45306 {
45307 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45308- struct dvbt_get_status_msg status;
45309- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45310+ struct dvbt_get_status_msg *status;
45311+ char *cmd;
45312 int ret;
45313
45314- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45315- sizeof(status), 0);
45316+ cmd = kmalloc(1, GFP_KERNEL);
45317+ if (cmd == NULL)
45318+ return -ENOMEM;
45319+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45320+ if (status == NULL) {
45321+ kfree(cmd);
45322+ return -ENOMEM;
45323+ }
45324+
45325+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45326+
45327+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45328+ sizeof(*status), 0);
45329 if (ret < 0) {
45330 err("cinergyt2_fe_read_signal_strength() Failed!"
45331 " (Error=%d)\n", ret);
45332- return ret;
45333+ goto out;
45334 }
45335- *strength = (0xffff - le16_to_cpu(status.gain));
45336+ *strength = (0xffff - le16_to_cpu(status->gain));
45337+
45338+out:
45339+ kfree(cmd);
45340+ kfree(status);
45341 return 0;
45342 }
45343
45344 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
45345 {
45346 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45347- struct dvbt_get_status_msg status;
45348- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45349+ struct dvbt_get_status_msg *status;
45350+ char *cmd;
45351 int ret;
45352
45353- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45354- sizeof(status), 0);
45355+ cmd = kmalloc(1, GFP_KERNEL);
45356+ if (cmd == NULL)
45357+ return -ENOMEM;
45358+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45359+ if (status == NULL) {
45360+ kfree(cmd);
45361+ return -ENOMEM;
45362+ }
45363+
45364+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45365+
45366+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45367+ sizeof(*status), 0);
45368 if (ret < 0) {
45369 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
45370- return ret;
45371+ goto out;
45372 }
45373- *snr = (status.snr << 8) | status.snr;
45374- return 0;
45375+ *snr = (status->snr << 8) | status->snr;
45376+
45377+out:
45378+ kfree(cmd);
45379+ kfree(status);
45380+ return ret;
45381 }
45382
45383 static int cinergyt2_fe_init(struct dvb_frontend *fe)
45384@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
45385 {
45386 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
45387 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45388- struct dvbt_set_parameters_msg param;
45389- char result[2];
45390+ struct dvbt_set_parameters_msg *param;
45391+ char *result;
45392 int err;
45393
45394- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45395- param.tps = cpu_to_le16(compute_tps(fep));
45396- param.freq = cpu_to_le32(fep->frequency / 1000);
45397- param.flags = 0;
45398+ result = kmalloc(2, GFP_KERNEL);
45399+ if (result == NULL)
45400+ return -ENOMEM;
45401+ param = kmalloc(sizeof(*param), GFP_KERNEL);
45402+ if (param == NULL) {
45403+ kfree(result);
45404+ return -ENOMEM;
45405+ }
45406+
45407+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45408+ param->tps = cpu_to_le16(compute_tps(fep));
45409+ param->freq = cpu_to_le32(fep->frequency / 1000);
45410+ param->flags = 0;
45411
45412 switch (fep->bandwidth_hz) {
45413 default:
45414 case 8000000:
45415- param.bandwidth = 8;
45416+ param->bandwidth = 8;
45417 break;
45418 case 7000000:
45419- param.bandwidth = 7;
45420+ param->bandwidth = 7;
45421 break;
45422 case 6000000:
45423- param.bandwidth = 6;
45424+ param->bandwidth = 6;
45425 break;
45426 }
45427
45428 err = dvb_usb_generic_rw(state->d,
45429- (char *)&param, sizeof(param),
45430- result, sizeof(result), 0);
45431+ (char *)param, sizeof(*param),
45432+ result, 2, 0);
45433 if (err < 0)
45434 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
45435
45436- return (err < 0) ? err : 0;
45437+ kfree(result);
45438+ kfree(param);
45439+ return err;
45440 }
45441
45442 static void cinergyt2_fe_release(struct dvb_frontend *fe)
45443diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45444index 733a7ff..f8b52e3 100644
45445--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45446+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45447@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
45448
45449 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
45450 {
45451- struct hexline hx;
45452- u8 reset;
45453+ struct hexline *hx;
45454+ u8 *reset;
45455 int ret,pos=0;
45456
45457+ reset = kmalloc(1, GFP_KERNEL);
45458+ if (reset == NULL)
45459+ return -ENOMEM;
45460+
45461+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
45462+ if (hx == NULL) {
45463+ kfree(reset);
45464+ return -ENOMEM;
45465+ }
45466+
45467 /* stop the CPU */
45468- reset = 1;
45469- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
45470+ reset[0] = 1;
45471+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
45472 err("could not stop the USB controller CPU.");
45473
45474- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
45475- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
45476- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
45477+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
45478+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
45479+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
45480
45481- if (ret != hx.len) {
45482+ if (ret != hx->len) {
45483 err("error while transferring firmware "
45484 "(transferred size: %d, block size: %d)",
45485- ret,hx.len);
45486+ ret,hx->len);
45487 ret = -EINVAL;
45488 break;
45489 }
45490 }
45491 if (ret < 0) {
45492 err("firmware download failed at %d with %d",pos,ret);
45493+ kfree(reset);
45494+ kfree(hx);
45495 return ret;
45496 }
45497
45498 if (ret == 0) {
45499 /* restart the CPU */
45500- reset = 0;
45501- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
45502+ reset[0] = 0;
45503+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
45504 err("could not restart the USB controller CPU.");
45505 ret = -EINVAL;
45506 }
45507 } else
45508 ret = -EIO;
45509
45510+ kfree(reset);
45511+ kfree(hx);
45512+
45513 return ret;
45514 }
45515 EXPORT_SYMBOL(usb_cypress_load_firmware);
45516diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
45517index 1a3df10..57997a5 100644
45518--- a/drivers/media/usb/dvb-usb/dw2102.c
45519+++ b/drivers/media/usb/dvb-usb/dw2102.c
45520@@ -118,7 +118,7 @@ struct su3000_state {
45521
45522 struct s6x0_state {
45523 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
45524-};
45525+} __no_const;
45526
45527 /* debug */
45528 static int dvb_usb_dw2102_debug;
45529diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
45530index 5801ae7..83f71fa 100644
45531--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
45532+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
45533@@ -87,8 +87,11 @@ struct technisat_usb2_state {
45534 static int technisat_usb2_i2c_access(struct usb_device *udev,
45535 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
45536 {
45537- u8 b[64];
45538- int ret, actual_length;
45539+ u8 *b = kmalloc(64, GFP_KERNEL);
45540+ int ret, actual_length, error = 0;
45541+
45542+ if (b == NULL)
45543+ return -ENOMEM;
45544
45545 deb_i2c("i2c-access: %02x, tx: ", device_addr);
45546 debug_dump(tx, txlen, deb_i2c);
45547@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45548
45549 if (ret < 0) {
45550 err("i2c-error: out failed %02x = %d", device_addr, ret);
45551- return -ENODEV;
45552+ error = -ENODEV;
45553+ goto out;
45554 }
45555
45556 ret = usb_bulk_msg(udev,
45557@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45558 b, 64, &actual_length, 1000);
45559 if (ret < 0) {
45560 err("i2c-error: in failed %02x = %d", device_addr, ret);
45561- return -ENODEV;
45562+ error = -ENODEV;
45563+ goto out;
45564 }
45565
45566 if (b[0] != I2C_STATUS_OK) {
45567@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45568 /* handle tuner-i2c-nak */
45569 if (!(b[0] == I2C_STATUS_NAK &&
45570 device_addr == 0x60
45571- /* && device_is_technisat_usb2 */))
45572- return -ENODEV;
45573+ /* && device_is_technisat_usb2 */)) {
45574+ error = -ENODEV;
45575+ goto out;
45576+ }
45577 }
45578
45579 deb_i2c("status: %d, ", b[0]);
45580@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45581
45582 deb_i2c("\n");
45583
45584- return 0;
45585+out:
45586+ kfree(b);
45587+ return error;
45588 }
45589
45590 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
45591@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45592 {
45593 int ret;
45594
45595- u8 led[8] = {
45596- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45597- 0
45598- };
45599+ u8 *led = kzalloc(8, GFP_KERNEL);
45600+
45601+ if (led == NULL)
45602+ return -ENOMEM;
45603
45604 if (disable_led_control && state != TECH_LED_OFF)
45605 return 0;
45606
45607+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
45608+
45609 switch (state) {
45610 case TECH_LED_ON:
45611 led[1] = 0x82;
45612@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45613 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45614 USB_TYPE_VENDOR | USB_DIR_OUT,
45615 0, 0,
45616- led, sizeof(led), 500);
45617+ led, 8, 500);
45618
45619 mutex_unlock(&d->i2c_mutex);
45620+
45621+ kfree(led);
45622+
45623 return ret;
45624 }
45625
45626 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
45627 {
45628 int ret;
45629- u8 b = 0;
45630+ u8 *b = kzalloc(1, GFP_KERNEL);
45631+
45632+ if (b == NULL)
45633+ return -ENOMEM;
45634
45635 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
45636 return -EAGAIN;
45637@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
45638 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
45639 USB_TYPE_VENDOR | USB_DIR_OUT,
45640 (red << 8) | green, 0,
45641- &b, 1, 500);
45642+ b, 1, 500);
45643
45644 mutex_unlock(&d->i2c_mutex);
45645
45646+ kfree(b);
45647+
45648 return ret;
45649 }
45650
45651@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45652 struct dvb_usb_device_description **desc, int *cold)
45653 {
45654 int ret;
45655- u8 version[3];
45656+ u8 *version = kmalloc(3, GFP_KERNEL);
45657
45658 /* first select the interface */
45659 if (usb_set_interface(udev, 0, 1) != 0)
45660@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45661
45662 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
45663
45664+ if (version == NULL)
45665+ return 0;
45666+
45667 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
45668 GET_VERSION_INFO_VENDOR_REQUEST,
45669 USB_TYPE_VENDOR | USB_DIR_IN,
45670 0, 0,
45671- version, sizeof(version), 500);
45672+ version, 3, 500);
45673
45674 if (ret < 0)
45675 *cold = 1;
45676@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45677 *cold = 0;
45678 }
45679
45680+ kfree(version);
45681+
45682 return 0;
45683 }
45684
45685@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
45686
45687 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45688 {
45689- u8 buf[62], *b;
45690+ u8 *buf, *b;
45691 int ret;
45692 struct ir_raw_event ev;
45693
45694+ buf = kmalloc(62, GFP_KERNEL);
45695+
45696+ if (buf == NULL)
45697+ return -ENOMEM;
45698+
45699 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
45700 buf[1] = 0x08;
45701 buf[2] = 0x8f;
45702@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45703 GET_IR_DATA_VENDOR_REQUEST,
45704 USB_TYPE_VENDOR | USB_DIR_IN,
45705 0x8080, 0,
45706- buf, sizeof(buf), 500);
45707+ buf, 62, 500);
45708
45709 unlock:
45710 mutex_unlock(&d->i2c_mutex);
45711
45712- if (ret < 0)
45713+ if (ret < 0) {
45714+ kfree(buf);
45715 return ret;
45716+ }
45717
45718- if (ret == 1)
45719+ if (ret == 1) {
45720+ kfree(buf);
45721 return 0; /* no key pressed */
45722+ }
45723
45724 /* decoding */
45725 b = buf+1;
45726@@ -656,6 +689,8 @@ unlock:
45727
45728 ir_raw_event_handle(d->rc_dev);
45729
45730+ kfree(buf);
45731+
45732 return 1;
45733 }
45734
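
Rather than duplicating kfree() before each early return, the technisat hunks funnel every failure path through one out: label. The same single-exit idiom in isolation (step_one()/step_two() are invented placeholders):

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/usb.h>

	static int step_one(struct usb_device *udev, u8 *b);	/* hypothetical */
	static int step_two(struct usb_device *udev, u8 *b);	/* hypothetical */

	static int do_transfer(struct usb_device *udev)
	{
		u8 *b = kmalloc(64, GFP_KERNEL);
		int error = 0;

		if (b == NULL)
			return -ENOMEM;

		if (step_one(udev, b) < 0) {
			error = -ENODEV;
			goto out;
		}
		if (step_two(udev, b) < 0) {
			error = -ENODEV;
			goto out;
		}
	out:
		kfree(b);	/* single cleanup point for all paths */
		return error;
	}

Note that two early returns in the patched functions still appear to bypass the cleanup (the disable_led_control check in set_led and the mutex_lock_interruptible failure in set_led_timer both return before reaching kfree()); the goto pattern exists precisely to make such paths hard to miss.
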
45735diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45736index af63543..0436f20 100644
45737--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45738+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45739@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
45740 * by passing a very big num_planes value */
45741 uplane = compat_alloc_user_space(num_planes *
45742 sizeof(struct v4l2_plane));
45743- kp->m.planes = (__force struct v4l2_plane *)uplane;
45744+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
45745
45746 while (--num_planes >= 0) {
45747 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
45748@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
45749 if (num_planes == 0)
45750 return 0;
45751
45752- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
45753+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
45754 if (get_user(p, &up->m.planes))
45755 return -EFAULT;
45756 uplane32 = compat_ptr(p);
45757@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
45758 get_user(kp->flags, &up->flags) ||
45759 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
45760 return -EFAULT;
45761- kp->base = (__force void *)compat_ptr(tmp);
45762+ kp->base = (__force_kernel void *)compat_ptr(tmp);
45763 return 0;
45764 }
45765
45766@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
45767 n * sizeof(struct v4l2_ext_control32)))
45768 return -EFAULT;
45769 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
45770- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
45771+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
45772 while (--n >= 0) {
45773 u32 id;
45774
45775@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
45776 {
45777 struct v4l2_ext_control32 __user *ucontrols;
45778 struct v4l2_ext_control __user *kcontrols =
45779- (__force struct v4l2_ext_control __user *)kp->controls;
45780+ (struct v4l2_ext_control __force_user *)kp->controls;
45781 int n = kp->count;
45782 compat_caddr_t p;
45783
45784@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
45785 get_user(tmp, &up->edid) ||
45786 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
45787 return -EFAULT;
45788- kp->edid = (__force u8 *)compat_ptr(tmp);
45789+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
45790 return 0;
45791 }
45792
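
Throughout the v4l2 compat code, plain (__force T *) casts become (__force_kernel T *) or (T __force_user *). Under a sparse build, __force alone discards the address-space annotation; the grsecurity variants perform the same forced conversion while keeping the destination address space visible to the checker. A hedged sketch of the underlying mainline mechanism (the __force_user/__force_kernel spellings are patch-specific):

	#include <linux/compiler.h>

	struct fb_state {
		void *base;	/* declared as a kernel pointer, but
				 * actually stores a user-space address */
	};

	static void stash_user_ptr(struct fb_state *kp, void __user *uptr)
	{
		/* sparse requires an explicit __force cast to move a
		 * pointer between address spaces; grsecurity spells
		 * this (__force_kernel void *)uptr so the cast stays
		 * auditable. */
		kp->base = (__force void *)uptr;
	}
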
45793diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
45794index 015f92a..59e311e 100644
45795--- a/drivers/media/v4l2-core/v4l2-device.c
45796+++ b/drivers/media/v4l2-core/v4l2-device.c
45797@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
45798 EXPORT_SYMBOL_GPL(v4l2_device_put);
45799
45800 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
45801- atomic_t *instance)
45802+ atomic_unchecked_t *instance)
45803 {
45804- int num = atomic_inc_return(instance) - 1;
45805+ int num = atomic_inc_return_unchecked(instance) - 1;
45806 int len = strlen(basename);
45807
45808 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
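
atomic_unchecked_t is the patch's opt-out from the PaX REFCOUNT defense: atomic_t overflow is treated as a refcount attack and halts the offender, so counters that may legitimately wrap, like this device-naming instance counter, are switched to the _unchecked variants. A sketch under that assumption (the type and helpers are grsecurity-specific):

	/* A pure instance counter, not a reference count: wrapping is
	 * harmless, so it is exempted from overflow detection. */
	static atomic_unchecked_t demo_instance = ATOMIC_INIT(0);

	static int demo_next_instance(void)
	{
		return atomic_inc_return_unchecked(&demo_instance) - 1;
	}
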
45809diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
45810index faac2f4..e39dcd9 100644
45811--- a/drivers/media/v4l2-core/v4l2-ioctl.c
45812+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
45813@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
45814 struct file *file, void *fh, void *p);
45815 } u;
45816 void (*debug)(const void *arg, bool write_only);
45817-};
45818+} __do_const;
45819+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
45820
45821 /* This control needs a priority check */
45822 #define INFO_FL_PRIO (1 << 0)
45823@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
45824 struct video_device *vfd = video_devdata(file);
45825 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
45826 bool write_only = false;
45827- struct v4l2_ioctl_info default_info;
45828+ v4l2_ioctl_info_no_const default_info;
45829 const struct v4l2_ioctl_info *info;
45830 void *fh = file->private_data;
45831 struct v4l2_fh *vfh = NULL;
45832@@ -2422,7 +2423,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45833 ret = -EINVAL;
45834 break;
45835 }
45836- *user_ptr = (void __user *)buf->m.planes;
45837+ *user_ptr = (void __force_user *)buf->m.planes;
45838 *kernel_ptr = (void **)&buf->m.planes;
45839 *array_size = sizeof(struct v4l2_plane) * buf->length;
45840 ret = 1;
45841@@ -2439,7 +2440,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45842 ret = -EINVAL;
45843 break;
45844 }
45845- *user_ptr = (void __user *)edid->edid;
45846+ *user_ptr = (void __force_user *)edid->edid;
45847 *kernel_ptr = (void **)&edid->edid;
45848 *array_size = edid->blocks * 128;
45849 ret = 1;
45850@@ -2457,7 +2458,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45851 ret = -EINVAL;
45852 break;
45853 }
45854- *user_ptr = (void __user *)ctrls->controls;
45855+ *user_ptr = (void __force_user *)ctrls->controls;
45856 *kernel_ptr = (void **)&ctrls->controls;
45857 *array_size = sizeof(struct v4l2_ext_control)
45858 * ctrls->count;
45859@@ -2558,7 +2559,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
45860 }
45861
45862 if (has_array_args) {
45863- *kernel_ptr = (void __force *)user_ptr;
45864+ *kernel_ptr = (void __force_kernel *)user_ptr;
45865 if (copy_to_user(user_ptr, mbuf, array_size))
45866 err = -EFAULT;
45867 goto out_array_args;
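
Two annotations carry the v4l2-ioctl change: __do_const asks the PaX constify plugin to force the function-pointer table into read-only memory, and the __no_const typedef re-creates a writable flavour for the one stack-local copy that must be filled in at runtime. The shape of the pattern, assuming the plugin's attributes:

	#include <linux/fs.h>

	/* constify plugin: every instance of this ops-like struct is
	 * forced const and placed in .rodata ... */
	struct demo_ioctl_info {
		long (*func)(struct file *file, void *arg);
	} __do_const;

	/* ... except where a mutable variant is explicitly requested. */
	typedef struct demo_ioctl_info __no_const demo_ioctl_info_no_const;

	static long demo_dispatch(struct file *file, void *arg,
				  const struct demo_ioctl_info *table)
	{
		demo_ioctl_info_no_const info = *table;	/* writable copy */

		return info.func(file, arg);
	}
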
45868diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
45869index 24696f5..3637780 100644
45870--- a/drivers/memory/omap-gpmc.c
45871+++ b/drivers/memory/omap-gpmc.c
45872@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
45873 };
45874
45875 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
45876-static struct irq_chip gpmc_irq_chip;
45877 static int gpmc_irq_start;
45878
45879 static struct resource gpmc_mem_root;
45880@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
45881
45882 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
45883
45884+static struct irq_chip gpmc_irq_chip = {
45885+ .name = "gpmc",
45886+ .irq_startup = gpmc_irq_noop_ret,
45887+ .irq_enable = gpmc_irq_enable,
45888+ .irq_disable = gpmc_irq_disable,
45889+ .irq_shutdown = gpmc_irq_noop,
45890+ .irq_ack = gpmc_irq_noop,
45891+ .irq_mask = gpmc_irq_noop,
45892+ .irq_unmask = gpmc_irq_noop,
45893+};
45894+
45895 static int gpmc_setup_irq(void)
45896 {
45897 int i;
45898@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
45899 return gpmc_irq_start;
45900 }
45901
45902- gpmc_irq_chip.name = "gpmc";
45903- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
45904- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
45905- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
45906- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
45907- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
45908- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
45909- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
45910-
45911 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
45912 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
45913
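
The gpmc hunk pair converts an irq_chip that used to be assigned field-by-field at init time into a statically initialized object; once nothing writes to it after compile time, the constify plugin can place it in read-only memory. The generic idiom:

	#include <linux/irq.h>

	static void demo_irq_noop(struct irq_data *data) { }
	static unsigned int demo_irq_noop_ret(struct irq_data *data)
	{
		return 0;
	}

	/* Designated initializers instead of runtime assignment: the
	 * object never needs to be written once the kernel is built. */
	static struct irq_chip demo_irq_chip = {
		.name		= "demo",
		.irq_startup	= demo_irq_noop_ret,
		.irq_ack	= demo_irq_noop,
		.irq_mask	= demo_irq_noop,
		.irq_unmask	= demo_irq_noop,
	};
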
45914diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
45915index 187f836..679544b 100644
45916--- a/drivers/message/fusion/mptbase.c
45917+++ b/drivers/message/fusion/mptbase.c
45918@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
45919 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
45920 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
45921
45922+#ifdef CONFIG_GRKERNSEC_HIDESYM
45923+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
45924+#else
45925 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
45926 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
45927+#endif
45928+
45929 /*
45930 * Rounding UP to nearest 4-kB boundary here...
45931 */
45932@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
45933 ioc->facts.GlobalCredits);
45934
45935 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
45936+#ifdef CONFIG_GRKERNSEC_HIDESYM
45937+ NULL, NULL);
45938+#else
45939 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
45940+#endif
45941 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
45942 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
45943 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
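
CONFIG_GRKERNSEC_HIDESYM blanks the kernel and DMA addresses in the mpt proc output, since leaked kernel pointers defeat KASLR and assist heap attacks. Mainline later addressed the same problem with the %pK format, which is what the sketch below uses; HIDESYM is stricter and prints NULL unconditionally:

	#include <linux/seq_file.h>

	static void demo_show_frames(struct seq_file *m, void *req_frames)
	{
		/* %pK shows the real address only when kptr_restrict
		 * permits; other readers see a zeroed value. */
		seq_printf(m, "  RequestFrames @ %pK\n", req_frames);
	}
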
45944diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
45945index 5bdaae1..eced16f 100644
45946--- a/drivers/message/fusion/mptsas.c
45947+++ b/drivers/message/fusion/mptsas.c
45948@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
45949 return 0;
45950 }
45951
45952+static inline void
45953+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
45954+{
45955+ if (phy_info->port_details) {
45956+ phy_info->port_details->rphy = rphy;
45957+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
45958+ ioc->name, rphy));
45959+ }
45960+
45961+ if (rphy) {
45962+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
45963+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
45964+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
45965+ ioc->name, rphy, rphy->dev.release));
45966+ }
45967+}
45968+
45969 /* no mutex */
45970 static void
45971 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
45972@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
45973 return NULL;
45974 }
45975
45976-static inline void
45977-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
45978-{
45979- if (phy_info->port_details) {
45980- phy_info->port_details->rphy = rphy;
45981- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
45982- ioc->name, rphy));
45983- }
45984-
45985- if (rphy) {
45986- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
45987- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
45988- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
45989- ioc->name, rphy, rphy->dev.release));
45990- }
45991-}
45992-
45993 static inline struct sas_port *
45994 mptsas_get_port(struct mptsas_phyinfo *phy_info)
45995 {
45996diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
45997index b7d87cd..3fb36da 100644
45998--- a/drivers/message/i2o/i2o_proc.c
45999+++ b/drivers/message/i2o/i2o_proc.c
46000@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
46001 "Array Controller Device"
46002 };
46003
46004-static char *chtostr(char *tmp, u8 *chars, int n)
46005-{
46006- tmp[0] = 0;
46007- return strncat(tmp, (char *)chars, n);
46008-}
46009-
46010 static int i2o_report_query_status(struct seq_file *seq, int block_status,
46011 char *group)
46012 {
46013@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
46014 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
46015 {
46016 struct i2o_controller *c = (struct i2o_controller *)seq->private;
46017- static u32 work32[5];
46018- static u8 *work8 = (u8 *) work32;
46019- static u16 *work16 = (u16 *) work32;
46020+ u32 work32[5];
46021+ u8 *work8 = (u8 *) work32;
46022+ u16 *work16 = (u16 *) work32;
46023 int token;
46024 u32 hwcap;
46025
46026@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46027 } *result;
46028
46029 i2o_exec_execute_ddm_table ddm_table;
46030- char tmp[28 + 1];
46031
46032 result = kmalloc(sizeof(*result), GFP_KERNEL);
46033 if (!result)
46034@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46035
46036 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
46037 seq_printf(seq, "%-#8x", ddm_table.module_id);
46038- seq_printf(seq, "%-29s",
46039- chtostr(tmp, ddm_table.module_name_version, 28));
46040+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
46041 seq_printf(seq, "%9d ", ddm_table.data_size);
46042 seq_printf(seq, "%8d", ddm_table.code_size);
46043
46044@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46045
46046 i2o_driver_result_table *result;
46047 i2o_driver_store_table *dst;
46048- char tmp[28 + 1];
46049
46050 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
46051 if (result == NULL)
46052@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46053
46054 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
46055 seq_printf(seq, "%-#8x", dst->module_id);
46056- seq_printf(seq, "%-29s",
46057- chtostr(tmp, dst->module_name_version, 28));
46058- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
46059+ seq_printf(seq, "%-.28s", dst->module_name_version);
46060+ seq_printf(seq, "%-.8s", dst->date);
46061 seq_printf(seq, "%8d ", dst->module_size);
46062 seq_printf(seq, "%8d ", dst->mpb_size);
46063 seq_printf(seq, "0x%04x", dst->module_flags);
46064@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
46065 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46066 {
46067 struct i2o_device *d = (struct i2o_device *)seq->private;
46068- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46069+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46070 // == (allow) 512d bytes (max)
46071- static u16 *work16 = (u16 *) work32;
46072+ u16 *work16 = (u16 *) work32;
46073 int token;
46074- char tmp[16 + 1];
46075
46076 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
46077
46078@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46079 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
46080 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
46081 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
46082- seq_printf(seq, "Vendor info : %s\n",
46083- chtostr(tmp, (u8 *) (work32 + 2), 16));
46084- seq_printf(seq, "Product info : %s\n",
46085- chtostr(tmp, (u8 *) (work32 + 6), 16));
46086- seq_printf(seq, "Description : %s\n",
46087- chtostr(tmp, (u8 *) (work32 + 10), 16));
46088- seq_printf(seq, "Product rev. : %s\n",
46089- chtostr(tmp, (u8 *) (work32 + 14), 8));
46090+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
46091+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
46092+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
46093+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
46094
46095 seq_printf(seq, "Serial number : ");
46096 print_serial_number(seq, (u8 *) (work32 + 16),
46097@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46098 u8 pad[256]; // allow up to 256 byte (max) serial number
46099 } result;
46100
46101- char tmp[24 + 1];
46102-
46103 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
46104
46105 if (token < 0) {
46106@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46107 }
46108
46109 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
46110- seq_printf(seq, "Module name : %s\n",
46111- chtostr(tmp, result.module_name, 24));
46112- seq_printf(seq, "Module revision : %s\n",
46113- chtostr(tmp, result.module_rev, 8));
46114+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
46115+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
46116
46117 seq_printf(seq, "Serial number : ");
46118 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
46119@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46120 u8 instance_number[4];
46121 } result;
46122
46123- char tmp[64 + 1];
46124-
46125 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
46126
46127 if (token < 0) {
46128@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46129 return 0;
46130 }
46131
46132- seq_printf(seq, "Device name : %s\n",
46133- chtostr(tmp, result.device_name, 64));
46134- seq_printf(seq, "Service name : %s\n",
46135- chtostr(tmp, result.service_name, 64));
46136- seq_printf(seq, "Physical name : %s\n",
46137- chtostr(tmp, result.physical_location, 64));
46138- seq_printf(seq, "Instance number : %s\n",
46139- chtostr(tmp, result.instance_number, 4));
46140+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
46141+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
46142+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
46143+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
46144
46145 return 0;
46146 }
46147@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46148 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
46149 {
46150 struct i2o_device *d = (struct i2o_device *)seq->private;
46151- static u32 work32[12];
46152- static u16 *work16 = (u16 *) work32;
46153- static u8 *work8 = (u8 *) work32;
46154+ u32 work32[12];
46155+ u16 *work16 = (u16 *) work32;
46156+ u8 *work8 = (u8 *) work32;
46157 int token;
46158
46159 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
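
The i2o_proc changes replace the chtostr() copy-into-temp helper with printf field precision: "%.28s" prints at most 28 bytes and stops earlier at a NUL, which fits these fixed-width, possibly unterminated firmware strings without a scratch buffer. The same hunks also de-static the work arrays, because static scratch space in a seq_file show handler is shared between concurrent readers. The precision idiom, as a runnable userspace sketch:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char module_name[8];		/* fixed width, no NUL */

		memcpy(module_name, "I2OCORE!", 8);
		/* "%.8s" reads at most 8 bytes: no temp buffer, no
		 * strncat() into a stack array, no over-read. */
		printf("Module name : %.8s\n", module_name);
		return 0;
	}
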
46160diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
46161index 92752fb..a7494f6 100644
46162--- a/drivers/message/i2o/iop.c
46163+++ b/drivers/message/i2o/iop.c
46164@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
46165
46166 spin_lock_irqsave(&c->context_list_lock, flags);
46167
46168- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
46169- atomic_inc(&c->context_list_counter);
46170+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
46171+ atomic_inc_unchecked(&c->context_list_counter);
46172
46173- entry->context = atomic_read(&c->context_list_counter);
46174+ entry->context = atomic_read_unchecked(&c->context_list_counter);
46175
46176 list_add(&entry->list, &c->context_list);
46177
46178@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
46179
46180 #if BITS_PER_LONG == 64
46181 spin_lock_init(&c->context_list_lock);
46182- atomic_set(&c->context_list_counter, 0);
46183+ atomic_set_unchecked(&c->context_list_counter, 0);
46184 INIT_LIST_HEAD(&c->context_list);
46185 #endif
46186
46187diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
46188index 9a8e185..27ff17d 100644
46189--- a/drivers/mfd/ab8500-debugfs.c
46190+++ b/drivers/mfd/ab8500-debugfs.c
46191@@ -100,7 +100,7 @@ static int irq_last;
46192 static u32 *irq_count;
46193 static int num_irqs;
46194
46195-static struct device_attribute **dev_attr;
46196+static device_attribute_no_const **dev_attr;
46197 static char **event_name;
46198
46199 static u8 avg_sample = SAMPLE_16;
46200diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
46201index c880c89..45a7c68 100644
46202--- a/drivers/mfd/max8925-i2c.c
46203+++ b/drivers/mfd/max8925-i2c.c
46204@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
46205 const struct i2c_device_id *id)
46206 {
46207 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
46208- static struct max8925_chip *chip;
46209+ struct max8925_chip *chip;
46210 struct device_node *node = client->dev.of_node;
46211
46212 if (node && !pdata) {
46213diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
46214index 7612d89..70549c2 100644
46215--- a/drivers/mfd/tps65910.c
46216+++ b/drivers/mfd/tps65910.c
46217@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
46218 struct tps65910_platform_data *pdata)
46219 {
46220 int ret = 0;
46221- static struct regmap_irq_chip *tps6591x_irqs_chip;
46222+ struct regmap_irq_chip *tps6591x_irqs_chip;
46223
46224 if (!irq) {
46225 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
46226diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
46227index 1b772ef..01e77d33 100644
46228--- a/drivers/mfd/twl4030-irq.c
46229+++ b/drivers/mfd/twl4030-irq.c
46230@@ -34,6 +34,7 @@
46231 #include <linux/of.h>
46232 #include <linux/irqdomain.h>
46233 #include <linux/i2c/twl.h>
46234+#include <asm/pgtable.h>
46235
46236 #include "twl-core.h"
46237
46238@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
46239 * Install an irq handler for each of the SIH modules;
46240 * clone dummy irq_chip since PIH can't *do* anything
46241 */
46242- twl4030_irq_chip = dummy_irq_chip;
46243- twl4030_irq_chip.name = "twl4030";
46244+ pax_open_kernel();
46245+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
46246+ *(const char **)&twl4030_irq_chip.name = "twl4030";
46247
46248- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46249+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46250+ pax_close_kernel();
46251
46252 for (i = irq_base; i < irq_end; i++) {
46253 irq_set_chip_and_handler(i, &twl4030_irq_chip,
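
The twl4030-irq hunk, and the c2port and sunxi_sid hunks below, write to objects that constification has made read-only, so each store is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (on x86, by toggling CR0.WP), and the lvalue is laundered through a cast so the compiler accepts a store to a const-qualified field. Sketch, under those PaX-specific assumptions:

	/* demo_ops is treated as const by the constify plugin and ends
	 * up in .rodata; patching one hook at probe time therefore
	 * needs an explicit writable window. */
	static struct demo_host_ops {
		int (*card_busy)(void *host);
	} demo_ops;

	static void demo_install_busy_hook(int (*hook)(void *))
	{
		pax_open_kernel();	/* grsecurity: allow the write */
		*(void **)&demo_ops.card_busy = hook;
		pax_close_kernel();	/* re-arm write protection */
	}
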
46254diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
46255index 464419b..64bae8d 100644
46256--- a/drivers/misc/c2port/core.c
46257+++ b/drivers/misc/c2port/core.c
46258@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
46259 goto error_idr_alloc;
46260 c2dev->id = ret;
46261
46262- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46263+ pax_open_kernel();
46264+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46265+ pax_close_kernel();
46266
46267 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
46268 "c2port%d", c2dev->id);
46269diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
46270index 8385177..2f54635 100644
46271--- a/drivers/misc/eeprom/sunxi_sid.c
46272+++ b/drivers/misc/eeprom/sunxi_sid.c
46273@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
46274
46275 platform_set_drvdata(pdev, sid_data);
46276
46277- sid_bin_attr.size = sid_data->keysize;
46278+ pax_open_kernel();
46279+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
46280+ pax_close_kernel();
46281 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
46282 return -ENODEV;
46283
46284diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
46285index 36f5d52..32311c3 100644
46286--- a/drivers/misc/kgdbts.c
46287+++ b/drivers/misc/kgdbts.c
46288@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
46289 char before[BREAK_INSTR_SIZE];
46290 char after[BREAK_INSTR_SIZE];
46291
46292- probe_kernel_read(before, (char *)kgdbts_break_test,
46293+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
46294 BREAK_INSTR_SIZE);
46295 init_simple_test();
46296 ts.tst = plant_and_detach_test;
46297@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
46298 /* Activate test with initial breakpoint */
46299 if (!is_early)
46300 kgdb_breakpoint();
46301- probe_kernel_read(after, (char *)kgdbts_break_test,
46302+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
46303 BREAK_INSTR_SIZE);
46304 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
46305 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
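
ktla_ktva() is a PaX/KERNEXEC helper: with KERNEXEC the kernel text is reachable at more than one address, and the macro rebases a text address so probe_kernel_read() goes through the readable mapping. This is patch-internal; a plausible compatibility shim for code that must build either way might look like:

	/* On non-PaX kernels there is only one text mapping, so the
	 * translation degenerates to the identity (assumption: this
	 * mirrors how the grsecurity patch defines the fallback). */
	#ifndef ktla_ktva
	#define ktla_ktva(addr) (addr)
	#endif
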
46306diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
46307index 3ef4627..8d00486 100644
46308--- a/drivers/misc/lis3lv02d/lis3lv02d.c
46309+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
46310@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
46311 * the lid is closed. This leads to interrupts as soon as a little move
46312 * is done.
46313 */
46314- atomic_inc(&lis3->count);
46315+ atomic_inc_unchecked(&lis3->count);
46316
46317 wake_up_interruptible(&lis3->misc_wait);
46318 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
46319@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
46320 if (lis3->pm_dev)
46321 pm_runtime_get_sync(lis3->pm_dev);
46322
46323- atomic_set(&lis3->count, 0);
46324+ atomic_set_unchecked(&lis3->count, 0);
46325 return 0;
46326 }
46327
46328@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
46329 add_wait_queue(&lis3->misc_wait, &wait);
46330 while (true) {
46331 set_current_state(TASK_INTERRUPTIBLE);
46332- data = atomic_xchg(&lis3->count, 0);
46333+ data = atomic_xchg_unchecked(&lis3->count, 0);
46334 if (data)
46335 break;
46336
46337@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
46338 struct lis3lv02d, miscdev);
46339
46340 poll_wait(file, &lis3->misc_wait, wait);
46341- if (atomic_read(&lis3->count))
46342+ if (atomic_read_unchecked(&lis3->count))
46343 return POLLIN | POLLRDNORM;
46344 return 0;
46345 }
46346diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
46347index c439c82..1f20f57 100644
46348--- a/drivers/misc/lis3lv02d/lis3lv02d.h
46349+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
46350@@ -297,7 +297,7 @@ struct lis3lv02d {
46351 struct input_polled_dev *idev; /* input device */
46352 struct platform_device *pdev; /* platform device */
46353 struct regulator_bulk_data regulators[2];
46354- atomic_t count; /* interrupt count after last read */
46355+ atomic_unchecked_t count; /* interrupt count after last read */
46356 union axis_conversion ac; /* hw -> logical axis */
46357 int mapped_btns[3];
46358
46359diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
46360index 2f30bad..c4c13d0 100644
46361--- a/drivers/misc/sgi-gru/gruhandles.c
46362+++ b/drivers/misc/sgi-gru/gruhandles.c
46363@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
46364 unsigned long nsec;
46365
46366 nsec = CLKS2NSEC(clks);
46367- atomic_long_inc(&mcs_op_statistics[op].count);
46368- atomic_long_add(nsec, &mcs_op_statistics[op].total);
46369+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
46370+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
46371 if (mcs_op_statistics[op].max < nsec)
46372 mcs_op_statistics[op].max = nsec;
46373 }
46374diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
46375index 4f76359..cdfcb2e 100644
46376--- a/drivers/misc/sgi-gru/gruprocfs.c
46377+++ b/drivers/misc/sgi-gru/gruprocfs.c
46378@@ -32,9 +32,9 @@
46379
46380 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
46381
46382-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
46383+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
46384 {
46385- unsigned long val = atomic_long_read(v);
46386+ unsigned long val = atomic_long_read_unchecked(v);
46387
46388 seq_printf(s, "%16lu %s\n", val, id);
46389 }
46390@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
46391
46392 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
46393 for (op = 0; op < mcsop_last; op++) {
46394- count = atomic_long_read(&mcs_op_statistics[op].count);
46395- total = atomic_long_read(&mcs_op_statistics[op].total);
46396+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
46397+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
46398 max = mcs_op_statistics[op].max;
46399 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
46400 count ? total / count : 0, max);
46401diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
46402index 5c3ce24..4915ccb 100644
46403--- a/drivers/misc/sgi-gru/grutables.h
46404+++ b/drivers/misc/sgi-gru/grutables.h
46405@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
46406 * GRU statistics.
46407 */
46408 struct gru_stats_s {
46409- atomic_long_t vdata_alloc;
46410- atomic_long_t vdata_free;
46411- atomic_long_t gts_alloc;
46412- atomic_long_t gts_free;
46413- atomic_long_t gms_alloc;
46414- atomic_long_t gms_free;
46415- atomic_long_t gts_double_allocate;
46416- atomic_long_t assign_context;
46417- atomic_long_t assign_context_failed;
46418- atomic_long_t free_context;
46419- atomic_long_t load_user_context;
46420- atomic_long_t load_kernel_context;
46421- atomic_long_t lock_kernel_context;
46422- atomic_long_t unlock_kernel_context;
46423- atomic_long_t steal_user_context;
46424- atomic_long_t steal_kernel_context;
46425- atomic_long_t steal_context_failed;
46426- atomic_long_t nopfn;
46427- atomic_long_t asid_new;
46428- atomic_long_t asid_next;
46429- atomic_long_t asid_wrap;
46430- atomic_long_t asid_reuse;
46431- atomic_long_t intr;
46432- atomic_long_t intr_cbr;
46433- atomic_long_t intr_tfh;
46434- atomic_long_t intr_spurious;
46435- atomic_long_t intr_mm_lock_failed;
46436- atomic_long_t call_os;
46437- atomic_long_t call_os_wait_queue;
46438- atomic_long_t user_flush_tlb;
46439- atomic_long_t user_unload_context;
46440- atomic_long_t user_exception;
46441- atomic_long_t set_context_option;
46442- atomic_long_t check_context_retarget_intr;
46443- atomic_long_t check_context_unload;
46444- atomic_long_t tlb_dropin;
46445- atomic_long_t tlb_preload_page;
46446- atomic_long_t tlb_dropin_fail_no_asid;
46447- atomic_long_t tlb_dropin_fail_upm;
46448- atomic_long_t tlb_dropin_fail_invalid;
46449- atomic_long_t tlb_dropin_fail_range_active;
46450- atomic_long_t tlb_dropin_fail_idle;
46451- atomic_long_t tlb_dropin_fail_fmm;
46452- atomic_long_t tlb_dropin_fail_no_exception;
46453- atomic_long_t tfh_stale_on_fault;
46454- atomic_long_t mmu_invalidate_range;
46455- atomic_long_t mmu_invalidate_page;
46456- atomic_long_t flush_tlb;
46457- atomic_long_t flush_tlb_gru;
46458- atomic_long_t flush_tlb_gru_tgh;
46459- atomic_long_t flush_tlb_gru_zero_asid;
46460+ atomic_long_unchecked_t vdata_alloc;
46461+ atomic_long_unchecked_t vdata_free;
46462+ atomic_long_unchecked_t gts_alloc;
46463+ atomic_long_unchecked_t gts_free;
46464+ atomic_long_unchecked_t gms_alloc;
46465+ atomic_long_unchecked_t gms_free;
46466+ atomic_long_unchecked_t gts_double_allocate;
46467+ atomic_long_unchecked_t assign_context;
46468+ atomic_long_unchecked_t assign_context_failed;
46469+ atomic_long_unchecked_t free_context;
46470+ atomic_long_unchecked_t load_user_context;
46471+ atomic_long_unchecked_t load_kernel_context;
46472+ atomic_long_unchecked_t lock_kernel_context;
46473+ atomic_long_unchecked_t unlock_kernel_context;
46474+ atomic_long_unchecked_t steal_user_context;
46475+ atomic_long_unchecked_t steal_kernel_context;
46476+ atomic_long_unchecked_t steal_context_failed;
46477+ atomic_long_unchecked_t nopfn;
46478+ atomic_long_unchecked_t asid_new;
46479+ atomic_long_unchecked_t asid_next;
46480+ atomic_long_unchecked_t asid_wrap;
46481+ atomic_long_unchecked_t asid_reuse;
46482+ atomic_long_unchecked_t intr;
46483+ atomic_long_unchecked_t intr_cbr;
46484+ atomic_long_unchecked_t intr_tfh;
46485+ atomic_long_unchecked_t intr_spurious;
46486+ atomic_long_unchecked_t intr_mm_lock_failed;
46487+ atomic_long_unchecked_t call_os;
46488+ atomic_long_unchecked_t call_os_wait_queue;
46489+ atomic_long_unchecked_t user_flush_tlb;
46490+ atomic_long_unchecked_t user_unload_context;
46491+ atomic_long_unchecked_t user_exception;
46492+ atomic_long_unchecked_t set_context_option;
46493+ atomic_long_unchecked_t check_context_retarget_intr;
46494+ atomic_long_unchecked_t check_context_unload;
46495+ atomic_long_unchecked_t tlb_dropin;
46496+ atomic_long_unchecked_t tlb_preload_page;
46497+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
46498+ atomic_long_unchecked_t tlb_dropin_fail_upm;
46499+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
46500+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
46501+ atomic_long_unchecked_t tlb_dropin_fail_idle;
46502+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
46503+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
46504+ atomic_long_unchecked_t tfh_stale_on_fault;
46505+ atomic_long_unchecked_t mmu_invalidate_range;
46506+ atomic_long_unchecked_t mmu_invalidate_page;
46507+ atomic_long_unchecked_t flush_tlb;
46508+ atomic_long_unchecked_t flush_tlb_gru;
46509+ atomic_long_unchecked_t flush_tlb_gru_tgh;
46510+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
46511
46512- atomic_long_t copy_gpa;
46513- atomic_long_t read_gpa;
46514+ atomic_long_unchecked_t copy_gpa;
46515+ atomic_long_unchecked_t read_gpa;
46516
46517- atomic_long_t mesq_receive;
46518- atomic_long_t mesq_receive_none;
46519- atomic_long_t mesq_send;
46520- atomic_long_t mesq_send_failed;
46521- atomic_long_t mesq_noop;
46522- atomic_long_t mesq_send_unexpected_error;
46523- atomic_long_t mesq_send_lb_overflow;
46524- atomic_long_t mesq_send_qlimit_reached;
46525- atomic_long_t mesq_send_amo_nacked;
46526- atomic_long_t mesq_send_put_nacked;
46527- atomic_long_t mesq_page_overflow;
46528- atomic_long_t mesq_qf_locked;
46529- atomic_long_t mesq_qf_noop_not_full;
46530- atomic_long_t mesq_qf_switch_head_failed;
46531- atomic_long_t mesq_qf_unexpected_error;
46532- atomic_long_t mesq_noop_unexpected_error;
46533- atomic_long_t mesq_noop_lb_overflow;
46534- atomic_long_t mesq_noop_qlimit_reached;
46535- atomic_long_t mesq_noop_amo_nacked;
46536- atomic_long_t mesq_noop_put_nacked;
46537- atomic_long_t mesq_noop_page_overflow;
46538+ atomic_long_unchecked_t mesq_receive;
46539+ atomic_long_unchecked_t mesq_receive_none;
46540+ atomic_long_unchecked_t mesq_send;
46541+ atomic_long_unchecked_t mesq_send_failed;
46542+ atomic_long_unchecked_t mesq_noop;
46543+ atomic_long_unchecked_t mesq_send_unexpected_error;
46544+ atomic_long_unchecked_t mesq_send_lb_overflow;
46545+ atomic_long_unchecked_t mesq_send_qlimit_reached;
46546+ atomic_long_unchecked_t mesq_send_amo_nacked;
46547+ atomic_long_unchecked_t mesq_send_put_nacked;
46548+ atomic_long_unchecked_t mesq_page_overflow;
46549+ atomic_long_unchecked_t mesq_qf_locked;
46550+ atomic_long_unchecked_t mesq_qf_noop_not_full;
46551+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
46552+ atomic_long_unchecked_t mesq_qf_unexpected_error;
46553+ atomic_long_unchecked_t mesq_noop_unexpected_error;
46554+ atomic_long_unchecked_t mesq_noop_lb_overflow;
46555+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
46556+ atomic_long_unchecked_t mesq_noop_amo_nacked;
46557+ atomic_long_unchecked_t mesq_noop_put_nacked;
46558+ atomic_long_unchecked_t mesq_noop_page_overflow;
46559
46560 };
46561
46562@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
46563 tghop_invalidate, mcsop_last};
46564
46565 struct mcs_op_statistic {
46566- atomic_long_t count;
46567- atomic_long_t total;
46568+ atomic_long_unchecked_t count;
46569+ atomic_long_unchecked_t total;
46570 unsigned long max;
46571 };
46572
46573@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
46574
46575 #define STAT(id) do { \
46576 if (gru_options & OPT_STATS) \
46577- atomic_long_inc(&gru_stats.id); \
46578+ atomic_long_inc_unchecked(&gru_stats.id); \
46579 } while (0)
46580
46581 #ifdef CONFIG_SGI_GRU_DEBUG
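
The bulk gru_stats_s conversion is the third recurring use of the _unchecked types: pure statistics that only feed a procfs report and may wrap without harm. Since the STAT() macro in the same hunk is the single increment site, one sketch covers the whole block:

	/* Statistics counters, exempted from REFCOUNT overflow checks
	 * (atomic_long_unchecked_t is grsecurity-specific). */
	struct demo_stats {
		atomic_long_unchecked_t intr;
	};

	static struct demo_stats demo_stats;

	#define DEMO_STAT(id)						\
		do {							\
			atomic_long_inc_unchecked(&demo_stats.id);	\
		} while (0)

	/* usage: DEMO_STAT(intr); */
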
46582diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
46583index c862cd4..0d176fe 100644
46584--- a/drivers/misc/sgi-xp/xp.h
46585+++ b/drivers/misc/sgi-xp/xp.h
46586@@ -288,7 +288,7 @@ struct xpc_interface {
46587 xpc_notify_func, void *);
46588 void (*received) (short, int, void *);
46589 enum xp_retval (*partid_to_nasids) (short, void *);
46590-};
46591+} __no_const;
46592
46593 extern struct xpc_interface xpc_interface;
46594
46595diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
46596index 01be66d..e3a0c7e 100644
46597--- a/drivers/misc/sgi-xp/xp_main.c
46598+++ b/drivers/misc/sgi-xp/xp_main.c
46599@@ -78,13 +78,13 @@ xpc_notloaded(void)
46600 }
46601
46602 struct xpc_interface xpc_interface = {
46603- (void (*)(int))xpc_notloaded,
46604- (void (*)(int))xpc_notloaded,
46605- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46606- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46607+ .connect = (void (*)(int))xpc_notloaded,
46608+ .disconnect = (void (*)(int))xpc_notloaded,
46609+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46610+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46611 void *))xpc_notloaded,
46612- (void (*)(short, int, void *))xpc_notloaded,
46613- (enum xp_retval(*)(short, void *))xpc_notloaded
46614+ .received = (void (*)(short, int, void *))xpc_notloaded,
46615+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
46616 };
46617 EXPORT_SYMBOL_GPL(xpc_interface);
46618
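
The xp_main.c hunk rewrites xpc_interface from positional to designated initializers. Besides readability, the designated form stays correct if fields are reordered, and it is the shape the constification plugin expects for ops-style structs. The idiom in isolation:

	struct demo_iface {
		void (*connect)(int ch);
		void (*disconnect)(int ch);
	};

	static void demo_notloaded(int ch) { /* stub until loaded */ }

	/* Positional initializers break silently if the struct layout
	 * changes; designated ones bind each stub to its field by name. */
	static struct demo_iface demo_iface = {
		.connect	= demo_notloaded,
		.disconnect	= demo_notloaded,
	};
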
46619diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
46620index b94d5f7..7f494c5 100644
46621--- a/drivers/misc/sgi-xp/xpc.h
46622+++ b/drivers/misc/sgi-xp/xpc.h
46623@@ -835,6 +835,7 @@ struct xpc_arch_operations {
46624 void (*received_payload) (struct xpc_channel *, void *);
46625 void (*notify_senders_of_disconnect) (struct xpc_channel *);
46626 };
46627+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
46628
46629 /* struct xpc_partition act_state values (for XPC HB) */
46630
46631@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
46632 /* found in xpc_main.c */
46633 extern struct device *xpc_part;
46634 extern struct device *xpc_chan;
46635-extern struct xpc_arch_operations xpc_arch_ops;
46636+extern xpc_arch_operations_no_const xpc_arch_ops;
46637 extern int xpc_disengage_timelimit;
46638 extern int xpc_disengage_timedout;
46639 extern int xpc_activate_IRQ_rcvd;
46640diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
46641index 82dc574..8539ab2 100644
46642--- a/drivers/misc/sgi-xp/xpc_main.c
46643+++ b/drivers/misc/sgi-xp/xpc_main.c
46644@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
46645 .notifier_call = xpc_system_die,
46646 };
46647
46648-struct xpc_arch_operations xpc_arch_ops;
46649+xpc_arch_operations_no_const xpc_arch_ops;
46650
46651 /*
46652 * Timer function to enforce the timelimit on the partition disengage.
46653@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
46654
46655 if (((die_args->trapnr == X86_TRAP_MF) ||
46656 (die_args->trapnr == X86_TRAP_XF)) &&
46657- !user_mode_vm(die_args->regs))
46658+ !user_mode(die_args->regs))
46659 xpc_die_deactivate();
46660
46661 break;
46662diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
46663index 4409d79..d7766d0 100644
46664--- a/drivers/mmc/card/block.c
46665+++ b/drivers/mmc/card/block.c
46666@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
46667 if (idata->ic.postsleep_min_us)
46668 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
46669
46670- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
46671+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
46672 err = -EFAULT;
46673 goto cmd_rel_host;
46674 }
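
The mmc block hunk is a plain correctness cleanup folded into the patch: &(ic_ptr->response) has type pointer-to-array, while ic_ptr->response decays to a pointer to the first element. Both evaluate to the same address, so the copy already worked, but the decayed form matches copy_to_user()'s void __user * destination without leaning on that coincidence. A tiny runnable illustration of the type difference:

	#include <stdio.h>

	struct ic {
		unsigned int response[4];
	};

	int main(void)
	{
		struct ic ic_ptr;

		/* Same address, different types: unsigned int (*)[4]
		 * versus unsigned int *. */
		printf("%p %p\n", (void *)&ic_ptr.response,
				  (void *)ic_ptr.response);
		return 0;
	}
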
46675diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
46676index 0d0f7a2..45b8d60 100644
46677--- a/drivers/mmc/host/dw_mmc.h
46678+++ b/drivers/mmc/host/dw_mmc.h
46679@@ -276,5 +276,5 @@ struct dw_mci_drv_data {
46680 int (*parse_dt)(struct dw_mci *host);
46681 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
46682 struct dw_mci_tuning_data *tuning_data);
46683-};
46684+} __do_const;
46685 #endif /* _DW_MMC_H_ */
46686diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
46687index 8232e9a..7776006 100644
46688--- a/drivers/mmc/host/mmci.c
46689+++ b/drivers/mmc/host/mmci.c
46690@@ -1635,7 +1635,9 @@ static int mmci_probe(struct amba_device *dev,
46691 mmc->caps |= MMC_CAP_CMD23;
46692
46693 if (variant->busy_detect) {
46694- mmci_ops.card_busy = mmci_card_busy;
46695+ pax_open_kernel();
46696+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
46697+ pax_close_kernel();
46698 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
46699 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
46700 mmc->max_busy_timeout = 0;
46701diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
46702index 7c71dcd..74cb746 100644
46703--- a/drivers/mmc/host/omap_hsmmc.c
46704+++ b/drivers/mmc/host/omap_hsmmc.c
46705@@ -2120,7 +2120,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
46706
46707 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
46708 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
46709- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46710+ pax_open_kernel();
46711+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46712+ pax_close_kernel();
46713 }
46714
46715 pm_runtime_enable(host->dev);
46716diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
46717index af1f7c0..00d368a 100644
46718--- a/drivers/mmc/host/sdhci-esdhc-imx.c
46719+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
46720@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
46721 host->mmc->caps |= MMC_CAP_1_8V_DDR;
46722 }
46723
46724- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
46725- sdhci_esdhc_ops.platform_execute_tuning =
46726+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
46727+ pax_open_kernel();
46728+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
46729 esdhc_executing_tuning;
46730+ pax_close_kernel();
46731+ }
46732
46733 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
46734 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
46735diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
46736index c45b893..fba0144 100644
46737--- a/drivers/mmc/host/sdhci-s3c.c
46738+++ b/drivers/mmc/host/sdhci-s3c.c
46739@@ -590,9 +590,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
46740 * we can use overriding functions instead of default.
46741 */
46742 if (sc->no_divider) {
46743- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46744- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46745- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46746+ pax_open_kernel();
46747+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46748+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46749+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46750+ pax_close_kernel();
46751 }
46752
46753 /* It supports additional host capabilities if needed */
46754diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
46755index 423666b..81ff5eb 100644
46756--- a/drivers/mtd/chips/cfi_cmdset_0020.c
46757+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
46758@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
46759 size_t totlen = 0, thislen;
46760 int ret = 0;
46761 size_t buflen = 0;
46762- static char *buffer;
46763+ char *buffer;
46764
46765 if (!ECCBUF_SIZE) {
46766 /* We should fall back to a general writev implementation.
46767diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
46768index b3b7ca1..5dd4634 100644
46769--- a/drivers/mtd/nand/denali.c
46770+++ b/drivers/mtd/nand/denali.c
46771@@ -24,6 +24,7 @@
46772 #include <linux/slab.h>
46773 #include <linux/mtd/mtd.h>
46774 #include <linux/module.h>
46775+#include <linux/slab.h>
46776
46777 #include "denali.h"
46778
46779diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46780index 4f3851a..f477a23 100644
46781--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46782+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46783@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
46784
46785 /* first try to map the upper buffer directly */
46786 if (virt_addr_valid(this->upper_buf) &&
46787- !object_is_on_stack(this->upper_buf)) {
46788+ !object_starts_on_stack(this->upper_buf)) {
46789 sg_init_one(sgl, this->upper_buf, this->upper_len);
46790 ret = dma_map_sg(this->dev, sgl, 1, dr);
46791 if (ret == 0)
46792diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
46793index 51b9d6a..52af9a7 100644
46794--- a/drivers/mtd/nftlmount.c
46795+++ b/drivers/mtd/nftlmount.c
46796@@ -24,6 +24,7 @@
46797 #include <asm/errno.h>
46798 #include <linux/delay.h>
46799 #include <linux/slab.h>
46800+#include <linux/sched.h>
46801 #include <linux/mtd/mtd.h>
46802 #include <linux/mtd/nand.h>
46803 #include <linux/mtd/nftl.h>
46804diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
46805index c23184a..4115c41 100644
46806--- a/drivers/mtd/sm_ftl.c
46807+++ b/drivers/mtd/sm_ftl.c
46808@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
46809 #define SM_CIS_VENDOR_OFFSET 0x59
46810 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
46811 {
46812- struct attribute_group *attr_group;
46813+ attribute_group_no_const *attr_group;
46814 struct attribute **attributes;
46815 struct sm_sysfs_attribute *vendor_attribute;
46816 char *vendor;
46817diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
46818index 7b11243..b3278a3 100644
46819--- a/drivers/net/bonding/bond_netlink.c
46820+++ b/drivers/net/bonding/bond_netlink.c
46821@@ -585,7 +585,7 @@ nla_put_failure:
46822 return -EMSGSIZE;
46823 }
46824
46825-struct rtnl_link_ops bond_link_ops __read_mostly = {
46826+struct rtnl_link_ops bond_link_ops = {
46827 .kind = "bond",
46828 .priv_size = sizeof(struct bonding),
46829 .setup = bond_setup,
46830diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
46831index b3b922a..80bba38 100644
46832--- a/drivers/net/caif/caif_hsi.c
46833+++ b/drivers/net/caif/caif_hsi.c
46834@@ -1444,7 +1444,7 @@ err:
46835 return -ENODEV;
46836 }
46837
46838-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
46839+static struct rtnl_link_ops caif_hsi_link_ops = {
46840 .kind = "cfhsi",
46841 .priv_size = sizeof(struct cfhsi),
46842 .setup = cfhsi_setup,
46843diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
46844index 98d73aa..63ef9da 100644
46845--- a/drivers/net/can/Kconfig
46846+++ b/drivers/net/can/Kconfig
46847@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
46848
46849 config CAN_FLEXCAN
46850 tristate "Support for Freescale FLEXCAN based chips"
46851- depends on ARM || PPC
46852+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
46853 ---help---
46854 Say Y here if you want to support for Freescale FlexCAN.
46855
46856diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
46857index 847c1f8..69a0df3 100644
46858--- a/drivers/net/can/dev.c
46859+++ b/drivers/net/can/dev.c
46860@@ -950,7 +950,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
46861 return -EOPNOTSUPP;
46862 }
46863
46864-static struct rtnl_link_ops can_link_ops __read_mostly = {
46865+static struct rtnl_link_ops can_link_ops = {
46866 .kind = "can",
46867 .maxtype = IFLA_CAN_MAX,
46868 .policy = can_policy,
46869diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
46870index 674f367..ec3a31f 100644
46871--- a/drivers/net/can/vcan.c
46872+++ b/drivers/net/can/vcan.c
46873@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
46874 dev->destructor = free_netdev;
46875 }
46876
46877-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
46878+static struct rtnl_link_ops vcan_link_ops = {
46879 .kind = "vcan",
46880 .setup = vcan_setup,
46881 };
46882diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
46883index 49adbf1..fff7ff8 100644
46884--- a/drivers/net/dummy.c
46885+++ b/drivers/net/dummy.c
46886@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
46887 return 0;
46888 }
46889
46890-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
46891+static struct rtnl_link_ops dummy_link_ops = {
46892 .kind = DRV_NAME,
46893 .setup = dummy_setup,
46894 .validate = dummy_validate,
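
The run of rtnl_link_ops hunks (bonding, caif_hsi, can, vcan, dummy) all drop __read_mostly. That annotation places the object in a writable cache-friendly section, which conflicts with the constify plugin moving these ops structures into .rodata; removing it lets them become genuinely read-only. Sketch:

	#include <net/rtnetlink.h>

	static void demo_link_setup(struct net_device *dev) { }

	/* No __read_mostly here: the constify plugin is assumed to
	 * force this object const and place it in .rodata. */
	static struct rtnl_link_ops demo_link_ops = {
		.kind	= "demo",
		.setup	= demo_link_setup,
	};
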
46895diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
46896index 0443654..4f0aa18 100644
46897--- a/drivers/net/ethernet/8390/ax88796.c
46898+++ b/drivers/net/ethernet/8390/ax88796.c
46899@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
46900 if (ax->plat->reg_offsets)
46901 ei_local->reg_offset = ax->plat->reg_offsets;
46902 else {
46903+ resource_size_t _mem_size = mem_size;
46904+ do_div(_mem_size, 0x18);
46905 ei_local->reg_offset = ax->reg_offsets;
46906 for (ret = 0; ret < 0x18; ret++)
46907- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
46908+ ax->reg_offsets[ret] = _mem_size * ret;
46909 }
46910
46911 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
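
The ax88796 hunk replaces an open-coded division of a resource_size_t (64-bit with some configs) by 0x18 with do_div(). A direct 64-bit division would make gcc emit a call to a libgcc helper that 32-bit kernels do not link against; do_div() performs the 64-by-32 division in place. Its slightly unusual calling convention:

	#include <asm/div64.h>
	#include <linux/types.h>

	static u64 demo_bytes_per_slot(u64 mem_size)
	{
		/* do_div(n, base) divides n in place and returns the
		 * remainder as a u32. */
		u32 rem = do_div(mem_size, 0x18);

		(void)rem;		/* remainder unused here */
		return mem_size;	/* now holds the quotient */
	}
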
46912diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
46913index 760c72c..a99728c 100644
46914--- a/drivers/net/ethernet/altera/altera_tse_main.c
46915+++ b/drivers/net/ethernet/altera/altera_tse_main.c
46916@@ -1217,7 +1217,7 @@ static int tse_shutdown(struct net_device *dev)
46917 return 0;
46918 }
46919
46920-static struct net_device_ops altera_tse_netdev_ops = {
46921+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
46922 .ndo_open = tse_open,
46923 .ndo_stop = tse_shutdown,
46924 .ndo_start_xmit = tse_start_xmit,
46925@@ -1454,11 +1454,13 @@ static int altera_tse_probe(struct platform_device *pdev)
46926 ndev->netdev_ops = &altera_tse_netdev_ops;
46927 altera_tse_set_ethtool_ops(ndev);
46928
46929+ pax_open_kernel();
46930 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
46931
46932 if (priv->hash_filter)
46933 altera_tse_netdev_ops.ndo_set_rx_mode =
46934 tse_set_rx_mode_hashfilter;
46935+ pax_close_kernel();
46936
46937 /* Scatter/gather IO is not supported,
46938 * so it is turned off
46939diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
46940index 29a0927..5a348e24 100644
46941--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
46942+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
46943@@ -1122,14 +1122,14 @@ do { \
46944 * operations, everything works on mask values.
46945 */
46946 #define XMDIO_READ(_pdata, _mmd, _reg) \
46947- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
46948+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
46949 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
46950
46951 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
46952 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
46953
46954 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
46955- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
46956+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
46957 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
46958
46959 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
46960diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
46961index 8a50b01..39c1ad0 100644
46962--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
46963+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
46964@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
46965
46966 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
46967
46968- pdata->hw_if.config_dcb_tc(pdata);
46969+ pdata->hw_if->config_dcb_tc(pdata);
46970
46971 return 0;
46972 }
46973@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
46974
46975 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
46976
46977- pdata->hw_if.config_dcb_pfc(pdata);
46978+ pdata->hw_if->config_dcb_pfc(pdata);
46979
46980 return 0;
46981 }
46982diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
46983index a50891f..b26fe24 100644
46984--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
46985+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
46986@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
46987
46988 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
46989 {
46990- struct xgbe_hw_if *hw_if = &pdata->hw_if;
46991+ struct xgbe_hw_if *hw_if = pdata->hw_if;
46992 struct xgbe_channel *channel;
46993 struct xgbe_ring *ring;
46994 struct xgbe_ring_data *rdata;
46995@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
46996
46997 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
46998 {
46999- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47000+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47001 struct xgbe_channel *channel;
47002 struct xgbe_ring *ring;
47003 struct xgbe_ring_desc *rdesc;
47004@@ -624,7 +624,7 @@ err_out:
47005 static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47006 {
47007 struct xgbe_prv_data *pdata = channel->pdata;
47008- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47009+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47010 struct xgbe_ring *ring = channel->rx_ring;
47011 struct xgbe_ring_data *rdata;
47012 int i;
47013@@ -650,17 +650,12 @@ static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47014 DBGPR("<--xgbe_realloc_rx_buffer\n");
47015 }
47016
47017-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
47018-{
47019- DBGPR("-->xgbe_init_function_ptrs_desc\n");
47020-
47021- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
47022- desc_if->free_ring_resources = xgbe_free_ring_resources;
47023- desc_if->map_tx_skb = xgbe_map_tx_skb;
47024- desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
47025- desc_if->unmap_rdata = xgbe_unmap_rdata;
47026- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
47027- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
47028-
47029- DBGPR("<--xgbe_init_function_ptrs_desc\n");
47030-}
47031+const struct xgbe_desc_if default_xgbe_desc_if = {
47032+ .alloc_ring_resources = xgbe_alloc_ring_resources,
47033+ .free_ring_resources = xgbe_free_ring_resources,
47034+ .map_tx_skb = xgbe_map_tx_skb,
47035+ .realloc_rx_buffer = xgbe_realloc_rx_buffer,
47036+ .unmap_rdata = xgbe_unmap_rdata,
47037+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
47038+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
47039+};
47040diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47041index 4c66cd1..1a20aab 100644
47042--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47043+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47044@@ -2703,7 +2703,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
47045
47046 static int xgbe_init(struct xgbe_prv_data *pdata)
47047 {
47048- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47049+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47050 int ret;
47051
47052 DBGPR("-->xgbe_init\n");
47053@@ -2767,108 +2767,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
47054 return 0;
47055 }
47056
47057-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
47058-{
47059- DBGPR("-->xgbe_init_function_ptrs\n");
47060-
47061- hw_if->tx_complete = xgbe_tx_complete;
47062-
47063- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
47064- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
47065- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
47066- hw_if->set_mac_address = xgbe_set_mac_address;
47067-
47068- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
47069- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
47070-
47071- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
47072- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
47073- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
47074- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
47075- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
47076-
47077- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
47078- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
47079-
47080- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
47081- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
47082- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
47083-
47084- hw_if->enable_tx = xgbe_enable_tx;
47085- hw_if->disable_tx = xgbe_disable_tx;
47086- hw_if->enable_rx = xgbe_enable_rx;
47087- hw_if->disable_rx = xgbe_disable_rx;
47088-
47089- hw_if->powerup_tx = xgbe_powerup_tx;
47090- hw_if->powerdown_tx = xgbe_powerdown_tx;
47091- hw_if->powerup_rx = xgbe_powerup_rx;
47092- hw_if->powerdown_rx = xgbe_powerdown_rx;
47093-
47094- hw_if->dev_xmit = xgbe_dev_xmit;
47095- hw_if->dev_read = xgbe_dev_read;
47096- hw_if->enable_int = xgbe_enable_int;
47097- hw_if->disable_int = xgbe_disable_int;
47098- hw_if->init = xgbe_init;
47099- hw_if->exit = xgbe_exit;
47100+const struct xgbe_hw_if default_xgbe_hw_if = {
47101+ .tx_complete = xgbe_tx_complete,
47102+
47103+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
47104+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
47105+ .add_mac_addresses = xgbe_add_mac_addresses,
47106+ .set_mac_address = xgbe_set_mac_address,
47107+
47108+ .enable_rx_csum = xgbe_enable_rx_csum,
47109+ .disable_rx_csum = xgbe_disable_rx_csum,
47110+
47111+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
47112+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
47113+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
47114+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
47115+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
47116+
47117+ .read_mmd_regs = xgbe_read_mmd_regs,
47118+ .write_mmd_regs = xgbe_write_mmd_regs,
47119+
47120+ .set_gmii_speed = xgbe_set_gmii_speed,
47121+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
47122+ .set_xgmii_speed = xgbe_set_xgmii_speed,
47123+
47124+ .enable_tx = xgbe_enable_tx,
47125+ .disable_tx = xgbe_disable_tx,
47126+ .enable_rx = xgbe_enable_rx,
47127+ .disable_rx = xgbe_disable_rx,
47128+
47129+ .powerup_tx = xgbe_powerup_tx,
47130+ .powerdown_tx = xgbe_powerdown_tx,
47131+ .powerup_rx = xgbe_powerup_rx,
47132+ .powerdown_rx = xgbe_powerdown_rx,
47133+
47134+ .dev_xmit = xgbe_dev_xmit,
47135+ .dev_read = xgbe_dev_read,
47136+ .enable_int = xgbe_enable_int,
47137+ .disable_int = xgbe_disable_int,
47138+ .init = xgbe_init,
47139+ .exit = xgbe_exit,
47140
47141 /* Descriptor related Sequences have to be initialized here */
47142- hw_if->tx_desc_init = xgbe_tx_desc_init;
47143- hw_if->rx_desc_init = xgbe_rx_desc_init;
47144- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
47145- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
47146- hw_if->is_last_desc = xgbe_is_last_desc;
47147- hw_if->is_context_desc = xgbe_is_context_desc;
47148- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
47149+ .tx_desc_init = xgbe_tx_desc_init,
47150+ .rx_desc_init = xgbe_rx_desc_init,
47151+ .tx_desc_reset = xgbe_tx_desc_reset,
47152+ .rx_desc_reset = xgbe_rx_desc_reset,
47153+ .is_last_desc = xgbe_is_last_desc,
47154+ .is_context_desc = xgbe_is_context_desc,
47155+ .tx_start_xmit = xgbe_tx_start_xmit,
47156
47157 /* For FLOW ctrl */
47158- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
47159- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
47160+ .config_tx_flow_control = xgbe_config_tx_flow_control,
47161+ .config_rx_flow_control = xgbe_config_rx_flow_control,
47162
47163 /* For RX coalescing */
47164- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
47165- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
47166- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
47167- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
47168+ .config_rx_coalesce = xgbe_config_rx_coalesce,
47169+ .config_tx_coalesce = xgbe_config_tx_coalesce,
47170+ .usec_to_riwt = xgbe_usec_to_riwt,
47171+ .riwt_to_usec = xgbe_riwt_to_usec,
47172
47173 /* For RX and TX threshold config */
47174- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
47175- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
47176+ .config_rx_threshold = xgbe_config_rx_threshold,
47177+ .config_tx_threshold = xgbe_config_tx_threshold,
47178
47179 /* For RX and TX Store and Forward Mode config */
47180- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
47181- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
47182+ .config_rsf_mode = xgbe_config_rsf_mode,
47183+ .config_tsf_mode = xgbe_config_tsf_mode,
47184
47185 /* For TX DMA Operating on Second Frame config */
47186- hw_if->config_osp_mode = xgbe_config_osp_mode;
47187+ .config_osp_mode = xgbe_config_osp_mode,
47188
47189 /* For RX and TX PBL config */
47190- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
47191- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
47192- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
47193- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
47194- hw_if->config_pblx8 = xgbe_config_pblx8;
47195+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
47196+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
47197+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
47198+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
47199+ .config_pblx8 = xgbe_config_pblx8,
47200
47201 /* For MMC statistics support */
47202- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
47203- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
47204- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
47205+ .tx_mmc_int = xgbe_tx_mmc_int,
47206+ .rx_mmc_int = xgbe_rx_mmc_int,
47207+ .read_mmc_stats = xgbe_read_mmc_stats,
47208
47209 /* For PTP config */
47210- hw_if->config_tstamp = xgbe_config_tstamp;
47211- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
47212- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
47213- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
47214- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
47215+ .config_tstamp = xgbe_config_tstamp,
47216+ .update_tstamp_addend = xgbe_update_tstamp_addend,
47217+ .set_tstamp_time = xgbe_set_tstamp_time,
47218+ .get_tstamp_time = xgbe_get_tstamp_time,
47219+ .get_tx_tstamp = xgbe_get_tx_tstamp,
47220
47221 /* For Data Center Bridging config */
47222- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
47223- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
47224+ .config_dcb_tc = xgbe_config_dcb_tc,
47225+ .config_dcb_pfc = xgbe_config_dcb_pfc,
47226
47227 /* For Receive Side Scaling */
47228- hw_if->enable_rss = xgbe_enable_rss;
47229- hw_if->disable_rss = xgbe_disable_rss;
47230- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
47231- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
47232-
47233- DBGPR("<--xgbe_init_function_ptrs\n");
47234-}
47235+ .enable_rss = xgbe_enable_rss,
47236+ .disable_rss = xgbe_disable_rss,
47237+ .set_rss_hash_key = xgbe_set_rss_hash_key,
47238+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
47239+};
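
The xgbe hunks above replace a function-pointer table that was populated at runtime (xgbe_init_function_ptrs_dev) with a const object built from designated initializers, so the compiler can place it in .rodata and the driver keeps only a pointer to it. A minimal sketch of the pattern, with hypothetical names standing in for the xgbe types:

	/* Hypothetical ops table mirroring the xgbe_hw_if conversion. */
	struct hw_ops {
		int  (*init)(void *priv);
		void (*exit)(void *priv);
	};

	static int  my_init(void *priv) { return 0; }
	static void my_exit(void *priv) { }

	/* Initialized at compile time and const: the function pointers
	 * land in .rodata and cannot be overwritten at runtime. */
	static const struct hw_ops default_hw_ops = {
		.init = my_init,
		.exit = my_exit,
	};

	struct priv_data {
		const struct hw_ops *hw_if;	/* pointer to const, as in xgbe.h */
	};

	static void probe(struct priv_data *pdata)
	{
		pdata->hw_if = &default_hw_ops;	/* replaces the init-fptrs call */
	}
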
47240diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47241index e5ffb2c..e56d30b 100644
47242--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47243+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47244@@ -239,7 +239,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
47245 * support, tell it now
47246 */
47247 if (ring->tx.xmit_more)
47248- pdata->hw_if.tx_start_xmit(channel, ring);
47249+ pdata->hw_if->tx_start_xmit(channel, ring);
47250
47251 return NETDEV_TX_BUSY;
47252 }
47253@@ -267,7 +267,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
47254
47255 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47256 {
47257- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47258+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47259 struct xgbe_channel *channel;
47260 enum xgbe_int int_id;
47261 unsigned int i;
47262@@ -289,7 +289,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47263
47264 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47265 {
47266- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47267+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47268 struct xgbe_channel *channel;
47269 enum xgbe_int int_id;
47270 unsigned int i;
47271@@ -312,7 +312,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47272 static irqreturn_t xgbe_isr(int irq, void *data)
47273 {
47274 struct xgbe_prv_data *pdata = data;
47275- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47276+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47277 struct xgbe_channel *channel;
47278 unsigned int dma_isr, dma_ch_isr;
47279 unsigned int mac_isr, mac_tssr;
47280@@ -611,7 +611,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
47281
47282 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47283 {
47284- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47285+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47286
47287 DBGPR("-->xgbe_init_tx_coalesce\n");
47288
47289@@ -625,7 +625,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47290
47291 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47292 {
47293- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47294+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47295
47296 DBGPR("-->xgbe_init_rx_coalesce\n");
47297
47298@@ -639,7 +639,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47299
47300 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47301 {
47302- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47303+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47304 struct xgbe_channel *channel;
47305 struct xgbe_ring *ring;
47306 struct xgbe_ring_data *rdata;
47307@@ -664,7 +664,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47308
47309 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47310 {
47311- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47312+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47313 struct xgbe_channel *channel;
47314 struct xgbe_ring *ring;
47315 struct xgbe_ring_data *rdata;
47316@@ -690,7 +690,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47317 static void xgbe_adjust_link(struct net_device *netdev)
47318 {
47319 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47320- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47321+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47322 struct phy_device *phydev = pdata->phydev;
47323 int new_state = 0;
47324
47325@@ -798,7 +798,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
47326 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47327 {
47328 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47329- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47330+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47331 unsigned long flags;
47332
47333 DBGPR("-->xgbe_powerdown\n");
47334@@ -836,7 +836,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47335 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47336 {
47337 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47338- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47339+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47340 unsigned long flags;
47341
47342 DBGPR("-->xgbe_powerup\n");
47343@@ -873,7 +873,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47344
47345 static int xgbe_start(struct xgbe_prv_data *pdata)
47346 {
47347- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47348+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47349 struct net_device *netdev = pdata->netdev;
47350
47351 DBGPR("-->xgbe_start\n");
47352@@ -899,7 +899,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
47353
47354 static void xgbe_stop(struct xgbe_prv_data *pdata)
47355 {
47356- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47357+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47358 struct xgbe_channel *channel;
47359 struct net_device *netdev = pdata->netdev;
47360 struct netdev_queue *txq;
47361@@ -932,7 +932,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
47362 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
47363 {
47364 struct xgbe_channel *channel;
47365- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47366+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47367 unsigned int i;
47368
47369 DBGPR("-->xgbe_restart_dev\n");
47370@@ -1135,7 +1135,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
47371 return -ERANGE;
47372 }
47373
47374- pdata->hw_if.config_tstamp(pdata, mac_tscr);
47375+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
47376
47377 memcpy(&pdata->tstamp_config, &config, sizeof(config));
47378
47379@@ -1284,8 +1284,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
47380 static int xgbe_open(struct net_device *netdev)
47381 {
47382 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47383- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47384- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47385+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47386+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47387 struct xgbe_channel *channel = NULL;
47388 unsigned int i = 0;
47389 int ret;
47390@@ -1400,8 +1400,8 @@ err_phy_init:
47391 static int xgbe_close(struct net_device *netdev)
47392 {
47393 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47394- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47395- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47396+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47397+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47398 struct xgbe_channel *channel;
47399 unsigned int i;
47400
47401@@ -1442,8 +1442,8 @@ static int xgbe_close(struct net_device *netdev)
47402 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
47403 {
47404 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47405- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47406- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47407+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47408+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47409 struct xgbe_channel *channel;
47410 struct xgbe_ring *ring;
47411 struct xgbe_packet_data *packet;
47412@@ -1518,7 +1518,7 @@ tx_netdev_return:
47413 static void xgbe_set_rx_mode(struct net_device *netdev)
47414 {
47415 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47416- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47417+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47418 unsigned int pr_mode, am_mode;
47419
47420 DBGPR("-->xgbe_set_rx_mode\n");
47421@@ -1537,7 +1537,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
47422 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
47423 {
47424 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47425- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47426+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47427 struct sockaddr *saddr = addr;
47428
47429 DBGPR("-->xgbe_set_mac_address\n");
47430@@ -1604,7 +1604,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
47431
47432 DBGPR("-->%s\n", __func__);
47433
47434- pdata->hw_if.read_mmc_stats(pdata);
47435+ pdata->hw_if->read_mmc_stats(pdata);
47436
47437 s->rx_packets = pstats->rxframecount_gb;
47438 s->rx_bytes = pstats->rxoctetcount_gb;
47439@@ -1631,7 +1631,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
47440 u16 vid)
47441 {
47442 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47443- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47444+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47445
47446 DBGPR("-->%s\n", __func__);
47447
47448@@ -1647,7 +1647,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
47449 u16 vid)
47450 {
47451 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47452- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47453+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47454
47455 DBGPR("-->%s\n", __func__);
47456
47457@@ -1713,7 +1713,7 @@ static int xgbe_set_features(struct net_device *netdev,
47458 netdev_features_t features)
47459 {
47460 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47461- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47462+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47463 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
47464 int ret = 0;
47465
47466@@ -1778,7 +1778,7 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
47467 static void xgbe_rx_refresh(struct xgbe_channel *channel)
47468 {
47469 struct xgbe_prv_data *pdata = channel->pdata;
47470- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47471+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47472 struct xgbe_ring *ring = channel->rx_ring;
47473 struct xgbe_ring_data *rdata;
47474
47475@@ -1819,8 +1819,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
47476 static int xgbe_tx_poll(struct xgbe_channel *channel)
47477 {
47478 struct xgbe_prv_data *pdata = channel->pdata;
47479- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47480- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47481+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47482+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47483 struct xgbe_ring *ring = channel->tx_ring;
47484 struct xgbe_ring_data *rdata;
47485 struct xgbe_ring_desc *rdesc;
47486@@ -1891,7 +1891,7 @@ unlock:
47487 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
47488 {
47489 struct xgbe_prv_data *pdata = channel->pdata;
47490- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47491+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47492 struct xgbe_ring *ring = channel->rx_ring;
47493 struct xgbe_ring_data *rdata;
47494 struct xgbe_packet_data *packet;
47495diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47496index ebf4893..28108c7 100644
47497--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47498+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47499@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
47500
47501 DBGPR("-->%s\n", __func__);
47502
47503- pdata->hw_if.read_mmc_stats(pdata);
47504+ pdata->hw_if->read_mmc_stats(pdata);
47505 for (i = 0; i < XGBE_STATS_COUNT; i++) {
47506 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
47507 *data++ = *(u64 *)stat;
47508@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
47509 struct ethtool_coalesce *ec)
47510 {
47511 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47512- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47513+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47514 unsigned int riwt;
47515
47516 DBGPR("-->xgbe_get_coalesce\n");
47517@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
47518 struct ethtool_coalesce *ec)
47519 {
47520 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47521- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47522+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47523 unsigned int rx_frames, rx_riwt, rx_usecs;
47524 unsigned int tx_frames, tx_usecs;
47525
47526diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47527index dbd3850..4e31b38 100644
47528--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47529+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47530@@ -155,12 +155,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
47531 DBGPR("<--xgbe_default_config\n");
47532 }
47533
47534-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
47535-{
47536- xgbe_init_function_ptrs_dev(&pdata->hw_if);
47537- xgbe_init_function_ptrs_desc(&pdata->desc_if);
47538-}
47539-
47540 static int xgbe_probe(struct platform_device *pdev)
47541 {
47542 struct xgbe_prv_data *pdata;
47543@@ -281,9 +275,8 @@ static int xgbe_probe(struct platform_device *pdev)
47544 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
47545
47546 /* Set all the function pointers */
47547- xgbe_init_all_fptrs(pdata);
47548- hw_if = &pdata->hw_if;
47549- desc_if = &pdata->desc_if;
47550+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
47551+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
47552
47553 /* Issue software reset to device */
47554 hw_if->exit(pdata);
47555diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47556index 363b210..b241389 100644
47557--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47558+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47559@@ -126,7 +126,7 @@
47560 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
47561 {
47562 struct xgbe_prv_data *pdata = mii->priv;
47563- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47564+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47565 int mmd_data;
47566
47567 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
47568@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
47569 u16 mmd_val)
47570 {
47571 struct xgbe_prv_data *pdata = mii->priv;
47572- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47573+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47574 int mmd_data = mmd_val;
47575
47576 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
47577diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47578index a1bf9d1c..84adcab 100644
47579--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47580+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47581@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
47582 tstamp_cc);
47583 u64 nsec;
47584
47585- nsec = pdata->hw_if.get_tstamp_time(pdata);
47586+ nsec = pdata->hw_if->get_tstamp_time(pdata);
47587
47588 return nsec;
47589 }
47590@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
47591
47592 spin_lock_irqsave(&pdata->tstamp_lock, flags);
47593
47594- pdata->hw_if.update_tstamp_addend(pdata, addend);
47595+ pdata->hw_if->update_tstamp_addend(pdata, addend);
47596
47597 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
47598
47599diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
47600index f9ec762..988c969 100644
47601--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
47602+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
47603@@ -668,8 +668,8 @@ struct xgbe_prv_data {
47604 int dev_irq;
47605 unsigned int per_channel_irq;
47606
47607- struct xgbe_hw_if hw_if;
47608- struct xgbe_desc_if desc_if;
47609+ const struct xgbe_hw_if *hw_if;
47610+ const struct xgbe_desc_if *desc_if;
47611
47612 /* AXI DMA settings */
47613 unsigned int axdomain;
47614@@ -787,6 +787,9 @@ struct xgbe_prv_data {
47615 #endif
47616 };
47617
47618+extern const struct xgbe_hw_if default_xgbe_hw_if;
47619+extern const struct xgbe_desc_if default_xgbe_desc_if;
47620+
47621 /* Function prototypes*/
47622
47623 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
47624diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47625index adcacda..fa6e0ae 100644
47626--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47627+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47628@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
47629 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
47630 {
47631 /* RX_MODE controlling object */
47632- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
47633+ bnx2x_init_rx_mode_obj(bp);
47634
47635 /* multicast configuration controlling object */
47636 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
47637diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47638index 07cdf9b..b08ecc7 100644
47639--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47640+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47641@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
47642 return rc;
47643 }
47644
47645-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47646- struct bnx2x_rx_mode_obj *o)
47647+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
47648 {
47649 if (CHIP_IS_E1x(bp)) {
47650- o->wait_comp = bnx2x_empty_rx_mode_wait;
47651- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
47652+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
47653+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
47654 } else {
47655- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
47656- o->config_rx_mode = bnx2x_set_rx_mode_e2;
47657+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
47658+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
47659 }
47660 }
47661
47662diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47663index 86baecb..ff3bb46 100644
47664--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47665+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47666@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
47667
47668 /********************* RX MODE ****************/
47669
47670-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47671- struct bnx2x_rx_mode_obj *o);
47672+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
47673
47674 /**
47675 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
47676diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
47677index 31c9f82..e65e986 100644
47678--- a/drivers/net/ethernet/broadcom/tg3.h
47679+++ b/drivers/net/ethernet/broadcom/tg3.h
47680@@ -150,6 +150,7 @@
47681 #define CHIPREV_ID_5750_A0 0x4000
47682 #define CHIPREV_ID_5750_A1 0x4001
47683 #define CHIPREV_ID_5750_A3 0x4003
47684+#define CHIPREV_ID_5750_C1 0x4201
47685 #define CHIPREV_ID_5750_C2 0x4202
47686 #define CHIPREV_ID_5752_A0_HW 0x5000
47687 #define CHIPREV_ID_5752_A0 0x6000
47688diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
47689index 903466e..b285864 100644
47690--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
47691+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
47692@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
47693 }
47694
47695 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
47696- bna_cb_ioceth_enable,
47697- bna_cb_ioceth_disable,
47698- bna_cb_ioceth_hbfail,
47699- bna_cb_ioceth_reset
47700+ .enable_cbfn = bna_cb_ioceth_enable,
47701+ .disable_cbfn = bna_cb_ioceth_disable,
47702+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
47703+ .reset_cbfn = bna_cb_ioceth_reset
47704 };
47705
47706 static void bna_attr_init(struct bna_ioceth *ioceth)
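
The bna_ioceth_cbfn hunk above converts positional initializers to designated ones. Both produce the same object today, but the designated form stays correct if the structure's fields are ever reordered (for example by a layout-randomizing plugin) and documents which function serves which role. A self-contained sketch:

	struct ioc_cbfn {
		void (*enable_cbfn)(void *arg);
		void (*disable_cbfn)(void *arg);
	};

	static void on_enable(void *arg)  { }
	static void on_disable(void *arg) { }

	/* Positional: miswires silently if the field order changes. */
	static struct ioc_cbfn positional = { on_enable, on_disable };

	/* Designated: callbacks bound by name, independent of layout. */
	static struct ioc_cbfn designated = {
		.enable_cbfn  = on_enable,
		.disable_cbfn = on_disable,
	};
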
47707diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47708index 8cffcdf..aadf043 100644
47709--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47710+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47711@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
47712 */
47713 struct l2t_skb_cb {
47714 arp_failure_handler_func arp_failure_handler;
47715-};
47716+} __no_const;
47717
47718 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
47719
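
__no_const opts a type out of the constify plugin: a struct consisting solely of function pointers would normally be made const and moved to read-only memory, but this one is rewritten per skb at runtime. A hedged sketch of the plumbing, paraphrasing the compiler.h changes made elsewhere in this patch:

	/* Expands to a plugin-recognized attribute only when the constify
	 * GCC plugin is active; otherwise it is a no-op. */
	#ifdef CONSTIFY_PLUGIN
	#define __no_const __attribute__((no_const))
	#else
	#define __no_const
	#endif

	/* All-function-pointer struct that must stay writable because the
	 * handler is assigned per packet: */
	struct skb_cb_like {
		void (*arp_failure_handler)(void *dev, void *skb);
	} __no_const;
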
47720diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47721index ccf3436..b720d77 100644
47722--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47723+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47724@@ -2277,7 +2277,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
47725
47726 int i;
47727 struct adapter *ap = netdev2adap(dev);
47728- static const unsigned int *reg_ranges;
47729+ const unsigned int *reg_ranges;
47730 int arr_size = 0, buf_size = 0;
47731
47732 if (is_t4(ap->params.chip)) {
47733diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
47734index badff18..e15c4ec 100644
47735--- a/drivers/net/ethernet/dec/tulip/de4x5.c
47736+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
47737@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
47738 for (i=0; i<ETH_ALEN; i++) {
47739 tmp.addr[i] = dev->dev_addr[i];
47740 }
47741- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
47742+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
47743 break;
47744
47745 case DE4X5_SET_HWADDR: /* Set the hardware address */
47746@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
47747 spin_lock_irqsave(&lp->lock, flags);
47748 memcpy(&statbuf, &lp->pktStats, ioc->len);
47749 spin_unlock_irqrestore(&lp->lock, flags);
47750- if (copy_to_user(ioc->data, &statbuf, ioc->len))
47751+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
47752 return -EFAULT;
47753 break;
47754 }
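
Both de4x5 hunks bound the user-supplied ioc->len by the size of the kernel buffer before copy_to_user(); without the check, an oversized length discloses whatever sits next to tmp.addr or statbuf on the kernel stack. The shape of the fix as a stand-alone sketch (hypothetical helper, kernel context assumed):

	#include <linux/uaccess.h>

	/* user_len arrives in an ioctl argument; buf/buf_len is a
	 * fixed-size kernel buffer.  Clamp first, copy second. */
	static int bounded_copy_out(void __user *dst, const void *buf,
				    size_t buf_len, size_t user_len)
	{
		if (user_len > buf_len)		/* oversized read: refuse */
			return -EFAULT;
		if (copy_to_user(dst, buf, user_len))
			return -EFAULT;
		return 0;
	}
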
47755diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
47756index d48806b..41cd80f 100644
47757--- a/drivers/net/ethernet/emulex/benet/be_main.c
47758+++ b/drivers/net/ethernet/emulex/benet/be_main.c
47759@@ -537,7 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
47760
47761 if (wrapped)
47762 newacc += 65536;
47763- ACCESS_ONCE(*acc) = newacc;
47764+ ACCESS_ONCE_RW(*acc) = newacc;
47765 }
47766
47767 static void populate_erx_stats(struct be_adapter *adapter,
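
ACCESS_ONCE_RW() is the write-side companion PaX gives ACCESS_ONCE(): the plain macro gains a const qualifier so stray writes through it fail to compile, and intentional writes, like the accumulator update above, are annotated explicitly. Roughly (paraphrasing the definitions introduced elsewhere in this patch; exact spelling may vary by version):

	#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
	#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

	/* reads keep the old spelling:   v = ACCESS_ONCE(shared);    */
	/* writes are called out:         ACCESS_ONCE_RW(shared) = v; */
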
47768diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
47769index 6d0c5d5..55be363 100644
47770--- a/drivers/net/ethernet/faraday/ftgmac100.c
47771+++ b/drivers/net/ethernet/faraday/ftgmac100.c
47772@@ -30,6 +30,8 @@
47773 #include <linux/netdevice.h>
47774 #include <linux/phy.h>
47775 #include <linux/platform_device.h>
47776+#include <linux/interrupt.h>
47777+#include <linux/irqreturn.h>
47778 #include <net/ip.h>
47779
47780 #include "ftgmac100.h"
47781diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
47782index dce5f7b..2433466 100644
47783--- a/drivers/net/ethernet/faraday/ftmac100.c
47784+++ b/drivers/net/ethernet/faraday/ftmac100.c
47785@@ -31,6 +31,8 @@
47786 #include <linux/module.h>
47787 #include <linux/netdevice.h>
47788 #include <linux/platform_device.h>
47789+#include <linux/interrupt.h>
47790+#include <linux/irqreturn.h>
47791
47792 #include "ftmac100.h"
47793
47794diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47795index 6d1ec92..4d5d97d 100644
47796--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47797+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47798@@ -407,7 +407,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
47799 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
47800
47801 /* Update the base adjustement value. */
47802- ACCESS_ONCE(pf->ptp_base_adj) = incval;
47803+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
47804 smp_mb(); /* Force the above update. */
47805 }
47806
47807diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47808index 5fd4b52..87aa34b 100644
47809--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47810+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47811@@ -794,7 +794,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
47812 }
47813
47814 /* update the base incval used to calculate frequency adjustment */
47815- ACCESS_ONCE(adapter->base_incval) = incval;
47816+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
47817 smp_mb();
47818
47819 /* need lock to prevent incorrect read while modifying cyclecounter */
47820diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47821index e3357bf..d4d5348 100644
47822--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47823+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47824@@ -466,8 +466,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
47825 wmb();
47826
47827 /* we want to dirty this cache line once */
47828- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
47829- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
47830+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
47831+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
47832
47833 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
47834
47835diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
47836index 2bbd01f..e8baa64 100644
47837--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
47838+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
47839@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
47840 struct __vxge_hw_fifo *fifo;
47841 struct vxge_hw_fifo_config *config;
47842 u32 txdl_size, txdl_per_memblock;
47843- struct vxge_hw_mempool_cbs fifo_mp_callback;
47844+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
47845+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
47846+ };
47847+
47848 struct __vxge_hw_virtualpath *vpath;
47849
47850 if ((vp == NULL) || (attr == NULL)) {
47851@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
47852 goto exit;
47853 }
47854
47855- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
47856-
47857 fifo->mempool =
47858 __vxge_hw_mempool_create(vpath->hldev,
47859 fifo->config->memblock_size,
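
The vxge hunk moves a callback table off the stack: instead of a writable struct rebuilt on every call and filled in field by field, a function-scope static is initialized once at compile time (and, with the constify plugin, ends up effectively read-only). Sketch with hypothetical names:

	struct mempool_cbs {
		void (*item_func_alloc)(void *item);
	};

	static void fifo_item_alloc(void *item) { }

	static int fifo_create(void)
	{
		/* one compile-time instance instead of a per-call,
		 * writable on-stack function-pointer table */
		static struct mempool_cbs fifo_cbs = {
			.item_func_alloc = fifo_item_alloc,
		};

		/* ... pass &fifo_cbs to the mempool constructor ... */
		return 0;
	}
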
47860diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47861index 2bb48d5..d1a865d 100644
47862--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47863+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47864@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
47865 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
47866 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
47867 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
47868- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
47869+ pax_open_kernel();
47870+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
47871+ pax_close_kernel();
47872 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
47873 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
47874 max_tx_rings = QLCNIC_MAX_TX_RINGS;
47875diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47876index be7d7a6..a8983f8 100644
47877--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47878+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47879@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
47880 case QLCNIC_NON_PRIV_FUNC:
47881 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
47882 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
47883- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
47884+ pax_open_kernel();
47885+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
47886+ pax_close_kernel();
47887 break;
47888 case QLCNIC_PRIV_FUNC:
47889 ahw->op_mode = QLCNIC_PRIV_FUNC;
47890 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
47891- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
47892+ pax_open_kernel();
47893+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
47894+ pax_close_kernel();
47895 break;
47896 case QLCNIC_MGMT_FUNC:
47897 ahw->op_mode = QLCNIC_MGMT_FUNC;
47898 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
47899- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
47900+ pax_open_kernel();
47901+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
47902+ pax_close_kernel();
47903 break;
47904 default:
47905 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
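
The qlcnic hunks above store a function pointer into an ops structure that the constify plugin has made read-only, so each store is bracketed by pax_open_kernel()/pax_close_kernel() and the implicit const is cast away via *(void **)&. The pattern in isolation, assuming the PaX helpers and a hypothetical ops type:

	struct nic_ops {
		int (*init_driver)(void *adapter);
	};

	static void set_init_driver(struct nic_ops *ops, int (*fn)(void *))
	{
		pax_open_kernel();		  /* lift kernel write protection */
		*(void **)&ops->init_driver = fn; /* cast away the const-ness */
		pax_close_kernel();		  /* restore read-only mapping */
	}
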
47906diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
47907index c9f57fb..208bdc1 100644
47908--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
47909+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
47910@@ -1285,7 +1285,7 @@ flash_temp:
47911 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
47912 {
47913 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
47914- static const struct qlcnic_dump_operations *fw_dump_ops;
47915+ const struct qlcnic_dump_operations *fw_dump_ops;
47916 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
47917 u32 entry_offset, dump, no_entries, buf_offset = 0;
47918 int i, k, ops_cnt, ops_index, dump_size = 0;
47919diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
47920index 14a1c5c..38a141d 100644
47921--- a/drivers/net/ethernet/realtek/r8169.c
47922+++ b/drivers/net/ethernet/realtek/r8169.c
47923@@ -788,22 +788,22 @@ struct rtl8169_private {
47924 struct mdio_ops {
47925 void (*write)(struct rtl8169_private *, int, int);
47926 int (*read)(struct rtl8169_private *, int);
47927- } mdio_ops;
47928+ } __no_const mdio_ops;
47929
47930 struct pll_power_ops {
47931 void (*down)(struct rtl8169_private *);
47932 void (*up)(struct rtl8169_private *);
47933- } pll_power_ops;
47934+ } __no_const pll_power_ops;
47935
47936 struct jumbo_ops {
47937 void (*enable)(struct rtl8169_private *);
47938 void (*disable)(struct rtl8169_private *);
47939- } jumbo_ops;
47940+ } __no_const jumbo_ops;
47941
47942 struct csi_ops {
47943 void (*write)(struct rtl8169_private *, int, int);
47944 u32 (*read)(struct rtl8169_private *, int);
47945- } csi_ops;
47946+ } __no_const csi_ops;
47947
47948 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
47949 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
47950diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
47951index 6b861e3..204ac86 100644
47952--- a/drivers/net/ethernet/sfc/ptp.c
47953+++ b/drivers/net/ethernet/sfc/ptp.c
47954@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
47955 ptp->start.dma_addr);
47956
47957 /* Clear flag that signals MC ready */
47958- ACCESS_ONCE(*start) = 0;
47959+ ACCESS_ONCE_RW(*start) = 0;
47960 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
47961 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
47962 EFX_BUG_ON_PARANOID(rc);
47963diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
47964index 08c483b..2c4a553 100644
47965--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
47966+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
47967@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
47968
47969 writel(value, ioaddr + MMC_CNTRL);
47970
47971- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
47972- MMC_CNTRL, value);
47973+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
47974+// MMC_CNTRL, value);
47975 }
47976
47977 /* To mask all all interrupts.*/
47978diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
47979index 384ca4f..dd7d4f9 100644
47980--- a/drivers/net/hyperv/hyperv_net.h
47981+++ b/drivers/net/hyperv/hyperv_net.h
47982@@ -171,7 +171,7 @@ struct rndis_device {
47983 enum rndis_device_state state;
47984 bool link_state;
47985 bool link_change;
47986- atomic_t new_req_id;
47987+ atomic_unchecked_t new_req_id;
47988
47989 spinlock_t request_lock;
47990 struct list_head req_list;
47991diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
47992index ec0c40a..c9e42eb 100644
47993--- a/drivers/net/hyperv/rndis_filter.c
47994+++ b/drivers/net/hyperv/rndis_filter.c
47995@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
47996 * template
47997 */
47998 set = &rndis_msg->msg.set_req;
47999- set->req_id = atomic_inc_return(&dev->new_req_id);
48000+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48001
48002 /* Add to the request list */
48003 spin_lock_irqsave(&dev->request_lock, flags);
48004@@ -912,7 +912,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
48005
48006 /* Setup the rndis set */
48007 halt = &request->request_msg.msg.halt_req;
48008- halt->req_id = atomic_inc_return(&dev->new_req_id);
48009+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48010
48011 /* Ignore return since this msg is optional. */
48012 rndis_filter_send_request(dev, request);
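
new_req_id is a wrapping message-ID cookie, not a reference count. The PaX REFCOUNT feature makes ordinary atomic_t arithmetic trap on overflow to defeat refcount-overflow exploits, so counters that are allowed to wrap are switched to the _unchecked variants, which preserve the historical modular behaviour. A hedged sketch assuming that API:

	static atomic_unchecked_t req_id = ATOMIC_INIT(0);

	static u32 next_req_id(void)
	{
		/* may wrap past UINT_MAX without tripping the overflow
		 * check -- harmless for an ID, fatal for a refcount */
		return (u32)atomic_inc_return_unchecked(&req_id);
	}
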
48013diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
48014index 34f846b..4a0d5b1 100644
48015--- a/drivers/net/ifb.c
48016+++ b/drivers/net/ifb.c
48017@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
48018 return 0;
48019 }
48020
48021-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
48022+static struct rtnl_link_ops ifb_link_ops = {
48023 .kind = "ifb",
48024 .priv_size = sizeof(struct ifb_private),
48025 .setup = ifb_setup,
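
Dropping __read_mostly from ifb_link_ops here (and from the rtnl_link_ops and notifier_block instances in the files that follow) is a side effect of constification: the plugin turns the type const and places instances in .rodata, while __read_mostly demands a writable data section, and GCC reports a section type conflict when one object asks for both. Roughly:

	/* __read_mostly pins an object into a dedicated data section: */
	#define __read_mostly \
		__attribute__((__section__(".data..read_mostly")))

	/* once the type is implicitly const the object belongs in
	 * .rodata, so the section override has to go */
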
48026diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
48027index 612e073..a9f5eda 100644
48028--- a/drivers/net/macvlan.c
48029+++ b/drivers/net/macvlan.c
48030@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
48031 free_nskb:
48032 kfree_skb(nskb);
48033 err:
48034- atomic_long_inc(&skb->dev->rx_dropped);
48035+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
48036 }
48037
48038 static void macvlan_flush_sources(struct macvlan_port *port,
48039@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
48040 int macvlan_link_register(struct rtnl_link_ops *ops)
48041 {
48042 /* common fields */
48043- ops->priv_size = sizeof(struct macvlan_dev);
48044- ops->validate = macvlan_validate;
48045- ops->maxtype = IFLA_MACVLAN_MAX;
48046- ops->policy = macvlan_policy;
48047- ops->changelink = macvlan_changelink;
48048- ops->get_size = macvlan_get_size;
48049- ops->fill_info = macvlan_fill_info;
48050+ pax_open_kernel();
48051+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
48052+ *(void **)&ops->validate = macvlan_validate;
48053+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
48054+ *(const void **)&ops->policy = macvlan_policy;
48055+ *(void **)&ops->changelink = macvlan_changelink;
48056+ *(void **)&ops->get_size = macvlan_get_size;
48057+ *(void **)&ops->fill_info = macvlan_fill_info;
48058+ pax_close_kernel();
48059
48060 return rtnl_link_register(ops);
48061 };
48062@@ -1545,7 +1547,7 @@ static int macvlan_device_event(struct notifier_block *unused,
48063 return NOTIFY_DONE;
48064 }
48065
48066-static struct notifier_block macvlan_notifier_block __read_mostly = {
48067+static struct notifier_block macvlan_notifier_block = {
48068 .notifier_call = macvlan_device_event,
48069 };
48070
48071diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
48072index 919f4fc..012f6dd 100644
48073--- a/drivers/net/macvtap.c
48074+++ b/drivers/net/macvtap.c
48075@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
48076 dev->tx_queue_len = TUN_READQ_SIZE;
48077 }
48078
48079-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
48080+static struct rtnl_link_ops macvtap_link_ops = {
48081 .kind = "macvtap",
48082 .setup = macvtap_setup,
48083 .newlink = macvtap_newlink,
48084@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
48085 } /* else everything is zero */
48086 }
48087
48088+/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
48089+#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
48090+
48091 /* Get packet from user space buffer */
48092 static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
48093 struct iov_iter *from, int noblock)
48094 {
48095- int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
48096+ int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
48097 struct sk_buff *skb;
48098 struct macvlan_dev *vlan;
48099 unsigned long total_len = iov_iter_count(from);
48100@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
48101 linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
48102 }
48103
48104- skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
48105+ skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
48106 linear, noblock, &err);
48107 if (!skb)
48108 goto err;
48109@@ -1030,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
48110
48111 ret = 0;
48112 u = q->flags;
48113- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48114+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48115 put_user(u, &ifr->ifr_flags))
48116 ret = -EFAULT;
48117 macvtap_put_vlan(vlan);
48118@@ -1214,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
48119 return NOTIFY_DONE;
48120 }
48121
48122-static struct notifier_block macvtap_notifier_block __read_mostly = {
48123+static struct notifier_block macvtap_notifier_block = {
48124 .notifier_call = macvtap_device_event,
48125 };
48126
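
The MACVTAP_RESERVE change works because of how the neighbour code lays out hardware headers: it expects them to end on an HH_DATA_MOD boundary, and NET_IP_ALIGN (0 or 2 depending on the architecture) gives no such guarantee. Paraphrasing the netdevice.h macros the new define relies on:

	#define HH_DATA_MOD	16
	#define HH_DATA_OFF(len) \
		(HH_DATA_MOD - ((((len) - 1) & (HH_DATA_MOD - 1)) + 1))

	/* ETH_HLEN == 14, so HH_DATA_OFF(ETH_HLEN) == 2: two bytes of
	 * headroom make the 14-byte Ethernet header end 16-byte aligned */
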
48127diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
48128index 34924df..a747360 100644
48129--- a/drivers/net/nlmon.c
48130+++ b/drivers/net/nlmon.c
48131@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
48132 return 0;
48133 }
48134
48135-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
48136+static struct rtnl_link_ops nlmon_link_ops = {
48137 .kind = "nlmon",
48138 .priv_size = sizeof(struct nlmon),
48139 .setup = nlmon_setup,
48140diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
48141index 3fc91e8..6c36337 100644
48142--- a/drivers/net/phy/phy_device.c
48143+++ b/drivers/net/phy/phy_device.c
48144@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
48145 * zero on success.
48146 *
48147 */
48148-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48149+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
48150 struct phy_c45_device_ids *c45_ids) {
48151 int phy_reg;
48152 int i, reg_addr;
48153@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48154 * its return value is in turn returned.
48155 *
48156 */
48157-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48158+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
48159 bool is_c45, struct phy_c45_device_ids *c45_ids)
48160 {
48161 int phy_reg;
48162@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48163 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
48164 {
48165 struct phy_c45_device_ids c45_ids = {0};
48166- u32 phy_id = 0;
48167+ int phy_id = 0;
48168 int r;
48169
48170 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
48171diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
48172index af034db..1611c0b2 100644
48173--- a/drivers/net/ppp/ppp_generic.c
48174+++ b/drivers/net/ppp/ppp_generic.c
48175@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48176 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
48177 struct ppp_stats stats;
48178 struct ppp_comp_stats cstats;
48179- char *vers;
48180
48181 switch (cmd) {
48182 case SIOCGPPPSTATS:
48183@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48184 break;
48185
48186 case SIOCGPPPVER:
48187- vers = PPP_VERSION;
48188- if (copy_to_user(addr, vers, strlen(vers) + 1))
48189+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
48190 break;
48191 err = 0;
48192 break;
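
The ppp hunk can drop the vers temporary because PPP_VERSION is a string-literal macro: sizeof on it is a compile-time constant that already includes the terminating NUL, whereas strlen(vers) + 1 computed the same number at runtime. For example:

	#include <string.h>

	#define PPP_VERSION "2.4.2"	/* the value ppp_generic.c uses */

	static void fill_version(char *dst)
	{
		/* sizeof(PPP_VERSION) == 6 == strlen(PPP_VERSION) + 1,
		 * folded by the compiler rather than computed at runtime */
		memcpy(dst, PPP_VERSION, sizeof(PPP_VERSION));
	}
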
48193diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
48194index 079f7ad..b2a2bfa7 100644
48195--- a/drivers/net/slip/slhc.c
48196+++ b/drivers/net/slip/slhc.c
48197@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
48198 register struct tcphdr *thp;
48199 register struct iphdr *ip;
48200 register struct cstate *cs;
48201- int len, hdrlen;
48202+ long len, hdrlen;
48203 unsigned char *cp = icp;
48204
48205 /* We've got a compressed packet; read the change byte */
48206diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
48207index f7ff493..153e0198 100644
48208--- a/drivers/net/team/team.c
48209+++ b/drivers/net/team/team.c
48210@@ -2105,7 +2105,7 @@ static unsigned int team_get_num_rx_queues(void)
48211 return TEAM_DEFAULT_NUM_RX_QUEUES;
48212 }
48213
48214-static struct rtnl_link_ops team_link_ops __read_mostly = {
48215+static struct rtnl_link_ops team_link_ops = {
48216 .kind = DRV_NAME,
48217 .priv_size = sizeof(struct team),
48218 .setup = team_setup,
48219@@ -2895,7 +2895,7 @@ static int team_device_event(struct notifier_block *unused,
48220 return NOTIFY_DONE;
48221 }
48222
48223-static struct notifier_block team_notifier_block __read_mostly = {
48224+static struct notifier_block team_notifier_block = {
48225 .notifier_call = team_device_event,
48226 };
48227
48228diff --git a/drivers/net/tun.c b/drivers/net/tun.c
48229index 10f9e40..3515e7e 100644
48230--- a/drivers/net/tun.c
48231+++ b/drivers/net/tun.c
48232@@ -1425,7 +1425,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
48233 return -EINVAL;
48234 }
48235
48236-static struct rtnl_link_ops tun_link_ops __read_mostly = {
48237+static struct rtnl_link_ops tun_link_ops = {
48238 .kind = DRV_NAME,
48239 .priv_size = sizeof(struct tun_struct),
48240 .setup = tun_setup,
48241@@ -1827,7 +1827,7 @@ unlock:
48242 }
48243
48244 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48245- unsigned long arg, int ifreq_len)
48246+ unsigned long arg, size_t ifreq_len)
48247 {
48248 struct tun_file *tfile = file->private_data;
48249 struct tun_struct *tun;
48250@@ -1841,6 +1841,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48251 int le;
48252 int ret;
48253
48254+ if (ifreq_len > sizeof ifr)
48255+ return -EFAULT;
48256+
48257 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
48258 if (copy_from_user(&ifr, argp, ifreq_len))
48259 return -EFAULT;
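
This is the mirror image of the de4x5 fix earlier in the patch: ifreq_len differs between native and compat callers while ifr is a fixed-size object on the kernel stack, so the length is validated before copy_from_user() can overrun it. As a stand-alone sketch (hypothetical helper):

	#include <linux/uaccess.h>

	/* len is caller-controlled; out/out_size is a fixed kernel
	 * destination, typically on the stack */
	static long bounded_copy_in(void *out, size_t out_size,
				    const void __user *argp, size_t len)
	{
		if (len > out_size)		/* would overflow: refuse */
			return -EFAULT;
		if (copy_from_user(out, argp, len))
			return -EFAULT;
		return 0;
	}
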
48260diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
48261index 9c5aa92..8cd0405 100644
48262--- a/drivers/net/usb/hso.c
48263+++ b/drivers/net/usb/hso.c
48264@@ -71,7 +71,7 @@
48265 #include <asm/byteorder.h>
48266 #include <linux/serial_core.h>
48267 #include <linux/serial.h>
48268-
48269+#include <asm/local.h>
48270
48271 #define MOD_AUTHOR "Option Wireless"
48272 #define MOD_DESCRIPTION "USB High Speed Option driver"
48273@@ -1178,7 +1178,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
48274 struct urb *urb;
48275
48276 urb = serial->rx_urb[0];
48277- if (serial->port.count > 0) {
48278+ if (atomic_read(&serial->port.count) > 0) {
48279 count = put_rxbuf_data(urb, serial);
48280 if (count == -1)
48281 return;
48282@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
48283 DUMP1(urb->transfer_buffer, urb->actual_length);
48284
48285 /* Anyone listening? */
48286- if (serial->port.count == 0)
48287+ if (atomic_read(&serial->port.count) == 0)
48288 return;
48289
48290 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
48291@@ -1278,8 +1278,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48292 tty_port_tty_set(&serial->port, tty);
48293
48294 /* check for port already opened, if not set the termios */
48295- serial->port.count++;
48296- if (serial->port.count == 1) {
48297+ if (atomic_inc_return(&serial->port.count) == 1) {
48298 serial->rx_state = RX_IDLE;
48299 /* Force default termio settings */
48300 _hso_serial_set_termios(tty, NULL);
48301@@ -1289,7 +1288,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48302 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
48303 if (result) {
48304 hso_stop_serial_device(serial->parent);
48305- serial->port.count--;
48306+ atomic_dec(&serial->port.count);
48307 kref_put(&serial->parent->ref, hso_serial_ref_free);
48308 }
48309 } else {
48310@@ -1326,10 +1325,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
48311
48312 /* reset the rts and dtr */
48313 /* do the actual close */
48314- serial->port.count--;
48315+ atomic_dec(&serial->port.count);
48316
48317- if (serial->port.count <= 0) {
48318- serial->port.count = 0;
48319+ if (atomic_read(&serial->port.count) <= 0) {
48320+ atomic_set(&serial->port.count, 0);
48321 tty_port_tty_set(&serial->port, NULL);
48322 if (!usb_gone)
48323 hso_stop_serial_device(serial->parent);
48324@@ -1404,7 +1403,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
48325
48326 /* the actual setup */
48327 spin_lock_irqsave(&serial->serial_lock, flags);
48328- if (serial->port.count)
48329+ if (atomic_read(&serial->port.count))
48330 _hso_serial_set_termios(tty, old);
48331 else
48332 tty->termios = *old;
48333@@ -1873,7 +1872,7 @@ static void intr_callback(struct urb *urb)
48334 D1("Pending read interrupt on port %d\n", i);
48335 spin_lock(&serial->serial_lock);
48336 if (serial->rx_state == RX_IDLE &&
48337- serial->port.count > 0) {
48338+ atomic_read(&serial->port.count) > 0) {
48339 /* Setup and send a ctrl req read on
48340 * port i */
48341 if (!serial->rx_urb_filled[0]) {
48342@@ -3046,7 +3045,7 @@ static int hso_resume(struct usb_interface *iface)
48343 /* Start all serial ports */
48344 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
48345 if (serial_table[i] && (serial_table[i]->interface == iface)) {
48346- if (dev2ser(serial_table[i])->port.count) {
48347+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
48348 result =
48349 hso_start_serial_device(serial_table[i], GFP_NOIO);
48350 hso_kick_transmit(dev2ser(serial_table[i]));
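
The hso conversion turns the tty port open count from a bare int, incremented and then tested in two separate steps, into an atomic_t whose increment-and-test collapses into one atomic_inc_return(); the count is also read from URB-completion context, where the unlocked int was racy. The core of the scheme:

	static atomic_t open_count = ATOMIC_INIT(0);

	static int serial_open(void)
	{
		/* "bump and am-I-first?" as a single atomic step */
		if (atomic_inc_return(&open_count) == 1) {
			/* first opener: bring the device up */
		}
		return 0;
	}

	static void serial_close(void)
	{
		atomic_dec(&open_count);
		if (atomic_read(&open_count) <= 0)
			atomic_set(&open_count, 0);	/* clamp, as the driver does */
	}
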
48351diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
48352index bf405f1..fd847ee 100644
48353--- a/drivers/net/usb/r8152.c
48354+++ b/drivers/net/usb/r8152.c
48355@@ -571,7 +571,7 @@ struct r8152 {
48356 void (*unload)(struct r8152 *);
48357 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
48358 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
48359- } rtl_ops;
48360+ } __no_const rtl_ops;
48361
48362 int intr_interval;
48363 u32 saved_wolopts;
48364diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
48365index a2515887..6d13233 100644
48366--- a/drivers/net/usb/sierra_net.c
48367+++ b/drivers/net/usb/sierra_net.c
48368@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
48369 /* atomic counter partially included in MAC address to make sure 2 devices
48370 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
48371 */
48372-static atomic_t iface_counter = ATOMIC_INIT(0);
48373+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
48374
48375 /*
48376 * SYNC Timer Delay definition used to set the expiry time
48377@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
48378 dev->net->netdev_ops = &sierra_net_device_ops;
48379
48380 /* change MAC addr to include, ifacenum, and to be unique */
48381- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
48382+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
48383 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
48384
48385 /* we will have to manufacture ethernet headers, prepare template */
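
[annotation] PaX's REFCOUNT hardening instruments every atomic_t against overflow; counters where wraparound is harmless are switched to atomic_unchecked_t to avoid false positives. Here only the low byte of the counter ever reaches the MAC address, and the driver's own comment already concedes the scheme breaks past 255 interfaces, so wrap is benign. Why the truncation does not care about overflow, in miniature:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned iface_counter = 300;               /* past the 255 limit */
            uint8_t mac_byte = (uint8_t)iface_counter;  /* only the low byte is used */
            printf("MAC byte: %u\n", mac_byte);         /* prints 44 */
            return 0;
    }
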
48386diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
48387index 059fdf1..7543217 100644
48388--- a/drivers/net/virtio_net.c
48389+++ b/drivers/net/virtio_net.c
48390@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
48391 #define RECEIVE_AVG_WEIGHT 64
48392
48393 /* Minimum alignment for mergeable packet buffers. */
48394-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
48395+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
48396
48397 #define VIRTNET_DRIVER_VERSION "1.0.0"
48398
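
[annotation] The kernel's max() macro enforces that both operands have the same type; when L1_CACHE_BYTES expands to an unsigned long (as it can on some configurations), a bare int literal 256 trips the check, hence 256UL. A simplified rendition of the macro's type trap:

    #include <stdio.h>

    #define max(x, y) ({                            \
            __typeof__(x) _x = (x);                 \
            __typeof__(y) _y = (y);                 \
            (void)(&_x == &_y); /* kernel-style type check */ \
            _x > _y ? _x : _y; })

    int main(void)
    {
            unsigned long cache_bytes = 64;
            /* max(cache_bytes, 256) would warn: comparison of distinct
             * pointer types (unsigned long * vs int *); 256UL is clean */
            printf("%lu\n", max(cache_bytes, 256UL));
            return 0;
    }
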
48399diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
48400index a8c755d..a988b71 100644
48401--- a/drivers/net/vxlan.c
48402+++ b/drivers/net/vxlan.c
48403@@ -2702,7 +2702,7 @@ nla_put_failure:
48404 return -EMSGSIZE;
48405 }
48406
48407-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
48408+static struct rtnl_link_ops vxlan_link_ops = {
48409 .kind = "vxlan",
48410 .maxtype = IFLA_VXLAN_MAX,
48411 .policy = vxlan_policy,
48412@@ -2749,7 +2749,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
48413 return NOTIFY_DONE;
48414 }
48415
48416-static struct notifier_block vxlan_notifier_block __read_mostly = {
48417+static struct notifier_block vxlan_notifier_block = {
48418 .notifier_call = vxlan_lowerdev_event,
48419 };
48420
48421diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
48422index 5920c99..ff2e4a5 100644
48423--- a/drivers/net/wan/lmc/lmc_media.c
48424+++ b/drivers/net/wan/lmc/lmc_media.c
48425@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
48426 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
48427
48428 lmc_media_t lmc_ds3_media = {
48429- lmc_ds3_init, /* special media init stuff */
48430- lmc_ds3_default, /* reset to default state */
48431- lmc_ds3_set_status, /* reset status to state provided */
48432- lmc_dummy_set_1, /* set clock source */
48433- lmc_dummy_set2_1, /* set line speed */
48434- lmc_ds3_set_100ft, /* set cable length */
48435- lmc_ds3_set_scram, /* set scrambler */
48436- lmc_ds3_get_link_status, /* get link status */
48437- lmc_dummy_set_1, /* set link status */
48438- lmc_ds3_set_crc_length, /* set CRC length */
48439- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48440- lmc_ds3_watchdog
48441+ .init = lmc_ds3_init, /* special media init stuff */
48442+ .defaults = lmc_ds3_default, /* reset to default state */
48443+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
48444+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
48445+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48446+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
48447+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
48448+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
48449+ .set_link_status = lmc_dummy_set_1, /* set link status */
48450+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
48451+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48452+ .watchdog = lmc_ds3_watchdog
48453 };
48454
48455 lmc_media_t lmc_hssi_media = {
48456- lmc_hssi_init, /* special media init stuff */
48457- lmc_hssi_default, /* reset to default state */
48458- lmc_hssi_set_status, /* reset status to state provided */
48459- lmc_hssi_set_clock, /* set clock source */
48460- lmc_dummy_set2_1, /* set line speed */
48461- lmc_dummy_set_1, /* set cable length */
48462- lmc_dummy_set_1, /* set scrambler */
48463- lmc_hssi_get_link_status, /* get link status */
48464- lmc_hssi_set_link_status, /* set link status */
48465- lmc_hssi_set_crc_length, /* set CRC length */
48466- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48467- lmc_hssi_watchdog
48468+ .init = lmc_hssi_init, /* special media init stuff */
48469+ .defaults = lmc_hssi_default, /* reset to default state */
48470+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
48471+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
48472+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48473+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48474+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48475+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
48476+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
48477+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
48478+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48479+ .watchdog = lmc_hssi_watchdog
48480 };
48481
48482-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
48483- lmc_ssi_default, /* reset to default state */
48484- lmc_ssi_set_status, /* reset status to state provided */
48485- lmc_ssi_set_clock, /* set clock source */
48486- lmc_ssi_set_speed, /* set line speed */
48487- lmc_dummy_set_1, /* set cable length */
48488- lmc_dummy_set_1, /* set scrambler */
48489- lmc_ssi_get_link_status, /* get link status */
48490- lmc_ssi_set_link_status, /* set link status */
48491- lmc_ssi_set_crc_length, /* set CRC length */
48492- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48493- lmc_ssi_watchdog
48494+lmc_media_t lmc_ssi_media = {
48495+ .init = lmc_ssi_init, /* special media init stuff */
48496+ .defaults = lmc_ssi_default, /* reset to default state */
48497+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
48498+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
48499+ .set_speed = lmc_ssi_set_speed, /* set line speed */
48500+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48501+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48502+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
48503+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
48504+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
48505+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48506+ .watchdog = lmc_ssi_watchdog
48507 };
48508
48509 lmc_media_t lmc_t1_media = {
48510- lmc_t1_init, /* special media init stuff */
48511- lmc_t1_default, /* reset to default state */
48512- lmc_t1_set_status, /* reset status to state provided */
48513- lmc_t1_set_clock, /* set clock source */
48514- lmc_dummy_set2_1, /* set line speed */
48515- lmc_dummy_set_1, /* set cable length */
48516- lmc_dummy_set_1, /* set scrambler */
48517- lmc_t1_get_link_status, /* get link status */
48518- lmc_dummy_set_1, /* set link status */
48519- lmc_t1_set_crc_length, /* set CRC length */
48520- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48521- lmc_t1_watchdog
48522+ .init = lmc_t1_init, /* special media init stuff */
48523+ .defaults = lmc_t1_default, /* reset to default state */
48524+ .set_status = lmc_t1_set_status, /* reset status to state provided */
48525+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
48526+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48527+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48528+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48529+ .get_link_status = lmc_t1_get_link_status, /* get link status */
48530+ .set_link_status = lmc_dummy_set_1, /* set link status */
48531+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
48532+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48533+ .watchdog = lmc_t1_watchdog
48534 };
48535
48536 static void
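
[annotation] The lmc_media_t tables above are rewritten from positional to designated initializers. grsecurity does this wholesale because its layout plugins may reorder or re-qualify structure members; positional initializers then bind callbacks to the wrong slots silently, while designated ones bind by name. In miniature, with fields cut down from lmc_media_t:

    typedef struct {
            void (*init)(void);
            void (*watchdog)(void);
    } media_t;

    static void ds3_init(void)     {}
    static void ds3_watchdog(void) {}

    /* positional: silently wrong if the fields are ever reordered */
    media_t fragile = { ds3_init, ds3_watchdog };

    /* designated: immune to reordering */
    media_t robust = {
            .init     = ds3_init,
            .watchdog = ds3_watchdog,
    };
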
48537diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
48538index feacc3b..5bac0de 100644
48539--- a/drivers/net/wan/z85230.c
48540+++ b/drivers/net/wan/z85230.c
48541@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
48542
48543 struct z8530_irqhandler z8530_sync =
48544 {
48545- z8530_rx,
48546- z8530_tx,
48547- z8530_status
48548+ .rx = z8530_rx,
48549+ .tx = z8530_tx,
48550+ .status = z8530_status
48551 };
48552
48553 EXPORT_SYMBOL(z8530_sync);
48554@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
48555 }
48556
48557 static struct z8530_irqhandler z8530_dma_sync = {
48558- z8530_dma_rx,
48559- z8530_dma_tx,
48560- z8530_dma_status
48561+ .rx = z8530_dma_rx,
48562+ .tx = z8530_dma_tx,
48563+ .status = z8530_dma_status
48564 };
48565
48566 static struct z8530_irqhandler z8530_txdma_sync = {
48567- z8530_rx,
48568- z8530_dma_tx,
48569- z8530_dma_status
48570+ .rx = z8530_rx,
48571+ .tx = z8530_dma_tx,
48572+ .status = z8530_dma_status
48573 };
48574
48575 /**
48576@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
48577
48578 struct z8530_irqhandler z8530_nop=
48579 {
48580- z8530_rx_clear,
48581- z8530_tx_clear,
48582- z8530_status_clear
48583+ .rx = z8530_rx_clear,
48584+ .tx = z8530_tx_clear,
48585+ .status = z8530_status_clear
48586 };
48587
48588
48589diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
48590index 0b60295..b8bfa5b 100644
48591--- a/drivers/net/wimax/i2400m/rx.c
48592+++ b/drivers/net/wimax/i2400m/rx.c
48593@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
48594 if (i2400m->rx_roq == NULL)
48595 goto error_roq_alloc;
48596
48597- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
48598+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
48599 GFP_KERNEL);
48600 if (rd == NULL) {
48601 result = -ENOMEM;
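
[annotation] Swapping kcalloc()'s two arguments is semantically neutral, since the allocator multiplies them either way with its own overflow check; the reordering here appears to serve the size_overflow plugin, which tracks expressions reaching particular parameters and is presumably happiest with the constant sizeof in that slot. The guard kcalloc itself applies, in userland miniature:

    #include <stdlib.h>
    #include <stdint.h>

    static void *calloc_checked(size_t n, size_t size)
    {
            if (size != 0 && n > SIZE_MAX / size)
                    return NULL;            /* n * size would wrap */
            return calloc(n, size);
    }

    int main(void)
    {
            void *p = calloc_checked(16, sizeof(long)); /* count, element size */
            free(p);
            return 0;
    }
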
48602diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
48603index e71a2ce..2268d61 100644
48604--- a/drivers/net/wireless/airo.c
48605+++ b/drivers/net/wireless/airo.c
48606@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
48607 struct airo_info *ai = dev->ml_priv;
48608 int ridcode;
48609 int enabled;
48610- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48611+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48612 unsigned char *iobuf;
48613
48614 /* Only super-user can write RIDs */
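
[annotation] Dropping static from the local writer function pointer fixes a latent race rather than hardening anything: a static local is one shared slot for every task inside the ioctl concurrently, so a second caller could retarget the pointer between a first caller's assignment and its indirect call. Per-invocation storage removes the window. The shape of the bug, with illustrative names:

    static int write_rid_a(int rid) { return rid; }
    static int write_rid_b(int rid) { return -rid; }

    int writerids_racy(int ridcode)
    {
            static int (*writer)(int);      /* shared by all callers */
            writer = (ridcode & 1) ? write_rid_a : write_rid_b;
            return writer(ridcode);         /* may call another caller's pick */
    }

    int writerids_fixed(int ridcode)
    {
            int (*writer)(int);             /* automatic: private per call */
            writer = (ridcode & 1) ? write_rid_a : write_rid_b;
            return writer(ridcode);
    }
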
48615diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
48616index da92bfa..5a9001a 100644
48617--- a/drivers/net/wireless/at76c50x-usb.c
48618+++ b/drivers/net/wireless/at76c50x-usb.c
48619@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
48620 }
48621
48622 /* Convert timeout from the DFU status to jiffies */
48623-static inline unsigned long at76_get_timeout(struct dfu_status *s)
48624+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
48625 {
48626 return msecs_to_jiffies((s->poll_timeout[2] << 16)
48627 | (s->poll_timeout[1] << 8)
48628diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
48629index f1946a6..cd367fb 100644
48630--- a/drivers/net/wireless/ath/ath10k/htc.c
48631+++ b/drivers/net/wireless/ath/ath10k/htc.c
48632@@ -851,7 +851,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
48633 /* registered target arrival callback from the HIF layer */
48634 int ath10k_htc_init(struct ath10k *ar)
48635 {
48636- struct ath10k_hif_cb htc_callbacks;
48637+ static struct ath10k_hif_cb htc_callbacks = {
48638+ .rx_completion = ath10k_htc_rx_completion_handler,
48639+ .tx_completion = ath10k_htc_tx_completion_handler,
48640+ };
48641 struct ath10k_htc_ep *ep = NULL;
48642 struct ath10k_htc *htc = &ar->htc;
48643
48644@@ -860,8 +863,6 @@ int ath10k_htc_init(struct ath10k *ar)
48645 ath10k_htc_reset_endpoint_states(htc);
48646
48647 /* setup HIF layer callbacks */
48648- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
48649- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
48650 htc->ar = ar;
48651
48652 /* Get HIF default pipe for HTC message exchange */
48653diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
48654index 527179c..a890150 100644
48655--- a/drivers/net/wireless/ath/ath10k/htc.h
48656+++ b/drivers/net/wireless/ath/ath10k/htc.h
48657@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
48658
48659 struct ath10k_htc_ops {
48660 void (*target_send_suspend_complete)(struct ath10k *ar);
48661-};
48662+} __no_const;
48663
48664 struct ath10k_htc_ep_ops {
48665 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
48666 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
48667 void (*ep_tx_credits)(struct ath10k *);
48668-};
48669+} __no_const;
48670
48671 /* service connection information */
48672 struct ath10k_htc_svc_conn_req {
48673diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48674index f816909..e56cd8b 100644
48675--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48676+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48677@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48678 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
48679 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
48680
48681- ACCESS_ONCE(ads->ds_link) = i->link;
48682- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
48683+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
48684+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
48685
48686 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
48687 ctl6 = SM(i->keytype, AR_EncrType);
48688@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48689
48690 if ((i->is_first || i->is_last) &&
48691 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
48692- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
48693+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
48694 | set11nTries(i->rates, 1)
48695 | set11nTries(i->rates, 2)
48696 | set11nTries(i->rates, 3)
48697 | (i->dur_update ? AR_DurUpdateEna : 0)
48698 | SM(0, AR_BurstDur);
48699
48700- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
48701+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
48702 | set11nRate(i->rates, 1)
48703 | set11nRate(i->rates, 2)
48704 | set11nRate(i->rates, 3);
48705 } else {
48706- ACCESS_ONCE(ads->ds_ctl2) = 0;
48707- ACCESS_ONCE(ads->ds_ctl3) = 0;
48708+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
48709+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
48710 }
48711
48712 if (!i->is_first) {
48713- ACCESS_ONCE(ads->ds_ctl0) = 0;
48714- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48715- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48716+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
48717+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48718+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48719 return;
48720 }
48721
48722@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48723 break;
48724 }
48725
48726- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48727+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48728 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48729 | SM(i->txpower[0], AR_XmitPower0)
48730 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48731@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48732 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
48733 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
48734
48735- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48736- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48737+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48738+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48739
48740 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
48741 return;
48742
48743- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48744+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48745 | set11nPktDurRTSCTS(i->rates, 1);
48746
48747- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48748+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48749 | set11nPktDurRTSCTS(i->rates, 3);
48750
48751- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48752+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48753 | set11nRateFlags(i->rates, 1)
48754 | set11nRateFlags(i->rates, 2)
48755 | set11nRateFlags(i->rates, 3)
48756 | SM(i->rtscts_rate, AR_RTSCTSRate);
48757
48758- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48759- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48760- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48761+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48762+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48763+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48764 }
48765
48766 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
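
[annotation] grsecurity redefines ACCESS_ONCE() to go through a const-qualified volatile pointer so that stray writes through it fail to compile, and adds ACCESS_ONCE_RW() for deliberate stores; the descriptor writes in ar9002/ar9003 above are exactly such stores. The two macros in simplified form; treat this as the patch's idiom, not a quotation:

    #define ACCESS_ONCE(x)          (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x)       (*(volatile __typeof__(x) *)&(x))

    static unsigned int ds_link;

    unsigned int demo(unsigned int v)
    {
            unsigned int old = ACCESS_ONCE(ds_link);  /* loads: either macro works */
            ACCESS_ONCE_RW(ds_link) = v;              /* stores: RW only; assigning
                                                         through ACCESS_ONCE() writes
                                                         a const lvalue and is
                                                         rejected at compile time */
            return old;
    }
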
48767diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48768index da84b70..83e4978 100644
48769--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48770+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48771@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48772 (i->qcu << AR_TxQcuNum_S) | desc_len;
48773
48774 checksum += val;
48775- ACCESS_ONCE(ads->info) = val;
48776+ ACCESS_ONCE_RW(ads->info) = val;
48777
48778 checksum += i->link;
48779- ACCESS_ONCE(ads->link) = i->link;
48780+ ACCESS_ONCE_RW(ads->link) = i->link;
48781
48782 checksum += i->buf_addr[0];
48783- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
48784+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
48785 checksum += i->buf_addr[1];
48786- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
48787+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
48788 checksum += i->buf_addr[2];
48789- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
48790+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
48791 checksum += i->buf_addr[3];
48792- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
48793+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
48794
48795 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
48796- ACCESS_ONCE(ads->ctl3) = val;
48797+ ACCESS_ONCE_RW(ads->ctl3) = val;
48798 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
48799- ACCESS_ONCE(ads->ctl5) = val;
48800+ ACCESS_ONCE_RW(ads->ctl5) = val;
48801 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
48802- ACCESS_ONCE(ads->ctl7) = val;
48803+ ACCESS_ONCE_RW(ads->ctl7) = val;
48804 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
48805- ACCESS_ONCE(ads->ctl9) = val;
48806+ ACCESS_ONCE_RW(ads->ctl9) = val;
48807
48808 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
48809- ACCESS_ONCE(ads->ctl10) = checksum;
48810+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
48811
48812 if (i->is_first || i->is_last) {
48813- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
48814+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
48815 | set11nTries(i->rates, 1)
48816 | set11nTries(i->rates, 2)
48817 | set11nTries(i->rates, 3)
48818 | (i->dur_update ? AR_DurUpdateEna : 0)
48819 | SM(0, AR_BurstDur);
48820
48821- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
48822+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
48823 | set11nRate(i->rates, 1)
48824 | set11nRate(i->rates, 2)
48825 | set11nRate(i->rates, 3);
48826 } else {
48827- ACCESS_ONCE(ads->ctl13) = 0;
48828- ACCESS_ONCE(ads->ctl14) = 0;
48829+ ACCESS_ONCE_RW(ads->ctl13) = 0;
48830+ ACCESS_ONCE_RW(ads->ctl14) = 0;
48831 }
48832
48833 ads->ctl20 = 0;
48834@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48835
48836 ctl17 = SM(i->keytype, AR_EncrType);
48837 if (!i->is_first) {
48838- ACCESS_ONCE(ads->ctl11) = 0;
48839- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
48840- ACCESS_ONCE(ads->ctl15) = 0;
48841- ACCESS_ONCE(ads->ctl16) = 0;
48842- ACCESS_ONCE(ads->ctl17) = ctl17;
48843- ACCESS_ONCE(ads->ctl18) = 0;
48844- ACCESS_ONCE(ads->ctl19) = 0;
48845+ ACCESS_ONCE_RW(ads->ctl11) = 0;
48846+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
48847+ ACCESS_ONCE_RW(ads->ctl15) = 0;
48848+ ACCESS_ONCE_RW(ads->ctl16) = 0;
48849+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
48850+ ACCESS_ONCE_RW(ads->ctl18) = 0;
48851+ ACCESS_ONCE_RW(ads->ctl19) = 0;
48852 return;
48853 }
48854
48855- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
48856+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
48857 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48858 | SM(i->txpower[0], AR_XmitPower0)
48859 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48860@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48861 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
48862 ctl12 |= SM(val, AR_PAPRDChainMask);
48863
48864- ACCESS_ONCE(ads->ctl12) = ctl12;
48865- ACCESS_ONCE(ads->ctl17) = ctl17;
48866+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
48867+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
48868
48869- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
48870+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
48871 | set11nPktDurRTSCTS(i->rates, 1);
48872
48873- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
48874+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
48875 | set11nPktDurRTSCTS(i->rates, 3);
48876
48877- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
48878+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
48879 | set11nRateFlags(i->rates, 1)
48880 | set11nRateFlags(i->rates, 2)
48881 | set11nRateFlags(i->rates, 3)
48882 | SM(i->rtscts_rate, AR_RTSCTSRate);
48883
48884- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
48885+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
48886
48887- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
48888- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
48889- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
48890+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
48891+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
48892+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
48893 }
48894
48895 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
48896diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
48897index 1cbd335..27dfb40 100644
48898--- a/drivers/net/wireless/ath/ath9k/hw.h
48899+++ b/drivers/net/wireless/ath/ath9k/hw.h
48900@@ -640,7 +640,7 @@ struct ath_hw_private_ops {
48901
48902 /* ANI */
48903 void (*ani_cache_ini_regs)(struct ath_hw *ah);
48904-};
48905+} __no_const;
48906
48907 /**
48908 * struct ath_spec_scan - parameters for Atheros spectral scan
48909@@ -716,7 +716,7 @@ struct ath_hw_ops {
48910 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
48911 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
48912 #endif
48913-};
48914+} __no_const;
48915
48916 struct ath_nf_limits {
48917 s16 max;
48918diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
48919index 62b0bf4..4ae094c 100644
48920--- a/drivers/net/wireless/ath/ath9k/main.c
48921+++ b/drivers/net/wireless/ath/ath9k/main.c
48922@@ -2546,16 +2546,18 @@ void ath9k_fill_chanctx_ops(void)
48923 if (!ath9k_is_chanctx_enabled())
48924 return;
48925
48926- ath9k_ops.hw_scan = ath9k_hw_scan;
48927- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
48928- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
48929- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
48930- ath9k_ops.add_chanctx = ath9k_add_chanctx;
48931- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
48932- ath9k_ops.change_chanctx = ath9k_change_chanctx;
48933- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
48934- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
48935- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
48936+ pax_open_kernel();
48937+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
48938+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
48939+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
48940+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
48941+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
48942+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
48943+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
48944+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
48945+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
48946+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
48947+ pax_close_kernel();
48948 }
48949
48950 #endif
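
[annotation] Once the constify plugin moves ath9k_ops into read-only memory, late customization such as the chanctx callbacks has to bracket its writes with pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (on x86 by toggling CR0.WP), and cast away the const on each slot. The bracket in miniature; the stub #defines are assumptions so this compiles outside a PaX kernel:

    #ifndef pax_open_kernel
    #define pax_open_kernel()       do { } while (0)  /* stub: the real one makes rodata writable */
    #define pax_close_kernel()      do { } while (0)
    #endif

    struct ops_like {                       /* stand-in for ieee80211_ops */
            int (*hw_scan)(void);
    };

    static struct ops_like ath9k_ops_like;  /* constified in the real kernel */

    static int my_hw_scan(void) { return 1; }

    void fill_chanctx_ops(void)
    {
            pax_open_kernel();
            *(void **)&ath9k_ops_like.hw_scan = (void *)my_hw_scan; /* cast defeats const */
            pax_close_kernel();
    }
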
48951diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
48952index 058a9f2..d5cb1ba 100644
48953--- a/drivers/net/wireless/b43/phy_lp.c
48954+++ b/drivers/net/wireless/b43/phy_lp.c
48955@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
48956 {
48957 struct ssb_bus *bus = dev->dev->sdev->bus;
48958
48959- static const struct b206x_channel *chandata = NULL;
48960+ const struct b206x_channel *chandata = NULL;
48961 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
48962 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
48963 u16 old_comm15, scale;
48964diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
48965index dc1d20c..f7a4f06 100644
48966--- a/drivers/net/wireless/iwlegacy/3945-mac.c
48967+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
48968@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
48969 */
48970 if (il3945_mod_params.disable_hw_scan) {
48971 D_INFO("Disabling hw_scan\n");
48972- il3945_mac_ops.hw_scan = NULL;
48973+ pax_open_kernel();
48974+ *(void **)&il3945_mac_ops.hw_scan = NULL;
48975+ pax_close_kernel();
48976 }
48977
48978 D_INFO("*** LOAD DRIVER ***\n");
48979diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
48980index 0ffb6ff..c0b7f0e 100644
48981--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
48982+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
48983@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
48984 {
48985 struct iwl_priv *priv = file->private_data;
48986 char buf[64];
48987- int buf_size;
48988+ size_t buf_size;
48989 u32 offset, len;
48990
48991 memset(buf, 0, sizeof(buf));
48992@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
48993 struct iwl_priv *priv = file->private_data;
48994
48995 char buf[8];
48996- int buf_size;
48997+ size_t buf_size;
48998 u32 reset_flag;
48999
49000 memset(buf, 0, sizeof(buf));
49001@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
49002 {
49003 struct iwl_priv *priv = file->private_data;
49004 char buf[8];
49005- int buf_size;
49006+ size_t buf_size;
49007 int ht40;
49008
49009 memset(buf, 0, sizeof(buf));
49010@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
49011 {
49012 struct iwl_priv *priv = file->private_data;
49013 char buf[8];
49014- int buf_size;
49015+ size_t buf_size;
49016 int value;
49017
49018 memset(buf, 0, sizeof(buf));
49019@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
49020 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
49021 DEBUGFS_READ_FILE_OPS(current_sleep_command);
49022
49023-static const char *fmt_value = " %-30s %10u\n";
49024-static const char *fmt_hex = " %-30s 0x%02X\n";
49025-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
49026-static const char *fmt_header =
49027+static const char fmt_value[] = " %-30s %10u\n";
49028+static const char fmt_hex[] = " %-30s 0x%02X\n";
49029+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
49030+static const char fmt_header[] =
49031 "%-32s current cumulative delta max\n";
49032
49033 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
49034@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
49035 {
49036 struct iwl_priv *priv = file->private_data;
49037 char buf[8];
49038- int buf_size;
49039+ size_t buf_size;
49040 int clear;
49041
49042 memset(buf, 0, sizeof(buf));
49043@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
49044 {
49045 struct iwl_priv *priv = file->private_data;
49046 char buf[8];
49047- int buf_size;
49048+ size_t buf_size;
49049 int trace;
49050
49051 memset(buf, 0, sizeof(buf));
49052@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
49053 {
49054 struct iwl_priv *priv = file->private_data;
49055 char buf[8];
49056- int buf_size;
49057+ size_t buf_size;
49058 int missed;
49059
49060 memset(buf, 0, sizeof(buf));
49061@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
49062
49063 struct iwl_priv *priv = file->private_data;
49064 char buf[8];
49065- int buf_size;
49066+ size_t buf_size;
49067 int plcp;
49068
49069 memset(buf, 0, sizeof(buf));
49070@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
49071
49072 struct iwl_priv *priv = file->private_data;
49073 char buf[8];
49074- int buf_size;
49075+ size_t buf_size;
49076 int flush;
49077
49078 memset(buf, 0, sizeof(buf));
49079@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
49080
49081 struct iwl_priv *priv = file->private_data;
49082 char buf[8];
49083- int buf_size;
49084+ size_t buf_size;
49085 int rts;
49086
49087 if (!priv->cfg->ht_params)
49088@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
49089 {
49090 struct iwl_priv *priv = file->private_data;
49091 char buf[8];
49092- int buf_size;
49093+ size_t buf_size;
49094
49095 memset(buf, 0, sizeof(buf));
49096 buf_size = min(count, sizeof(buf) - 1);
49097@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
49098 struct iwl_priv *priv = file->private_data;
49099 u32 event_log_flag;
49100 char buf[8];
49101- int buf_size;
49102+ size_t buf_size;
49103
49104 /* check that the interface is up */
49105 if (!iwl_is_ready(priv))
49106@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
49107 struct iwl_priv *priv = file->private_data;
49108 char buf[8];
49109 u32 calib_disabled;
49110- int buf_size;
49111+ size_t buf_size;
49112
49113 memset(buf, 0, sizeof(buf));
49114 buf_size = min(count, sizeof(buf) - 1);
49115diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
49116index 523fe0c..0d9473b 100644
49117--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
49118+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
49119@@ -1781,7 +1781,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
49120 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
49121
49122 char buf[8];
49123- int buf_size;
49124+ size_t buf_size;
49125 u32 reset_flag;
49126
49127 memset(buf, 0, sizeof(buf));
49128@@ -1802,7 +1802,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
49129 {
49130 struct iwl_trans *trans = file->private_data;
49131 char buf[8];
49132- int buf_size;
49133+ size_t buf_size;
49134 int csr;
49135
49136 memset(buf, 0, sizeof(buf));
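
[annotation] Every debugfs write handler in these two files computes buf_size = min(count, sizeof(buf) - 1) and then uses it as a length. count is a size_t; in these handlers the result is tiny, so the change is type hygiene: keeping byte counts in int invites truncation and sign surprises if the pattern is ever copied somewhere the value is not bounded. What goes wrong when sizes live in int, assuming a 64-bit build:

    #include <stdio.h>

    int main(void)
    {
            size_t count = 0x100000000UL;   /* a 4 GiB write, possible on 64-bit */
            int    as_int = (int)count;     /* truncates to 0 */
            size_t as_sz  = count;
            printf("int: %d  size_t: %zu\n", as_int, as_sz);
            return 0;
    }
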
49137diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
49138index ef58a88..fafa731 100644
49139--- a/drivers/net/wireless/mac80211_hwsim.c
49140+++ b/drivers/net/wireless/mac80211_hwsim.c
49141@@ -3066,20 +3066,20 @@ static int __init init_mac80211_hwsim(void)
49142 if (channels < 1)
49143 return -EINVAL;
49144
49145- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
49146- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49147- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49148- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49149- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49150- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49151- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49152- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49153- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49154- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49155- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
49156- mac80211_hwsim_assign_vif_chanctx;
49157- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
49158- mac80211_hwsim_unassign_vif_chanctx;
49159+ pax_open_kernel();
49160+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
49161+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49162+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49163+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49164+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49165+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49166+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49167+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49168+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49169+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49170+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
49171+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
49172+ pax_close_kernel();
49173
49174 spin_lock_init(&hwsim_radio_lock);
49175 INIT_LIST_HEAD(&hwsim_radios);
49176diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
49177index 1a4facd..a2ecbbd 100644
49178--- a/drivers/net/wireless/rndis_wlan.c
49179+++ b/drivers/net/wireless/rndis_wlan.c
49180@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
49181
49182 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
49183
49184- if (rts_threshold < 0 || rts_threshold > 2347)
49185+ if (rts_threshold > 2347)
49186 rts_threshold = 2347;
49187
49188 tmp = cpu_to_le32(rts_threshold);
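
[annotation] rts_threshold is a u32, so the dropped rts_threshold < 0 arm could never be true; compilers flag it under -Wtype-limits, and the clamp to 2347 is all the validation the value needs. The dead comparison in isolation:

    unsigned int clamp_rts(unsigned int rts)
    {
            /* if (rts < 0) ...  is always false for an unsigned type,
             * which is why the patch deletes that half of the test */
            if (rts > 2347)
                    rts = 2347;
            return rts;
    }
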
49189diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
49190index 9bb398b..b0cc047 100644
49191--- a/drivers/net/wireless/rt2x00/rt2x00.h
49192+++ b/drivers/net/wireless/rt2x00/rt2x00.h
49193@@ -375,7 +375,7 @@ struct rt2x00_intf {
49194 * for hardware which doesn't support hardware
49195 * sequence counting.
49196 */
49197- atomic_t seqno;
49198+ atomic_unchecked_t seqno;
49199 };
49200
49201 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
49202diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
49203index 66ff364..3ce34f7 100644
49204--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
49205+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
49206@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
49207 * sequence counter given by mac80211.
49208 */
49209 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
49210- seqno = atomic_add_return(0x10, &intf->seqno);
49211+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
49212 else
49213- seqno = atomic_read(&intf->seqno);
49214+ seqno = atomic_read_unchecked(&intf->seqno);
49215
49216 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
49217 hdr->seq_ctrl |= cpu_to_le16(seqno);
49218diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
49219index b661f896..ddf7d2b 100644
49220--- a/drivers/net/wireless/ti/wl1251/sdio.c
49221+++ b/drivers/net/wireless/ti/wl1251/sdio.c
49222@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
49223
49224 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
49225
49226- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49227- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49228+ pax_open_kernel();
49229+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49230+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49231+ pax_close_kernel();
49232
49233 wl1251_info("using dedicated interrupt line");
49234 } else {
49235- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49236- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49237+ pax_open_kernel();
49238+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49239+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49240+ pax_close_kernel();
49241
49242 wl1251_info("using SDIO interrupt");
49243 }
49244diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
49245index d6d0d6d..60c23a0 100644
49246--- a/drivers/net/wireless/ti/wl12xx/main.c
49247+++ b/drivers/net/wireless/ti/wl12xx/main.c
49248@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49249 sizeof(wl->conf.mem));
49250
49251 /* read data preparation is only needed by wl127x */
49252- wl->ops->prepare_read = wl127x_prepare_read;
49253+ pax_open_kernel();
49254+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49255+ pax_close_kernel();
49256
49257 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49258 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49259@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49260 sizeof(wl->conf.mem));
49261
49262 /* read data preparation is only needed by wl127x */
49263- wl->ops->prepare_read = wl127x_prepare_read;
49264+ pax_open_kernel();
49265+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49266+ pax_close_kernel();
49267
49268 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49269 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49270diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
49271index 8e56261..9140678 100644
49272--- a/drivers/net/wireless/ti/wl18xx/main.c
49273+++ b/drivers/net/wireless/ti/wl18xx/main.c
49274@@ -1916,8 +1916,10 @@ static int wl18xx_setup(struct wl1271 *wl)
49275 }
49276
49277 if (!checksum_param) {
49278- wl18xx_ops.set_rx_csum = NULL;
49279- wl18xx_ops.init_vif = NULL;
49280+ pax_open_kernel();
49281+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
49282+ *(void **)&wl18xx_ops.init_vif = NULL;
49283+ pax_close_kernel();
49284 }
49285
49286 /* Enable 11a Band only if we have 5G antennas */
49287diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
49288index a912dc0..a8225ba 100644
49289--- a/drivers/net/wireless/zd1211rw/zd_usb.c
49290+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
49291@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
49292 {
49293 struct zd_usb *usb = urb->context;
49294 struct zd_usb_interrupt *intr = &usb->intr;
49295- int len;
49296+ unsigned int len;
49297 u16 int_num;
49298
49299 ZD_ASSERT(in_interrupt());
49300diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
49301index ce2e2cf..f81e500 100644
49302--- a/drivers/nfc/nfcwilink.c
49303+++ b/drivers/nfc/nfcwilink.c
49304@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
49305
49306 static int nfcwilink_probe(struct platform_device *pdev)
49307 {
49308- static struct nfcwilink *drv;
49309+ struct nfcwilink *drv;
49310 int rc;
49311 __u32 protocols;
49312
49313diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
49314index f2596c8..50d53af 100644
49315--- a/drivers/nfc/st21nfca/st21nfca.c
49316+++ b/drivers/nfc/st21nfca/st21nfca.c
49317@@ -559,7 +559,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
49318 goto exit;
49319 }
49320
49321- gate = uid_skb->data;
49322+ memcpy(gate, uid_skb->data, uid_skb->len);
49323 *len = uid_skb->len;
49324 exit:
49325 kfree_skb(uid_skb);
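
[annotation] This one is a straight bug fix: gate = uid_skb->data only retargets the local parameter, so the caller's buffer was never filled, and the skb is freed on the next line, which would have left any retained pointer dangling anyway. Copying the bytes out before kfree_skb() is the correct move. The pattern reduced to plain C, with illustrative stand-ins for the skb and allocation error handling elided:

    #include <string.h>
    #include <stdlib.h>

    static void get_uid_broken(unsigned char *gate, size_t *len)
    {
            unsigned char *skb_data = malloc(4);    /* stands in for uid_skb */
            memset(skb_data, 0xAB, 4);
            gate = skb_data;        /* lost on return; caller's buffer untouched */
            *len = 4;
            free(skb_data);         /* and the data is gone regardless */
    }

    static void get_uid_fixed(unsigned char *gate, size_t *len)
    {
            unsigned char *skb_data = malloc(4);
            memset(skb_data, 0xAB, 4);
            memcpy(gate, skb_data, 4);      /* copy out before freeing */
            *len = 4;
            free(skb_data);
    }
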
49326diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
49327index d93b2b6..ae50401 100644
49328--- a/drivers/oprofile/buffer_sync.c
49329+++ b/drivers/oprofile/buffer_sync.c
49330@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
49331 if (cookie == NO_COOKIE)
49332 offset = pc;
49333 if (cookie == INVALID_COOKIE) {
49334- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49335+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49336 offset = pc;
49337 }
49338 if (cookie != last_cookie) {
49339@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
49340 /* add userspace sample */
49341
49342 if (!mm) {
49343- atomic_inc(&oprofile_stats.sample_lost_no_mm);
49344+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
49345 return 0;
49346 }
49347
49348 cookie = lookup_dcookie(mm, s->eip, &offset);
49349
49350 if (cookie == INVALID_COOKIE) {
49351- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49352+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49353 return 0;
49354 }
49355
49356@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
49357 /* ignore backtraces if failed to add a sample */
49358 if (state == sb_bt_start) {
49359 state = sb_bt_ignore;
49360- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
49361+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
49362 }
49363 }
49364 release_mm(mm);
49365diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
49366index c0cc4e7..44d4e54 100644
49367--- a/drivers/oprofile/event_buffer.c
49368+++ b/drivers/oprofile/event_buffer.c
49369@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
49370 }
49371
49372 if (buffer_pos == buffer_size) {
49373- atomic_inc(&oprofile_stats.event_lost_overflow);
49374+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
49375 return;
49376 }
49377
49378diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
49379index ed2c3ec..deda85a 100644
49380--- a/drivers/oprofile/oprof.c
49381+++ b/drivers/oprofile/oprof.c
49382@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
49383 if (oprofile_ops.switch_events())
49384 return;
49385
49386- atomic_inc(&oprofile_stats.multiplex_counter);
49387+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
49388 start_switch_worker();
49389 }
49390
49391diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
49392index ee2cfce..7f8f699 100644
49393--- a/drivers/oprofile/oprofile_files.c
49394+++ b/drivers/oprofile/oprofile_files.c
49395@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
49396
49397 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
49398
49399-static ssize_t timeout_read(struct file *file, char __user *buf,
49400+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
49401 size_t count, loff_t *offset)
49402 {
49403 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
49404diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
49405index 59659ce..6c860a0 100644
49406--- a/drivers/oprofile/oprofile_stats.c
49407+++ b/drivers/oprofile/oprofile_stats.c
49408@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
49409 cpu_buf->sample_invalid_eip = 0;
49410 }
49411
49412- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
49413- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
49414- atomic_set(&oprofile_stats.event_lost_overflow, 0);
49415- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
49416- atomic_set(&oprofile_stats.multiplex_counter, 0);
49417+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
49418+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
49419+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
49420+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
49421+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
49422 }
49423
49424
49425diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
49426index 1fc622b..8c48fc3 100644
49427--- a/drivers/oprofile/oprofile_stats.h
49428+++ b/drivers/oprofile/oprofile_stats.h
49429@@ -13,11 +13,11 @@
49430 #include <linux/atomic.h>
49431
49432 struct oprofile_stat_struct {
49433- atomic_t sample_lost_no_mm;
49434- atomic_t sample_lost_no_mapping;
49435- atomic_t bt_lost_no_mapping;
49436- atomic_t event_lost_overflow;
49437- atomic_t multiplex_counter;
49438+ atomic_unchecked_t sample_lost_no_mm;
49439+ atomic_unchecked_t sample_lost_no_mapping;
49440+ atomic_unchecked_t bt_lost_no_mapping;
49441+ atomic_unchecked_t event_lost_overflow;
49442+ atomic_unchecked_t multiplex_counter;
49443 };
49444
49445 extern struct oprofile_stat_struct oprofile_stats;
49446diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
49447index 3f49345..c750d0b 100644
49448--- a/drivers/oprofile/oprofilefs.c
49449+++ b/drivers/oprofile/oprofilefs.c
49450@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
49451
49452 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
49453 {
49454- atomic_t *val = file->private_data;
49455- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
49456+ atomic_unchecked_t *val = file->private_data;
49457+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
49458 }
49459
49460
49461@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
49462
49463
49464 int oprofilefs_create_ro_atomic(struct dentry *root,
49465- char const *name, atomic_t *val)
49466+ char const *name, atomic_unchecked_t *val)
49467 {
49468 return __oprofilefs_create_file(root, name,
49469 &atomic_ro_fops, 0444, val);
49470diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
49471index bdef916..88c7dee 100644
49472--- a/drivers/oprofile/timer_int.c
49473+++ b/drivers/oprofile/timer_int.c
49474@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
49475 return NOTIFY_OK;
49476 }
49477
49478-static struct notifier_block __refdata oprofile_cpu_notifier = {
49479+static struct notifier_block oprofile_cpu_notifier = {
49480 .notifier_call = oprofile_cpu_notify,
49481 };
49482
49483diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
49484index 3b47080..6cd05dd 100644
49485--- a/drivers/parport/procfs.c
49486+++ b/drivers/parport/procfs.c
49487@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
49488
49489 *ppos += len;
49490
49491- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
49492+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
49493 }
49494
49495 #ifdef CONFIG_PARPORT_1284
49496@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
49497
49498 *ppos += len;
49499
49500- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
49501+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
49502 }
49503 #endif /* IEEE1284.3 support. */
49504
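
[annotation] The parport procfs handlers format into a fixed stack buffer and then copy_to_user() len bytes; the added len > sizeof buffer test makes the copy refuse outright if len were ever miscomputed, instead of reading past the stack frame. The guard in miniature, with memcpy standing in for copy_to_user:

    #include <string.h>

    int emit(char *dst, size_t len)
    {
            char buffer[256];

            memset(buffer, 0, sizeof buffer);       /* the sprintf would go here */
            if (len > sizeof buffer)                /* the added guard */
                    return -1;                      /* -EFAULT in the kernel version */
            memcpy(dst, buffer, len);
            return 0;
    }
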
49505diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
49506index 6ca2399..68d866b 100644
49507--- a/drivers/pci/hotplug/acpiphp_ibm.c
49508+++ b/drivers/pci/hotplug/acpiphp_ibm.c
49509@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
49510 goto init_cleanup;
49511 }
49512
49513- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49514+ pax_open_kernel();
49515+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49516+ pax_close_kernel();
49517 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
49518
49519 return retval;
49520diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
49521index 66b7bbe..26bee78 100644
49522--- a/drivers/pci/hotplug/cpcihp_generic.c
49523+++ b/drivers/pci/hotplug/cpcihp_generic.c
49524@@ -73,7 +73,6 @@ static u16 port;
49525 static unsigned int enum_bit;
49526 static u8 enum_mask;
49527
49528-static struct cpci_hp_controller_ops generic_hpc_ops;
49529 static struct cpci_hp_controller generic_hpc;
49530
49531 static int __init validate_parameters(void)
49532@@ -139,6 +138,10 @@ static int query_enum(void)
49533 return ((value & enum_mask) == enum_mask);
49534 }
49535
49536+static struct cpci_hp_controller_ops generic_hpc_ops = {
49537+ .query_enum = query_enum,
49538+};
49539+
49540 static int __init cpcihp_generic_init(void)
49541 {
49542 int status;
49543@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
49544 pci_dev_put(dev);
49545
49546 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
49547- generic_hpc_ops.query_enum = query_enum;
49548 generic_hpc.ops = &generic_hpc_ops;
49549
49550 status = cpci_hp_register_controller(&generic_hpc);
49551diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
49552index 7ecf34e..effed62 100644
49553--- a/drivers/pci/hotplug/cpcihp_zt5550.c
49554+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
49555@@ -59,7 +59,6 @@
49556 /* local variables */
49557 static bool debug;
49558 static bool poll;
49559-static struct cpci_hp_controller_ops zt5550_hpc_ops;
49560 static struct cpci_hp_controller zt5550_hpc;
49561
49562 /* Primary cPCI bus bridge device */
49563@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
49564 return 0;
49565 }
49566
49567+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
49568+ .query_enum = zt5550_hc_query_enum,
49569+};
49570+
49571 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
49572 {
49573 int status;
49574@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
49575 dbg("returned from zt5550_hc_config");
49576
49577 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
49578- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
49579 zt5550_hpc.ops = &zt5550_hpc_ops;
49580 if (!poll) {
49581 zt5550_hpc.irq = hc_dev->irq;
49582 zt5550_hpc.irq_flags = IRQF_SHARED;
49583 zt5550_hpc.dev_id = hc_dev;
49584
49585- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49586- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49587- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49588+ pax_open_kernel();
49589+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49590+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49591+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49592+ pax_open_kernel();
49593 } else {
49594 info("using ENUM# polling mode");
49595 }
49596diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
49597index 1e08ff8c..3cd145f 100644
49598--- a/drivers/pci/hotplug/cpqphp_nvram.c
49599+++ b/drivers/pci/hotplug/cpqphp_nvram.c
49600@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
49601
49602 void compaq_nvram_init (void __iomem *rom_start)
49603 {
49604+#ifndef CONFIG_PAX_KERNEXEC
49605 if (rom_start)
49606 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
49607+#endif
49608
49609 dbg("int15 entry = %p\n", compaq_int15_entry_point);
49610
49611diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
49612index 56d8486..f26113f 100644
49613--- a/drivers/pci/hotplug/pci_hotplug_core.c
49614+++ b/drivers/pci/hotplug/pci_hotplug_core.c
49615@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
49616 return -EINVAL;
49617 }
49618
49619- slot->ops->owner = owner;
49620- slot->ops->mod_name = mod_name;
49621+ pax_open_kernel();
49622+ *(struct module **)&slot->ops->owner = owner;
49623+ *(const char **)&slot->ops->mod_name = mod_name;
49624+ pax_close_kernel();
49625
49626 mutex_lock(&pci_hp_mutex);
49627 /*
49628diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
49629index 07aa722..84514b4 100644
49630--- a/drivers/pci/hotplug/pciehp_core.c
49631+++ b/drivers/pci/hotplug/pciehp_core.c
49632@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
49633 struct slot *slot = ctrl->slot;
49634 struct hotplug_slot *hotplug = NULL;
49635 struct hotplug_slot_info *info = NULL;
49636- struct hotplug_slot_ops *ops = NULL;
49637+ hotplug_slot_ops_no_const *ops = NULL;
49638 char name[SLOT_NAME_SIZE];
49639 int retval = -ENOMEM;
49640
49641diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
49642index fd60806..ab6c565 100644
49643--- a/drivers/pci/msi.c
49644+++ b/drivers/pci/msi.c
49645@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
49646 {
49647 struct attribute **msi_attrs;
49648 struct attribute *msi_attr;
49649- struct device_attribute *msi_dev_attr;
49650- struct attribute_group *msi_irq_group;
49651+ device_attribute_no_const *msi_dev_attr;
49652+ attribute_group_no_const *msi_irq_group;
49653 const struct attribute_group **msi_irq_groups;
49654 struct msi_desc *entry;
49655 int ret = -ENOMEM;
49656@@ -573,7 +573,7 @@ error_attrs:
49657 count = 0;
49658 msi_attr = msi_attrs[count];
49659 while (msi_attr) {
49660- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
49661+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
49662 kfree(msi_attr->name);
49663 kfree(msi_dev_attr);
49664 ++count;
49665diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
49666index aa012fb..63fac5d 100644
49667--- a/drivers/pci/pci-sysfs.c
49668+++ b/drivers/pci/pci-sysfs.c
49669@@ -1139,7 +1139,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
49670 {
49671 /* allocate attribute structure, piggyback attribute name */
49672 int name_len = write_combine ? 13 : 10;
49673- struct bin_attribute *res_attr;
49674+ bin_attribute_no_const *res_attr;
49675 int retval;
49676
49677 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
49678@@ -1316,7 +1316,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
49679 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
49680 {
49681 int retval;
49682- struct bin_attribute *attr;
49683+ bin_attribute_no_const *attr;
49684
49685 /* If the device has VPD, try to expose it in sysfs. */
49686 if (dev->vpd) {
49687@@ -1363,7 +1363,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
49688 {
49689 int retval;
49690 int rom_size = 0;
49691- struct bin_attribute *attr;
49692+ bin_attribute_no_const *attr;
49693
49694 if (!sysfs_initialized)
49695 return -EACCES;
49696diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
49697index d54632a..198c84d 100644
49698--- a/drivers/pci/pci.h
49699+++ b/drivers/pci/pci.h
49700@@ -93,7 +93,7 @@ struct pci_vpd_ops {
49701 struct pci_vpd {
49702 unsigned int len;
49703 const struct pci_vpd_ops *ops;
49704- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
49705+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
49706 };
49707
49708 int pci_vpd_pci22_init(struct pci_dev *dev);
49709diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
49710index e1e7026..d28dd33 100644
49711--- a/drivers/pci/pcie/aspm.c
49712+++ b/drivers/pci/pcie/aspm.c
49713@@ -27,9 +27,9 @@
49714 #define MODULE_PARAM_PREFIX "pcie_aspm."
49715
49716 /* Note: those are not register definitions */
49717-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
49718-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
49719-#define ASPM_STATE_L1 (4) /* L1 state */
49720+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
49721+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
49722+#define ASPM_STATE_L1 (4U) /* L1 state */
49723 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
49724 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
49725
49726diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
49727index 23212f8..65e945b 100644
49728--- a/drivers/pci/probe.c
49729+++ b/drivers/pci/probe.c
49730@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
49731 u16 orig_cmd;
49732 struct pci_bus_region region, inverted_region;
49733
49734- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
49735+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
49736
49737 /* No printks while decoding is disabled! */
49738 if (!dev->mmio_always_on) {
49739diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
49740index 3f155e7..0f4b1f0 100644
49741--- a/drivers/pci/proc.c
49742+++ b/drivers/pci/proc.c
49743@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
49744 static int __init pci_proc_init(void)
49745 {
49746 struct pci_dev *dev = NULL;
49747+
49748+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49749+#ifdef CONFIG_GRKERNSEC_PROC_USER
49750+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
49751+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49752+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
49753+#endif
49754+#else
49755 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
49756+#endif
49757 proc_create("devices", 0, proc_bus_pci_dir,
49758 &proc_bus_pci_dev_operations);
49759 proc_initialized = 1;
49760diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
49761index b84fdd6..b89d829 100644
49762--- a/drivers/platform/chrome/chromeos_laptop.c
49763+++ b/drivers/platform/chrome/chromeos_laptop.c
49764@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
49765 .callback = chromeos_laptop_dmi_matched, \
49766 .driver_data = (void *)&board_
49767
49768-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
49769+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
49770 {
49771 .ident = "Samsung Series 5 550",
49772 .matches = {
49773diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
49774index 1e1e594..8fe59c5 100644
49775--- a/drivers/platform/x86/alienware-wmi.c
49776+++ b/drivers/platform/x86/alienware-wmi.c
49777@@ -150,7 +150,7 @@ struct wmax_led_args {
49778 } __packed;
49779
49780 static struct platform_device *platform_device;
49781-static struct device_attribute *zone_dev_attrs;
49782+static device_attribute_no_const *zone_dev_attrs;
49783 static struct attribute **zone_attrs;
49784 static struct platform_zone *zone_data;
49785
49786@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
49787 }
49788 };
49789
49790-static struct attribute_group zone_attribute_group = {
49791+static attribute_group_no_const zone_attribute_group = {
49792 .name = "rgb_zones",
49793 };
49794
49795diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
49796index 7543a56..367ca8ed 100644
49797--- a/drivers/platform/x86/asus-wmi.c
49798+++ b/drivers/platform/x86/asus-wmi.c
49799@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
49800 int err;
49801 u32 retval = -1;
49802
49803+#ifdef CONFIG_GRKERNSEC_KMEM
49804+ return -EPERM;
49805+#endif
49806+
49807 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
49808
49809 if (err < 0)
49810@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
49811 int err;
49812 u32 retval = -1;
49813
49814+#ifdef CONFIG_GRKERNSEC_KMEM
49815+ return -EPERM;
49816+#endif
49817+
49818 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
49819 &retval);
49820
49821@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
49822 union acpi_object *obj;
49823 acpi_status status;
49824
49825+#ifdef CONFIG_GRKERNSEC_KMEM
49826+ return -EPERM;
49827+#endif
49828+
49829 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
49830 1, asus->debug.method_id,
49831 &input, &output);
49832diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
49833index 0859877..1cf7d08 100644
49834--- a/drivers/platform/x86/msi-laptop.c
49835+++ b/drivers/platform/x86/msi-laptop.c
49836@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
49837
49838 if (!quirks->ec_read_only) {
49839 /* allow userland write sysfs file */
49840- dev_attr_bluetooth.store = store_bluetooth;
49841- dev_attr_wlan.store = store_wlan;
49842- dev_attr_threeg.store = store_threeg;
49843- dev_attr_bluetooth.attr.mode |= S_IWUSR;
49844- dev_attr_wlan.attr.mode |= S_IWUSR;
49845- dev_attr_threeg.attr.mode |= S_IWUSR;
49846+ pax_open_kernel();
49847+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
49848+ *(void **)&dev_attr_wlan.store = store_wlan;
49849+ *(void **)&dev_attr_threeg.store = store_threeg;
49850+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
49851+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
49852+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
49853+ pax_close_kernel();
49854 }
49855
49856 /* disable hardware control by fn key */
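
[Editor's note] The msi-laptop hunk shows the canonical PaX pattern for a rare legitimate write to constified data: pax_open_kernel() presumably lifts kernel write protection (e.g. by toggling CR0.WP on x86), the store goes through a cast that strips the const qualifier (*(void **)&attr.store = ...), and pax_close_kernel() restores protection. The open/close pair has no userspace equivalent, but mprotect() can stand in for it in a runnable model (names mine, error handling trimmed):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct dev_attr {                       /* stands in for device_attribute */
	unsigned int mode;
	void (*store)(const char *);
};

static void store_impl(const char *s) { printf("store: %s\n", s); }

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);

	/* One page standing in for the kernel's read-only data section. */
	struct dev_attr *attr = mmap(NULL, pg, PROT_READ | PROT_WRITE,
				     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (attr == MAP_FAILED)
		return 1;
	attr->mode = 0400;
	mprotect(attr, pg, PROT_READ);                   /* baseline: write-protected */

	mprotect(attr, pg, PROT_READ | PROT_WRITE);      /* ~ pax_open_kernel() */
	*(void **)&attr->store = (void *)store_impl;     /* const-stripping store */
	attr->mode |= 0200;                              /* ~ adding S_IWUSR */
	mprotect(attr, pg, PROT_READ);                   /* ~ pax_close_kernel() */

	attr->store("hello");                            /* reads still work */
	return 0;
}

The same idiom recurs below in powercap, max8660, max8973, mc13892, rtc-cmos, rtc-m48t59 and lpfc_init.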
49857diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
49858index 6d2bac0..ec2b029 100644
49859--- a/drivers/platform/x86/msi-wmi.c
49860+++ b/drivers/platform/x86/msi-wmi.c
49861@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
49862 static void msi_wmi_notify(u32 value, void *context)
49863 {
49864 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
49865- static struct key_entry *key;
49866+ struct key_entry *key;
49867 union acpi_object *obj;
49868 acpi_status status;
49869
49870diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
49871index 6dd1c0e..5d602c7 100644
49872--- a/drivers/platform/x86/sony-laptop.c
49873+++ b/drivers/platform/x86/sony-laptop.c
49874@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
49875 }
49876
49877 /* High speed charging function */
49878-static struct device_attribute *hsc_handle;
49879+static device_attribute_no_const *hsc_handle;
49880
49881 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
49882 struct device_attribute *attr,
49883@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
49884 }
49885
49886 /* low battery function */
49887-static struct device_attribute *lowbatt_handle;
49888+static device_attribute_no_const *lowbatt_handle;
49889
49890 static ssize_t sony_nc_lowbatt_store(struct device *dev,
49891 struct device_attribute *attr,
49892@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
49893 }
49894
49895 /* fan speed function */
49896-static struct device_attribute *fan_handle, *hsf_handle;
49897+static device_attribute_no_const *fan_handle, *hsf_handle;
49898
49899 static ssize_t sony_nc_hsfan_store(struct device *dev,
49900 struct device_attribute *attr,
49901@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
49902 }
49903
49904 /* USB charge function */
49905-static struct device_attribute *uc_handle;
49906+static device_attribute_no_const *uc_handle;
49907
49908 static ssize_t sony_nc_usb_charge_store(struct device *dev,
49909 struct device_attribute *attr,
49910@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
49911 }
49912
49913 /* Panel ID function */
49914-static struct device_attribute *panel_handle;
49915+static device_attribute_no_const *panel_handle;
49916
49917 static ssize_t sony_nc_panelid_show(struct device *dev,
49918 struct device_attribute *attr, char *buffer)
49919@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
49920 }
49921
49922 /* smart connect function */
49923-static struct device_attribute *sc_handle;
49924+static device_attribute_no_const *sc_handle;
49925
49926 static ssize_t sony_nc_smart_conn_store(struct device *dev,
49927 struct device_attribute *attr,
49928diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
49929index c3d11fa..f83cded 100644
49930--- a/drivers/platform/x86/thinkpad_acpi.c
49931+++ b/drivers/platform/x86/thinkpad_acpi.c
49932@@ -2092,7 +2092,7 @@ static int hotkey_mask_get(void)
49933 return 0;
49934 }
49935
49936-void static hotkey_mask_warn_incomplete_mask(void)
49937+static void hotkey_mask_warn_incomplete_mask(void)
49938 {
49939 /* log only what the user can fix... */
49940 const u32 wantedmask = hotkey_driver_mask &
49941@@ -2436,10 +2436,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
49942 && !tp_features.bright_unkfw)
49943 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
49944 }
49945+}
49946
49947 #undef TPACPI_COMPARE_KEY
49948 #undef TPACPI_MAY_SEND_KEY
49949-}
49950
49951 /*
49952 * Polling driver
49953diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
49954index 438d4c7..ca8a2fb 100644
49955--- a/drivers/pnp/pnpbios/bioscalls.c
49956+++ b/drivers/pnp/pnpbios/bioscalls.c
49957@@ -59,7 +59,7 @@ do { \
49958 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
49959 } while(0)
49960
49961-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
49962+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
49963 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
49964
49965 /*
49966@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
49967
49968 cpu = get_cpu();
49969 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
49970+
49971+ pax_open_kernel();
49972 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
49973+ pax_close_kernel();
49974
49975 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
49976 spin_lock_irqsave(&pnp_bios_lock, flags);
49977@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
49978 :"memory");
49979 spin_unlock_irqrestore(&pnp_bios_lock, flags);
49980
49981+ pax_open_kernel();
49982 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
49983+ pax_close_kernel();
49984+
49985 put_cpu();
49986
49987 /* If we get here and this is set then the PnP BIOS faulted on us. */
49988@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
49989 return status;
49990 }
49991
49992-void pnpbios_calls_init(union pnp_bios_install_struct *header)
49993+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
49994 {
49995 int i;
49996
49997@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
49998 pnp_bios_callpoint.offset = header->fields.pm16offset;
49999 pnp_bios_callpoint.segment = PNP_CS16;
50000
50001+ pax_open_kernel();
50002+
50003 for_each_possible_cpu(i) {
50004 struct desc_struct *gdt = get_cpu_gdt_table(i);
50005 if (!gdt)
50006@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50007 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
50008 (unsigned long)__va(header->fields.pm16dseg));
50009 }
50010+
50011+ pax_close_kernel();
50012 }
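[Editor's note] Two things happen in the pnpbios hunk: the temporary BIOS descriptor becomes const, and its flags change from 0x4092 to 0x4093. The low bit of an x86 segment descriptor's type field is the "accessed" flag, which the CPU sets on first use; presetting it means the processor never needs to write the descriptor, a write that would now fault since the GDT slot is only opened briefly via pax_open_kernel(). A small sketch decoding the two values (access-byte layout per the Intel SDM; helper name mine):

#include <stdio.h>

/* Low byte of the descriptor flags: bits 0..3 = type (bit 0 = accessed),
 * bit 4 = S, bits 5..6 = DPL, bit 7 = present. */
static void decode(unsigned int v)
{
	printf("0x%04x: type=%x S=%u DPL=%u P=%u accessed=%u\n",
	       v, v & 0xf, (v >> 4) & 1, (v >> 5) & 3, (v >> 7) & 1, v & 1);
}

int main(void)
{
	decode(0x4092);   /* read/write data segment, accessed bit clear */
	decode(0x4093);   /* same, accessed preset: the CPU won't write it */
	return 0;
}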
50013diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
50014index 0c52e2a..3421ab7 100644
50015--- a/drivers/power/pda_power.c
50016+++ b/drivers/power/pda_power.c
50017@@ -37,7 +37,11 @@ static int polling;
50018
50019 #if IS_ENABLED(CONFIG_USB_PHY)
50020 static struct usb_phy *transceiver;
50021-static struct notifier_block otg_nb;
50022+static int otg_handle_notification(struct notifier_block *nb,
50023+ unsigned long event, void *unused);
50024+static struct notifier_block otg_nb = {
50025+ .notifier_call = otg_handle_notification
50026+};
50027 #endif
50028
50029 static struct regulator *ac_draw;
50030@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
50031
50032 #if IS_ENABLED(CONFIG_USB_PHY)
50033 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
50034- otg_nb.notifier_call = otg_handle_notification;
50035 ret = usb_register_notifier(transceiver, &otg_nb);
50036 if (ret) {
50037 dev_err(dev, "failure to register otg notifier\n");
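[Editor's note] Rather than filling otg_nb.notifier_call inside probe(), the pda_power hunk forward-declares the handler and initializes the notifier_block at compile time. With the function pointer fixed at build time the object needs no runtime store, which is what lets hardening plugins treat it as read-only. The same reshuffle in miniature (userspace C with a stand-in struct; names mine):

#include <stdio.h>

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb,
			     unsigned long event, void *data);
};

/* Forward declaration lets the initializer reference the handler
 * before its body appears, exactly as in the hunk above. */
static int otg_handle_notification(struct notifier_block *nb,
				   unsigned long event, void *unused);

static const struct notifier_block otg_nb = {
	.notifier_call = otg_handle_notification,   /* fixed at build time */
};

static int otg_handle_notification(struct notifier_block *nb,
				   unsigned long event, void *unused)
{
	printf("event %lu\n", event);
	return 0;
}

int main(void)
{
	return otg_nb.notifier_call((struct notifier_block *)&otg_nb, 1, NULL);
}

The power_supply hunks just below apply the same idea to device_type.groups.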
50038diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
50039index cc439fd..8fa30df 100644
50040--- a/drivers/power/power_supply.h
50041+++ b/drivers/power/power_supply.h
50042@@ -16,12 +16,12 @@ struct power_supply;
50043
50044 #ifdef CONFIG_SYSFS
50045
50046-extern void power_supply_init_attrs(struct device_type *dev_type);
50047+extern void power_supply_init_attrs(void);
50048 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
50049
50050 #else
50051
50052-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
50053+static inline void power_supply_init_attrs(void) {}
50054 #define power_supply_uevent NULL
50055
50056 #endif /* CONFIG_SYSFS */
50057diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
50058index 694e8cd..9f03483 100644
50059--- a/drivers/power/power_supply_core.c
50060+++ b/drivers/power/power_supply_core.c
50061@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
50062 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
50063 EXPORT_SYMBOL_GPL(power_supply_notifier);
50064
50065-static struct device_type power_supply_dev_type;
50066+extern const struct attribute_group *power_supply_attr_groups[];
50067+static struct device_type power_supply_dev_type = {
50068+ .groups = power_supply_attr_groups,
50069+};
50070
50071 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
50072 struct power_supply *supply)
50073@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
50074 return PTR_ERR(power_supply_class);
50075
50076 power_supply_class->dev_uevent = power_supply_uevent;
50077- power_supply_init_attrs(&power_supply_dev_type);
50078+ power_supply_init_attrs();
50079
50080 return 0;
50081 }
50082diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
50083index 62653f5..d0bb485 100644
50084--- a/drivers/power/power_supply_sysfs.c
50085+++ b/drivers/power/power_supply_sysfs.c
50086@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
50087 .is_visible = power_supply_attr_is_visible,
50088 };
50089
50090-static const struct attribute_group *power_supply_attr_groups[] = {
50091+const struct attribute_group *power_supply_attr_groups[] = {
50092 &power_supply_attr_group,
50093 NULL,
50094 };
50095
50096-void power_supply_init_attrs(struct device_type *dev_type)
50097+void power_supply_init_attrs(void)
50098 {
50099 int i;
50100
50101- dev_type->groups = power_supply_attr_groups;
50102-
50103 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
50104 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
50105 }
50106diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
50107index 84419af..268ede8 100644
50108--- a/drivers/powercap/powercap_sys.c
50109+++ b/drivers/powercap/powercap_sys.c
50110@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
50111 struct device_attribute name_attr;
50112 };
50113
50114+static ssize_t show_constraint_name(struct device *dev,
50115+ struct device_attribute *dev_attr,
50116+ char *buf);
50117+
50118 static struct powercap_constraint_attr
50119- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
50120+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
50121+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
50122+ .power_limit_attr = {
50123+ .attr = {
50124+ .name = NULL,
50125+ .mode = S_IWUSR | S_IRUGO
50126+ },
50127+ .show = show_constraint_power_limit_uw,
50128+ .store = store_constraint_power_limit_uw
50129+ },
50130+
50131+ .time_window_attr = {
50132+ .attr = {
50133+ .name = NULL,
50134+ .mode = S_IWUSR | S_IRUGO
50135+ },
50136+ .show = show_constraint_time_window_us,
50137+ .store = store_constraint_time_window_us
50138+ },
50139+
50140+ .max_power_attr = {
50141+ .attr = {
50142+ .name = NULL,
50143+ .mode = S_IRUGO
50144+ },
50145+ .show = show_constraint_max_power_uw,
50146+ .store = NULL
50147+ },
50148+
50149+ .min_power_attr = {
50150+ .attr = {
50151+ .name = NULL,
50152+ .mode = S_IRUGO
50153+ },
50154+ .show = show_constraint_min_power_uw,
50155+ .store = NULL
50156+ },
50157+
50158+ .max_time_window_attr = {
50159+ .attr = {
50160+ .name = NULL,
50161+ .mode = S_IRUGO
50162+ },
50163+ .show = show_constraint_max_time_window_us,
50164+ .store = NULL
50165+ },
50166+
50167+ .min_time_window_attr = {
50168+ .attr = {
50169+ .name = NULL,
50170+ .mode = S_IRUGO
50171+ },
50172+ .show = show_constraint_min_time_window_us,
50173+ .store = NULL
50174+ },
50175+
50176+ .name_attr = {
50177+ .attr = {
50178+ .name = NULL,
50179+ .mode = S_IRUGO
50180+ },
50181+ .show = show_constraint_name,
50182+ .store = NULL
50183+ }
50184+ }
50185+};
50186
50187 /* A list of powercap control_types */
50188 static LIST_HEAD(powercap_cntrl_list);
50189@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
50190 }
50191
50192 static int create_constraint_attribute(int id, const char *name,
50193- int mode,
50194- struct device_attribute *dev_attr,
50195- ssize_t (*show)(struct device *,
50196- struct device_attribute *, char *),
50197- ssize_t (*store)(struct device *,
50198- struct device_attribute *,
50199- const char *, size_t)
50200- )
50201+ struct device_attribute *dev_attr)
50202 {
50203+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
50204
50205- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
50206- id, name);
50207- if (!dev_attr->attr.name)
50208+ if (!name)
50209 return -ENOMEM;
50210- dev_attr->attr.mode = mode;
50211- dev_attr->show = show;
50212- dev_attr->store = store;
50213+
50214+ pax_open_kernel();
50215+ *(const char **)&dev_attr->attr.name = name;
50216+ pax_close_kernel();
50217
50218 return 0;
50219 }
50220@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
50221
50222 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
50223 ret = create_constraint_attribute(i, "power_limit_uw",
50224- S_IWUSR | S_IRUGO,
50225- &constraint_attrs[i].power_limit_attr,
50226- show_constraint_power_limit_uw,
50227- store_constraint_power_limit_uw);
50228+ &constraint_attrs[i].power_limit_attr);
50229 if (ret)
50230 goto err_alloc;
50231 ret = create_constraint_attribute(i, "time_window_us",
50232- S_IWUSR | S_IRUGO,
50233- &constraint_attrs[i].time_window_attr,
50234- show_constraint_time_window_us,
50235- store_constraint_time_window_us);
50236+ &constraint_attrs[i].time_window_attr);
50237 if (ret)
50238 goto err_alloc;
50239- ret = create_constraint_attribute(i, "name", S_IRUGO,
50240- &constraint_attrs[i].name_attr,
50241- show_constraint_name,
50242- NULL);
50243+ ret = create_constraint_attribute(i, "name",
50244+ &constraint_attrs[i].name_attr);
50245 if (ret)
50246 goto err_alloc;
50247- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
50248- &constraint_attrs[i].max_power_attr,
50249- show_constraint_max_power_uw,
50250- NULL);
50251+ ret = create_constraint_attribute(i, "max_power_uw",
50252+ &constraint_attrs[i].max_power_attr);
50253 if (ret)
50254 goto err_alloc;
50255- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
50256- &constraint_attrs[i].min_power_attr,
50257- show_constraint_min_power_uw,
50258- NULL);
50259+ ret = create_constraint_attribute(i, "min_power_uw",
50260+ &constraint_attrs[i].min_power_attr);
50261 if (ret)
50262 goto err_alloc;
50263 ret = create_constraint_attribute(i, "max_time_window_us",
50264- S_IRUGO,
50265- &constraint_attrs[i].max_time_window_attr,
50266- show_constraint_max_time_window_us,
50267- NULL);
50268+ &constraint_attrs[i].max_time_window_attr);
50269 if (ret)
50270 goto err_alloc;
50271 ret = create_constraint_attribute(i, "min_time_window_us",
50272- S_IRUGO,
50273- &constraint_attrs[i].min_time_window_attr,
50274- show_constraint_min_time_window_us,
50275- NULL);
50276+ &constraint_attrs[i].min_time_window_attr);
50277 if (ret)
50278 goto err_alloc;
50279
50280@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
50281 power_zone->zone_dev_attrs[count++] =
50282 &dev_attr_max_energy_range_uj.attr;
50283 if (power_zone->ops->get_energy_uj) {
50284+ pax_open_kernel();
50285 if (power_zone->ops->reset_energy_uj)
50286- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50287+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50288 else
50289- dev_attr_energy_uj.attr.mode = S_IRUGO;
50290+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
50291+ pax_close_kernel();
50292 power_zone->zone_dev_attrs[count++] =
50293 &dev_attr_energy_uj.attr;
50294 }
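[Editor's note] The powercap rewrite replaces runtime population of constraint_attrs[] with a compile-time initializer using GCC's [first ... last] range designator, which applies one initializer to every element of the range; only the kasprintf'd name still has to be patched in at runtime, through pax_open_kernel(), since everything else is now fixed. The designator in isolation:

#include <stdio.h>

#define MAX_ZONES 4

struct zone_attr {
	const char *name;    /* filled at runtime, like attr.name above */
	unsigned int mode;
};

/* GCC extension: one initializer for elements 0 .. MAX_ZONES-1. */
static struct zone_attr attrs[MAX_ZONES] = {
	[0 ... MAX_ZONES - 1] = { .name = NULL, .mode = 0444 },
};

int main(void)
{
	for (int i = 0; i < MAX_ZONES; i++)
		printf("attr %d: mode %04o\n", i, attrs[i].mode);
	return 0;
}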
50295diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
50296index 9c5d414..c7900ce 100644
50297--- a/drivers/ptp/ptp_private.h
50298+++ b/drivers/ptp/ptp_private.h
50299@@ -51,7 +51,7 @@ struct ptp_clock {
50300 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
50301 wait_queue_head_t tsev_wq;
50302 int defunct; /* tells readers to go away when clock is being removed */
50303- struct device_attribute *pin_dev_attr;
50304+ device_attribute_no_const *pin_dev_attr;
50305 struct attribute **pin_attr;
50306 struct attribute_group pin_attr_group;
50307 };
50308diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
50309index 302e626..12579af 100644
50310--- a/drivers/ptp/ptp_sysfs.c
50311+++ b/drivers/ptp/ptp_sysfs.c
50312@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
50313 goto no_pin_attr;
50314
50315 for (i = 0; i < n_pins; i++) {
50316- struct device_attribute *da = &ptp->pin_dev_attr[i];
50317+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
50318 sysfs_attr_init(&da->attr);
50319 da->attr.name = info->pin_config[i].name;
50320 da->attr.mode = 0644;
50321diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
50322index 9c48fb3..5b494fa 100644
50323--- a/drivers/regulator/core.c
50324+++ b/drivers/regulator/core.c
50325@@ -3587,7 +3587,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50326 {
50327 const struct regulation_constraints *constraints = NULL;
50328 const struct regulator_init_data *init_data;
50329- static atomic_t regulator_no = ATOMIC_INIT(0);
50330+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
50331 struct regulator_dev *rdev;
50332 struct device *dev;
50333 int ret, i;
50334@@ -3661,7 +3661,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50335 rdev->dev.class = &regulator_class;
50336 rdev->dev.parent = dev;
50337 dev_set_name(&rdev->dev, "regulator.%d",
50338- atomic_inc_return(&regulator_no) - 1);
50339+ atomic_inc_return_unchecked(&regulator_no) - 1);
50340 ret = device_register(&rdev->dev);
50341 if (ret != 0) {
50342 put_device(&rdev->dev);
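[Editor's note] regulator_no only generates unique device names, so wrapping it is harmless; converting it to atomic_unchecked_t presumably exempts it from PaX's REFCOUNT overflow checking, which would otherwise treat a wrap of a protected atomic_t as an exploit attempt. The fcoe, hosts, libfc, lpfc and pmcraid hunks below convert IDs and statistics counters for the same reason. A userspace model of the checked/unchecked split (the saturation policy is my simplification of what a hardened kernel might do):

#include <stdio.h>
#include <limits.h>

typedef struct { volatile int v; } atomic_t;            /* overflow-checked */
typedef struct { volatile int v; } atomic_unchecked_t;  /* exempt */

/* Checked increment: refuse to wrap; a hardened kernel would log the
 * event and saturate rather than let a refcount overflow. */
static int atomic_inc_return(atomic_t *a)
{
	if (a->v == INT_MAX) {
		fprintf(stderr, "refcount overflow blocked\n");
		return a->v;                     /* saturate */
	}
	return ++a->v;
}

/* Unchecked increment: plain wrap, fine for IDs and stats counters.
 * Wrap via unsigned math so this model avoids signed-overflow UB. */
static int atomic_inc_return_unchecked(atomic_unchecked_t *a)
{
	a->v = (int)((unsigned int)a->v + 1u);
	return a->v;
}

int main(void)
{
	atomic_t ref = { INT_MAX };
	atomic_unchecked_t id = { INT_MAX };

	printf("checked:   %d\n", atomic_inc_return(&ref));          /* saturates */
	printf("unchecked: %d\n", atomic_inc_return_unchecked(&id)); /* wraps */
	return 0;
}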
50343diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
50344index 7eee2ca..4024513 100644
50345--- a/drivers/regulator/max8660.c
50346+++ b/drivers/regulator/max8660.c
50347@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
50348 max8660->shadow_regs[MAX8660_OVER1] = 5;
50349 } else {
50350 /* Otherwise devices can be toggled via software */
50351- max8660_dcdc_ops.enable = max8660_dcdc_enable;
50352- max8660_dcdc_ops.disable = max8660_dcdc_disable;
50353+ pax_open_kernel();
50354+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
50355+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
50356+ pax_close_kernel();
50357 }
50358
50359 /*
50360diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
50361index c3d55c2..0dddfe6 100644
50362--- a/drivers/regulator/max8973-regulator.c
50363+++ b/drivers/regulator/max8973-regulator.c
50364@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
50365 if (!pdata || !pdata->enable_ext_control) {
50366 max->desc.enable_reg = MAX8973_VOUT;
50367 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
50368- max->ops.enable = regulator_enable_regmap;
50369- max->ops.disable = regulator_disable_regmap;
50370- max->ops.is_enabled = regulator_is_enabled_regmap;
50371+ pax_open_kernel();
50372+ *(void **)&max->ops.enable = regulator_enable_regmap;
50373+ *(void **)&max->ops.disable = regulator_disable_regmap;
50374+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
50375+ pax_close_kernel();
50376 }
50377
50378 if (pdata) {
50379diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
50380index 0d17c92..a29f627 100644
50381--- a/drivers/regulator/mc13892-regulator.c
50382+++ b/drivers/regulator/mc13892-regulator.c
50383@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
50384 mc13xxx_unlock(mc13892);
50385
50386 /* update mc13892_vcam ops */
50387- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50388+ pax_open_kernel();
50389+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50390 sizeof(struct regulator_ops));
50391- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50392- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50393+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50394+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50395+ pax_close_kernel();
50396 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
50397
50398 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
50399diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
50400index 5b2e761..c8c8a4a 100644
50401--- a/drivers/rtc/rtc-cmos.c
50402+++ b/drivers/rtc/rtc-cmos.c
50403@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
50404 hpet_rtc_timer_init();
50405
50406 /* export at least the first block of NVRAM */
50407- nvram.size = address_space - NVRAM_OFFSET;
50408+ pax_open_kernel();
50409+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
50410+ pax_close_kernel();
50411 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
50412 if (retval < 0) {
50413 dev_dbg(dev, "can't create nvram file? %d\n", retval);
50414diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
50415index d049393..bb20be0 100644
50416--- a/drivers/rtc/rtc-dev.c
50417+++ b/drivers/rtc/rtc-dev.c
50418@@ -16,6 +16,7 @@
50419 #include <linux/module.h>
50420 #include <linux/rtc.h>
50421 #include <linux/sched.h>
50422+#include <linux/grsecurity.h>
50423 #include "rtc-core.h"
50424
50425 static dev_t rtc_devt;
50426@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
50427 if (copy_from_user(&tm, uarg, sizeof(tm)))
50428 return -EFAULT;
50429
50430+ gr_log_timechange();
50431+
50432 return rtc_set_time(rtc, &tm);
50433
50434 case RTC_PIE_ON:
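[Editor's note] gr_log_timechange() hooks the RTC_SET_TIME ioctl so grsecurity can audit attempts to change the hardware clock, a classic way of confusing log timelines. The hook's body is not part of this section; conceptually it is an audit record keyed to the current task. A hypothetical userspace-flavored sketch of such a hook (names and log format entirely mine, not grsecurity's code):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Hypothetical audit hook: record who tried to set the clock and when.
 * The real gr_log_timechange() logs through grsecurity's own kernel
 * audit machinery; this only models the intent. */
static void log_timechange(void)
{
	time_t now = time(NULL);
	char stamp[32];

	strftime(stamp, sizeof(stamp), "%F %T", localtime(&now));
	fprintf(stderr, "audit: uid %u pid %ld attempted to set the time at %s\n",
		(unsigned int)getuid(), (long)getpid(), stamp);
}

int main(void)
{
	log_timechange();   /* called just before the actual set-time operation */
	return 0;
}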
50435diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
50436index 4ffabb3..1f87fca 100644
50437--- a/drivers/rtc/rtc-ds1307.c
50438+++ b/drivers/rtc/rtc-ds1307.c
50439@@ -107,7 +107,7 @@ struct ds1307 {
50440 u8 offset; /* register's offset */
50441 u8 regs[11];
50442 u16 nvram_offset;
50443- struct bin_attribute *nvram;
50444+ bin_attribute_no_const *nvram;
50445 enum ds_type type;
50446 unsigned long flags;
50447 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
50448diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
50449index 90abb5b..e0bf6dd 100644
50450--- a/drivers/rtc/rtc-m48t59.c
50451+++ b/drivers/rtc/rtc-m48t59.c
50452@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
50453 if (IS_ERR(m48t59->rtc))
50454 return PTR_ERR(m48t59->rtc);
50455
50456- m48t59_nvram_attr.size = pdata->offset;
50457+ pax_open_kernel();
50458+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
50459+ pax_close_kernel();
50460
50461 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
50462 if (ret)
50463diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
50464index e693af6..2e525b6 100644
50465--- a/drivers/scsi/bfa/bfa_fcpim.h
50466+++ b/drivers/scsi/bfa/bfa_fcpim.h
50467@@ -36,7 +36,7 @@ struct bfa_iotag_s {
50468
50469 struct bfa_itn_s {
50470 bfa_isr_func_t isr;
50471-};
50472+} __no_const;
50473
50474 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
50475 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
50476diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
50477index 0f19455..ef7adb5 100644
50478--- a/drivers/scsi/bfa/bfa_fcs.c
50479+++ b/drivers/scsi/bfa/bfa_fcs.c
50480@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
50481 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
50482
50483 static struct bfa_fcs_mod_s fcs_modules[] = {
50484- { bfa_fcs_port_attach, NULL, NULL },
50485- { bfa_fcs_uf_attach, NULL, NULL },
50486- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
50487- bfa_fcs_fabric_modexit },
50488+ {
50489+ .attach = bfa_fcs_port_attach,
50490+ .modinit = NULL,
50491+ .modexit = NULL
50492+ },
50493+ {
50494+ .attach = bfa_fcs_uf_attach,
50495+ .modinit = NULL,
50496+ .modexit = NULL
50497+ },
50498+ {
50499+ .attach = bfa_fcs_fabric_attach,
50500+ .modinit = bfa_fcs_fabric_modinit,
50501+ .modexit = bfa_fcs_fabric_modexit
50502+ },
50503 };
50504
50505 /*
50506diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
50507index ff75ef8..2dfe00a 100644
50508--- a/drivers/scsi/bfa/bfa_fcs_lport.c
50509+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
50510@@ -89,15 +89,26 @@ static struct {
50511 void (*offline) (struct bfa_fcs_lport_s *port);
50512 } __port_action[] = {
50513 {
50514- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
50515- bfa_fcs_lport_unknown_offline}, {
50516- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
50517- bfa_fcs_lport_fab_offline}, {
50518- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
50519- bfa_fcs_lport_n2n_offline}, {
50520- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
50521- bfa_fcs_lport_loop_offline},
50522- };
50523+ .init = bfa_fcs_lport_unknown_init,
50524+ .online = bfa_fcs_lport_unknown_online,
50525+ .offline = bfa_fcs_lport_unknown_offline
50526+ },
50527+ {
50528+ .init = bfa_fcs_lport_fab_init,
50529+ .online = bfa_fcs_lport_fab_online,
50530+ .offline = bfa_fcs_lport_fab_offline
50531+ },
50532+ {
50533+ .init = bfa_fcs_lport_n2n_init,
50534+ .online = bfa_fcs_lport_n2n_online,
50535+ .offline = bfa_fcs_lport_n2n_offline
50536+ },
50537+ {
50538+ .init = bfa_fcs_lport_loop_init,
50539+ .online = bfa_fcs_lport_loop_online,
50540+ .offline = bfa_fcs_lport_loop_offline
50541+ },
50542+};
50543
50544 /*
50545 * fcs_port_sm FCS logical port state machine
50546diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
50547index a38aafa0..fe8f03b 100644
50548--- a/drivers/scsi/bfa/bfa_ioc.h
50549+++ b/drivers/scsi/bfa/bfa_ioc.h
50550@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
50551 bfa_ioc_disable_cbfn_t disable_cbfn;
50552 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
50553 bfa_ioc_reset_cbfn_t reset_cbfn;
50554-};
50555+} __no_const;
50556
50557 /*
50558 * IOC event notification mechanism.
50559@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
50560 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
50561 enum bfi_ioc_state fwstate);
50562 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
50563-};
50564+} __no_const;
50565
50566 /*
50567 * Queue element to wait for room in request queue. FIFO order is
50568diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
50569index a14c784..6de6790 100644
50570--- a/drivers/scsi/bfa/bfa_modules.h
50571+++ b/drivers/scsi/bfa/bfa_modules.h
50572@@ -78,12 +78,12 @@ enum {
50573 \
50574 extern struct bfa_module_s hal_mod_ ## __mod; \
50575 struct bfa_module_s hal_mod_ ## __mod = { \
50576- bfa_ ## __mod ## _meminfo, \
50577- bfa_ ## __mod ## _attach, \
50578- bfa_ ## __mod ## _detach, \
50579- bfa_ ## __mod ## _start, \
50580- bfa_ ## __mod ## _stop, \
50581- bfa_ ## __mod ## _iocdisable, \
50582+ .meminfo = bfa_ ## __mod ## _meminfo, \
50583+ .attach = bfa_ ## __mod ## _attach, \
50584+ .detach = bfa_ ## __mod ## _detach, \
50585+ .start = bfa_ ## __mod ## _start, \
50586+ .stop = bfa_ ## __mod ## _stop, \
50587+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
50588 }
50589
50590 #define BFA_CACHELINE_SZ (256)
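
[Editor's note] Switching the BFA module table (and the fcs_modules and __port_action arrays above) to designated initializers is partly documentation and partly a prerequisite: positional initializers silently misassign fields if the struct ever gains or reorders members, and tooling such as a constify plugin can reason about .field = value forms far more easily. The contrast in miniature:

#include <stdio.h>

struct module_ops {
	void (*attach)(void);
	void (*start)(void);
	void (*stop)(void);
};

static void my_attach(void) { puts("attach"); }
static void my_stop(void)   { puts("stop"); }

/* Positional: if 'start' were ever inserted before 'attach', this
 * would still compile but wire the wrong handlers. */
static struct module_ops positional = { my_attach, NULL, my_stop };

/* Designated: immune to member reordering, self-documenting. */
static struct module_ops designated = {
	.attach = my_attach,
	.start  = NULL,
	.stop   = my_stop,
};

int main(void)
{
	designated.attach();
	designated.stop();
	return positional.attach == designated.attach ? 0 : 1;
}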
50591diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
50592index 045c4e1..13de803 100644
50593--- a/drivers/scsi/fcoe/fcoe_sysfs.c
50594+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
50595@@ -33,8 +33,8 @@
50596 */
50597 #include "libfcoe.h"
50598
50599-static atomic_t ctlr_num;
50600-static atomic_t fcf_num;
50601+static atomic_unchecked_t ctlr_num;
50602+static atomic_unchecked_t fcf_num;
50603
50604 /*
50605 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
50606@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
50607 if (!ctlr)
50608 goto out;
50609
50610- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
50611+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
50612 ctlr->f = f;
50613 ctlr->mode = FIP_CONN_TYPE_FABRIC;
50614 INIT_LIST_HEAD(&ctlr->fcfs);
50615@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
50616 fcf->dev.parent = &ctlr->dev;
50617 fcf->dev.bus = &fcoe_bus_type;
50618 fcf->dev.type = &fcoe_fcf_device_type;
50619- fcf->id = atomic_inc_return(&fcf_num) - 1;
50620+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
50621 fcf->state = FCOE_FCF_STATE_UNKNOWN;
50622
50623 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
50624@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
50625 {
50626 int error;
50627
50628- atomic_set(&ctlr_num, 0);
50629- atomic_set(&fcf_num, 0);
50630+ atomic_set_unchecked(&ctlr_num, 0);
50631+ atomic_set_unchecked(&fcf_num, 0);
50632
50633 error = bus_register(&fcoe_bus_type);
50634 if (error)
50635diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
50636index 8bb173e..20236b4 100644
50637--- a/drivers/scsi/hosts.c
50638+++ b/drivers/scsi/hosts.c
50639@@ -42,7 +42,7 @@
50640 #include "scsi_logging.h"
50641
50642
50643-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50644+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50645
50646
50647 static void scsi_host_cls_release(struct device *dev)
50648@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
50649 * subtract one because we increment first then return, but we need to
50650 * know what the next host number was before increment
50651 */
50652- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
50653+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
50654 shost->dma_channel = 0xff;
50655
50656 /* These three are default values which can be overridden */
50657diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
50658index 6bb4611..0203251 100644
50659--- a/drivers/scsi/hpsa.c
50660+++ b/drivers/scsi/hpsa.c
50661@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
50662 struct reply_queue_buffer *rq = &h->reply_queue[q];
50663
50664 if (h->transMethod & CFGTBL_Trans_io_accel1)
50665- return h->access.command_completed(h, q);
50666+ return h->access->command_completed(h, q);
50667
50668 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
50669- return h->access.command_completed(h, q);
50670+ return h->access->command_completed(h, q);
50671
50672 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
50673 a = rq->head[rq->current_entry];
50674@@ -5360,7 +5360,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50675 while (!list_empty(&h->reqQ)) {
50676 c = list_entry(h->reqQ.next, struct CommandList, list);
50677 /* can't do anything if fifo is full */
50678- if ((h->access.fifo_full(h))) {
50679+ if ((h->access->fifo_full(h))) {
50680 h->fifo_recently_full = 1;
50681 dev_warn(&h->pdev->dev, "fifo full\n");
50682 break;
50683@@ -5376,7 +5376,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50684 atomic_inc(&h->commands_outstanding);
50685 spin_unlock_irqrestore(&h->lock, *flags);
50686 /* Tell the controller execute command */
50687- h->access.submit_command(h, c);
50688+ h->access->submit_command(h, c);
50689 spin_lock_irqsave(&h->lock, *flags);
50690 }
50691 }
50692@@ -5392,17 +5392,17 @@ static void lock_and_start_io(struct ctlr_info *h)
50693
50694 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
50695 {
50696- return h->access.command_completed(h, q);
50697+ return h->access->command_completed(h, q);
50698 }
50699
50700 static inline bool interrupt_pending(struct ctlr_info *h)
50701 {
50702- return h->access.intr_pending(h);
50703+ return h->access->intr_pending(h);
50704 }
50705
50706 static inline long interrupt_not_for_us(struct ctlr_info *h)
50707 {
50708- return (h->access.intr_pending(h) == 0) ||
50709+ return (h->access->intr_pending(h) == 0) ||
50710 (h->interrupts_enabled == 0);
50711 }
50712
50713@@ -6343,7 +6343,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
50714 if (prod_index < 0)
50715 return -ENODEV;
50716 h->product_name = products[prod_index].product_name;
50717- h->access = *(products[prod_index].access);
50718+ h->access = products[prod_index].access;
50719
50720 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
50721 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
50722@@ -6690,7 +6690,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
50723 unsigned long flags;
50724 u32 lockup_detected;
50725
50726- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50727+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50728 spin_lock_irqsave(&h->lock, flags);
50729 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
50730 if (!lockup_detected) {
50731@@ -6937,7 +6937,7 @@ reinit_after_soft_reset:
50732 }
50733
50734 /* make sure the board interrupts are off */
50735- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50736+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50737
50738 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
50739 goto clean2;
50740@@ -6972,7 +6972,7 @@ reinit_after_soft_reset:
50741 * fake ones to scoop up any residual completions.
50742 */
50743 spin_lock_irqsave(&h->lock, flags);
50744- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50745+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50746 spin_unlock_irqrestore(&h->lock, flags);
50747 free_irqs(h);
50748 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
50749@@ -6991,9 +6991,9 @@ reinit_after_soft_reset:
50750 dev_info(&h->pdev->dev, "Board READY.\n");
50751 dev_info(&h->pdev->dev,
50752 "Waiting for stale completions to drain.\n");
50753- h->access.set_intr_mask(h, HPSA_INTR_ON);
50754+ h->access->set_intr_mask(h, HPSA_INTR_ON);
50755 msleep(10000);
50756- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50757+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50758
50759 rc = controller_reset_failed(h->cfgtable);
50760 if (rc)
50761@@ -7019,7 +7019,7 @@ reinit_after_soft_reset:
50762 h->drv_req_rescan = 0;
50763
50764 /* Turn the interrupts on so we can service requests */
50765- h->access.set_intr_mask(h, HPSA_INTR_ON);
50766+ h->access->set_intr_mask(h, HPSA_INTR_ON);
50767
50768 hpsa_hba_inquiry(h);
50769 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
50770@@ -7084,7 +7084,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
50771 * To write all data in the battery backed cache to disks
50772 */
50773 hpsa_flush_cache(h);
50774- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50775+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50776 hpsa_free_irqs_and_disable_msix(h);
50777 }
50778
50779@@ -7202,7 +7202,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50780 CFGTBL_Trans_enable_directed_msix |
50781 (trans_support & (CFGTBL_Trans_io_accel1 |
50782 CFGTBL_Trans_io_accel2));
50783- struct access_method access = SA5_performant_access;
50784+ struct access_method *access = &SA5_performant_access;
50785
50786 /* This is a bit complicated. There are 8 registers on
50787 * the controller which we write to to tell it 8 different
50788@@ -7244,7 +7244,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50789 * perform the superfluous readl() after each command submission.
50790 */
50791 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
50792- access = SA5_performant_access_no_read;
50793+ access = &SA5_performant_access_no_read;
50794
50795 /* Controller spec: zero out this buffer. */
50796 for (i = 0; i < h->nreply_queues; i++)
50797@@ -7274,12 +7274,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50798 * enable outbound interrupt coalescing in accelerator mode;
50799 */
50800 if (trans_support & CFGTBL_Trans_io_accel1) {
50801- access = SA5_ioaccel_mode1_access;
50802+ access = &SA5_ioaccel_mode1_access;
50803 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
50804 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
50805 } else {
50806 if (trans_support & CFGTBL_Trans_io_accel2) {
50807- access = SA5_ioaccel_mode2_access;
50808+ access = &SA5_ioaccel_mode2_access;
50809 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
50810 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
50811 }
50812diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
50813index 8e06d9e..396e0a1 100644
50814--- a/drivers/scsi/hpsa.h
50815+++ b/drivers/scsi/hpsa.h
50816@@ -127,7 +127,7 @@ struct ctlr_info {
50817 unsigned int msix_vector;
50818 unsigned int msi_vector;
50819 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
50820- struct access_method access;
50821+ struct access_method *access;
50822 char hba_mode_enabled;
50823
50824 /* queue and queue Info */
50825@@ -523,43 +523,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
50826 }
50827
50828 static struct access_method SA5_access = {
50829- SA5_submit_command,
50830- SA5_intr_mask,
50831- SA5_fifo_full,
50832- SA5_intr_pending,
50833- SA5_completed,
50834+ .submit_command = SA5_submit_command,
50835+ .set_intr_mask = SA5_intr_mask,
50836+ .fifo_full = SA5_fifo_full,
50837+ .intr_pending = SA5_intr_pending,
50838+ .command_completed = SA5_completed,
50839 };
50840
50841 static struct access_method SA5_ioaccel_mode1_access = {
50842- SA5_submit_command,
50843- SA5_performant_intr_mask,
50844- SA5_fifo_full,
50845- SA5_ioaccel_mode1_intr_pending,
50846- SA5_ioaccel_mode1_completed,
50847+ .submit_command = SA5_submit_command,
50848+ .set_intr_mask = SA5_performant_intr_mask,
50849+ .fifo_full = SA5_fifo_full,
50850+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
50851+ .command_completed = SA5_ioaccel_mode1_completed,
50852 };
50853
50854 static struct access_method SA5_ioaccel_mode2_access = {
50855- SA5_submit_command_ioaccel2,
50856- SA5_performant_intr_mask,
50857- SA5_fifo_full,
50858- SA5_performant_intr_pending,
50859- SA5_performant_completed,
50860+ .submit_command = SA5_submit_command_ioaccel2,
50861+ .set_intr_mask = SA5_performant_intr_mask,
50862+ .fifo_full = SA5_fifo_full,
50863+ .intr_pending = SA5_performant_intr_pending,
50864+ .command_completed = SA5_performant_completed,
50865 };
50866
50867 static struct access_method SA5_performant_access = {
50868- SA5_submit_command,
50869- SA5_performant_intr_mask,
50870- SA5_fifo_full,
50871- SA5_performant_intr_pending,
50872- SA5_performant_completed,
50873+ .submit_command = SA5_submit_command,
50874+ .set_intr_mask = SA5_performant_intr_mask,
50875+ .fifo_full = SA5_fifo_full,
50876+ .intr_pending = SA5_performant_intr_pending,
50877+ .command_completed = SA5_performant_completed,
50878 };
50879
50880 static struct access_method SA5_performant_access_no_read = {
50881- SA5_submit_command_no_read,
50882- SA5_performant_intr_mask,
50883- SA5_fifo_full,
50884- SA5_performant_intr_pending,
50885- SA5_performant_completed,
50886+ .submit_command = SA5_submit_command_no_read,
50887+ .set_intr_mask = SA5_performant_intr_mask,
50888+ .fifo_full = SA5_fifo_full,
50889+ .intr_pending = SA5_performant_intr_pending,
50890+ .command_completed = SA5_performant_completed,
50891 };
50892
50893 struct board_type {
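
[Editor's note] The hpsa change stops copying a struct access_method by value into each controller and stores a pointer to the shared table instead (h->access = products[prod_index].access;), so every call site becomes h->access->fn(...). Besides letting the tables be made const eventually, this keeps one authoritative copy of the function pointers rather than a per-controller snapshot that a memory-corruption bug could overwrite. The two shapes side by side (userspace model; names mine):

#include <stdio.h>

struct access_method {
	void (*submit)(int cmd);
};

static void submit_impl(int cmd) { printf("submit %d\n", cmd); }

static const struct access_method perf_access = { .submit = submit_impl };

struct ctlr_by_value { struct access_method access; };        /* old shape */
struct ctlr_by_ptr   { const struct access_method *access; }; /* new shape */

int main(void)
{
	struct ctlr_by_value a = { .access = perf_access };  /* writable copy */
	struct ctlr_by_ptr   b = { .access = &perf_access }; /* shared, const */

	a.access.submit(1);     /* old call style: h->access.submit_command() */
	b.access->submit(2);    /* new call style: h->access->submit_command() */
	return 0;
}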
50894diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
50895index 1b3a094..068e683 100644
50896--- a/drivers/scsi/libfc/fc_exch.c
50897+++ b/drivers/scsi/libfc/fc_exch.c
50898@@ -101,12 +101,12 @@ struct fc_exch_mgr {
50899 u16 pool_max_index;
50900
50901 struct {
50902- atomic_t no_free_exch;
50903- atomic_t no_free_exch_xid;
50904- atomic_t xid_not_found;
50905- atomic_t xid_busy;
50906- atomic_t seq_not_found;
50907- atomic_t non_bls_resp;
50908+ atomic_unchecked_t no_free_exch;
50909+ atomic_unchecked_t no_free_exch_xid;
50910+ atomic_unchecked_t xid_not_found;
50911+ atomic_unchecked_t xid_busy;
50912+ atomic_unchecked_t seq_not_found;
50913+ atomic_unchecked_t non_bls_resp;
50914 } stats;
50915 };
50916
50917@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
50918 /* allocate memory for exchange */
50919 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
50920 if (!ep) {
50921- atomic_inc(&mp->stats.no_free_exch);
50922+ atomic_inc_unchecked(&mp->stats.no_free_exch);
50923 goto out;
50924 }
50925 memset(ep, 0, sizeof(*ep));
50926@@ -874,7 +874,7 @@ out:
50927 return ep;
50928 err:
50929 spin_unlock_bh(&pool->lock);
50930- atomic_inc(&mp->stats.no_free_exch_xid);
50931+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
50932 mempool_free(ep, mp->ep_pool);
50933 return NULL;
50934 }
50935@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
50936 xid = ntohs(fh->fh_ox_id); /* we originated exch */
50937 ep = fc_exch_find(mp, xid);
50938 if (!ep) {
50939- atomic_inc(&mp->stats.xid_not_found);
50940+ atomic_inc_unchecked(&mp->stats.xid_not_found);
50941 reject = FC_RJT_OX_ID;
50942 goto out;
50943 }
50944@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
50945 ep = fc_exch_find(mp, xid);
50946 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
50947 if (ep) {
50948- atomic_inc(&mp->stats.xid_busy);
50949+ atomic_inc_unchecked(&mp->stats.xid_busy);
50950 reject = FC_RJT_RX_ID;
50951 goto rel;
50952 }
50953@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
50954 }
50955 xid = ep->xid; /* get our XID */
50956 } else if (!ep) {
50957- atomic_inc(&mp->stats.xid_not_found);
50958+ atomic_inc_unchecked(&mp->stats.xid_not_found);
50959 reject = FC_RJT_RX_ID; /* XID not found */
50960 goto out;
50961 }
50962@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
50963 } else {
50964 sp = &ep->seq;
50965 if (sp->id != fh->fh_seq_id) {
50966- atomic_inc(&mp->stats.seq_not_found);
50967+ atomic_inc_unchecked(&mp->stats.seq_not_found);
50968 if (f_ctl & FC_FC_END_SEQ) {
50969 /*
50970 * Update sequence_id based on incoming last
50971@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
50972
50973 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
50974 if (!ep) {
50975- atomic_inc(&mp->stats.xid_not_found);
50976+ atomic_inc_unchecked(&mp->stats.xid_not_found);
50977 goto out;
50978 }
50979 if (ep->esb_stat & ESB_ST_COMPLETE) {
50980- atomic_inc(&mp->stats.xid_not_found);
50981+ atomic_inc_unchecked(&mp->stats.xid_not_found);
50982 goto rel;
50983 }
50984 if (ep->rxid == FC_XID_UNKNOWN)
50985 ep->rxid = ntohs(fh->fh_rx_id);
50986 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
50987- atomic_inc(&mp->stats.xid_not_found);
50988+ atomic_inc_unchecked(&mp->stats.xid_not_found);
50989 goto rel;
50990 }
50991 if (ep->did != ntoh24(fh->fh_s_id) &&
50992 ep->did != FC_FID_FLOGI) {
50993- atomic_inc(&mp->stats.xid_not_found);
50994+ atomic_inc_unchecked(&mp->stats.xid_not_found);
50995 goto rel;
50996 }
50997 sof = fr_sof(fp);
50998@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
50999 sp->ssb_stat |= SSB_ST_RESP;
51000 sp->id = fh->fh_seq_id;
51001 } else if (sp->id != fh->fh_seq_id) {
51002- atomic_inc(&mp->stats.seq_not_found);
51003+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51004 goto rel;
51005 }
51006
51007@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51008 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
51009
51010 if (!sp)
51011- atomic_inc(&mp->stats.xid_not_found);
51012+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51013 else
51014- atomic_inc(&mp->stats.non_bls_resp);
51015+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
51016
51017 fc_frame_free(fp);
51018 }
51019@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
51020
51021 list_for_each_entry(ema, &lport->ema_list, ema_list) {
51022 mp = ema->mp;
51023- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
51024+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
51025 st->fc_no_free_exch_xid +=
51026- atomic_read(&mp->stats.no_free_exch_xid);
51027- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
51028- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
51029- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
51030- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
51031+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
51032+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
51033+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
51034+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
51035+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
51036 }
51037 }
51038 EXPORT_SYMBOL(fc_exch_update_stats);
51039diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
51040index 932d9cc..50c7ee9 100644
51041--- a/drivers/scsi/libsas/sas_ata.c
51042+++ b/drivers/scsi/libsas/sas_ata.c
51043@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
51044 .postreset = ata_std_postreset,
51045 .error_handler = ata_std_error_handler,
51046 .post_internal_cmd = sas_ata_post_internal,
51047- .qc_defer = ata_std_qc_defer,
51048+ .qc_defer = ata_std_qc_defer,
51049 .qc_prep = ata_noop_qc_prep,
51050 .qc_issue = sas_ata_qc_issue,
51051 .qc_fill_rtf = sas_ata_qc_fill_rtf,
51052diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
51053index 434e903..5a4a79b 100644
51054--- a/drivers/scsi/lpfc/lpfc.h
51055+++ b/drivers/scsi/lpfc/lpfc.h
51056@@ -430,7 +430,7 @@ struct lpfc_vport {
51057 struct dentry *debug_nodelist;
51058 struct dentry *vport_debugfs_root;
51059 struct lpfc_debugfs_trc *disc_trc;
51060- atomic_t disc_trc_cnt;
51061+ atomic_unchecked_t disc_trc_cnt;
51062 #endif
51063 uint8_t stat_data_enabled;
51064 uint8_t stat_data_blocked;
51065@@ -880,8 +880,8 @@ struct lpfc_hba {
51066 struct timer_list fabric_block_timer;
51067 unsigned long bit_flags;
51068 #define FABRIC_COMANDS_BLOCKED 0
51069- atomic_t num_rsrc_err;
51070- atomic_t num_cmd_success;
51071+ atomic_unchecked_t num_rsrc_err;
51072+ atomic_unchecked_t num_cmd_success;
51073 unsigned long last_rsrc_error_time;
51074 unsigned long last_ramp_down_time;
51075 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
51076@@ -916,7 +916,7 @@ struct lpfc_hba {
51077
51078 struct dentry *debug_slow_ring_trc;
51079 struct lpfc_debugfs_trc *slow_ring_trc;
51080- atomic_t slow_ring_trc_cnt;
51081+ atomic_unchecked_t slow_ring_trc_cnt;
51082 /* iDiag debugfs sub-directory */
51083 struct dentry *idiag_root;
51084 struct dentry *idiag_pci_cfg;
51085diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
51086index 5633e7d..8272114 100644
51087--- a/drivers/scsi/lpfc/lpfc_debugfs.c
51088+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
51089@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
51090
51091 #include <linux/debugfs.h>
51092
51093-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51094+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51095 static unsigned long lpfc_debugfs_start_time = 0L;
51096
51097 /* iDiag */
51098@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
51099 lpfc_debugfs_enable = 0;
51100
51101 len = 0;
51102- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
51103+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
51104 (lpfc_debugfs_max_disc_trc - 1);
51105 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
51106 dtp = vport->disc_trc + i;
51107@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
51108 lpfc_debugfs_enable = 0;
51109
51110 len = 0;
51111- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
51112+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
51113 (lpfc_debugfs_max_slow_ring_trc - 1);
51114 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
51115 dtp = phba->slow_ring_trc + i;
51116@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
51117 !vport || !vport->disc_trc)
51118 return;
51119
51120- index = atomic_inc_return(&vport->disc_trc_cnt) &
51121+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
51122 (lpfc_debugfs_max_disc_trc - 1);
51123 dtp = vport->disc_trc + index;
51124 dtp->fmt = fmt;
51125 dtp->data1 = data1;
51126 dtp->data2 = data2;
51127 dtp->data3 = data3;
51128- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51129+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51130 dtp->jif = jiffies;
51131 #endif
51132 return;
51133@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
51134 !phba || !phba->slow_ring_trc)
51135 return;
51136
51137- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
51138+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
51139 (lpfc_debugfs_max_slow_ring_trc - 1);
51140 dtp = phba->slow_ring_trc + index;
51141 dtp->fmt = fmt;
51142 dtp->data1 = data1;
51143 dtp->data2 = data2;
51144 dtp->data3 = data3;
51145- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51146+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51147 dtp->jif = jiffies;
51148 #endif
51149 return;
51150@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51151 "slow_ring buffer\n");
51152 goto debug_failed;
51153 }
51154- atomic_set(&phba->slow_ring_trc_cnt, 0);
51155+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
51156 memset(phba->slow_ring_trc, 0,
51157 (sizeof(struct lpfc_debugfs_trc) *
51158 lpfc_debugfs_max_slow_ring_trc));
51159@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51160 "buffer\n");
51161 goto debug_failed;
51162 }
51163- atomic_set(&vport->disc_trc_cnt, 0);
51164+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
51165
51166 snprintf(name, sizeof(name), "discovery_trace");
51167 vport->debug_disc_trc =
51168diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
51169index 0b2c53a..aec2b45 100644
51170--- a/drivers/scsi/lpfc/lpfc_init.c
51171+++ b/drivers/scsi/lpfc/lpfc_init.c
51172@@ -11290,8 +11290,10 @@ lpfc_init(void)
51173 "misc_register returned with status %d", error);
51174
51175 if (lpfc_enable_npiv) {
51176- lpfc_transport_functions.vport_create = lpfc_vport_create;
51177- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51178+ pax_open_kernel();
51179+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
51180+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51181+ pax_close_kernel();
51182 }
51183 lpfc_transport_template =
51184 fc_attach_transport(&lpfc_transport_functions);
51185diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
51186index 4f9222e..f1850e3 100644
51187--- a/drivers/scsi/lpfc/lpfc_scsi.c
51188+++ b/drivers/scsi/lpfc/lpfc_scsi.c
51189@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
51190 unsigned long expires;
51191
51192 spin_lock_irqsave(&phba->hbalock, flags);
51193- atomic_inc(&phba->num_rsrc_err);
51194+ atomic_inc_unchecked(&phba->num_rsrc_err);
51195 phba->last_rsrc_error_time = jiffies;
51196
51197 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
51198@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51199 unsigned long num_rsrc_err, num_cmd_success;
51200 int i;
51201
51202- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
51203- num_cmd_success = atomic_read(&phba->num_cmd_success);
51204+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
51205+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
51206
51207 /*
51208 * The error and success command counters are global per
51209@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51210 }
51211 }
51212 lpfc_destroy_vport_work_array(phba, vports);
51213- atomic_set(&phba->num_rsrc_err, 0);
51214- atomic_set(&phba->num_cmd_success, 0);
51215+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
51216+ atomic_set_unchecked(&phba->num_cmd_success, 0);
51217 }
51218
51219 /**
51220diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51221index 6a1c036..38e0e8d 100644
51222--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51223+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51224@@ -1508,7 +1508,7 @@ _scsih_get_resync(struct device *dev)
51225 {
51226 struct scsi_device *sdev = to_scsi_device(dev);
51227 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51228- static struct _raid_device *raid_device;
51229+ struct _raid_device *raid_device;
51230 unsigned long flags;
51231 Mpi2RaidVolPage0_t vol_pg0;
51232 Mpi2ConfigReply_t mpi_reply;
51233@@ -1560,7 +1560,7 @@ _scsih_get_state(struct device *dev)
51234 {
51235 struct scsi_device *sdev = to_scsi_device(dev);
51236 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51237- static struct _raid_device *raid_device;
51238+ struct _raid_device *raid_device;
51239 unsigned long flags;
51240 Mpi2RaidVolPage0_t vol_pg0;
51241 Mpi2ConfigReply_t mpi_reply;
51242@@ -6602,7 +6602,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
51243 Mpi2EventDataIrOperationStatus_t *event_data =
51244 (Mpi2EventDataIrOperationStatus_t *)
51245 fw_event->event_data;
51246- static struct _raid_device *raid_device;
51247+ struct _raid_device *raid_device;
51248 unsigned long flags;
51249 u16 handle;
51250
51251@@ -7073,7 +7073,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
51252 u64 sas_address;
51253 struct _sas_device *sas_device;
51254 struct _sas_node *expander_device;
51255- static struct _raid_device *raid_device;
51256+ struct _raid_device *raid_device;
51257 u8 retry_count;
51258 unsigned long flags;
51259
51260diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
51261index 8c27b6a..607f56e 100644
51262--- a/drivers/scsi/pmcraid.c
51263+++ b/drivers/scsi/pmcraid.c
51264@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
51265 res->scsi_dev = scsi_dev;
51266 scsi_dev->hostdata = res;
51267 res->change_detected = 0;
51268- atomic_set(&res->read_failures, 0);
51269- atomic_set(&res->write_failures, 0);
51270+ atomic_set_unchecked(&res->read_failures, 0);
51271+ atomic_set_unchecked(&res->write_failures, 0);
51272 rc = 0;
51273 }
51274 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
51275@@ -2646,9 +2646,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
51276
51277 /* If this was a SCSI read/write command keep count of errors */
51278 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
51279- atomic_inc(&res->read_failures);
51280+ atomic_inc_unchecked(&res->read_failures);
51281 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
51282- atomic_inc(&res->write_failures);
51283+ atomic_inc_unchecked(&res->write_failures);
51284
51285 if (!RES_IS_GSCSI(res->cfg_entry) &&
51286 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
51287@@ -3474,7 +3474,7 @@ static int pmcraid_queuecommand_lck(
51288 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51289 * hrrq_id assigned here in queuecommand
51290 */
51291- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51292+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51293 pinstance->num_hrrq;
51294 cmd->cmd_done = pmcraid_io_done;
51295
51296@@ -3788,7 +3788,7 @@ static long pmcraid_ioctl_passthrough(
51297 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51298 * hrrq_id assigned here in queuecommand
51299 */
51300- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51301+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51302 pinstance->num_hrrq;
51303
51304 if (request_size) {
51305@@ -4426,7 +4426,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
51306
51307 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
51308 /* add resources only after host is added into system */
51309- if (!atomic_read(&pinstance->expose_resources))
51310+ if (!atomic_read_unchecked(&pinstance->expose_resources))
51311 return;
51312
51313 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
51314@@ -5243,8 +5243,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
51315 init_waitqueue_head(&pinstance->reset_wait_q);
51316
51317 atomic_set(&pinstance->outstanding_cmds, 0);
51318- atomic_set(&pinstance->last_message_id, 0);
51319- atomic_set(&pinstance->expose_resources, 0);
51320+ atomic_set_unchecked(&pinstance->last_message_id, 0);
51321+ atomic_set_unchecked(&pinstance->expose_resources, 0);
51322
51323 INIT_LIST_HEAD(&pinstance->free_res_q);
51324 INIT_LIST_HEAD(&pinstance->used_res_q);
51325@@ -5957,7 +5957,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
51326 /* Schedule worker thread to handle CCN and take care of adding and
51327 * removing devices to OS
51328 */
51329- atomic_set(&pinstance->expose_resources, 1);
51330+ atomic_set_unchecked(&pinstance->expose_resources, 1);
51331 schedule_work(&pinstance->worker_q);
51332 return rc;
51333
51334diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
51335index e1d150f..6c6df44 100644
51336--- a/drivers/scsi/pmcraid.h
51337+++ b/drivers/scsi/pmcraid.h
51338@@ -748,7 +748,7 @@ struct pmcraid_instance {
51339 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
51340
51341 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
51342- atomic_t last_message_id;
51343+ atomic_unchecked_t last_message_id;
51344
51345 /* configuration table */
51346 struct pmcraid_config_table *cfg_table;
51347@@ -777,7 +777,7 @@ struct pmcraid_instance {
51348 atomic_t outstanding_cmds;
51349
51350 /* should add/delete resources to mid-layer now ?*/
51351- atomic_t expose_resources;
51352+ atomic_unchecked_t expose_resources;
51353
51354
51355
51356@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
51357 struct pmcraid_config_table_entry_ext cfg_entry_ext;
51358 };
51359 struct scsi_device *scsi_dev; /* Link scsi_device structure */
51360- atomic_t read_failures; /* count of failed READ commands */
51361- atomic_t write_failures; /* count of failed WRITE commands */
51362+ atomic_unchecked_t read_failures; /* count of failed READ commands */
51363+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
51364
51365 /* To indicate add/delete/modify during CCN */
51366 u8 change_detected;
51367diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
51368index 82b92c4..3178171 100644
51369--- a/drivers/scsi/qla2xxx/qla_attr.c
51370+++ b/drivers/scsi/qla2xxx/qla_attr.c
51371@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
51372 return 0;
51373 }
51374
51375-struct fc_function_template qla2xxx_transport_functions = {
51376+fc_function_template_no_const qla2xxx_transport_functions = {
51377
51378 .show_host_node_name = 1,
51379 .show_host_port_name = 1,
51380@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
51381 .bsg_timeout = qla24xx_bsg_timeout,
51382 };
51383
51384-struct fc_function_template qla2xxx_transport_vport_functions = {
51385+fc_function_template_no_const qla2xxx_transport_vport_functions = {
51386
51387 .show_host_node_name = 1,
51388 .show_host_port_name = 1,
51389diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
51390index 7686bfe..4710893 100644
51391--- a/drivers/scsi/qla2xxx/qla_gbl.h
51392+++ b/drivers/scsi/qla2xxx/qla_gbl.h
51393@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
51394 struct device_attribute;
51395 extern struct device_attribute *qla2x00_host_attrs[];
51396 struct fc_function_template;
51397-extern struct fc_function_template qla2xxx_transport_functions;
51398-extern struct fc_function_template qla2xxx_transport_vport_functions;
51399+extern fc_function_template_no_const qla2xxx_transport_functions;
51400+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
51401 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
51402 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
51403 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
51404diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
51405index cce1cbc..5b9f0fe 100644
51406--- a/drivers/scsi/qla2xxx/qla_os.c
51407+++ b/drivers/scsi/qla2xxx/qla_os.c
51408@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
51409 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
51410 /* Ok, a 64bit DMA mask is applicable. */
51411 ha->flags.enable_64bit_addressing = 1;
51412- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51413- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51414+ pax_open_kernel();
51415+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51416+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51417+ pax_close_kernel();
51418 return;
51419 }
51420 }
51421diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
51422index 8f6d0fb..1b21097 100644
51423--- a/drivers/scsi/qla4xxx/ql4_def.h
51424+++ b/drivers/scsi/qla4xxx/ql4_def.h
51425@@ -305,7 +305,7 @@ struct ddb_entry {
51426 * (4000 only) */
51427 atomic_t relogin_timer; /* Max Time to wait for
51428 * relogin to complete */
51429- atomic_t relogin_retry_count; /* Num of times relogin has been
51430+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
51431 * retried */
51432 uint32_t default_time2wait; /* Default Min time between
51433 * relogins (+aens) */
51434diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
51435index 6d25879..3031a9f 100644
51436--- a/drivers/scsi/qla4xxx/ql4_os.c
51437+++ b/drivers/scsi/qla4xxx/ql4_os.c
51438@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
51439 */
51440 if (!iscsi_is_session_online(cls_sess)) {
51441 /* Reset retry relogin timer */
51442- atomic_inc(&ddb_entry->relogin_retry_count);
51443+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
51444 DEBUG2(ql4_printk(KERN_INFO, ha,
51445 "%s: index[%d] relogin timed out-retrying"
51446 " relogin (%d), retry (%d)\n", __func__,
51447 ddb_entry->fw_ddb_index,
51448- atomic_read(&ddb_entry->relogin_retry_count),
51449+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
51450 ddb_entry->default_time2wait + 4));
51451 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
51452 atomic_set(&ddb_entry->retry_relogin_timer,
51453@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
51454
51455 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
51456 atomic_set(&ddb_entry->relogin_timer, 0);
51457- atomic_set(&ddb_entry->relogin_retry_count, 0);
51458+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
51459 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
51460 ddb_entry->default_relogin_timeout =
51461 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
51462diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
51463index 17bb541..85f4508 100644
51464--- a/drivers/scsi/scsi_lib.c
51465+++ b/drivers/scsi/scsi_lib.c
51466@@ -1595,7 +1595,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
51467 shost = sdev->host;
51468 scsi_init_cmd_errh(cmd);
51469 cmd->result = DID_NO_CONNECT << 16;
51470- atomic_inc(&cmd->device->iorequest_cnt);
51471+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51472
51473 /*
51474 * SCSI request completion path will do scsi_device_unbusy(),
51475@@ -1618,9 +1618,9 @@ static void scsi_softirq_done(struct request *rq)
51476
51477 INIT_LIST_HEAD(&cmd->eh_entry);
51478
51479- atomic_inc(&cmd->device->iodone_cnt);
51480+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
51481 if (cmd->result)
51482- atomic_inc(&cmd->device->ioerr_cnt);
51483+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
51484
51485 disposition = scsi_decide_disposition(cmd);
51486 if (disposition != SUCCESS &&
51487@@ -1661,7 +1661,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
51488 struct Scsi_Host *host = cmd->device->host;
51489 int rtn = 0;
51490
51491- atomic_inc(&cmd->device->iorequest_cnt);
51492+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51493
51494 /* check if the device is still usable */
51495 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
51496diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
51497index 1ac38e7..6acc656 100644
51498--- a/drivers/scsi/scsi_sysfs.c
51499+++ b/drivers/scsi/scsi_sysfs.c
51500@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
51501 char *buf) \
51502 { \
51503 struct scsi_device *sdev = to_scsi_device(dev); \
51504- unsigned long long count = atomic_read(&sdev->field); \
51505+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
51506 return snprintf(buf, 20, "0x%llx\n", count); \
51507 } \
51508 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
51509diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
51510index 5d6f348..18778a6b 100644
51511--- a/drivers/scsi/scsi_transport_fc.c
51512+++ b/drivers/scsi/scsi_transport_fc.c
51513@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
51514 * Netlink Infrastructure
51515 */
51516
51517-static atomic_t fc_event_seq;
51518+static atomic_unchecked_t fc_event_seq;
51519
51520 /**
51521 * fc_get_event_number - Obtain the next sequential FC event number
51522@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
51523 u32
51524 fc_get_event_number(void)
51525 {
51526- return atomic_add_return(1, &fc_event_seq);
51527+ return atomic_add_return_unchecked(1, &fc_event_seq);
51528 }
51529 EXPORT_SYMBOL(fc_get_event_number);
51530
51531@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
51532 {
51533 int error;
51534
51535- atomic_set(&fc_event_seq, 0);
51536+ atomic_set_unchecked(&fc_event_seq, 0);
51537
51538 error = transport_class_register(&fc_host_class);
51539 if (error)
51540@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
51541 char *cp;
51542
51543 *val = simple_strtoul(buf, &cp, 0);
51544- if ((*cp && (*cp != '\n')) || (*val < 0))
51545+ if (*cp && (*cp != '\n'))
51546 return -EINVAL;
51547 /*
51548 * Check for overflow; dev_loss_tmo is u32
51549diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
51550index 67d43e3..8cee73c 100644
51551--- a/drivers/scsi/scsi_transport_iscsi.c
51552+++ b/drivers/scsi/scsi_transport_iscsi.c
51553@@ -79,7 +79,7 @@ struct iscsi_internal {
51554 struct transport_container session_cont;
51555 };
51556
51557-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
51558+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
51559 static struct workqueue_struct *iscsi_eh_timer_workq;
51560
51561 static DEFINE_IDA(iscsi_sess_ida);
51562@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
51563 int err;
51564
51565 ihost = shost->shost_data;
51566- session->sid = atomic_add_return(1, &iscsi_session_nr);
51567+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
51568
51569 if (target_id == ISCSI_MAX_TARGET) {
51570 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
51571@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
51572 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
51573 ISCSI_TRANSPORT_VERSION);
51574
51575- atomic_set(&iscsi_session_nr, 0);
51576+ atomic_set_unchecked(&iscsi_session_nr, 0);
51577
51578 err = class_register(&iscsi_transport_class);
51579 if (err)
51580diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
51581index ae45bd9..c32a586 100644
51582--- a/drivers/scsi/scsi_transport_srp.c
51583+++ b/drivers/scsi/scsi_transport_srp.c
51584@@ -35,7 +35,7 @@
51585 #include "scsi_priv.h"
51586
51587 struct srp_host_attrs {
51588- atomic_t next_port_id;
51589+ atomic_unchecked_t next_port_id;
51590 };
51591 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
51592
51593@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
51594 struct Scsi_Host *shost = dev_to_shost(dev);
51595 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
51596
51597- atomic_set(&srp_host->next_port_id, 0);
51598+ atomic_set_unchecked(&srp_host->next_port_id, 0);
51599 return 0;
51600 }
51601
51602@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
51603 rport_fast_io_fail_timedout);
51604 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
51605
51606- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
51607+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
51608 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
51609
51610 transport_setup_device(&rport->dev);
51611diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
51612index 05ea0d4..5af8049 100644
51613--- a/drivers/scsi/sd.c
51614+++ b/drivers/scsi/sd.c
51615@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
51616 sdkp->disk = gd;
51617 sdkp->index = index;
51618 atomic_set(&sdkp->openers, 0);
51619- atomic_set(&sdkp->device->ioerr_cnt, 0);
51620+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
51621
51622 if (!sdp->request_queue->rq_timeout) {
51623 if (sdp->type != TYPE_MOD)
51624diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
51625index 763bffe..e0eacf4 100644
51626--- a/drivers/scsi/sg.c
51627+++ b/drivers/scsi/sg.c
51628@@ -1098,7 +1098,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
51629 sdp->disk->disk_name,
51630 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
51631 NULL,
51632- (char *)arg);
51633+ (char __user *)arg);
51634 case BLKTRACESTART:
51635 return blk_trace_startstop(sdp->device->request_queue, 1);
51636 case BLKTRACESTOP:
51637diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
51638index 011a336..fb2b7a0 100644
51639--- a/drivers/soc/tegra/fuse/fuse-tegra.c
51640+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
51641@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
51642 return i;
51643 }
51644
51645-static struct bin_attribute fuse_bin_attr = {
51646+static bin_attribute_no_const fuse_bin_attr = {
51647 .attr = { .name = "fuse", .mode = S_IRUGO, },
51648 .read = fuse_read,
51649 };
51650diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
51651index 66a70e9..f82cea4 100644
51652--- a/drivers/spi/spi.c
51653+++ b/drivers/spi/spi.c
51654@@ -2238,7 +2238,7 @@ int spi_bus_unlock(struct spi_master *master)
51655 EXPORT_SYMBOL_GPL(spi_bus_unlock);
51656
51657 /* portable code must never pass more than 32 bytes */
51658-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
51659+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
51660
51661 static u8 *buf;
51662
51663diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
51664index b41429f..2de5373 100644
51665--- a/drivers/staging/android/timed_output.c
51666+++ b/drivers/staging/android/timed_output.c
51667@@ -25,7 +25,7 @@
51668 #include "timed_output.h"
51669
51670 static struct class *timed_output_class;
51671-static atomic_t device_count;
51672+static atomic_unchecked_t device_count;
51673
51674 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
51675 char *buf)
51676@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
51677 timed_output_class = class_create(THIS_MODULE, "timed_output");
51678 if (IS_ERR(timed_output_class))
51679 return PTR_ERR(timed_output_class);
51680- atomic_set(&device_count, 0);
51681+ atomic_set_unchecked(&device_count, 0);
51682 timed_output_class->dev_groups = timed_output_groups;
51683 }
51684
51685@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
51686 if (ret < 0)
51687 return ret;
51688
51689- tdev->index = atomic_inc_return(&device_count);
51690+ tdev->index = atomic_inc_return_unchecked(&device_count);
51691 tdev->dev = device_create(timed_output_class, NULL,
51692 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
51693 if (IS_ERR(tdev->dev))
51694diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
51695index f143cb6..6fb8255 100644
51696--- a/drivers/staging/comedi/comedi_fops.c
51697+++ b/drivers/staging/comedi/comedi_fops.c
51698@@ -273,8 +273,8 @@ static void comedi_file_reset(struct file *file)
51699 }
51700 cfp->last_attached = dev->attached;
51701 cfp->last_detach_count = dev->detach_count;
51702- ACCESS_ONCE(cfp->read_subdev) = read_s;
51703- ACCESS_ONCE(cfp->write_subdev) = write_s;
51704+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
51705+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
51706 }
51707
51708 static void comedi_file_check(struct file *file)
51709@@ -1885,7 +1885,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51710 !(s_old->async->cmd.flags & CMDF_WRITE))
51711 return -EBUSY;
51712
51713- ACCESS_ONCE(cfp->read_subdev) = s_new;
51714+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
51715 return 0;
51716 }
51717
51718@@ -1927,7 +1927,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51719 (s_old->async->cmd.flags & CMDF_WRITE))
51720 return -EBUSY;
51721
51722- ACCESS_ONCE(cfp->write_subdev) = s_new;
51723+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
51724 return 0;
51725 }
51726
51727diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
51728index 001348c..cfaac8a 100644
51729--- a/drivers/staging/gdm724x/gdm_tty.c
51730+++ b/drivers/staging/gdm724x/gdm_tty.c
51731@@ -44,7 +44,7 @@
51732 #define gdm_tty_send_control(n, r, v, d, l) (\
51733 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
51734
51735-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
51736+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
51737
51738 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
51739 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
51740diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
51741index 503b2d7..c904931 100644
51742--- a/drivers/staging/line6/driver.c
51743+++ b/drivers/staging/line6/driver.c
51744@@ -463,7 +463,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
51745 {
51746 struct usb_device *usbdev = line6->usbdev;
51747 int ret;
51748- unsigned char len;
51749+ unsigned char *plen;
51750
51751 /* query the serial number: */
51752 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
51753@@ -476,27 +476,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
51754 return ret;
51755 }
51756
51757+ plen = kmalloc(1, GFP_KERNEL);
51758+ if (plen == NULL)
51759+ return -ENOMEM;
51760+
51761 /* Wait for data length. We'll get 0xff until length arrives. */
51762 do {
51763 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
51764 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
51765 USB_DIR_IN,
51766- 0x0012, 0x0000, &len, 1,
51767+ 0x0012, 0x0000, plen, 1,
51768 LINE6_TIMEOUT * HZ);
51769 if (ret < 0) {
51770 dev_err(line6->ifcdev,
51771 "receive length failed (error %d)\n", ret);
51772+ kfree(plen);
51773 return ret;
51774 }
51775- } while (len == 0xff);
51776+ } while (*plen == 0xff);
51777
51778- if (len != datalen) {
51779+ if (*plen != datalen) {
51780 /* should be equal or something went wrong */
51781 dev_err(line6->ifcdev,
51782 "length mismatch (expected %d, got %d)\n",
51783- (int)datalen, (int)len);
51784+ (int)datalen, (int)*plen);
51785+ kfree(plen);
51786 return -EINVAL;
51787 }
51788+ kfree(plen);
51789
51790 /* receive the result: */
51791 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
51792@@ -520,7 +527,7 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
51793 {
51794 struct usb_device *usbdev = line6->usbdev;
51795 int ret;
51796- unsigned char status;
51797+ unsigned char *status;
51798
51799 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
51800 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
51801@@ -533,26 +540,34 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
51802 return ret;
51803 }
51804
51805+ status = kmalloc(1, GFP_KERNEL);
51806+ if (status == NULL)
51807+ return -ENOMEM;
51808+
51809 do {
51810 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
51811 0x67,
51812 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
51813 USB_DIR_IN,
51814 0x0012, 0x0000,
51815- &status, 1, LINE6_TIMEOUT * HZ);
51816+ status, 1, LINE6_TIMEOUT * HZ);
51817
51818 if (ret < 0) {
51819 dev_err(line6->ifcdev,
51820 "receiving status failed (error %d)\n", ret);
51821+ kfree(status);
51822 return ret;
51823 }
51824- } while (status == 0xff);
51825+ } while (*status == 0xff);
51826
51827- if (status != 0) {
51828+ if (*status != 0) {
51829 dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
51830+ kfree(status);
51831 return -EINVAL;
51832 }
51833
51834+ kfree(status);
51835+
51836 return 0;
51837 }
51838
51839diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
51840index 6943715..0a93632 100644
51841--- a/drivers/staging/line6/toneport.c
51842+++ b/drivers/staging/line6/toneport.c
51843@@ -11,6 +11,7 @@
51844 */
51845
51846 #include <linux/wait.h>
51847+#include <linux/slab.h>
51848 #include <sound/control.h>
51849
51850 #include "audio.h"
51851@@ -307,14 +308,20 @@ static void toneport_destruct(struct usb_interface *interface)
51852 */
51853 static void toneport_setup(struct usb_line6_toneport *toneport)
51854 {
51855- int ticks;
51856+ int *ticks;
51857 struct usb_line6 *line6 = &toneport->line6;
51858 struct usb_device *usbdev = line6->usbdev;
51859 u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
51860
51861+ ticks = kmalloc(sizeof(int), GFP_KERNEL);
51862+ if (ticks == NULL)
51863+ return;
51864+
51865 /* sync time on device with host: */
51866- ticks = (int)get_seconds();
51867- line6_write_data(line6, 0x80c6, &ticks, 4);
51868+ *ticks = (int)get_seconds();
51869+ line6_write_data(line6, 0x80c6, ticks, sizeof(int));
51870+
51871+ kfree(ticks);
51872
51873 /* enable device: */
51874 toneport_send_cmd(usbdev, 0x0301, 0x0000);
51875diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
51876index 463da07..e791ce9 100644
51877--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
51878+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
51879@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
51880 return 0;
51881 }
51882
51883-sfw_test_client_ops_t brw_test_client;
51884-void brw_init_test_client(void)
51885-{
51886- brw_test_client.tso_init = brw_client_init;
51887- brw_test_client.tso_fini = brw_client_fini;
51888- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
51889- brw_test_client.tso_done_rpc = brw_client_done_rpc;
51890+sfw_test_client_ops_t brw_test_client = {
51891+ .tso_init = brw_client_init,
51892+ .tso_fini = brw_client_fini,
51893+ .tso_prep_rpc = brw_client_prep_rpc,
51894+ .tso_done_rpc = brw_client_done_rpc,
51895 };
51896
51897 srpc_service_t brw_test_service;
51898diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
51899index cc9d182..8fabce3 100644
51900--- a/drivers/staging/lustre/lnet/selftest/framework.c
51901+++ b/drivers/staging/lustre/lnet/selftest/framework.c
51902@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
51903
51904 extern sfw_test_client_ops_t ping_test_client;
51905 extern srpc_service_t ping_test_service;
51906-extern void ping_init_test_client(void);
51907 extern void ping_init_test_service(void);
51908
51909 extern sfw_test_client_ops_t brw_test_client;
51910 extern srpc_service_t brw_test_service;
51911-extern void brw_init_test_client(void);
51912 extern void brw_init_test_service(void);
51913
51914
51915@@ -1675,12 +1673,10 @@ sfw_startup (void)
51916 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
51917 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
51918
51919- brw_init_test_client();
51920 brw_init_test_service();
51921 rc = sfw_register_test(&brw_test_service, &brw_test_client);
51922 LASSERT (rc == 0);
51923
51924- ping_init_test_client();
51925 ping_init_test_service();
51926 rc = sfw_register_test(&ping_test_service, &ping_test_client);
51927 LASSERT (rc == 0);
51928diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
51929index d8c0df6..5041cbb 100644
51930--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
51931+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
51932@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
51933 return 0;
51934 }
51935
51936-sfw_test_client_ops_t ping_test_client;
51937-void ping_init_test_client(void)
51938-{
51939- ping_test_client.tso_init = ping_client_init;
51940- ping_test_client.tso_fini = ping_client_fini;
51941- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
51942- ping_test_client.tso_done_rpc = ping_client_done_rpc;
51943-}
51944+sfw_test_client_ops_t ping_test_client = {
51945+ .tso_init = ping_client_init,
51946+ .tso_fini = ping_client_fini,
51947+ .tso_prep_rpc = ping_client_prep_rpc,
51948+ .tso_done_rpc = ping_client_done_rpc,
51949+};
51950
51951 srpc_service_t ping_test_service;
51952 void ping_init_test_service(void)
51953diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
51954index 83bc0a9..12ba00a 100644
51955--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
51956+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
51957@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
51958 ldlm_completion_callback lcs_completion;
51959 ldlm_blocking_callback lcs_blocking;
51960 ldlm_glimpse_callback lcs_glimpse;
51961-};
51962+} __no_const;
51963
51964 /* ldlm_lockd.c */
51965 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
51966diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
51967index 2a88b80..62e7e5f 100644
51968--- a/drivers/staging/lustre/lustre/include/obd.h
51969+++ b/drivers/staging/lustre/lustre/include/obd.h
51970@@ -1362,7 +1362,7 @@ struct md_ops {
51971 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
51972 * wrapper function in include/linux/obd_class.h.
51973 */
51974-};
51975+} __no_const;
51976
51977 struct lsm_operations {
51978 void (*lsm_free)(struct lov_stripe_md *);
51979diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
51980index a4c252f..b21acac 100644
51981--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
51982+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
51983@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
51984 int added = (mode == LCK_NL);
51985 int overlaps = 0;
51986 int splitted = 0;
51987- const struct ldlm_callback_suite null_cbs = { NULL };
51988+ const struct ldlm_callback_suite null_cbs = { };
51989
51990 CDEBUG(D_DLMTRACE,
51991 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
51992diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
51993index 83d3f08..b03adad 100644
51994--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
51995+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
51996@@ -236,7 +236,7 @@ int proc_console_max_delay_cs(struct ctl_table *table, int write,
51997 void __user *buffer, size_t *lenp, loff_t *ppos)
51998 {
51999 int rc, max_delay_cs;
52000- struct ctl_table dummy = *table;
52001+ ctl_table_no_const dummy = *table;
52002 long d;
52003
52004 dummy.data = &max_delay_cs;
52005@@ -268,7 +268,7 @@ int proc_console_min_delay_cs(struct ctl_table *table, int write,
52006 void __user *buffer, size_t *lenp, loff_t *ppos)
52007 {
52008 int rc, min_delay_cs;
52009- struct ctl_table dummy = *table;
52010+ ctl_table_no_const dummy = *table;
52011 long d;
52012
52013 dummy.data = &min_delay_cs;
52014@@ -300,7 +300,7 @@ int proc_console_backoff(struct ctl_table *table, int write,
52015 void __user *buffer, size_t *lenp, loff_t *ppos)
52016 {
52017 int rc, backoff;
52018- struct ctl_table dummy = *table;
52019+ ctl_table_no_const dummy = *table;
52020
52021 dummy.data = &backoff;
52022 dummy.proc_handler = &proc_dointvec;
52023diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
52024index 2c4fc74..b04ca79 100644
52025--- a/drivers/staging/lustre/lustre/libcfs/module.c
52026+++ b/drivers/staging/lustre/lustre/libcfs/module.c
52027@@ -315,11 +315,11 @@ out:
52028
52029
52030 struct cfs_psdev_ops libcfs_psdev_ops = {
52031- libcfs_psdev_open,
52032- libcfs_psdev_release,
52033- NULL,
52034- NULL,
52035- libcfs_ioctl
52036+ .p_open = libcfs_psdev_open,
52037+ .p_close = libcfs_psdev_release,
52038+ .p_read = NULL,
52039+ .p_write = NULL,
52040+ .p_ioctl = libcfs_ioctl
52041 };
52042
52043 extern int insert_proc(void);
52044diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
52045index fcbe836..8a7ada4 100644
52046--- a/drivers/staging/octeon/ethernet-rx.c
52047+++ b/drivers/staging/octeon/ethernet-rx.c
52048@@ -352,14 +352,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52049 /* Increment RX stats for virtual ports */
52050 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
52051 #ifdef CONFIG_64BIT
52052- atomic64_add(1,
52053+ atomic64_add_unchecked(1,
52054 (atomic64_t *)&priv->stats.rx_packets);
52055- atomic64_add(skb->len,
52056+ atomic64_add_unchecked(skb->len,
52057 (atomic64_t *)&priv->stats.rx_bytes);
52058 #else
52059- atomic_add(1,
52060+ atomic_add_unchecked(1,
52061 (atomic_t *)&priv->stats.rx_packets);
52062- atomic_add(skb->len,
52063+ atomic_add_unchecked(skb->len,
52064 (atomic_t *)&priv->stats.rx_bytes);
52065 #endif
52066 }
52067@@ -371,10 +371,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52068 dev->name);
52069 */
52070 #ifdef CONFIG_64BIT
52071- atomic64_add(1,
52072+ atomic64_add_unchecked(1,
52073 (atomic64_t *)&priv->stats.rx_dropped);
52074 #else
52075- atomic_add(1,
52076+ atomic_add_unchecked(1,
52077 (atomic_t *)&priv->stats.rx_dropped);
52078 #endif
52079 dev_kfree_skb_irq(skb);
52080diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
52081index ee32149..052d1836 100644
52082--- a/drivers/staging/octeon/ethernet.c
52083+++ b/drivers/staging/octeon/ethernet.c
52084@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
52085 * since the RX tasklet also increments it.
52086 */
52087 #ifdef CONFIG_64BIT
52088- atomic64_add(rx_status.dropped_packets,
52089- (atomic64_t *)&priv->stats.rx_dropped);
52090+ atomic64_add_unchecked(rx_status.dropped_packets,
52091+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
52092 #else
52093- atomic_add(rx_status.dropped_packets,
52094- (atomic_t *)&priv->stats.rx_dropped);
52095+ atomic_add_unchecked(rx_status.dropped_packets,
52096+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
52097 #endif
52098 }
52099
52100diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
52101index 3b476d8..f522d68 100644
52102--- a/drivers/staging/rtl8188eu/include/hal_intf.h
52103+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
52104@@ -225,7 +225,7 @@ struct hal_ops {
52105
52106 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
52107 void (*hal_reset_security_engine)(struct adapter *adapter);
52108-};
52109+} __no_const;
52110
52111 enum rt_eeprom_type {
52112 EEPROM_93C46,
52113diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
52114index 070cc03..6806e37 100644
52115--- a/drivers/staging/rtl8712/rtl871x_io.h
52116+++ b/drivers/staging/rtl8712/rtl871x_io.h
52117@@ -108,7 +108,7 @@ struct _io_ops {
52118 u8 *pmem);
52119 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
52120 u8 *pmem);
52121-};
52122+} __no_const;
52123
52124 struct io_req {
52125 struct list_head list;
52126diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
52127index 46dad63..fe4acdc 100644
52128--- a/drivers/staging/unisys/visorchipset/visorchipset.h
52129+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
52130@@ -226,7 +226,7 @@ struct visorchipset_busdev_notifiers {
52131 void (*device_resume)(ulong bus_no, ulong dev_no);
52132 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
52133 ulong *max_size);
52134-};
52135+} __no_const;
52136
52137 /* These functions live inside visorchipset, and will be called to indicate
52138 * responses to specific events (by code outside of visorchipset).
52139@@ -241,7 +241,7 @@ struct visorchipset_busdev_responders {
52140 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
52141 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
52142 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
52143-};
52144+} __no_const;
52145
52146 /** Register functions (in the bus driver) to get called by visorchipset
52147 * whenever a bus or device appears for which this service partition is
52148diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
52149index 9512af6..045bf5a 100644
52150--- a/drivers/target/sbp/sbp_target.c
52151+++ b/drivers/target/sbp/sbp_target.c
52152@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
52153
52154 #define SESSION_MAINTENANCE_INTERVAL HZ
52155
52156-static atomic_t login_id = ATOMIC_INIT(0);
52157+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
52158
52159 static void session_maintenance_work(struct work_struct *);
52160 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
52161@@ -444,7 +444,7 @@ static void sbp_management_request_login(
52162 login->lun = se_lun;
52163 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
52164 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
52165- login->login_id = atomic_inc_return(&login_id);
52166+ login->login_id = atomic_inc_return_unchecked(&login_id);
52167
52168 login->tgt_agt = sbp_target_agent_register(login);
52169 if (IS_ERR(login->tgt_agt)) {
52170diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
52171index 58f49ff..2669604 100644
52172--- a/drivers/target/target_core_device.c
52173+++ b/drivers/target/target_core_device.c
52174@@ -1469,7 +1469,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
52175 spin_lock_init(&dev->se_tmr_lock);
52176 spin_lock_init(&dev->qf_cmd_lock);
52177 sema_init(&dev->caw_sem, 1);
52178- atomic_set(&dev->dev_ordered_id, 0);
52179+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
52180 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
52181 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
52182 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
52183diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
52184index 0adc0f6..7757bfe 100644
52185--- a/drivers/target/target_core_transport.c
52186+++ b/drivers/target/target_core_transport.c
52187@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
52188 * Used to determine when ORDERED commands should go from
52189 * Dormant to Active status.
52190 */
52191- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
52192+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
52193 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
52194 cmd->se_ordered_id, cmd->sam_task_attr,
52195 dev->transport->name);
52196diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
52197index 65a98a9..d93d3a8 100644
52198--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
52199+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
52200@@ -277,8 +277,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
52201 platform_set_drvdata(pdev, priv);
52202
52203 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
52204- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52205- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52206+ pax_open_kernel();
52207+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52208+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52209+ pax_close_kernel();
52210 }
52211 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
52212 priv, &int3400_thermal_ops,
52213diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
52214index d717f3d..cae1cc3e 100644
52215--- a/drivers/thermal/of-thermal.c
52216+++ b/drivers/thermal/of-thermal.c
52217@@ -31,6 +31,7 @@
52218 #include <linux/export.h>
52219 #include <linux/string.h>
52220 #include <linux/thermal.h>
52221+#include <linux/mm.h>
52222
52223 #include "thermal_core.h"
52224
52225@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
52226 tz->ops = ops;
52227 tz->sensor_data = data;
52228
52229- tzd->ops->get_temp = of_thermal_get_temp;
52230- tzd->ops->get_trend = of_thermal_get_trend;
52231- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52232+ pax_open_kernel();
52233+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
52234+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
52235+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52236+ pax_close_kernel();
52237 mutex_unlock(&tzd->lock);
52238
52239 return tzd;
52240@@ -541,9 +544,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
52241 return;
52242
52243 mutex_lock(&tzd->lock);
52244- tzd->ops->get_temp = NULL;
52245- tzd->ops->get_trend = NULL;
52246- tzd->ops->set_emul_temp = NULL;
52247+ pax_open_kernel();
52248+ *(void **)&tzd->ops->get_temp = NULL;
52249+ *(void **)&tzd->ops->get_trend = NULL;
52250+ *(void **)&tzd->ops->set_emul_temp = NULL;
52251+ pax_close_kernel();
52252
52253 tz->ops = NULL;
52254 tz->sensor_data = NULL;
52255diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
52256index fd66f57..48e6376 100644
52257--- a/drivers/tty/cyclades.c
52258+++ b/drivers/tty/cyclades.c
52259@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
52260 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
52261 info->port.count);
52262 #endif
52263- info->port.count++;
52264+ atomic_inc(&info->port.count);
52265 #ifdef CY_DEBUG_COUNT
52266 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
52267- current->pid, info->port.count);
52268+ current->pid, atomic_read(&info->port.count));
52269 #endif
52270
52271 /*
52272@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
52273 for (j = 0; j < cy_card[i].nports; j++) {
52274 info = &cy_card[i].ports[j];
52275
52276- if (info->port.count) {
52277+ if (atomic_read(&info->port.count)) {
52278 /* XXX is the ldisc num worth this? */
52279 struct tty_struct *tty;
52280 struct tty_ldisc *ld;
52281diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
52282index 4fcec1d..5a036f7 100644
52283--- a/drivers/tty/hvc/hvc_console.c
52284+++ b/drivers/tty/hvc/hvc_console.c
52285@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
52286
52287 spin_lock_irqsave(&hp->port.lock, flags);
52288 /* Check and then increment for fast path open. */
52289- if (hp->port.count++ > 0) {
52290+ if (atomic_inc_return(&hp->port.count) > 1) {
52291 spin_unlock_irqrestore(&hp->port.lock, flags);
52292 hvc_kick();
52293 return 0;
52294@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52295
52296 spin_lock_irqsave(&hp->port.lock, flags);
52297
52298- if (--hp->port.count == 0) {
52299+ if (atomic_dec_return(&hp->port.count) == 0) {
52300 spin_unlock_irqrestore(&hp->port.lock, flags);
52301 /* We are done with the tty pointer now. */
52302 tty_port_tty_set(&hp->port, NULL);
52303@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52304 */
52305 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
52306 } else {
52307- if (hp->port.count < 0)
52308+ if (atomic_read(&hp->port.count) < 0)
52309 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
52310- hp->vtermno, hp->port.count);
52311+ hp->vtermno, atomic_read(&hp->port.count));
52312 spin_unlock_irqrestore(&hp->port.lock, flags);
52313 }
52314 }
52315@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
52316 * open->hangup case this can be called after the final close so prevent
52317 * that from happening for now.
52318 */
52319- if (hp->port.count <= 0) {
52320+ if (atomic_read(&hp->port.count) <= 0) {
52321 spin_unlock_irqrestore(&hp->port.lock, flags);
52322 return;
52323 }
52324
52325- hp->port.count = 0;
52326+ atomic_set(&hp->port.count, 0);
52327 spin_unlock_irqrestore(&hp->port.lock, flags);
52328 tty_port_tty_set(&hp->port, NULL);
52329
52330@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
52331 return -EPIPE;
52332
52333 /* FIXME what's this (unprotected) check for? */
52334- if (hp->port.count <= 0)
52335+ if (atomic_read(&hp->port.count) <= 0)
52336 return -EIO;
52337
52338 spin_lock_irqsave(&hp->lock, flags);
52339diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
52340index 81ff7e1..dfb7b71 100644
52341--- a/drivers/tty/hvc/hvcs.c
52342+++ b/drivers/tty/hvc/hvcs.c
52343@@ -83,6 +83,7 @@
52344 #include <asm/hvcserver.h>
52345 #include <asm/uaccess.h>
52346 #include <asm/vio.h>
52347+#include <asm/local.h>
52348
52349 /*
52350 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
52351@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
52352
52353 spin_lock_irqsave(&hvcsd->lock, flags);
52354
52355- if (hvcsd->port.count > 0) {
52356+ if (atomic_read(&hvcsd->port.count) > 0) {
52357 spin_unlock_irqrestore(&hvcsd->lock, flags);
52358 printk(KERN_INFO "HVCS: vterm state unchanged. "
52359 "The hvcs device node is still in use.\n");
52360@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
52361 }
52362 }
52363
52364- hvcsd->port.count = 0;
52365+ atomic_set(&hvcsd->port.count, 0);
52366 hvcsd->port.tty = tty;
52367 tty->driver_data = hvcsd;
52368
52369@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
52370 unsigned long flags;
52371
52372 spin_lock_irqsave(&hvcsd->lock, flags);
52373- hvcsd->port.count++;
52374+ atomic_inc(&hvcsd->port.count);
52375 hvcsd->todo_mask |= HVCS_SCHED_READ;
52376 spin_unlock_irqrestore(&hvcsd->lock, flags);
52377
52378@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52379 hvcsd = tty->driver_data;
52380
52381 spin_lock_irqsave(&hvcsd->lock, flags);
52382- if (--hvcsd->port.count == 0) {
52383+ if (atomic_dec_and_test(&hvcsd->port.count)) {
52384
52385 vio_disable_interrupts(hvcsd->vdev);
52386
52387@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52388
52389 free_irq(irq, hvcsd);
52390 return;
52391- } else if (hvcsd->port.count < 0) {
52392+ } else if (atomic_read(&hvcsd->port.count) < 0) {
52393 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
52394 " is missmanaged.\n",
52395- hvcsd->vdev->unit_address, hvcsd->port.count);
52396+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
52397 }
52398
52399 spin_unlock_irqrestore(&hvcsd->lock, flags);
52400@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52401
52402 spin_lock_irqsave(&hvcsd->lock, flags);
52403 /* Preserve this so that we know how many kref refs to put */
52404- temp_open_count = hvcsd->port.count;
52405+ temp_open_count = atomic_read(&hvcsd->port.count);
52406
52407 /*
52408 * Don't kref put inside the spinlock because the destruction
52409@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52410 tty->driver_data = NULL;
52411 hvcsd->port.tty = NULL;
52412
52413- hvcsd->port.count = 0;
52414+ atomic_set(&hvcsd->port.count, 0);
52415
52416 /* This will drop any buffered data on the floor which is OK in a hangup
52417 * scenario. */
52418@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
52419 * the middle of a write operation? This is a crummy place to do this
52420 * but we want to keep it all in the spinlock.
52421 */
52422- if (hvcsd->port.count <= 0) {
52423+ if (atomic_read(&hvcsd->port.count) <= 0) {
52424 spin_unlock_irqrestore(&hvcsd->lock, flags);
52425 return -ENODEV;
52426 }
52427@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
52428 {
52429 struct hvcs_struct *hvcsd = tty->driver_data;
52430
52431- if (!hvcsd || hvcsd->port.count <= 0)
52432+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
52433 return 0;
52434
52435 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
52436diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
52437index 4190199..06d5bfa 100644
52438--- a/drivers/tty/hvc/hvsi.c
52439+++ b/drivers/tty/hvc/hvsi.c
52440@@ -85,7 +85,7 @@ struct hvsi_struct {
52441 int n_outbuf;
52442 uint32_t vtermno;
52443 uint32_t virq;
52444- atomic_t seqno; /* HVSI packet sequence number */
52445+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
52446 uint16_t mctrl;
52447 uint8_t state; /* HVSI protocol state */
52448 uint8_t flags;
52449@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
52450
52451 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
52452 packet.hdr.len = sizeof(struct hvsi_query_response);
52453- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52454+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52455 packet.verb = VSV_SEND_VERSION_NUMBER;
52456 packet.u.version = HVSI_VERSION;
52457 packet.query_seqno = query_seqno+1;
52458@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
52459
52460 packet.hdr.type = VS_QUERY_PACKET_HEADER;
52461 packet.hdr.len = sizeof(struct hvsi_query);
52462- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52463+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52464 packet.verb = verb;
52465
52466 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
52467@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
52468 int wrote;
52469
52470 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
52471- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52472+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52473 packet.hdr.len = sizeof(struct hvsi_control);
52474 packet.verb = VSV_SET_MODEM_CTL;
52475 packet.mask = HVSI_TSDTR;
52476@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
52477 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
52478
52479 packet.hdr.type = VS_DATA_PACKET_HEADER;
52480- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52481+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52482 packet.hdr.len = count + sizeof(struct hvsi_header);
52483 memcpy(&packet.data, buf, count);
52484
52485@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
52486 struct hvsi_control packet __ALIGNED__;
52487
52488 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
52489- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52490+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52491 packet.hdr.len = 6;
52492 packet.verb = VSV_CLOSE_PROTOCOL;
52493
52494@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
52495
52496 tty_port_tty_set(&hp->port, tty);
52497 spin_lock_irqsave(&hp->lock, flags);
52498- hp->port.count++;
52499+ atomic_inc(&hp->port.count);
52500 atomic_set(&hp->seqno, 0);
52501 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
52502 spin_unlock_irqrestore(&hp->lock, flags);
52503@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52504
52505 spin_lock_irqsave(&hp->lock, flags);
52506
52507- if (--hp->port.count == 0) {
52508+ if (atomic_dec_return(&hp->port.count) == 0) {
52509 tty_port_tty_set(&hp->port, NULL);
52510 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
52511
52512@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52513
52514 spin_lock_irqsave(&hp->lock, flags);
52515 }
52516- } else if (hp->port.count < 0)
52517+ } else if (atomic_read(&hp->port.count) < 0)
52518 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
52519- hp - hvsi_ports, hp->port.count);
52520+ hp - hvsi_ports, atomic_read(&hp->port.count));
52521
52522 spin_unlock_irqrestore(&hp->lock, flags);
52523 }
52524@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
52525 tty_port_tty_set(&hp->port, NULL);
52526
52527 spin_lock_irqsave(&hp->lock, flags);
52528- hp->port.count = 0;
52529+ atomic_set(&hp->port.count, 0);
52530 hp->n_outbuf = 0;
52531 spin_unlock_irqrestore(&hp->lock, flags);
52532 }
52533diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
52534index a270f04..7c77b5d 100644
52535--- a/drivers/tty/hvc/hvsi_lib.c
52536+++ b/drivers/tty/hvc/hvsi_lib.c
52537@@ -8,7 +8,7 @@
52538
52539 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
52540 {
52541- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
52542+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
52543
52544 /* Assumes that always succeeds, works in practice */
52545 return pv->put_chars(pv->termno, (char *)packet, packet->len);
52546@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
52547
52548 /* Reset state */
52549 pv->established = 0;
52550- atomic_set(&pv->seqno, 0);
52551+ atomic_set_unchecked(&pv->seqno, 0);
52552
52553 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
52554
52555diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
52556index 345cebb..d5a1e9e 100644
52557--- a/drivers/tty/ipwireless/tty.c
52558+++ b/drivers/tty/ipwireless/tty.c
52559@@ -28,6 +28,7 @@
52560 #include <linux/tty_driver.h>
52561 #include <linux/tty_flip.h>
52562 #include <linux/uaccess.h>
52563+#include <asm/local.h>
52564
52565 #include "tty.h"
52566 #include "network.h"
52567@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52568 return -ENODEV;
52569
52570 mutex_lock(&tty->ipw_tty_mutex);
52571- if (tty->port.count == 0)
52572+ if (atomic_read(&tty->port.count) == 0)
52573 tty->tx_bytes_queued = 0;
52574
52575- tty->port.count++;
52576+ atomic_inc(&tty->port.count);
52577
52578 tty->port.tty = linux_tty;
52579 linux_tty->driver_data = tty;
52580@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52581
52582 static void do_ipw_close(struct ipw_tty *tty)
52583 {
52584- tty->port.count--;
52585-
52586- if (tty->port.count == 0) {
52587+ if (atomic_dec_return(&tty->port.count) == 0) {
52588 struct tty_struct *linux_tty = tty->port.tty;
52589
52590 if (linux_tty != NULL) {
52591@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
52592 return;
52593
52594 mutex_lock(&tty->ipw_tty_mutex);
52595- if (tty->port.count == 0) {
52596+ if (atomic_read(&tty->port.count) == 0) {
52597 mutex_unlock(&tty->ipw_tty_mutex);
52598 return;
52599 }
52600@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
52601
52602 mutex_lock(&tty->ipw_tty_mutex);
52603
52604- if (!tty->port.count) {
52605+ if (!atomic_read(&tty->port.count)) {
52606 mutex_unlock(&tty->ipw_tty_mutex);
52607 return;
52608 }
52609@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
52610 return -ENODEV;
52611
52612 mutex_lock(&tty->ipw_tty_mutex);
52613- if (!tty->port.count) {
52614+ if (!atomic_read(&tty->port.count)) {
52615 mutex_unlock(&tty->ipw_tty_mutex);
52616 return -EINVAL;
52617 }
52618@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
52619 if (!tty)
52620 return -ENODEV;
52621
52622- if (!tty->port.count)
52623+ if (!atomic_read(&tty->port.count))
52624 return -EINVAL;
52625
52626 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
52627@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
52628 if (!tty)
52629 return 0;
52630
52631- if (!tty->port.count)
52632+ if (!atomic_read(&tty->port.count))
52633 return 0;
52634
52635 return tty->tx_bytes_queued;
52636@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
52637 if (!tty)
52638 return -ENODEV;
52639
52640- if (!tty->port.count)
52641+ if (!atomic_read(&tty->port.count))
52642 return -EINVAL;
52643
52644 return get_control_lines(tty);
52645@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
52646 if (!tty)
52647 return -ENODEV;
52648
52649- if (!tty->port.count)
52650+ if (!atomic_read(&tty->port.count))
52651 return -EINVAL;
52652
52653 return set_control_lines(tty, set, clear);
52654@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
52655 if (!tty)
52656 return -ENODEV;
52657
52658- if (!tty->port.count)
52659+ if (!atomic_read(&tty->port.count))
52660 return -EINVAL;
52661
52662 /* FIXME: Exactly how is the tty object locked here .. */
52663@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
52664 * are gone */
52665 mutex_lock(&ttyj->ipw_tty_mutex);
52666 }
52667- while (ttyj->port.count)
52668+ while (atomic_read(&ttyj->port.count))
52669 do_ipw_close(ttyj);
52670 ipwireless_disassociate_network_ttys(network,
52671 ttyj->channel_idx);
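
The ipwireless hunks above set the template repeated across the tty drivers that follow: the plain int open count in struct tty_port becomes an atomic_t, every touch point moves to atomic_read()/atomic_inc()/atomic_dec_return(), the counter comes under PaX REFCOUNT-style overflow detection, and the decrement-and-test in do_ipw_close() collapses into a single atomic step. A minimal userspace sketch of the same open/close idiom, using C11 atomics in place of the kernel's atomic_t (struct and function names are illustrative only):

#include <stdatomic.h>
#include <stdio.h>

struct port { atomic_int count; };

static void port_open(struct port *p)
{
	atomic_fetch_add(&p->count, 1);          /* mirrors atomic_inc(&tty->port.count) */
}

static int port_last_close(struct port *p)
{
	/* mirrors "if (atomic_dec_return(&tty->port.count) == 0)":
	 * only the last closer tears the port down */
	return atomic_fetch_sub(&p->count, 1) - 1 == 0;
}

int main(void)
{
	struct port p = { .count = 0 };

	port_open(&p);
	port_open(&p);
	printf("last close? %d\n", port_last_close(&p));  /* 0: one opener left */
	printf("last close? %d\n", port_last_close(&p));  /* 1: tear down now */
	return 0;
}
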
52672diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
52673index 14c54e0..1efd4f2 100644
52674--- a/drivers/tty/moxa.c
52675+++ b/drivers/tty/moxa.c
52676@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
52677 }
52678
52679 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
52680- ch->port.count++;
52681+ atomic_inc(&ch->port.count);
52682 tty->driver_data = ch;
52683 tty_port_tty_set(&ch->port, tty);
52684 mutex_lock(&ch->port.mutex);
52685diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
52686index c434376..114ce13 100644
52687--- a/drivers/tty/n_gsm.c
52688+++ b/drivers/tty/n_gsm.c
52689@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
52690 spin_lock_init(&dlci->lock);
52691 mutex_init(&dlci->mutex);
52692 dlci->fifo = &dlci->_fifo;
52693- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
52694+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
52695 kfree(dlci);
52696 return NULL;
52697 }
52698@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
52699 struct gsm_dlci *dlci = tty->driver_data;
52700 struct tty_port *port = &dlci->port;
52701
52702- port->count++;
52703+ atomic_inc(&port->count);
52704 tty_port_tty_set(port, tty);
52705
52706 dlci->modem_rx = 0;
52707diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
52708index 4ddfa60..1b7e112 100644
52709--- a/drivers/tty/n_tty.c
52710+++ b/drivers/tty/n_tty.c
52711@@ -115,7 +115,7 @@ struct n_tty_data {
52712 int minimum_to_wake;
52713
52714 /* consumer-published */
52715- size_t read_tail;
52716+ size_t read_tail __intentional_overflow(-1);
52717 size_t line_start;
52718
52719 /* protected by output lock */
52720@@ -2503,6 +2503,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
52721 {
52722 *ops = tty_ldisc_N_TTY;
52723 ops->owner = NULL;
52724- ops->refcount = ops->flags = 0;
52725+ atomic_set(&ops->refcount, 0);
52726+ ops->flags = 0;
52727 }
52728 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
52729diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
52730index 6e1f150..c3ba598 100644
52731--- a/drivers/tty/pty.c
52732+++ b/drivers/tty/pty.c
52733@@ -850,8 +850,10 @@ static void __init unix98_pty_init(void)
52734 panic("Couldn't register Unix98 pts driver");
52735
52736 /* Now create the /dev/ptmx special device */
52737+ pax_open_kernel();
52738 tty_default_fops(&ptmx_fops);
52739- ptmx_fops.open = ptmx_open;
52740+ *(void **)&ptmx_fops.open = ptmx_open;
52741+ pax_close_kernel();
52742
52743 cdev_init(&ptmx_cdev, &ptmx_fops);
52744 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
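
The pty.c hunk cannot simply assign ptmx_fops.open because, with grsecurity's constification, file_operations structures live in read-only memory; pax_open_kernel()/pax_close_kernel() open a brief write window around the one sanctioned store, and the cast through void ** sidesteps the const qualifier. A rough userspace analogue of that write-window idea using mprotect(2); the struct and stub below are invented for illustration:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct file_ops { int (*open)(void); };

static int ptmx_open_stub(void) { return 42; }

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	struct file_ops *fops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
	                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (fops == MAP_FAILED)
		return 1;
	memset(fops, 0, sizeof(*fops));
	mprotect(fops, pg, PROT_READ);               /* normally read-only      */

	mprotect(fops, pg, PROT_READ | PROT_WRITE);  /* ~ pax_open_kernel()     */
	fops->open = ptmx_open_stub;                 /* the one sanctioned store */
	mprotect(fops, pg, PROT_READ);               /* ~ pax_close_kernel()    */

	printf("open() -> %d\n", fops->open());
	return 0;
}
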
52745diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
52746index 383c4c7..d408e21 100644
52747--- a/drivers/tty/rocket.c
52748+++ b/drivers/tty/rocket.c
52749@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
52750 tty->driver_data = info;
52751 tty_port_tty_set(port, tty);
52752
52753- if (port->count++ == 0) {
52754+ if (atomic_inc_return(&port->count) == 1) {
52755 atomic_inc(&rp_num_ports_open);
52756
52757 #ifdef ROCKET_DEBUG_OPEN
52758@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
52759 #endif
52760 }
52761 #ifdef ROCKET_DEBUG_OPEN
52762- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
52763+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
52764 #endif
52765
52766 /*
52767@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
52768 spin_unlock_irqrestore(&info->port.lock, flags);
52769 return;
52770 }
52771- if (info->port.count)
52772+ if (atomic_read(&info->port.count))
52773 atomic_dec(&rp_num_ports_open);
52774 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
52775 spin_unlock_irqrestore(&info->port.lock, flags);
52776diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
52777index aa28209..e08fb85 100644
52778--- a/drivers/tty/serial/ioc4_serial.c
52779+++ b/drivers/tty/serial/ioc4_serial.c
52780@@ -437,7 +437,7 @@ struct ioc4_soft {
52781 } is_intr_info[MAX_IOC4_INTR_ENTS];
52782
52783 /* Number of entries active in the above array */
52784- atomic_t is_num_intrs;
52785+ atomic_unchecked_t is_num_intrs;
52786 } is_intr_type[IOC4_NUM_INTR_TYPES];
52787
52788 /* is_ir_lock must be held while
52789@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
52790 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
52791 || (type == IOC4_OTHER_INTR_TYPE)));
52792
52793- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
52794+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
52795 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
52796
52797 /* Save off the lower level interrupt handler */
52798@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
52799
52800 soft = arg;
52801 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
52802- num_intrs = (int)atomic_read(
52803+ num_intrs = (int)atomic_read_unchecked(
52804 &soft->is_intr_type[intr_type].is_num_intrs);
52805
52806 this_mir = this_ir = pending_intrs(soft, intr_type);
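
is_num_intrs is a registration counter used only for indexing and statistics, so the ioc4 hunks move it to atomic_unchecked_t: under PaX REFCOUNT, ordinary atomic_t arithmetic is instrumented to catch overflow, and the _unchecked variants opt a counter out where wrap-around is judged harmless. The distinction, sketched with C11 atomics (checked_inc() is a hypothetical stand-in, not the PaX implementation):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* hypothetical REFCOUNT-style increment: refuse to wrap */
static int checked_inc(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == INT_MAX)
			return -1;                   /* saturate, do not wrap */
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));
	return 0;
}

int main(void)
{
	atomic_int refs = INT_MAX;               /* a checked refcount   */
	atomic_int stat = INT_MAX;               /* an unchecked counter */

	printf("checked: %d\n", checked_inc(&refs));    /* -1: blocked    */
	atomic_fetch_add(&stat, 1);                     /* wraps silently */
	printf("unchecked: %d\n", atomic_load(&stat));  /* INT_MIN        */
	return 0;
}
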
52807diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
52808index 129dc5b..1da5bb8 100644
52809--- a/drivers/tty/serial/kgdb_nmi.c
52810+++ b/drivers/tty/serial/kgdb_nmi.c
52811@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
52812 * I/O utilities that messages sent to the console will automatically
52813 * be displayed on the dbg_io.
52814 */
52815- dbg_io_ops->is_console = true;
52816+ pax_open_kernel();
52817+ *(int *)&dbg_io_ops->is_console = true;
52818+ pax_close_kernel();
52819
52820 return 0;
52821 }
52822diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
52823index a260cde..6b2b5ce 100644
52824--- a/drivers/tty/serial/kgdboc.c
52825+++ b/drivers/tty/serial/kgdboc.c
52826@@ -24,8 +24,9 @@
52827 #define MAX_CONFIG_LEN 40
52828
52829 static struct kgdb_io kgdboc_io_ops;
52830+static struct kgdb_io kgdboc_io_ops_console;
52831
52832-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
52833+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
52834 static int configured = -1;
52835
52836 static char config[MAX_CONFIG_LEN];
52837@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
52838 kgdboc_unregister_kbd();
52839 if (configured == 1)
52840 kgdb_unregister_io_module(&kgdboc_io_ops);
52841+ else if (configured == 2)
52842+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
52843 }
52844
52845 static int configure_kgdboc(void)
52846@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
52847 int err;
52848 char *cptr = config;
52849 struct console *cons;
52850+ int is_console = 0;
52851
52852 err = kgdboc_option_setup(config);
52853 if (err || !strlen(config) || isspace(config[0]))
52854 goto noconfig;
52855
52856 err = -ENODEV;
52857- kgdboc_io_ops.is_console = 0;
52858 kgdb_tty_driver = NULL;
52859
52860 kgdboc_use_kms = 0;
52861@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
52862 int idx;
52863 if (cons->device && cons->device(cons, &idx) == p &&
52864 idx == tty_line) {
52865- kgdboc_io_ops.is_console = 1;
52866+ is_console = 1;
52867 break;
52868 }
52869 cons = cons->next;
52870@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
52871 kgdb_tty_line = tty_line;
52872
52873 do_register:
52874- err = kgdb_register_io_module(&kgdboc_io_ops);
52875+ if (is_console) {
52876+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
52877+ configured = 2;
52878+ } else {
52879+ err = kgdb_register_io_module(&kgdboc_io_ops);
52880+ configured = 1;
52881+ }
52882 if (err)
52883 goto noconfig;
52884
52885@@ -205,8 +214,6 @@ do_register:
52886 if (err)
52887 goto nmi_con_failed;
52888
52889- configured = 1;
52890-
52891 return 0;
52892
52893 nmi_con_failed:
52894@@ -223,7 +230,7 @@ noconfig:
52895 static int __init init_kgdboc(void)
52896 {
52897 /* Already configured? */
52898- if (configured == 1)
52899+ if (configured >= 1)
52900 return 0;
52901
52902 return configure_kgdboc();
52903@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
52904 if (config[len - 1] == '\n')
52905 config[len - 1] = '\0';
52906
52907- if (configured == 1)
52908+ if (configured >= 1)
52909 cleanup_kgdboc();
52910
52911 /* Go and configure with the new params. */
52912@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
52913 .post_exception = kgdboc_post_exp_handler,
52914 };
52915
52916+static struct kgdb_io kgdboc_io_ops_console = {
52917+ .name = "kgdboc",
52918+ .read_char = kgdboc_get_char,
52919+ .write_char = kgdboc_put_char,
52920+ .pre_exception = kgdboc_pre_exp_handler,
52921+ .post_exception = kgdboc_post_exp_handler,
52922+ .is_console = 1
52923+};
52924+
52925 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
52926 /* This is only available if kgdboc is a built in for early debugging */
52927 static int __init kgdboc_early_init(char *opt)
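
Rather than flipping is_console inside a single (now read-only) kgdb_io structure at configure time, the kgdboc hunks keep two fully initialized static variants and register whichever applies, recording the choice in configured (1 vs 2) so cleanup_kgdboc() unregisters the matching one. The shape of that pattern, reduced to standalone C with illustrative names:

#include <stdio.h>

struct io_ops { const char *name; int is_console; };

static const struct io_ops ops_plain   = { "kgdboc", 0 };
static const struct io_ops ops_console = { "kgdboc", 1 };

/* pick a pre-built variant instead of patching one shared struct */
static const struct io_ops *configure(int attached_to_console)
{
	return attached_to_console ? &ops_console : &ops_plain;
}

int main(void)
{
	const struct io_ops *ops = configure(1);

	printf("%s: is_console=%d\n", ops->name, ops->is_console);
	return 0;
}
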
52928diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
52929index c88b522..e763029 100644
52930--- a/drivers/tty/serial/msm_serial.c
52931+++ b/drivers/tty/serial/msm_serial.c
52932@@ -1028,7 +1028,7 @@ static struct uart_driver msm_uart_driver = {
52933 .cons = MSM_CONSOLE,
52934 };
52935
52936-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
52937+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
52938
52939 static const struct of_device_id msm_uartdm_table[] = {
52940 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
52941@@ -1052,7 +1052,7 @@ static int msm_serial_probe(struct platform_device *pdev)
52942 line = pdev->id;
52943
52944 if (line < 0)
52945- line = atomic_inc_return(&msm_uart_next_id) - 1;
52946+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
52947
52948 if (unlikely(line < 0 || line >= UART_NR))
52949 return -ENXIO;
52950diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
52951index 107e807..d4a02fa 100644
52952--- a/drivers/tty/serial/samsung.c
52953+++ b/drivers/tty/serial/samsung.c
52954@@ -480,11 +480,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
52955 }
52956 }
52957
52958+static int s3c64xx_serial_startup(struct uart_port *port);
52959 static int s3c24xx_serial_startup(struct uart_port *port)
52960 {
52961 struct s3c24xx_uart_port *ourport = to_ourport(port);
52962 int ret;
52963
52964+ /* Startup sequence is different for s3c64xx and higher SoC's */
52965+ if (s3c24xx_serial_has_interrupt_mask(port))
52966+ return s3c64xx_serial_startup(port);
52967+
52968 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
52969 port, (unsigned long long)port->mapbase, port->membase);
52970
52971@@ -1169,10 +1174,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
52972 /* setup info for port */
52973 port->dev = &platdev->dev;
52974
52975- /* Startup sequence is different for s3c64xx and higher SoC's */
52976- if (s3c24xx_serial_has_interrupt_mask(port))
52977- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
52978-
52979 port->uartclk = 1;
52980
52981 if (cfg->uart_flags & UPF_CONS_FLOW) {
52982diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
52983index 984605b..e538330 100644
52984--- a/drivers/tty/serial/serial_core.c
52985+++ b/drivers/tty/serial/serial_core.c
52986@@ -1396,7 +1396,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
52987 state = drv->state + tty->index;
52988 port = &state->port;
52989 spin_lock_irq(&port->lock);
52990- --port->count;
52991+ atomic_dec(&port->count);
52992 spin_unlock_irq(&port->lock);
52993 return;
52994 }
52995@@ -1406,7 +1406,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
52996
52997 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
52998
52999- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
53000+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
53001 return;
53002
53003 /*
53004@@ -1530,7 +1530,7 @@ static void uart_hangup(struct tty_struct *tty)
53005 uart_flush_buffer(tty);
53006 uart_shutdown(tty, state);
53007 spin_lock_irqsave(&port->lock, flags);
53008- port->count = 0;
53009+ atomic_set(&port->count, 0);
53010 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
53011 spin_unlock_irqrestore(&port->lock, flags);
53012 tty_port_tty_set(port, NULL);
53013@@ -1617,7 +1617,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
53014 pr_debug("uart_open(%d) called\n", line);
53015
53016 spin_lock_irq(&port->lock);
53017- ++port->count;
53018+ atomic_inc(&port->count);
53019 spin_unlock_irq(&port->lock);
53020
53021 /*
53022diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
53023index b799170..87dafd5 100644
53024--- a/drivers/tty/synclink.c
53025+++ b/drivers/tty/synclink.c
53026@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53027
53028 if (debug_level >= DEBUG_LEVEL_INFO)
53029 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
53030- __FILE__,__LINE__, info->device_name, info->port.count);
53031+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53032
53033 if (tty_port_close_start(&info->port, tty, filp) == 0)
53034 goto cleanup;
53035@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53036 cleanup:
53037 if (debug_level >= DEBUG_LEVEL_INFO)
53038 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
53039- tty->driver->name, info->port.count);
53040+ tty->driver->name, atomic_read(&info->port.count));
53041
53042 } /* end of mgsl_close() */
53043
53044@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
53045
53046 mgsl_flush_buffer(tty);
53047 shutdown(info);
53048-
53049- info->port.count = 0;
53050+
53051+ atomic_set(&info->port.count, 0);
53052 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53053 info->port.tty = NULL;
53054
53055@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53056
53057 if (debug_level >= DEBUG_LEVEL_INFO)
53058 printk("%s(%d):block_til_ready before block on %s count=%d\n",
53059- __FILE__,__LINE__, tty->driver->name, port->count );
53060+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53061
53062 spin_lock_irqsave(&info->irq_spinlock, flags);
53063- port->count--;
53064+ atomic_dec(&port->count);
53065 spin_unlock_irqrestore(&info->irq_spinlock, flags);
53066 port->blocked_open++;
53067
53068@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53069
53070 if (debug_level >= DEBUG_LEVEL_INFO)
53071 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
53072- __FILE__,__LINE__, tty->driver->name, port->count );
53073+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53074
53075 tty_unlock(tty);
53076 schedule();
53077@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53078
53079 /* FIXME: Racy on hangup during close wait */
53080 if (!tty_hung_up_p(filp))
53081- port->count++;
53082+ atomic_inc(&port->count);
53083 port->blocked_open--;
53084
53085 if (debug_level >= DEBUG_LEVEL_INFO)
53086 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
53087- __FILE__,__LINE__, tty->driver->name, port->count );
53088+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53089
53090 if (!retval)
53091 port->flags |= ASYNC_NORMAL_ACTIVE;
53092@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53093
53094 if (debug_level >= DEBUG_LEVEL_INFO)
53095 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
53096- __FILE__,__LINE__,tty->driver->name, info->port.count);
53097+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53098
53099 /* If port is closing, signal caller to try again */
53100 if (info->port.flags & ASYNC_CLOSING){
53101@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53102 spin_unlock_irqrestore(&info->netlock, flags);
53103 goto cleanup;
53104 }
53105- info->port.count++;
53106+ atomic_inc(&info->port.count);
53107 spin_unlock_irqrestore(&info->netlock, flags);
53108
53109- if (info->port.count == 1) {
53110+ if (atomic_read(&info->port.count) == 1) {
53111 /* 1st open on this device, init hardware */
53112 retval = startup(info);
53113 if (retval < 0)
53114@@ -3442,8 +3442,8 @@ cleanup:
53115 if (retval) {
53116 if (tty->count == 1)
53117 info->port.tty = NULL; /* tty layer will release tty struct */
53118- if(info->port.count)
53119- info->port.count--;
53120+ if (atomic_read(&info->port.count))
53121+ atomic_dec(&info->port.count);
53122 }
53123
53124 return retval;
53125@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53126 unsigned short new_crctype;
53127
53128 /* return error if TTY interface open */
53129- if (info->port.count)
53130+ if (atomic_read(&info->port.count))
53131 return -EBUSY;
53132
53133 switch (encoding)
53134@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
53135
53136 /* arbitrate between network and tty opens */
53137 spin_lock_irqsave(&info->netlock, flags);
53138- if (info->port.count != 0 || info->netcount != 0) {
53139+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53140 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53141 spin_unlock_irqrestore(&info->netlock, flags);
53142 return -EBUSY;
53143@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53144 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53145
53146 /* return error if TTY interface open */
53147- if (info->port.count)
53148+ if (atomic_read(&info->port.count))
53149 return -EBUSY;
53150
53151 if (cmd != SIOCWANDEV)
53152diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
53153index 0e8c39b..e0cb171 100644
53154--- a/drivers/tty/synclink_gt.c
53155+++ b/drivers/tty/synclink_gt.c
53156@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53157 tty->driver_data = info;
53158 info->port.tty = tty;
53159
53160- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
53161+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
53162
53163 /* If port is closing, signal caller to try again */
53164 if (info->port.flags & ASYNC_CLOSING){
53165@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53166 mutex_unlock(&info->port.mutex);
53167 goto cleanup;
53168 }
53169- info->port.count++;
53170+ atomic_inc(&info->port.count);
53171 spin_unlock_irqrestore(&info->netlock, flags);
53172
53173- if (info->port.count == 1) {
53174+ if (atomic_read(&info->port.count) == 1) {
53175 /* 1st open on this device, init hardware */
53176 retval = startup(info);
53177 if (retval < 0) {
53178@@ -715,8 +715,8 @@ cleanup:
53179 if (retval) {
53180 if (tty->count == 1)
53181 info->port.tty = NULL; /* tty layer will release tty struct */
53182- if(info->port.count)
53183- info->port.count--;
53184+ if(atomic_read(&info->port.count))
53185+ atomic_dec(&info->port.count);
53186 }
53187
53188 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
53189@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53190
53191 if (sanity_check(info, tty->name, "close"))
53192 return;
53193- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
53194+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
53195
53196 if (tty_port_close_start(&info->port, tty, filp) == 0)
53197 goto cleanup;
53198@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53199 tty_port_close_end(&info->port, tty);
53200 info->port.tty = NULL;
53201 cleanup:
53202- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
53203+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
53204 }
53205
53206 static void hangup(struct tty_struct *tty)
53207@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
53208 shutdown(info);
53209
53210 spin_lock_irqsave(&info->port.lock, flags);
53211- info->port.count = 0;
53212+ atomic_set(&info->port.count, 0);
53213 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53214 info->port.tty = NULL;
53215 spin_unlock_irqrestore(&info->port.lock, flags);
53216@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53217 unsigned short new_crctype;
53218
53219 /* return error if TTY interface open */
53220- if (info->port.count)
53221+ if (atomic_read(&info->port.count))
53222 return -EBUSY;
53223
53224 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
53225@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
53226
53227 /* arbitrate between network and tty opens */
53228 spin_lock_irqsave(&info->netlock, flags);
53229- if (info->port.count != 0 || info->netcount != 0) {
53230+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53231 DBGINFO(("%s hdlc_open busy\n", dev->name));
53232 spin_unlock_irqrestore(&info->netlock, flags);
53233 return -EBUSY;
53234@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53235 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
53236
53237 /* return error if TTY interface open */
53238- if (info->port.count)
53239+ if (atomic_read(&info->port.count))
53240 return -EBUSY;
53241
53242 if (cmd != SIOCWANDEV)
53243@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
53244 if (port == NULL)
53245 continue;
53246 spin_lock(&port->lock);
53247- if ((port->port.count || port->netcount) &&
53248+ if ((atomic_read(&port->port.count) || port->netcount) &&
53249 port->pending_bh && !port->bh_running &&
53250 !port->bh_requested) {
53251 DBGISR(("%s bh queued\n", port->device_name));
53252@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53253 add_wait_queue(&port->open_wait, &wait);
53254
53255 spin_lock_irqsave(&info->lock, flags);
53256- port->count--;
53257+ atomic_dec(&port->count);
53258 spin_unlock_irqrestore(&info->lock, flags);
53259 port->blocked_open++;
53260
53261@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53262 remove_wait_queue(&port->open_wait, &wait);
53263
53264 if (!tty_hung_up_p(filp))
53265- port->count++;
53266+ atomic_inc(&port->count);
53267 port->blocked_open--;
53268
53269 if (!retval)
53270diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
53271index c3f9091..abe4601 100644
53272--- a/drivers/tty/synclinkmp.c
53273+++ b/drivers/tty/synclinkmp.c
53274@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53275
53276 if (debug_level >= DEBUG_LEVEL_INFO)
53277 printk("%s(%d):%s open(), old ref count = %d\n",
53278- __FILE__,__LINE__,tty->driver->name, info->port.count);
53279+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53280
53281 /* If port is closing, signal caller to try again */
53282 if (info->port.flags & ASYNC_CLOSING){
53283@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53284 spin_unlock_irqrestore(&info->netlock, flags);
53285 goto cleanup;
53286 }
53287- info->port.count++;
53288+ atomic_inc(&info->port.count);
53289 spin_unlock_irqrestore(&info->netlock, flags);
53290
53291- if (info->port.count == 1) {
53292+ if (atomic_read(&info->port.count) == 1) {
53293 /* 1st open on this device, init hardware */
53294 retval = startup(info);
53295 if (retval < 0)
53296@@ -796,8 +796,8 @@ cleanup:
53297 if (retval) {
53298 if (tty->count == 1)
53299 info->port.tty = NULL; /* tty layer will release tty struct */
53300- if(info->port.count)
53301- info->port.count--;
53302+ if(atomic_read(&info->port.count))
53303+ atomic_dec(&info->port.count);
53304 }
53305
53306 return retval;
53307@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53308
53309 if (debug_level >= DEBUG_LEVEL_INFO)
53310 printk("%s(%d):%s close() entry, count=%d\n",
53311- __FILE__,__LINE__, info->device_name, info->port.count);
53312+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53313
53314 if (tty_port_close_start(&info->port, tty, filp) == 0)
53315 goto cleanup;
53316@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53317 cleanup:
53318 if (debug_level >= DEBUG_LEVEL_INFO)
53319 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
53320- tty->driver->name, info->port.count);
53321+ tty->driver->name, atomic_read(&info->port.count));
53322 }
53323
53324 /* Called by tty_hangup() when a hangup is signaled.
53325@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
53326 shutdown(info);
53327
53328 spin_lock_irqsave(&info->port.lock, flags);
53329- info->port.count = 0;
53330+ atomic_set(&info->port.count, 0);
53331 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53332 info->port.tty = NULL;
53333 spin_unlock_irqrestore(&info->port.lock, flags);
53334@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53335 unsigned short new_crctype;
53336
53337 /* return error if TTY interface open */
53338- if (info->port.count)
53339+ if (atomic_read(&info->port.count))
53340 return -EBUSY;
53341
53342 switch (encoding)
53343@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
53344
53345 /* arbitrate between network and tty opens */
53346 spin_lock_irqsave(&info->netlock, flags);
53347- if (info->port.count != 0 || info->netcount != 0) {
53348+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53349 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53350 spin_unlock_irqrestore(&info->netlock, flags);
53351 return -EBUSY;
53352@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53353 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53354
53355 /* return error if TTY interface open */
53356- if (info->port.count)
53357+ if (atomic_read(&info->port.count))
53358 return -EBUSY;
53359
53360 if (cmd != SIOCWANDEV)
53361@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
53362 * do not request bottom half processing if the
53363 * device is not open in a normal mode.
53364 */
53365- if ( port && (port->port.count || port->netcount) &&
53366+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
53367 port->pending_bh && !port->bh_running &&
53368 !port->bh_requested ) {
53369 if ( debug_level >= DEBUG_LEVEL_ISR )
53370@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53371
53372 if (debug_level >= DEBUG_LEVEL_INFO)
53373 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
53374- __FILE__,__LINE__, tty->driver->name, port->count );
53375+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53376
53377 spin_lock_irqsave(&info->lock, flags);
53378- port->count--;
53379+ atomic_dec(&port->count);
53380 spin_unlock_irqrestore(&info->lock, flags);
53381 port->blocked_open++;
53382
53383@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53384
53385 if (debug_level >= DEBUG_LEVEL_INFO)
53386 printk("%s(%d):%s block_til_ready() count=%d\n",
53387- __FILE__,__LINE__, tty->driver->name, port->count );
53388+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53389
53390 tty_unlock(tty);
53391 schedule();
53392@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53393 set_current_state(TASK_RUNNING);
53394 remove_wait_queue(&port->open_wait, &wait);
53395 if (!tty_hung_up_p(filp))
53396- port->count++;
53397+ atomic_inc(&port->count);
53398 port->blocked_open--;
53399
53400 if (debug_level >= DEBUG_LEVEL_INFO)
53401 printk("%s(%d):%s block_til_ready() after, count=%d\n",
53402- __FILE__,__LINE__, tty->driver->name, port->count );
53403+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53404
53405 if (!retval)
53406 port->flags |= ASYNC_NORMAL_ACTIVE;
53407diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
53408index 42bad18..447d7a2 100644
53409--- a/drivers/tty/sysrq.c
53410+++ b/drivers/tty/sysrq.c
53411@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
53412 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
53413 size_t count, loff_t *ppos)
53414 {
53415- if (count) {
53416+ if (count && capable(CAP_SYS_ADMIN)) {
53417 char c;
53418
53419 if (get_user(c, buf))
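
The sysrq hunk makes writes to /proc/sysrq-trigger require CAP_SYS_ADMIN: an unprivileged write now falls through without invoking a handler, while still returning the byte count. Reduced to a standalone sketch (has_admin() stands in for capable(CAP_SYS_ADMIN)):

#include <stdio.h>

static int has_admin(void) { return 0; }   /* pretend: unprivileged caller */

static long write_trigger(char c, long count)
{
	if (count && has_admin())
		printf("handle_sysrq('%c')\n", c);
	return count;                      /* write "succeeds" either way */
}

int main(void)
{
	write_trigger('h', 1);             /* silently ignored here */
	return 0;
}
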
53420diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
53421index 51f066a..a7f6e86 100644
53422--- a/drivers/tty/tty_io.c
53423+++ b/drivers/tty/tty_io.c
53424@@ -1028,8 +1028,8 @@ EXPORT_SYMBOL(start_tty);
53425 /* We limit tty time update visibility to every 8 seconds or so. */
53426 static void tty_update_time(struct timespec *time)
53427 {
53428- unsigned long sec = get_seconds() & ~7;
53429- if ((long)(sec - time->tv_sec) > 0)
53430+ unsigned long sec = get_seconds();
53431+ if (abs(sec - time->tv_sec) & ~7)
53432 time->tv_sec = sec;
53433 }
53434
53435@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
53436
53437 void tty_default_fops(struct file_operations *fops)
53438 {
53439- *fops = tty_fops;
53440+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
53441 }
53442
53443 /*
53444diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
53445index 1787fa4..552076b 100644
53446--- a/drivers/tty/tty_ioctl.c
53447+++ b/drivers/tty/tty_ioctl.c
53448@@ -217,11 +217,17 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout)
53449 #endif
53450 if (!timeout)
53451 timeout = MAX_SCHEDULE_TIMEOUT;
53452+
53453 if (wait_event_interruptible_timeout(tty->write_wait,
53454- !tty_chars_in_buffer(tty), timeout) >= 0) {
53455- if (tty->ops->wait_until_sent)
53456- tty->ops->wait_until_sent(tty, timeout);
53457+ !tty_chars_in_buffer(tty), timeout) < 0) {
53458+ return;
53459 }
53460+
53461+ if (timeout == MAX_SCHEDULE_TIMEOUT)
53462+ timeout = 0;
53463+
53464+ if (tty->ops->wait_until_sent)
53465+ tty->ops->wait_until_sent(tty, timeout);
53466 }
53467 EXPORT_SYMBOL(tty_wait_until_sent);
53468
53469diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
53470index 3737f55..7cef448 100644
53471--- a/drivers/tty/tty_ldisc.c
53472+++ b/drivers/tty/tty_ldisc.c
53473@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
53474 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53475 tty_ldiscs[disc] = new_ldisc;
53476 new_ldisc->num = disc;
53477- new_ldisc->refcount = 0;
53478+ atomic_set(&new_ldisc->refcount, 0);
53479 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53480
53481 return ret;
53482@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
53483 return -EINVAL;
53484
53485 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53486- if (tty_ldiscs[disc]->refcount)
53487+ if (atomic_read(&tty_ldiscs[disc]->refcount))
53488 ret = -EBUSY;
53489 else
53490 tty_ldiscs[disc] = NULL;
53491@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
53492 if (ldops) {
53493 ret = ERR_PTR(-EAGAIN);
53494 if (try_module_get(ldops->owner)) {
53495- ldops->refcount++;
53496+ atomic_inc(&ldops->refcount);
53497 ret = ldops;
53498 }
53499 }
53500@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
53501 unsigned long flags;
53502
53503 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53504- ldops->refcount--;
53505+ atomic_dec(&ldops->refcount);
53506 module_put(ldops->owner);
53507 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53508 }
53509diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
53510index 40b31835..94d92ae 100644
53511--- a/drivers/tty/tty_port.c
53512+++ b/drivers/tty/tty_port.c
53513@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
53514 unsigned long flags;
53515
53516 spin_lock_irqsave(&port->lock, flags);
53517- port->count = 0;
53518+ atomic_set(&port->count, 0);
53519 port->flags &= ~ASYNC_NORMAL_ACTIVE;
53520 tty = port->tty;
53521 if (tty)
53522@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53523
53524 /* The port lock protects the port counts */
53525 spin_lock_irqsave(&port->lock, flags);
53526- port->count--;
53527+ atomic_dec(&port->count);
53528 port->blocked_open++;
53529 spin_unlock_irqrestore(&port->lock, flags);
53530
53531@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53532 we must not mess that up further */
53533 spin_lock_irqsave(&port->lock, flags);
53534 if (!tty_hung_up_p(filp))
53535- port->count++;
53536+ atomic_inc(&port->count);
53537 port->blocked_open--;
53538 if (retval == 0)
53539 port->flags |= ASYNC_NORMAL_ACTIVE;
53540@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
53541 return 0;
53542
53543 spin_lock_irqsave(&port->lock, flags);
53544- if (tty->count == 1 && port->count != 1) {
53545+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
53546 printk(KERN_WARNING
53547 "tty_port_close_start: tty->count = 1 port count = %d.\n",
53548- port->count);
53549- port->count = 1;
53550+ atomic_read(&port->count));
53551+ atomic_set(&port->count, 1);
53552 }
53553- if (--port->count < 0) {
53554+ if (atomic_dec_return(&port->count) < 0) {
53555 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
53556- port->count);
53557- port->count = 0;
53558+ atomic_read(&port->count));
53559+ atomic_set(&port->count, 0);
53560 }
53561
53562- if (port->count) {
53563+ if (atomic_read(&port->count)) {
53564 spin_unlock_irqrestore(&port->lock, flags);
53565 return 0;
53566 }
53567@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
53568 struct file *filp)
53569 {
53570 spin_lock_irq(&port->lock);
53571- ++port->count;
53572+ atomic_inc(&port->count);
53573 spin_unlock_irq(&port->lock);
53574 tty_port_tty_set(port, tty);
53575
53576diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
53577index 8a89f6e..50b32af 100644
53578--- a/drivers/tty/vt/keyboard.c
53579+++ b/drivers/tty/vt/keyboard.c
53580@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
53581 kbd->kbdmode == VC_OFF) &&
53582 value != KVAL(K_SAK))
53583 return; /* SAK is allowed even in raw mode */
53584+
53585+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53586+ {
53587+ void *func = fn_handler[value];
53588+ if (func == fn_show_state || func == fn_show_ptregs ||
53589+ func == fn_show_mem)
53590+ return;
53591+ }
53592+#endif
53593+
53594 fn_handler[value](vc);
53595 }
53596
53597@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53598 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
53599 return -EFAULT;
53600
53601- if (!capable(CAP_SYS_TTY_CONFIG))
53602- perm = 0;
53603-
53604 switch (cmd) {
53605 case KDGKBENT:
53606 /* Ensure another thread doesn't free it under us */
53607@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53608 spin_unlock_irqrestore(&kbd_event_lock, flags);
53609 return put_user(val, &user_kbe->kb_value);
53610 case KDSKBENT:
53611+ if (!capable(CAP_SYS_TTY_CONFIG))
53612+ perm = 0;
53613+
53614 if (!perm)
53615 return -EPERM;
53616 if (!i && v == K_NOSUCHMAP) {
53617@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53618 int i, j, k;
53619 int ret;
53620
53621- if (!capable(CAP_SYS_TTY_CONFIG))
53622- perm = 0;
53623-
53624 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
53625 if (!kbs) {
53626 ret = -ENOMEM;
53627@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53628 kfree(kbs);
53629 return ((p && *p) ? -EOVERFLOW : 0);
53630 case KDSKBSENT:
53631+ if (!capable(CAP_SYS_TTY_CONFIG))
53632+ perm = 0;
53633+
53634 if (!perm) {
53635 ret = -EPERM;
53636 goto reterr;
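
In the keyboard.c hunks the CAP_SYS_TTY_CONFIG test moves out of the common path and into the KDSKBENT/KDSKBSENT cases, so reading a keymap entry no longer demands the capability while setting one still does. The reordering, as a standalone sketch (has_cap() stands in for capable(CAP_SYS_TTY_CONFIG)):

#include <stdio.h>

enum cmd { KDGKBENT, KDSKBENT };

static int has_cap(void) { return 0; }     /* pretend: unprivileged caller */

static int do_kdsk_ioctl(enum cmd cmd, int perm)
{
	switch (cmd) {
	case KDGKBENT:
		return 0;                  /* read: no capability needed   */
	case KDSKBENT:
		if (!has_cap())
			perm = 0;          /* write: capability gates perm */
		return perm ? 0 : -1;      /* -1 ~ -EPERM                  */
	}
	return -1;
}

int main(void)
{
	printf("get=%d set=%d\n", do_kdsk_ioctl(KDGKBENT, 1),
	       do_kdsk_ioctl(KDSKBENT, 1));   /* get=0 set=-1 */
	return 0;
}
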
53637diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
53638index 6276f13..84f2449 100644
53639--- a/drivers/uio/uio.c
53640+++ b/drivers/uio/uio.c
53641@@ -25,6 +25,7 @@
53642 #include <linux/kobject.h>
53643 #include <linux/cdev.h>
53644 #include <linux/uio_driver.h>
53645+#include <asm/local.h>
53646
53647 #define UIO_MAX_DEVICES (1U << MINORBITS)
53648
53649@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
53650 struct device_attribute *attr, char *buf)
53651 {
53652 struct uio_device *idev = dev_get_drvdata(dev);
53653- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
53654+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
53655 }
53656 static DEVICE_ATTR_RO(event);
53657
53658@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
53659 {
53660 struct uio_device *idev = info->uio_dev;
53661
53662- atomic_inc(&idev->event);
53663+ atomic_inc_unchecked(&idev->event);
53664 wake_up_interruptible(&idev->wait);
53665 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
53666 }
53667@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
53668 }
53669
53670 listener->dev = idev;
53671- listener->event_count = atomic_read(&idev->event);
53672+ listener->event_count = atomic_read_unchecked(&idev->event);
53673 filep->private_data = listener;
53674
53675 if (idev->info->open) {
53676@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
53677 return -EIO;
53678
53679 poll_wait(filep, &idev->wait, wait);
53680- if (listener->event_count != atomic_read(&idev->event))
53681+ if (listener->event_count != atomic_read_unchecked(&idev->event))
53682 return POLLIN | POLLRDNORM;
53683 return 0;
53684 }
53685@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
53686 do {
53687 set_current_state(TASK_INTERRUPTIBLE);
53688
53689- event_count = atomic_read(&idev->event);
53690+ event_count = atomic_read_unchecked(&idev->event);
53691 if (event_count != listener->event_count) {
53692 if (copy_to_user(buf, &event_count, count))
53693 retval = -EFAULT;
53694@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
53695 static int uio_find_mem_index(struct vm_area_struct *vma)
53696 {
53697 struct uio_device *idev = vma->vm_private_data;
53698+ unsigned long size;
53699
53700 if (vma->vm_pgoff < MAX_UIO_MAPS) {
53701- if (idev->info->mem[vma->vm_pgoff].size == 0)
53702+ size = idev->info->mem[vma->vm_pgoff].size;
53703+ if (size == 0)
53704+ return -1;
53705+ if (vma->vm_end - vma->vm_start > size)
53706 return -1;
53707 return (int)vma->vm_pgoff;
53708 }
53709@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
53710 idev->owner = owner;
53711 idev->info = info;
53712 init_waitqueue_head(&idev->wait);
53713- atomic_set(&idev->event, 0);
53714+ atomic_set_unchecked(&idev->event, 0);
53715
53716 ret = uio_get_minor(idev);
53717 if (ret)
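
Besides the event-counter conversion, the uio hunks add a real bounds check in uio_find_mem_index(): a mapping request larger than the backing memory region is now rejected instead of being accepted on the strength of vm_pgoff alone. The added check, isolated as a standalone sketch:

#include <stdio.h>

struct mem_region { unsigned long size; };

static int find_mem_index(unsigned long vm_start, unsigned long vm_end,
                          const struct mem_region *mem)
{
	unsigned long size = mem->size;

	if (size == 0)
		return -1;
	if (vm_end - vm_start > size)      /* the check the patch adds */
		return -1;
	return 0;
}

int main(void)
{
	struct mem_region r = { .size = 4096 };

	printf("fits: %d\n", find_mem_index(0, 4096, &r));     /*  0 */
	printf("too big: %d\n", find_mem_index(0, 8192, &r));  /* -1 */
	return 0;
}
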
53718diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
53719index 813d4d3..a71934f 100644
53720--- a/drivers/usb/atm/cxacru.c
53721+++ b/drivers/usb/atm/cxacru.c
53722@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
53723 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
53724 if (ret < 2)
53725 return -EINVAL;
53726- if (index < 0 || index > 0x7f)
53727+ if (index > 0x7f)
53728 return -EINVAL;
53729 pos += tmp;
53730
53731diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
53732index dada014..1d0d517 100644
53733--- a/drivers/usb/atm/usbatm.c
53734+++ b/drivers/usb/atm/usbatm.c
53735@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53736 if (printk_ratelimit())
53737 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
53738 __func__, vpi, vci);
53739- atomic_inc(&vcc->stats->rx_err);
53740+ atomic_inc_unchecked(&vcc->stats->rx_err);
53741 return;
53742 }
53743
53744@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53745 if (length > ATM_MAX_AAL5_PDU) {
53746 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
53747 __func__, length, vcc);
53748- atomic_inc(&vcc->stats->rx_err);
53749+ atomic_inc_unchecked(&vcc->stats->rx_err);
53750 goto out;
53751 }
53752
53753@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53754 if (sarb->len < pdu_length) {
53755 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
53756 __func__, pdu_length, sarb->len, vcc);
53757- atomic_inc(&vcc->stats->rx_err);
53758+ atomic_inc_unchecked(&vcc->stats->rx_err);
53759 goto out;
53760 }
53761
53762 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
53763 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
53764 __func__, vcc);
53765- atomic_inc(&vcc->stats->rx_err);
53766+ atomic_inc_unchecked(&vcc->stats->rx_err);
53767 goto out;
53768 }
53769
53770@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53771 if (printk_ratelimit())
53772 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
53773 __func__, length);
53774- atomic_inc(&vcc->stats->rx_drop);
53775+ atomic_inc_unchecked(&vcc->stats->rx_drop);
53776 goto out;
53777 }
53778
53779@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53780
53781 vcc->push(vcc, skb);
53782
53783- atomic_inc(&vcc->stats->rx);
53784+ atomic_inc_unchecked(&vcc->stats->rx);
53785 out:
53786 skb_trim(sarb, 0);
53787 }
53788@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
53789 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
53790
53791 usbatm_pop(vcc, skb);
53792- atomic_inc(&vcc->stats->tx);
53793+ atomic_inc_unchecked(&vcc->stats->tx);
53794
53795 skb = skb_dequeue(&instance->sndqueue);
53796 }
53797@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
53798 if (!left--)
53799 return sprintf(page,
53800 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
53801- atomic_read(&atm_dev->stats.aal5.tx),
53802- atomic_read(&atm_dev->stats.aal5.tx_err),
53803- atomic_read(&atm_dev->stats.aal5.rx),
53804- atomic_read(&atm_dev->stats.aal5.rx_err),
53805- atomic_read(&atm_dev->stats.aal5.rx_drop));
53806+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
53807+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
53808+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
53809+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
53810+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
53811
53812 if (!left--) {
53813 if (instance->disconnected)
53814diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
53815index 2a3bbdf..91d72cf 100644
53816--- a/drivers/usb/core/devices.c
53817+++ b/drivers/usb/core/devices.c
53818@@ -126,7 +126,7 @@ static const char format_endpt[] =
53819 * time it gets called.
53820 */
53821 static struct device_connect_event {
53822- atomic_t count;
53823+ atomic_unchecked_t count;
53824 wait_queue_head_t wait;
53825 } device_event = {
53826 .count = ATOMIC_INIT(1),
53827@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
53828
53829 void usbfs_conn_disc_event(void)
53830 {
53831- atomic_add(2, &device_event.count);
53832+ atomic_add_unchecked(2, &device_event.count);
53833 wake_up(&device_event.wait);
53834 }
53835
53836@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
53837
53838 poll_wait(file, &device_event.wait, wait);
53839
53840- event_count = atomic_read(&device_event.count);
53841+ event_count = atomic_read_unchecked(&device_event.count);
53842 if (file->f_version != event_count) {
53843 file->f_version = event_count;
53844 return POLLIN | POLLRDNORM;
53845diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
53846index 0b59731..46ee7d1 100644
53847--- a/drivers/usb/core/devio.c
53848+++ b/drivers/usb/core/devio.c
53849@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
53850 struct usb_dev_state *ps = file->private_data;
53851 struct usb_device *dev = ps->dev;
53852 ssize_t ret = 0;
53853- unsigned len;
53854+ size_t len;
53855 loff_t pos;
53856 int i;
53857
53858@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
53859 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
53860 struct usb_config_descriptor *config =
53861 (struct usb_config_descriptor *)dev->rawdescriptors[i];
53862- unsigned int length = le16_to_cpu(config->wTotalLength);
53863+ size_t length = le16_to_cpu(config->wTotalLength);
53864
53865 if (*ppos < pos + length) {
53866
53867 /* The descriptor may claim to be longer than it
53868 * really is. Here is the actual allocated length. */
53869- unsigned alloclen =
53870+ size_t alloclen =
53871 le16_to_cpu(dev->config[i].desc.wTotalLength);
53872
53873- len = length - (*ppos - pos);
53874+ len = length + pos - *ppos;
53875 if (len > nbytes)
53876 len = nbytes;
53877
53878 /* Simply don't write (skip over) unallocated parts */
53879 if (alloclen > (*ppos - pos)) {
53880- alloclen -= (*ppos - pos);
53881+ alloclen = alloclen + pos - *ppos;
53882 if (copy_to_user(buf,
53883 dev->rawdescriptors[i] + (*ppos - pos),
53884 min(len, alloclen))) {
53885diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
53886index 45a915c..09f9735 100644
53887--- a/drivers/usb/core/hcd.c
53888+++ b/drivers/usb/core/hcd.c
53889@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
53890 */
53891 usb_get_urb(urb);
53892 atomic_inc(&urb->use_count);
53893- atomic_inc(&urb->dev->urbnum);
53894+ atomic_inc_unchecked(&urb->dev->urbnum);
53895 usbmon_urb_submit(&hcd->self, urb);
53896
53897 /* NOTE requirements on root-hub callers (usbfs and the hub
53898@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
53899 urb->hcpriv = NULL;
53900 INIT_LIST_HEAD(&urb->urb_list);
53901 atomic_dec(&urb->use_count);
53902- atomic_dec(&urb->dev->urbnum);
53903+ atomic_dec_unchecked(&urb->dev->urbnum);
53904 if (atomic_read(&urb->reject))
53905 wake_up(&usb_kill_urb_queue);
53906 usb_put_urb(urb);
53907diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
53908index b4bfa3a..008f926 100644
53909--- a/drivers/usb/core/hub.c
53910+++ b/drivers/usb/core/hub.c
53911@@ -26,6 +26,7 @@
53912 #include <linux/mutex.h>
53913 #include <linux/random.h>
53914 #include <linux/pm_qos.h>
53915+#include <linux/grsecurity.h>
53916
53917 #include <asm/uaccess.h>
53918 #include <asm/byteorder.h>
53919@@ -4664,6 +4665,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
53920 goto done;
53921 return;
53922 }
53923+
53924+ if (gr_handle_new_usb())
53925+ goto done;
53926+
53927 if (hub_is_superspeed(hub->hdev))
53928 unit_load = 150;
53929 else
53930diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
53931index f368d20..0c30ac5 100644
53932--- a/drivers/usb/core/message.c
53933+++ b/drivers/usb/core/message.c
53934@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
53935 * Return: If successful, the number of bytes transferred. Otherwise, a negative
53936 * error number.
53937 */
53938-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
53939+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
53940 __u8 requesttype, __u16 value, __u16 index, void *data,
53941 __u16 size, int timeout)
53942 {
53943@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
53944 * If successful, 0. Otherwise a negative error number. The number of actual
53945 * bytes transferred will be stored in the @actual_length parameter.
53946 */
53947-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
53948+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
53949 void *data, int len, int *actual_length, int timeout)
53950 {
53951 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
53952@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
53953 * bytes transferred will be stored in the @actual_length parameter.
53954 *
53955 */
53956-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
53957+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
53958 void *data, int len, int *actual_length, int timeout)
53959 {
53960 struct urb *urb;
53961diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
53962index d269738..7340cd7 100644
53963--- a/drivers/usb/core/sysfs.c
53964+++ b/drivers/usb/core/sysfs.c
53965@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
53966 struct usb_device *udev;
53967
53968 udev = to_usb_device(dev);
53969- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
53970+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
53971 }
53972 static DEVICE_ATTR_RO(urbnum);
53973
53974diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
53975index b1fb9ae..4224885 100644
53976--- a/drivers/usb/core/usb.c
53977+++ b/drivers/usb/core/usb.c
53978@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
53979 set_dev_node(&dev->dev, dev_to_node(bus->controller));
53980 dev->state = USB_STATE_ATTACHED;
53981 dev->lpm_disable_count = 1;
53982- atomic_set(&dev->urbnum, 0);
53983+ atomic_set_unchecked(&dev->urbnum, 0);
53984
53985 INIT_LIST_HEAD(&dev->ep0.urb_list);
53986 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
53987diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
53988index 8cfc319..4868255 100644
53989--- a/drivers/usb/early/ehci-dbgp.c
53990+++ b/drivers/usb/early/ehci-dbgp.c
53991@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
53992
53993 #ifdef CONFIG_KGDB
53994 static struct kgdb_io kgdbdbgp_io_ops;
53995-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
53996+static struct kgdb_io kgdbdbgp_io_ops_console;
53997+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
53998 #else
53999 #define dbgp_kgdb_mode (0)
54000 #endif
54001@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
54002 .write_char = kgdbdbgp_write_char,
54003 };
54004
54005+static struct kgdb_io kgdbdbgp_io_ops_console = {
54006+ .name = "kgdbdbgp",
54007+ .read_char = kgdbdbgp_read_char,
54008+ .write_char = kgdbdbgp_write_char,
54009+ .is_console = 1
54010+};
54011+
54012 static int kgdbdbgp_wait_time;
54013
54014 static int __init kgdbdbgp_parse_config(char *str)
54015@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
54016 ptr++;
54017 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
54018 }
54019- kgdb_register_io_module(&kgdbdbgp_io_ops);
54020- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
54021+ if (early_dbgp_console.index != -1)
54022+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
54023+ else
54024+ kgdb_register_io_module(&kgdbdbgp_io_ops);
54025
54026 return 0;
54027 }
54028diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
54029index e971584..03495ab 100644
54030--- a/drivers/usb/gadget/function/f_uac1.c
54031+++ b/drivers/usb/gadget/function/f_uac1.c
54032@@ -14,6 +14,7 @@
54033 #include <linux/module.h>
54034 #include <linux/device.h>
54035 #include <linux/atomic.h>
54036+#include <linux/module.h>
54037
54038 #include "u_uac1.h"
54039
54040diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
54041index 491082a..dfd7d17 100644
54042--- a/drivers/usb/gadget/function/u_serial.c
54043+++ b/drivers/usb/gadget/function/u_serial.c
54044@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54045 spin_lock_irq(&port->port_lock);
54046
54047 /* already open? Great. */
54048- if (port->port.count) {
54049+ if (atomic_read(&port->port.count)) {
54050 status = 0;
54051- port->port.count++;
54052+ atomic_inc(&port->port.count);
54053
54054 /* currently opening/closing? wait ... */
54055 } else if (port->openclose) {
54056@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54057 tty->driver_data = port;
54058 port->port.tty = tty;
54059
54060- port->port.count = 1;
54061+ atomic_set(&port->port.count, 1);
54062 port->openclose = false;
54063
54064 /* if connected, start the I/O stream */
54065@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54066
54067 spin_lock_irq(&port->port_lock);
54068
54069- if (port->port.count != 1) {
54070- if (port->port.count == 0)
54071+ if (atomic_read(&port->port.count) != 1) {
54072+ if (atomic_read(&port->port.count) == 0)
54073 WARN_ON(1);
54074 else
54075- --port->port.count;
54076+ atomic_dec(&port->port.count);
54077 goto exit;
54078 }
54079
54080@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54081 * and sleep if necessary
54082 */
54083 port->openclose = true;
54084- port->port.count = 0;
54085+ atomic_set(&port->port.count, 0);
54086
54087 gser = port->port_usb;
54088 if (gser && gser->disconnect)
54089@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
54090 int cond;
54091
54092 spin_lock_irq(&port->port_lock);
54093- cond = (port->port.count == 0) && !port->openclose;
54094+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
54095 spin_unlock_irq(&port->port_lock);
54096 return cond;
54097 }
54098@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
54099 /* if it's already open, start I/O ... and notify the serial
54100 * protocol about open/close status (connect/disconnect).
54101 */
54102- if (port->port.count) {
54103+ if (atomic_read(&port->port.count)) {
54104 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
54105 gs_start_io(port);
54106 if (gser->connect)
54107@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
54108
54109 port->port_usb = NULL;
54110 gser->ioport = NULL;
54111- if (port->port.count > 0 || port->openclose) {
54112+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
54113 wake_up_interruptible(&port->drain_wait);
54114 if (port->port.tty)
54115 tty_hangup(port->port.tty);
54116@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
54117
54118 /* finally, free any unused/unusable I/O buffers */
54119 spin_lock_irqsave(&port->port_lock, flags);
54120- if (port->port.count == 0 && !port->openclose)
54121+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
54122 gs_buf_free(&port->port_write_buf);
54123 gs_free_requests(gser->out, &port->read_pool, NULL);
54124 gs_free_requests(gser->out, &port->read_queue, NULL);
54125diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
54126index 53842a1..2bef3b6 100644
54127--- a/drivers/usb/gadget/function/u_uac1.c
54128+++ b/drivers/usb/gadget/function/u_uac1.c
54129@@ -17,6 +17,7 @@
54130 #include <linux/ctype.h>
54131 #include <linux/random.h>
54132 #include <linux/syscalls.h>
54133+#include <linux/module.h>
54134
54135 #include "u_uac1.h"
54136
54137diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
54138index 118edb7..7a6415f 100644
54139--- a/drivers/usb/host/ehci-hub.c
54140+++ b/drivers/usb/host/ehci-hub.c
54141@@ -769,7 +769,7 @@ static struct urb *request_single_step_set_feature_urb(
54142 urb->transfer_flags = URB_DIR_IN;
54143 usb_get_urb(urb);
54144 atomic_inc(&urb->use_count);
54145- atomic_inc(&urb->dev->urbnum);
54146+ atomic_inc_unchecked(&urb->dev->urbnum);
54147 urb->setup_dma = dma_map_single(
54148 hcd->self.controller,
54149 urb->setup_packet,
54150@@ -836,7 +836,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
54151 urb->status = -EINPROGRESS;
54152 usb_get_urb(urb);
54153 atomic_inc(&urb->use_count);
54154- atomic_inc(&urb->dev->urbnum);
54155+ atomic_inc_unchecked(&urb->dev->urbnum);
54156 retval = submit_single_step_set_feature(hcd, urb, 0);
54157 if (!retval && !wait_for_completion_timeout(&done,
54158 msecs_to_jiffies(2000))) {
54159diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
54160index 1db0626..4948782 100644
54161--- a/drivers/usb/host/hwa-hc.c
54162+++ b/drivers/usb/host/hwa-hc.c
54163@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54164 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
54165 struct wahc *wa = &hwahc->wa;
54166 struct device *dev = &wa->usb_iface->dev;
54167- u8 mas_le[UWB_NUM_MAS/8];
54168+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
54169+
54170+ if (mas_le == NULL)
54171+ return -ENOMEM;
54172
54173 /* Set the stream index */
54174 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
54175@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54176 WUSB_REQ_SET_WUSB_MAS,
54177 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
54178 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
54179- mas_le, 32, USB_CTRL_SET_TIMEOUT);
54180+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
54181 if (result < 0)
54182 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
54183 out:
54184+ kfree(mas_le);
54185+
54186 return result;
54187 }
54188
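The hwa-hc.c hunk replaces an on-stack mas_le[UWB_NUM_MAS/8] buffer with a checked kmalloc() that is freed on the shared out: path, and it sizes the control transfer with the same UWB_NUM_MAS/8 expression instead of the hardcoded 32 (UWB_NUM_MAS is 256, so the value is unchanged; the magic number becomes the macro). A userspace sketch of that shape, with send_mas() as a made-up stand-in for usb_control_msg():

#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define UWB_NUM_MAS 256		/* same value as the kernel macro */

static int send_mas(const unsigned char *buf, size_t len)
{
	(void)buf;
	return (int)len;	/* pretend the transfer succeeded */
}

static int demo_bwa_set(void)
{
	unsigned char *mas_le = malloc(UWB_NUM_MAS / 8);
	int result;

	if (mas_le == NULL)	/* heap allocation can fail; stack couldn't */
		return -ENOMEM;

	memset(mas_le, 0, UWB_NUM_MAS / 8);
	/* the length now tracks the allocation instead of a literal 32 */
	result = send_mas(mas_le, UWB_NUM_MAS / 8);

	free(mas_le);		/* single exit path frees the buffer */
	return result < 0 ? result : 0;
}

int main(void)
{
	return demo_bwa_set() < 0;
}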
54189diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
54190index b3d245e..99549ed 100644
54191--- a/drivers/usb/misc/appledisplay.c
54192+++ b/drivers/usb/misc/appledisplay.c
54193@@ -84,7 +84,7 @@ struct appledisplay {
54194 struct mutex sysfslock; /* concurrent read and write */
54195 };
54196
54197-static atomic_t count_displays = ATOMIC_INIT(0);
54198+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
54199 static struct workqueue_struct *wq;
54200
54201 static void appledisplay_complete(struct urb *urb)
54202@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
54203
54204 /* Register backlight device */
54205 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
54206- atomic_inc_return(&count_displays) - 1);
54207+ atomic_inc_return_unchecked(&count_displays) - 1);
54208 memset(&props, 0, sizeof(struct backlight_properties));
54209 props.type = BACKLIGHT_RAW;
54210 props.max_brightness = 0xff;
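count_displays is only used to number backlight devices, so wrapping is harmless; under PAX_REFCOUNT the regular atomic_t operations trap on overflow, and counters like this one are moved to atomic_unchecked_t with *_unchecked accessors to opt out. An illustrative definition of the split, not the actual PaX implementation:

/* checked/unchecked split, sketched with _demo suffixes */
typedef struct {
	int counter;
} atomic_unchecked_t_demo;

static inline int atomic_read_unchecked_demo(const atomic_unchecked_t_demo *v)
{
	return *(volatile const int *)&v->counter;
}

static inline int atomic_inc_return_unchecked_demo(atomic_unchecked_t_demo *v)
{
	/* no overflow trap here: this counter is allowed to wrap */
	return __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
}

int main(void)
{
	atomic_unchecked_t_demo count_displays = { 0 };

	/* "appledisplay%d" would be numbered from 0 upward */
	int id = atomic_inc_return_unchecked_demo(&count_displays) - 1;
	return id;	/* 0 */
}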
54211diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
54212index 29fa1c3..a57b08e 100644
54213--- a/drivers/usb/serial/console.c
54214+++ b/drivers/usb/serial/console.c
54215@@ -125,7 +125,7 @@ static int usb_console_setup(struct console *co, char *options)
54216
54217 info->port = port;
54218
54219- ++port->port.count;
54220+ atomic_inc(&port->port.count);
54221 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
54222 if (serial->type->set_termios) {
54223 /*
54224@@ -173,7 +173,7 @@ static int usb_console_setup(struct console *co, char *options)
54225 }
54226 /* Now that any required fake tty operations are completed restore
54227 * the tty port count */
54228- --port->port.count;
54229+ atomic_dec(&port->port.count);
54230 /* The console is special in terms of closing the device so
54231 * indicate this port is now acting as a system console. */
54232 port->port.console = 1;
54233@@ -186,7 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
54234 put_tty:
54235 tty_kref_put(tty);
54236 reset_open_count:
54237- port->port.count = 0;
54238+ atomic_set(&port->port.count, 0);
54239 usb_autopm_put_interface(serial->interface);
54240 error_get_interface:
54241 usb_serial_put(serial);
54242@@ -197,7 +197,7 @@ static int usb_console_setup(struct console *co, char *options)
54243 static void usb_console_write(struct console *co,
54244 const char *buf, unsigned count)
54245 {
54246- static struct usbcons_info *info = &usbcons_info;
54247+ struct usbcons_info *info = &usbcons_info;
54248 struct usb_serial_port *port = info->port;
54249 struct usb_serial *serial;
54250 int retval = -ENODEV;
54251diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
54252index ccf1df7..54e170d 100644
54253--- a/drivers/usb/serial/generic.c
54254+++ b/drivers/usb/serial/generic.c
54255@@ -258,7 +258,8 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
54256 * character or at least one jiffy.
54257 */
54258 period = max_t(unsigned long, (10 * HZ / bps), 1);
54259- period = min_t(unsigned long, period, timeout);
54260+ if (timeout)
54261+ period = min_t(unsigned long, period, timeout);
54262
54263 dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
54264 __func__, jiffies_to_msecs(timeout),
54265@@ -268,7 +269,7 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
54266 schedule_timeout_interruptible(period);
54267 if (signal_pending(current))
54268 break;
54269- if (time_after(jiffies, expire))
54270+ if (timeout && time_after(jiffies, expire))
54271 break;
54272 }
54273 }
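In usb_serial_generic_wait_until_sent() a timeout of 0 means "no deadline", but the old code clamped period with min_t(..., period, 0) and then hit time_after(jiffies, expire) on the first pass, returning immediately. The fix guards both the clamp and the expiry test with if (timeout). A userspace sketch of the corrected loop; chars_pending() is a made-up stand-in for the TX-FIFO check:

#include <stdio.h>
#include <time.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static int chars_pending(int *budget)
{
	return (*budget)-- > 0;	/* pretend the FIFO drains over a few polls */
}

static void wait_until_sent(long timeout_ms)
{
	long expire = now_ms() + timeout_ms;
	int budget = 3;

	while (chars_pending(&budget)) {
		long period = 10;	/* ms per character, say */
		struct timespec nap;

		if (timeout_ms)		/* only clamp against a real deadline */
			period = period < timeout_ms ? period : timeout_ms;
		nap.tv_sec = 0;
		nap.tv_nsec = period * 1000000L;
		nanosleep(&nap, NULL);
		if (timeout_ms && now_ms() > expire)	/* 0 == no deadline */
			break;
	}
	printf("drained or deadline hit\n");
}

int main(void)
{
	wait_until_sent(0);	/* waits until the data is gone, not 0 ms */
	return 0;
}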
54274diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
54275index 307e339..6aa97cb 100644
54276--- a/drivers/usb/storage/usb.h
54277+++ b/drivers/usb/storage/usb.h
54278@@ -63,7 +63,7 @@ struct us_unusual_dev {
54279 __u8 useProtocol;
54280 __u8 useTransport;
54281 int (*initFunction)(struct us_data *);
54282-};
54283+} __do_const;
54284
54285
54286 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
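__do_const comes from the PaX constify plugin: a structure that holds only setup data and function pointers is forced const, so every instance lands in read-only memory and cannot be overwritten at runtime. The effect is what plain const gives you by hand, as in this sketch with invented us_unusual_dev_demo names:

#include <stdio.h>

struct us_unusual_dev_demo {
	const char *vendor;
	int (*init)(void);
};

static int demo_init(void)
{
	return 0;
}

/* const placement: the compiler puts this table in .rodata */
static const struct us_unusual_dev_demo table[] = {
	{ .vendor = "demo", .init = demo_init },
};

int main(void)
{
	/* table[0].init = NULL; would now fail to compile */
	printf("%s -> %d\n", table[0].vendor, table[0].init());
	return 0;
}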
54287diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
54288index a863a98..d272795 100644
54289--- a/drivers/usb/usbip/vhci.h
54290+++ b/drivers/usb/usbip/vhci.h
54291@@ -83,7 +83,7 @@ struct vhci_hcd {
54292 unsigned resuming:1;
54293 unsigned long re_timeout;
54294
54295- atomic_t seqnum;
54296+ atomic_unchecked_t seqnum;
54297
54298 /*
54299 * NOTE:
54300diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
54301index 1ae9d40..c62604b 100644
54302--- a/drivers/usb/usbip/vhci_hcd.c
54303+++ b/drivers/usb/usbip/vhci_hcd.c
54304@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
54305
54306 spin_lock(&vdev->priv_lock);
54307
54308- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
54309+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54310 if (priv->seqnum == 0xffff)
54311 dev_info(&urb->dev->dev, "seqnum max\n");
54312
54313@@ -684,7 +684,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
54314 return -ENOMEM;
54315 }
54316
54317- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
54318+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54319 if (unlink->seqnum == 0xffff)
54320 pr_info("seqnum max\n");
54321
54322@@ -888,7 +888,7 @@ static int vhci_start(struct usb_hcd *hcd)
54323 vdev->rhport = rhport;
54324 }
54325
54326- atomic_set(&vhci->seqnum, 0);
54327+ atomic_set_unchecked(&vhci->seqnum, 0);
54328 spin_lock_init(&vhci->lock);
54329
54330 hcd->power_budget = 0; /* no limit */
54331diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
54332index 00e4a54..d676f85 100644
54333--- a/drivers/usb/usbip/vhci_rx.c
54334+++ b/drivers/usb/usbip/vhci_rx.c
54335@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
54336 if (!urb) {
54337 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
54338 pr_info("max seqnum %d\n",
54339- atomic_read(&the_controller->seqnum));
54340+ atomic_read_unchecked(&the_controller->seqnum));
54341 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
54342 return;
54343 }
54344diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
54345index edc7267..9f65ce2 100644
54346--- a/drivers/usb/wusbcore/wa-hc.h
54347+++ b/drivers/usb/wusbcore/wa-hc.h
54348@@ -240,7 +240,7 @@ struct wahc {
54349 spinlock_t xfer_list_lock;
54350 struct work_struct xfer_enqueue_work;
54351 struct work_struct xfer_error_work;
54352- atomic_t xfer_id_count;
54353+ atomic_unchecked_t xfer_id_count;
54354
54355 kernel_ulong_t quirks;
54356 };
54357@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
54358 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
54359 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
54360 wa->dto_in_use = 0;
54361- atomic_set(&wa->xfer_id_count, 1);
54362+ atomic_set_unchecked(&wa->xfer_id_count, 1);
54363 /* init the buf in URBs */
54364 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
54365 usb_init_urb(&(wa->buf_in_urbs[index]));
54366diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
54367index 69af4fd..da390d7 100644
54368--- a/drivers/usb/wusbcore/wa-xfer.c
54369+++ b/drivers/usb/wusbcore/wa-xfer.c
54370@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
54371 */
54372 static void wa_xfer_id_init(struct wa_xfer *xfer)
54373 {
54374- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
54375+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
54376 }
54377
54378 /* Return the xfer's ID. */
54379diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
54380index f018d8d..ccab63f 100644
54381--- a/drivers/vfio/vfio.c
54382+++ b/drivers/vfio/vfio.c
54383@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
54384 return 0;
54385
54386 /* TODO Prevent device auto probing */
54387- WARN("Device %s added to live group %d!\n", dev_name(dev),
54388+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
54389 iommu_group_id(group->iommu_group));
54390
54391 return 0;
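This one is a plain bug fix: WARN(cond, fmt, ...) takes the condition first, so the old call passed the format string in the condition slot and dev_name(dev) as the format. Supplying an explicit 1 makes the message always print with the intended arguments. A userspace mock of the macro's shape:

#include <stdio.h>

/* mimics the condition-first shape of the kernel's WARN() */
#define WARN_DEMO(cond, ...)				\
	do {						\
		if (cond)				\
			fprintf(stderr, __VA_ARGS__);	\
	} while (0)

int main(void)
{
	const char *dev = "0000:00:1f.0";
	int group = 7;

	/* fixed form: explicit always-true condition, then the message */
	WARN_DEMO(1, "Device %s added to live group %d!\n", dev, group);
	return 0;
}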
54392diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
54393index 9484d56..d415d69 100644
54394--- a/drivers/vhost/net.c
54395+++ b/drivers/vhost/net.c
54396@@ -650,10 +650,8 @@ static void handle_rx(struct vhost_net *net)
54397 break;
54398 }
54399 /* TODO: Should check and handle checksum. */
54400-
54401- hdr.num_buffers = cpu_to_vhost16(vq, headcount);
54402 if (likely(mergeable) &&
54403- memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers,
54404+ memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
54405 offsetof(typeof(hdr), num_buffers),
54406 sizeof hdr.num_buffers)) {
54407 vq_err(vq, "Failed num_buffers write");
54408diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
54409index 3bb02c6..a01ff38 100644
54410--- a/drivers/vhost/vringh.c
54411+++ b/drivers/vhost/vringh.c
54412@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
54413 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
54414 {
54415 __virtio16 v = 0;
54416- int rc = get_user(v, (__force __virtio16 __user *)p);
54417+ int rc = get_user(v, (__force_user __virtio16 *)p);
54418 *val = vringh16_to_cpu(vrh, v);
54419 return rc;
54420 }
54421@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
54422 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
54423 {
54424 __virtio16 v = cpu_to_vringh16(vrh, val);
54425- return put_user(v, (__force __virtio16 __user *)p);
54426+ return put_user(v, (__force_user __virtio16 *)p);
54427 }
54428
54429 static inline int copydesc_user(void *dst, const void *src, size_t len)
54430 {
54431- return copy_from_user(dst, (__force void __user *)src, len) ?
54432+ return copy_from_user(dst, (void __force_user *)src, len) ?
54433 -EFAULT : 0;
54434 }
54435
54436@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
54437 const struct vring_used_elem *src,
54438 unsigned int num)
54439 {
54440- return copy_to_user((__force void __user *)dst, src,
54441+ return copy_to_user((void __force_user *)dst, src,
54442 sizeof(*dst) * num) ? -EFAULT : 0;
54443 }
54444
54445 static inline int xfer_from_user(void *src, void *dst, size_t len)
54446 {
54447- return copy_from_user(dst, (__force void __user *)src, len) ?
54448+ return copy_from_user(dst, (void __force_user *)src, len) ?
54449 -EFAULT : 0;
54450 }
54451
54452 static inline int xfer_to_user(void *dst, void *src, size_t len)
54453 {
54454- return copy_to_user((__force void __user *)dst, src, len) ?
54455+ return copy_to_user((void __force_user *)dst, src, len) ?
54456 -EFAULT : 0;
54457 }
54458
54459@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
54460 vrh->last_used_idx = 0;
54461 vrh->vring.num = num;
54462 /* vring expects kernel addresses, but only used via accessors. */
54463- vrh->vring.desc = (__force struct vring_desc *)desc;
54464- vrh->vring.avail = (__force struct vring_avail *)avail;
54465- vrh->vring.used = (__force struct vring_used *)used;
54466+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
54467+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
54468+ vrh->vring.used = (__force_kernel struct vring_used *)used;
54469 return 0;
54470 }
54471 EXPORT_SYMBOL(vringh_init_user);
54472@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
54473
54474 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
54475 {
54476- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
54477+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
54478 return 0;
54479 }
54480
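The vringh.c churn swaps (__force ... __user *) casts for grsecurity's __force_user, which bundles the two sparse annotations so a deliberate kernel-to-user pointer cast is one recognizable token. Outside sparse these all expand to nothing; a compilable sketch (the __CHECKER__ definitions mirror the kernel's, the _demo names are invented):

#ifdef __CHECKER__
# define __user_demo	__attribute__((noderef, address_space(1)))
# define __force_demo	__attribute__((force))
#else
# define __user_demo
# define __force_demo
#endif
/* grsecurity's __force_user bundles the two annotations */
#define __force_user_demo	__force_demo __user_demo

static int copydesc_user_demo(void *dst, const void *src, unsigned long len)
{
	/* the cast says: this really is a user pointer, on purpose */
	const void __force_user_demo *usrc =
		(const void __force_user_demo *)src;

	(void)usrc; (void)dst; (void)len;
	return 0;	/* real code would copy_from_user() here */
}

int main(void)
{
	return copydesc_user_demo(0, 0, 0);
}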
54481diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
54482index 84a110a..96312c3 100644
54483--- a/drivers/video/backlight/kb3886_bl.c
54484+++ b/drivers/video/backlight/kb3886_bl.c
54485@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
54486 static unsigned long kb3886bl_flags;
54487 #define KB3886BL_SUSPENDED 0x01
54488
54489-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
54490+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
54491 {
54492 .ident = "Sahara Touch-iT",
54493 .matches = {
54494diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
54495index 1b0b233..6f34c2c 100644
54496--- a/drivers/video/fbdev/arcfb.c
54497+++ b/drivers/video/fbdev/arcfb.c
54498@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
54499 return -ENOSPC;
54500
54501 err = 0;
54502- if ((count + p) > fbmemlength) {
54503+ if (count > (fbmemlength - p)) {
54504 count = fbmemlength - p;
54505 err = -ENOSPC;
54506 }
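The arcfb_write() change is integer-overflow hardening: with p <= fbmemlength already guaranteed by the earlier -ENOSPC return, count > fbmemlength - p cannot wrap, whereas count + p overflows for a huge count and slips past the old test. A self-contained demonstration:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long fbmemlength = 64 * 1024;
	unsigned long p = 4096;			/* current file position */
	unsigned long count = ULONG_MAX - 100;	/* hostile write length */

	/* old check: count + p wraps to a small value, so it passes (0) */
	printf("old check rejects: %d\n", (count + p) > fbmemlength);

	/* new check: no addition, so no wrap; rejects as intended (1) */
	printf("new check rejects: %d\n", count > (fbmemlength - p));
	return 0;
}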
54507diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
54508index aedf2fb..47c9aca 100644
54509--- a/drivers/video/fbdev/aty/aty128fb.c
54510+++ b/drivers/video/fbdev/aty/aty128fb.c
54511@@ -149,7 +149,7 @@ enum {
54512 };
54513
54514 /* Must match above enum */
54515-static char * const r128_family[] = {
54516+static const char * const r128_family[] = {
54517 "AGP",
54518 "PCI",
54519 "PRO AGP",
54520diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
54521index 37ec09b..98f8862 100644
54522--- a/drivers/video/fbdev/aty/atyfb_base.c
54523+++ b/drivers/video/fbdev/aty/atyfb_base.c
54524@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
54525 par->accel_flags = var->accel_flags; /* hack */
54526
54527 if (var->accel_flags) {
54528- info->fbops->fb_sync = atyfb_sync;
54529+ pax_open_kernel();
54530+ *(void **)&info->fbops->fb_sync = atyfb_sync;
54531+ pax_close_kernel();
54532 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54533 } else {
54534- info->fbops->fb_sync = NULL;
54535+ pax_open_kernel();
54536+ *(void **)&info->fbops->fb_sync = NULL;
54537+ pax_close_kernel();
54538 info->flags |= FBINFO_HWACCEL_DISABLED;
54539 }
54540
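From here on, many fbdev hunks share one pattern: struct fb_ops is made read-only by the constify plugin, so a driver that swaps a method at runtime must bracket the store with pax_open_kernel()/pax_close_kernel() and write through a *(void **)& cast to get past the const qualifier. A userspace analogue, with mprotect() standing in for the open/close pair:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct fb_ops_demo {
	int (*fb_sync)(void);
};

static int demo_sync(void) { return 42; }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	struct fb_ops_demo *ops;

	ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ops == MAP_FAILED)
		return 1;
	memset(ops, 0, sizeof(*ops));
	mprotect(ops, pagesz, PROT_READ);		/* table is now read-only */

	mprotect(ops, pagesz, PROT_READ | PROT_WRITE);	/* pax_open_kernel() */
	*(void **)&ops->fb_sync = (void *)demo_sync;
	mprotect(ops, pagesz, PROT_READ);		/* pax_close_kernel() */

	printf("fb_sync() = %d\n", ops->fb_sync());
	munmap(ops, pagesz);
	return 0;
}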
54541diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
54542index 2fa0317..4983f2a 100644
54543--- a/drivers/video/fbdev/aty/mach64_cursor.c
54544+++ b/drivers/video/fbdev/aty/mach64_cursor.c
54545@@ -8,6 +8,7 @@
54546 #include "../core/fb_draw.h"
54547
54548 #include <asm/io.h>
54549+#include <asm/pgtable.h>
54550
54551 #ifdef __sparc__
54552 #include <asm/fbio.h>
54553@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
54554 info->sprite.buf_align = 16; /* and 64 lines tall. */
54555 info->sprite.flags = FB_PIXMAP_IO;
54556
54557- info->fbops->fb_cursor = atyfb_cursor;
54558+ pax_open_kernel();
54559+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
54560+ pax_close_kernel();
54561
54562 return 0;
54563 }
54564diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
54565index d6cab1f..112f680 100644
54566--- a/drivers/video/fbdev/core/fb_defio.c
54567+++ b/drivers/video/fbdev/core/fb_defio.c
54568@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
54569
54570 BUG_ON(!fbdefio);
54571 mutex_init(&fbdefio->lock);
54572- info->fbops->fb_mmap = fb_deferred_io_mmap;
54573+ pax_open_kernel();
54574+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
54575+ pax_close_kernel();
54576 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
54577 INIT_LIST_HEAD(&fbdefio->pagelist);
54578 if (fbdefio->delay == 0) /* set a default of 1 s */
54579@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
54580 page->mapping = NULL;
54581 }
54582
54583- info->fbops->fb_mmap = NULL;
54584+ *(void **)&info->fbops->fb_mmap = NULL;
54585 mutex_destroy(&fbdefio->lock);
54586 }
54587 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
54588diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
54589index 0705d88..d9429bf 100644
54590--- a/drivers/video/fbdev/core/fbmem.c
54591+++ b/drivers/video/fbdev/core/fbmem.c
54592@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
54593 __u32 data;
54594 int err;
54595
54596- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
54597+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
54598
54599 data = (__u32) (unsigned long) fix->smem_start;
54600 err |= put_user(data, &fix32->smem_start);
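A side note on the fbmem.c hunk: fix32->id is a char array, so the decayed fix32->id and the pointer-to-array &fix32->id carry the same address and the same copy size; only the pointer type changes (char * versus char (*)[]). Quick check:

#include <stdio.h>

struct fix_demo {
	char id[16];
};

int main(void)
{
	struct fix_demo f;

	printf("decayed:    %p (char *)\n", (void *)f.id);
	printf("address-of: %p (char (*)[16])\n", (void *)&f.id);
	/* same value, so the copy is unchanged; only the type differs */
	return sizeof(f.id) != sizeof(*&f.id);	/* 0 */
}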
54601diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
54602index 4254336..282567e 100644
54603--- a/drivers/video/fbdev/hyperv_fb.c
54604+++ b/drivers/video/fbdev/hyperv_fb.c
54605@@ -240,7 +240,7 @@ static uint screen_fb_size;
54606 static inline int synthvid_send(struct hv_device *hdev,
54607 struct synthvid_msg *msg)
54608 {
54609- static atomic64_t request_id = ATOMIC64_INIT(0);
54610+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
54611 int ret;
54612
54613 msg->pipe_hdr.type = PIPE_MSG_DATA;
54614@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
54615
54616 ret = vmbus_sendpacket(hdev->channel, msg,
54617 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
54618- atomic64_inc_return(&request_id),
54619+ atomic64_inc_return_unchecked(&request_id),
54620 VM_PKT_DATA_INBAND, 0);
54621
54622 if (ret)
54623diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
54624index 7672d2e..b56437f 100644
54625--- a/drivers/video/fbdev/i810/i810_accel.c
54626+++ b/drivers/video/fbdev/i810/i810_accel.c
54627@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
54628 }
54629 }
54630 printk("ringbuffer lockup!!!\n");
54631+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
54632 i810_report_error(mmio);
54633 par->dev_flags |= LOCKUP;
54634 info->pixmap.scan_align = 1;
54635diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54636index a01147f..5d896f8 100644
54637--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54638+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54639@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
54640
54641 #ifdef CONFIG_FB_MATROX_MYSTIQUE
54642 struct matrox_switch matrox_mystique = {
54643- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
54644+ .preinit = MGA1064_preinit,
54645+ .reset = MGA1064_reset,
54646+ .init = MGA1064_init,
54647+ .restore = MGA1064_restore,
54648 };
54649 EXPORT_SYMBOL(matrox_mystique);
54650 #endif
54651
54652 #ifdef CONFIG_FB_MATROX_G
54653 struct matrox_switch matrox_G100 = {
54654- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
54655+ .preinit = MGAG100_preinit,
54656+ .reset = MGAG100_reset,
54657+ .init = MGAG100_init,
54658+ .restore = MGAG100_restore,
54659 };
54660 EXPORT_SYMBOL(matrox_G100);
54661 #endif
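The matroxfb hunks convert positional struct initializers to C99 designated initializers. The positional form silently binds values to whatever field happens to come first, which breaks if the layout ever changes (as it can under grsecurity's structure-layout randomization); naming each member keeps the binding explicit. A sketch with invented _demo names:

struct matrox_switch_demo {
	int  (*preinit)(void);
	void (*reset)(void);
	int  (*init)(void);
	void (*restore)(void);
};

static int  demo_preinit(void) { return 0; }
static void demo_reset(void)   { }
static int  demo_init(void)    { return 0; }
static void demo_restore(void) { }

/* before: { demo_preinit, demo_reset, demo_init, demo_restore } */
static const struct matrox_switch_demo matrox_demo = {
	.preinit = demo_preinit,
	.reset   = demo_reset,
	.init    = demo_init,
	.restore = demo_restore,
};

int main(void)
{
	return matrox_demo.preinit() + matrox_demo.init();
}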
54662diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54663index 195ad7c..09743fc 100644
54664--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54665+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54666@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
54667 }
54668
54669 struct matrox_switch matrox_millennium = {
54670- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
54671+ .preinit = Ti3026_preinit,
54672+ .reset = Ti3026_reset,
54673+ .init = Ti3026_init,
54674+ .restore = Ti3026_restore
54675 };
54676 EXPORT_SYMBOL(matrox_millennium);
54677 #endif
54678diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54679index fe92eed..106e085 100644
54680--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54681+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54682@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
54683 struct mb862xxfb_par *par = info->par;
54684
54685 if (info->var.bits_per_pixel == 32) {
54686- info->fbops->fb_fillrect = cfb_fillrect;
54687- info->fbops->fb_copyarea = cfb_copyarea;
54688- info->fbops->fb_imageblit = cfb_imageblit;
54689+ pax_open_kernel();
54690+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54691+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54692+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54693+ pax_close_kernel();
54694 } else {
54695 outreg(disp, GC_L0EM, 3);
54696- info->fbops->fb_fillrect = mb86290fb_fillrect;
54697- info->fbops->fb_copyarea = mb86290fb_copyarea;
54698- info->fbops->fb_imageblit = mb86290fb_imageblit;
54699+ pax_open_kernel();
54700+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
54701+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
54702+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
54703+ pax_close_kernel();
54704 }
54705 outreg(draw, GDC_REG_DRAW_BASE, 0);
54706 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
54707diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
54708index def0412..fed6529 100644
54709--- a/drivers/video/fbdev/nvidia/nvidia.c
54710+++ b/drivers/video/fbdev/nvidia/nvidia.c
54711@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
54712 info->fix.line_length = (info->var.xres_virtual *
54713 info->var.bits_per_pixel) >> 3;
54714 if (info->var.accel_flags) {
54715- info->fbops->fb_imageblit = nvidiafb_imageblit;
54716- info->fbops->fb_fillrect = nvidiafb_fillrect;
54717- info->fbops->fb_copyarea = nvidiafb_copyarea;
54718- info->fbops->fb_sync = nvidiafb_sync;
54719+ pax_open_kernel();
54720+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
54721+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
54722+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
54723+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
54724+ pax_close_kernel();
54725 info->pixmap.scan_align = 4;
54726 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54727 info->flags |= FBINFO_READS_FAST;
54728 NVResetGraphics(info);
54729 } else {
54730- info->fbops->fb_imageblit = cfb_imageblit;
54731- info->fbops->fb_fillrect = cfb_fillrect;
54732- info->fbops->fb_copyarea = cfb_copyarea;
54733- info->fbops->fb_sync = NULL;
54734+ pax_open_kernel();
54735+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54736+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54737+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54738+ *(void **)&info->fbops->fb_sync = NULL;
54739+ pax_close_kernel();
54740 info->pixmap.scan_align = 1;
54741 info->flags |= FBINFO_HWACCEL_DISABLED;
54742 info->flags &= ~FBINFO_READS_FAST;
54743@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
54744 info->pixmap.size = 8 * 1024;
54745 info->pixmap.flags = FB_PIXMAP_SYSTEM;
54746
54747- if (!hwcur)
54748- info->fbops->fb_cursor = NULL;
54749+ if (!hwcur) {
54750+ pax_open_kernel();
54751+ *(void **)&info->fbops->fb_cursor = NULL;
54752+ pax_close_kernel();
54753+ }
54754
54755 info->var.accel_flags = (!noaccel);
54756
54757diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
54758index 2412a0d..294215b 100644
54759--- a/drivers/video/fbdev/omap2/dss/display.c
54760+++ b/drivers/video/fbdev/omap2/dss/display.c
54761@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
54762 if (dssdev->name == NULL)
54763 dssdev->name = dssdev->alias;
54764
54765+ pax_open_kernel();
54766 if (drv && drv->get_resolution == NULL)
54767- drv->get_resolution = omapdss_default_get_resolution;
54768+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
54769 if (drv && drv->get_recommended_bpp == NULL)
54770- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54771+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54772 if (drv && drv->get_timings == NULL)
54773- drv->get_timings = omapdss_default_get_timings;
54774+ *(void **)&drv->get_timings = omapdss_default_get_timings;
54775+ pax_close_kernel();
54776
54777 mutex_lock(&panel_list_mutex);
54778 list_add_tail(&dssdev->panel_list, &panel_list);
54779diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
54780index 83433cb..71e9b98 100644
54781--- a/drivers/video/fbdev/s1d13xxxfb.c
54782+++ b/drivers/video/fbdev/s1d13xxxfb.c
54783@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
54784
54785 switch(prod_id) {
54786 case S1D13506_PROD_ID: /* activate acceleration */
54787- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54788- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54789+ pax_open_kernel();
54790+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54791+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54792+ pax_close_kernel();
54793 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
54794 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
54795 break;
54796diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
54797index d3013cd..95b8285 100644
54798--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
54799+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
54800@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
54801 }
54802
54803 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
54804- lcdc_sys_write_index,
54805- lcdc_sys_write_data,
54806- lcdc_sys_read_data,
54807+ .write_index = lcdc_sys_write_index,
54808+ .write_data = lcdc_sys_write_data,
54809+ .read_data = lcdc_sys_read_data,
54810 };
54811
54812 static int sh_mobile_lcdc_sginit(struct fb_info *info,
54813diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
54814index 9279e5f..d5f5276 100644
54815--- a/drivers/video/fbdev/smscufx.c
54816+++ b/drivers/video/fbdev/smscufx.c
54817@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
54818 fb_deferred_io_cleanup(info);
54819 kfree(info->fbdefio);
54820 info->fbdefio = NULL;
54821- info->fbops->fb_mmap = ufx_ops_mmap;
54822+ pax_open_kernel();
54823+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
54824+ pax_close_kernel();
54825 }
54826
54827 pr_debug("released /dev/fb%d user=%d count=%d",
54828diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
54829index ff2b873..626a8d5 100644
54830--- a/drivers/video/fbdev/udlfb.c
54831+++ b/drivers/video/fbdev/udlfb.c
54832@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
54833 dlfb_urb_completion(urb);
54834
54835 error:
54836- atomic_add(bytes_sent, &dev->bytes_sent);
54837- atomic_add(bytes_identical, &dev->bytes_identical);
54838- atomic_add(width*height*2, &dev->bytes_rendered);
54839+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54840+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54841+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
54842 end_cycles = get_cycles();
54843- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54844+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54845 >> 10)), /* Kcycles */
54846 &dev->cpu_kcycles_used);
54847
54848@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
54849 dlfb_urb_completion(urb);
54850
54851 error:
54852- atomic_add(bytes_sent, &dev->bytes_sent);
54853- atomic_add(bytes_identical, &dev->bytes_identical);
54854- atomic_add(bytes_rendered, &dev->bytes_rendered);
54855+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54856+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54857+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
54858 end_cycles = get_cycles();
54859- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54860+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54861 >> 10)), /* Kcycles */
54862 &dev->cpu_kcycles_used);
54863 }
54864@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
54865 fb_deferred_io_cleanup(info);
54866 kfree(info->fbdefio);
54867 info->fbdefio = NULL;
54868- info->fbops->fb_mmap = dlfb_ops_mmap;
54869+ pax_open_kernel();
54870+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
54871+ pax_close_kernel();
54872 }
54873
54874 pr_warn("released /dev/fb%d user=%d count=%d\n",
54875@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
54876 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54877 struct dlfb_data *dev = fb_info->par;
54878 return snprintf(buf, PAGE_SIZE, "%u\n",
54879- atomic_read(&dev->bytes_rendered));
54880+ atomic_read_unchecked(&dev->bytes_rendered));
54881 }
54882
54883 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
54884@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
54885 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54886 struct dlfb_data *dev = fb_info->par;
54887 return snprintf(buf, PAGE_SIZE, "%u\n",
54888- atomic_read(&dev->bytes_identical));
54889+ atomic_read_unchecked(&dev->bytes_identical));
54890 }
54891
54892 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
54893@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
54894 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54895 struct dlfb_data *dev = fb_info->par;
54896 return snprintf(buf, PAGE_SIZE, "%u\n",
54897- atomic_read(&dev->bytes_sent));
54898+ atomic_read_unchecked(&dev->bytes_sent));
54899 }
54900
54901 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
54902@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
54903 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54904 struct dlfb_data *dev = fb_info->par;
54905 return snprintf(buf, PAGE_SIZE, "%u\n",
54906- atomic_read(&dev->cpu_kcycles_used));
54907+ atomic_read_unchecked(&dev->cpu_kcycles_used));
54908 }
54909
54910 static ssize_t edid_show(
54911@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
54912 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54913 struct dlfb_data *dev = fb_info->par;
54914
54915- atomic_set(&dev->bytes_rendered, 0);
54916- atomic_set(&dev->bytes_identical, 0);
54917- atomic_set(&dev->bytes_sent, 0);
54918- atomic_set(&dev->cpu_kcycles_used, 0);
54919+ atomic_set_unchecked(&dev->bytes_rendered, 0);
54920+ atomic_set_unchecked(&dev->bytes_identical, 0);
54921+ atomic_set_unchecked(&dev->bytes_sent, 0);
54922+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
54923
54924 return count;
54925 }
54926diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
54927index d32d1c4..46722e6 100644
54928--- a/drivers/video/fbdev/uvesafb.c
54929+++ b/drivers/video/fbdev/uvesafb.c
54930@@ -19,6 +19,7 @@
54931 #include <linux/io.h>
54932 #include <linux/mutex.h>
54933 #include <linux/slab.h>
54934+#include <linux/moduleloader.h>
54935 #include <video/edid.h>
54936 #include <video/uvesafb.h>
54937 #ifdef CONFIG_X86
54938@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
54939 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
54940 par->pmi_setpal = par->ypan = 0;
54941 } else {
54942+
54943+#ifdef CONFIG_PAX_KERNEXEC
54944+#ifdef CONFIG_MODULES
54945+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
54946+#endif
54947+ if (!par->pmi_code) {
54948+ par->pmi_setpal = par->ypan = 0;
54949+ return 0;
54950+ }
54951+#endif
54952+
54953 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
54954 + task->t.regs.edi);
54955+
54956+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54957+ pax_open_kernel();
54958+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
54959+ pax_close_kernel();
54960+
54961+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
54962+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
54963+#else
54964 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
54965 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
54966+#endif
54967+
54968 printk(KERN_INFO "uvesafb: protected mode interface info at "
54969 "%04x:%04x\n",
54970 (u16)task->t.regs.es, (u16)task->t.regs.edi);
54971@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
54972 par->ypan = ypan;
54973
54974 if (par->pmi_setpal || par->ypan) {
54975+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
54976 if (__supported_pte_mask & _PAGE_NX) {
54977 par->pmi_setpal = par->ypan = 0;
54978 printk(KERN_WARNING "uvesafb: NX protection is active, "
54979 "better not use the PMI.\n");
54980- } else {
54981+ } else
54982+#endif
54983 uvesafb_vbe_getpmi(task, par);
54984- }
54985 }
54986 #else
54987 /* The protected mode interface is not available on non-x86. */
54988@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
54989 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
54990
54991 /* Disable blanking if the user requested so. */
54992- if (!blank)
54993- info->fbops->fb_blank = NULL;
54994+ if (!blank) {
54995+ pax_open_kernel();
54996+ *(void **)&info->fbops->fb_blank = NULL;
54997+ pax_close_kernel();
54998+ }
54999
55000 /*
55001 * Find out how much IO memory is required for the mode with
55002@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55003 info->flags = FBINFO_FLAG_DEFAULT |
55004 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
55005
55006- if (!par->ypan)
55007- info->fbops->fb_pan_display = NULL;
55008+ if (!par->ypan) {
55009+ pax_open_kernel();
55010+ *(void **)&info->fbops->fb_pan_display = NULL;
55011+ pax_close_kernel();
55012+ }
55013 }
55014
55015 static void uvesafb_init_mtrr(struct fb_info *info)
55016@@ -1786,6 +1816,11 @@ out_mode:
55017 out:
55018 kfree(par->vbe_modes);
55019
55020+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55021+ if (par->pmi_code)
55022+ module_memfree_exec(par->pmi_code);
55023+#endif
55024+
55025 framebuffer_release(info);
55026 return err;
55027 }
55028@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
55029 kfree(par->vbe_state_orig);
55030 kfree(par->vbe_state_saved);
55031
55032+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55033+ if (par->pmi_code)
55034+ module_memfree_exec(par->pmi_code);
55035+#endif
55036+
55037 framebuffer_release(info);
55038 }
55039 return 0;
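The uvesafb change addresses KERNEXEC: once low memory is non-executable, the BIOS protected-mode interface code cannot run in place, so it is copied into a region obtained from module_alloc_exec() (released via module_memfree_exec() on the error and remove paths) and the pmi_start/pmi_pal entry points are recomputed inside the copy from the 16-bit offset table at the head of the blob. A userspace sketch, with mmap() standing in for the allocator and a made-up blob:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* fake PMI blob: [1] = set-window offset, [2] = set-palette offset */
	unsigned short pmi_base[8] = { 0, 0x10, 0x20 };
	size_t len = sizeof(pmi_base);
	unsigned char *pmi_code;

	pmi_code = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (pmi_code == MAP_FAILED)	/* RWX mappings may be denied */
		return 1;

	memcpy(pmi_code, pmi_base, len);	/* done under pax_open_kernel() */

	/* entry points now land inside the executable copy */
	void *pmi_start = pmi_code + ((unsigned short *)pmi_code)[1];
	void *pmi_pal   = pmi_code + ((unsigned short *)pmi_code)[2];

	printf("pmi_start=%p pmi_pal=%p\n", pmi_start, pmi_pal);
	munmap(pmi_code, len);
	return 0;
}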
55040diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
55041index d79a0ac..2d0c3d4 100644
55042--- a/drivers/video/fbdev/vesafb.c
55043+++ b/drivers/video/fbdev/vesafb.c
55044@@ -9,6 +9,7 @@
55045 */
55046
55047 #include <linux/module.h>
55048+#include <linux/moduleloader.h>
55049 #include <linux/kernel.h>
55050 #include <linux/errno.h>
55051 #include <linux/string.h>
55052@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
55053 static int vram_total; /* Set total amount of memory */
55054 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
55055 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
55056-static void (*pmi_start)(void) __read_mostly;
55057-static void (*pmi_pal) (void) __read_mostly;
55058+static void (*pmi_start)(void) __read_only;
55059+static void (*pmi_pal) (void) __read_only;
55060 static int depth __read_mostly;
55061 static int vga_compat __read_mostly;
55062 /* --------------------------------------------------------------------- */
55063@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
55064 unsigned int size_remap;
55065 unsigned int size_total;
55066 char *option = NULL;
55067+ void *pmi_code = NULL;
55068
55069 /* ignore error return of fb_get_options */
55070 fb_get_options("vesafb", &option);
55071@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
55072 size_remap = size_total;
55073 vesafb_fix.smem_len = size_remap;
55074
55075-#ifndef __i386__
55076- screen_info.vesapm_seg = 0;
55077-#endif
55078-
55079 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
55080 printk(KERN_WARNING
55081 "vesafb: cannot reserve video memory at 0x%lx\n",
55082@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
55083 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
55084 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
55085
55086+#ifdef __i386__
55087+
55088+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55089+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
55090+ if (!pmi_code)
55091+#elif !defined(CONFIG_PAX_KERNEXEC)
55092+ if (0)
55093+#endif
55094+
55095+#endif
55096+ screen_info.vesapm_seg = 0;
55097+
55098 if (screen_info.vesapm_seg) {
55099- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
55100- screen_info.vesapm_seg,screen_info.vesapm_off);
55101+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
55102+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
55103 }
55104
55105 if (screen_info.vesapm_seg < 0xc000)
55106@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
55107
55108 if (ypan || pmi_setpal) {
55109 unsigned short *pmi_base;
55110+
55111 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
55112- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
55113- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
55114+
55115+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55116+ pax_open_kernel();
55117+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
55118+#else
55119+ pmi_code = pmi_base;
55120+#endif
55121+
55122+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
55123+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
55124+
55125+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55126+ pmi_start = ktva_ktla(pmi_start);
55127+ pmi_pal = ktva_ktla(pmi_pal);
55128+ pax_close_kernel();
55129+#endif
55130+
55131 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
55132 if (pmi_base[3]) {
55133 printk(KERN_INFO "vesafb: pmi: ports = ");
55134@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
55135 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
55136 (ypan ? FBINFO_HWACCEL_YPAN : 0);
55137
55138- if (!ypan)
55139- info->fbops->fb_pan_display = NULL;
55140+ if (!ypan) {
55141+ pax_open_kernel();
55142+ *(void **)&info->fbops->fb_pan_display = NULL;
55143+ pax_close_kernel();
55144+ }
55145
55146 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
55147 err = -ENOMEM;
55148@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
55149 fb_info(info, "%s frame buffer device\n", info->fix.id);
55150 return 0;
55151 err:
55152+
55153+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55154+ module_memfree_exec(pmi_code);
55155+#endif
55156+
55157 if (info->screen_base)
55158 iounmap(info->screen_base);
55159 framebuffer_release(info);
55160diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
55161index 88714ae..16c2e11 100644
55162--- a/drivers/video/fbdev/via/via_clock.h
55163+++ b/drivers/video/fbdev/via/via_clock.h
55164@@ -56,7 +56,7 @@ struct via_clock {
55165
55166 void (*set_engine_pll_state)(u8 state);
55167 void (*set_engine_pll)(struct via_pll_config config);
55168-};
55169+} __no_const;
55170
55171
55172 static inline u32 get_pll_internal_frequency(u32 ref_freq,
55173diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
55174index 3c14e43..2630570 100644
55175--- a/drivers/video/logo/logo_linux_clut224.ppm
55176+++ b/drivers/video/logo/logo_linux_clut224.ppm
55177@@ -2,1603 +2,1123 @@ P3
55178 # Standard 224-color Linux logo
55179 80 80
55180 255
[logo pixel data elided: the body of this hunk deletes the stock 80x80, 224-color Linux logo's RGB triplet rows (1603 lines) and substitutes the replacement image's 1123 rows; the raw numeric payload carries no reviewable content]
55751-180 123 7 190 142 34 190 178 144 187 187 187
55752-202 202 202 221 221 221 214 214 214 66 66 66
55753- 2 2 6 2 2 6 50 50 50 62 62 62
55754- 6 6 6 2 2 6 10 10 10 90 90 90
55755- 50 50 50 18 18 18 6 6 6 0 0 0
55756- 0 0 0 0 0 0 0 0 0 0 0 0
55757- 0 0 0 0 0 0 0 0 0 0 0 0
55758- 0 0 0 0 0 0 0 0 0 0 0 0
55759- 0 0 0 0 0 0 0 0 0 0 0 0
55760- 0 0 0 0 0 0 0 0 0 0 0 0
55761- 0 0 0 0 0 0 0 0 0 0 0 0
55762- 0 0 0 0 0 0 0 0 0 0 0 0
55763- 0 0 0 0 0 0 0 0 0 0 0 0
55764- 0 0 0 0 0 0 0 0 0 0 0 0
55765- 0 0 0 0 0 0 0 0 0 0 0 0
55766- 0 0 0 0 0 0 0 0 0 0 0 0
55767- 0 0 0 0 0 0 10 10 10 34 34 34
55768- 74 74 74 74 74 74 2 2 6 6 6 6
55769-144 144 144 198 198 198 190 190 190 178 166 146
55770-154 121 60 156 107 11 156 107 11 168 124 44
55771-174 154 114 187 187 187 190 190 190 210 210 210
55772-246 246 246 253 253 253 253 253 253 182 182 182
55773- 6 6 6 2 2 6 2 2 6 2 2 6
55774- 2 2 6 2 2 6 2 2 6 62 62 62
55775- 74 74 74 34 34 34 14 14 14 0 0 0
55776- 0 0 0 0 0 0 0 0 0 0 0 0
55777- 0 0 0 0 0 0 0 0 0 0 0 0
55778- 0 0 0 0 0 0 0 0 0 0 0 0
55779- 0 0 0 0 0 0 0 0 0 0 0 0
55780- 0 0 0 0 0 0 0 0 0 0 0 0
55781- 0 0 0 0 0 0 0 0 0 0 0 0
55782- 0 0 0 0 0 0 0 0 0 0 0 0
55783- 0 0 0 0 0 0 0 0 0 0 0 0
55784- 0 0 0 0 0 0 0 0 0 0 0 0
55785- 0 0 0 0 0 0 0 0 0 0 0 0
55786- 0 0 0 0 0 0 0 0 0 0 0 0
55787- 0 0 0 10 10 10 22 22 22 54 54 54
55788- 94 94 94 18 18 18 2 2 6 46 46 46
55789-234 234 234 221 221 221 190 190 190 190 190 190
55790-190 190 190 187 187 187 187 187 187 190 190 190
55791-190 190 190 195 195 195 214 214 214 242 242 242
55792-253 253 253 253 253 253 253 253 253 253 253 253
55793- 82 82 82 2 2 6 2 2 6 2 2 6
55794- 2 2 6 2 2 6 2 2 6 14 14 14
55795- 86 86 86 54 54 54 22 22 22 6 6 6
55796- 0 0 0 0 0 0 0 0 0 0 0 0
55797- 0 0 0 0 0 0 0 0 0 0 0 0
55798- 0 0 0 0 0 0 0 0 0 0 0 0
55799- 0 0 0 0 0 0 0 0 0 0 0 0
55800- 0 0 0 0 0 0 0 0 0 0 0 0
55801- 0 0 0 0 0 0 0 0 0 0 0 0
55802- 0 0 0 0 0 0 0 0 0 0 0 0
55803- 0 0 0 0 0 0 0 0 0 0 0 0
55804- 0 0 0 0 0 0 0 0 0 0 0 0
55805- 0 0 0 0 0 0 0 0 0 0 0 0
55806- 0 0 0 0 0 0 0 0 0 0 0 0
55807- 6 6 6 18 18 18 46 46 46 90 90 90
55808- 46 46 46 18 18 18 6 6 6 182 182 182
55809-253 253 253 246 246 246 206 206 206 190 190 190
55810-190 190 190 190 190 190 190 190 190 190 190 190
55811-206 206 206 231 231 231 250 250 250 253 253 253
55812-253 253 253 253 253 253 253 253 253 253 253 253
55813-202 202 202 14 14 14 2 2 6 2 2 6
55814- 2 2 6 2 2 6 2 2 6 2 2 6
55815- 42 42 42 86 86 86 42 42 42 18 18 18
55816- 6 6 6 0 0 0 0 0 0 0 0 0
55817- 0 0 0 0 0 0 0 0 0 0 0 0
55818- 0 0 0 0 0 0 0 0 0 0 0 0
55819- 0 0 0 0 0 0 0 0 0 0 0 0
55820- 0 0 0 0 0 0 0 0 0 0 0 0
55821- 0 0 0 0 0 0 0 0 0 0 0 0
55822- 0 0 0 0 0 0 0 0 0 0 0 0
55823- 0 0 0 0 0 0 0 0 0 0 0 0
55824- 0 0 0 0 0 0 0 0 0 0 0 0
55825- 0 0 0 0 0 0 0 0 0 0 0 0
55826- 0 0 0 0 0 0 0 0 0 6 6 6
55827- 14 14 14 38 38 38 74 74 74 66 66 66
55828- 2 2 6 6 6 6 90 90 90 250 250 250
55829-253 253 253 253 253 253 238 238 238 198 198 198
55830-190 190 190 190 190 190 195 195 195 221 221 221
55831-246 246 246 253 253 253 253 253 253 253 253 253
55832-253 253 253 253 253 253 253 253 253 253 253 253
55833-253 253 253 82 82 82 2 2 6 2 2 6
55834- 2 2 6 2 2 6 2 2 6 2 2 6
55835- 2 2 6 78 78 78 70 70 70 34 34 34
55836- 14 14 14 6 6 6 0 0 0 0 0 0
55837- 0 0 0 0 0 0 0 0 0 0 0 0
55838- 0 0 0 0 0 0 0 0 0 0 0 0
55839- 0 0 0 0 0 0 0 0 0 0 0 0
55840- 0 0 0 0 0 0 0 0 0 0 0 0
55841- 0 0 0 0 0 0 0 0 0 0 0 0
55842- 0 0 0 0 0 0 0 0 0 0 0 0
55843- 0 0 0 0 0 0 0 0 0 0 0 0
55844- 0 0 0 0 0 0 0 0 0 0 0 0
55845- 0 0 0 0 0 0 0 0 0 0 0 0
55846- 0 0 0 0 0 0 0 0 0 14 14 14
55847- 34 34 34 66 66 66 78 78 78 6 6 6
55848- 2 2 6 18 18 18 218 218 218 253 253 253
55849-253 253 253 253 253 253 253 253 253 246 246 246
55850-226 226 226 231 231 231 246 246 246 253 253 253
55851-253 253 253 253 253 253 253 253 253 253 253 253
55852-253 253 253 253 253 253 253 253 253 253 253 253
55853-253 253 253 178 178 178 2 2 6 2 2 6
55854- 2 2 6 2 2 6 2 2 6 2 2 6
55855- 2 2 6 18 18 18 90 90 90 62 62 62
55856- 30 30 30 10 10 10 0 0 0 0 0 0
55857- 0 0 0 0 0 0 0 0 0 0 0 0
55858- 0 0 0 0 0 0 0 0 0 0 0 0
55859- 0 0 0 0 0 0 0 0 0 0 0 0
55860- 0 0 0 0 0 0 0 0 0 0 0 0
55861- 0 0 0 0 0 0 0 0 0 0 0 0
55862- 0 0 0 0 0 0 0 0 0 0 0 0
55863- 0 0 0 0 0 0 0 0 0 0 0 0
55864- 0 0 0 0 0 0 0 0 0 0 0 0
55865- 0 0 0 0 0 0 0 0 0 0 0 0
55866- 0 0 0 0 0 0 10 10 10 26 26 26
55867- 58 58 58 90 90 90 18 18 18 2 2 6
55868- 2 2 6 110 110 110 253 253 253 253 253 253
55869-253 253 253 253 253 253 253 253 253 253 253 253
55870-250 250 250 253 253 253 253 253 253 253 253 253
55871-253 253 253 253 253 253 253 253 253 253 253 253
55872-253 253 253 253 253 253 253 253 253 253 253 253
55873-253 253 253 231 231 231 18 18 18 2 2 6
55874- 2 2 6 2 2 6 2 2 6 2 2 6
55875- 2 2 6 2 2 6 18 18 18 94 94 94
55876- 54 54 54 26 26 26 10 10 10 0 0 0
55877- 0 0 0 0 0 0 0 0 0 0 0 0
55878- 0 0 0 0 0 0 0 0 0 0 0 0
55879- 0 0 0 0 0 0 0 0 0 0 0 0
55880- 0 0 0 0 0 0 0 0 0 0 0 0
55881- 0 0 0 0 0 0 0 0 0 0 0 0
55882- 0 0 0 0 0 0 0 0 0 0 0 0
55883- 0 0 0 0 0 0 0 0 0 0 0 0
55884- 0 0 0 0 0 0 0 0 0 0 0 0
55885- 0 0 0 0 0 0 0 0 0 0 0 0
55886- 0 0 0 6 6 6 22 22 22 50 50 50
55887- 90 90 90 26 26 26 2 2 6 2 2 6
55888- 14 14 14 195 195 195 250 250 250 253 253 253
55889-253 253 253 253 253 253 253 253 253 253 253 253
55890-253 253 253 253 253 253 253 253 253 253 253 253
55891-253 253 253 253 253 253 253 253 253 253 253 253
55892-253 253 253 253 253 253 253 253 253 253 253 253
55893-250 250 250 242 242 242 54 54 54 2 2 6
55894- 2 2 6 2 2 6 2 2 6 2 2 6
55895- 2 2 6 2 2 6 2 2 6 38 38 38
55896- 86 86 86 50 50 50 22 22 22 6 6 6
55897- 0 0 0 0 0 0 0 0 0 0 0 0
55898- 0 0 0 0 0 0 0 0 0 0 0 0
55899- 0 0 0 0 0 0 0 0 0 0 0 0
55900- 0 0 0 0 0 0 0 0 0 0 0 0
55901- 0 0 0 0 0 0 0 0 0 0 0 0
55902- 0 0 0 0 0 0 0 0 0 0 0 0
55903- 0 0 0 0 0 0 0 0 0 0 0 0
55904- 0 0 0 0 0 0 0 0 0 0 0 0
55905- 0 0 0 0 0 0 0 0 0 0 0 0
55906- 6 6 6 14 14 14 38 38 38 82 82 82
55907- 34 34 34 2 2 6 2 2 6 2 2 6
55908- 42 42 42 195 195 195 246 246 246 253 253 253
55909-253 253 253 253 253 253 253 253 253 250 250 250
55910-242 242 242 242 242 242 250 250 250 253 253 253
55911-253 253 253 253 253 253 253 253 253 253 253 253
55912-253 253 253 250 250 250 246 246 246 238 238 238
55913-226 226 226 231 231 231 101 101 101 6 6 6
55914- 2 2 6 2 2 6 2 2 6 2 2 6
55915- 2 2 6 2 2 6 2 2 6 2 2 6
55916- 38 38 38 82 82 82 42 42 42 14 14 14
55917- 6 6 6 0 0 0 0 0 0 0 0 0
55918- 0 0 0 0 0 0 0 0 0 0 0 0
55919- 0 0 0 0 0 0 0 0 0 0 0 0
55920- 0 0 0 0 0 0 0 0 0 0 0 0
55921- 0 0 0 0 0 0 0 0 0 0 0 0
55922- 0 0 0 0 0 0 0 0 0 0 0 0
55923- 0 0 0 0 0 0 0 0 0 0 0 0
55924- 0 0 0 0 0 0 0 0 0 0 0 0
55925- 0 0 0 0 0 0 0 0 0 0 0 0
55926- 10 10 10 26 26 26 62 62 62 66 66 66
55927- 2 2 6 2 2 6 2 2 6 6 6 6
55928- 70 70 70 170 170 170 206 206 206 234 234 234
55929-246 246 246 250 250 250 250 250 250 238 238 238
55930-226 226 226 231 231 231 238 238 238 250 250 250
55931-250 250 250 250 250 250 246 246 246 231 231 231
55932-214 214 214 206 206 206 202 202 202 202 202 202
55933-198 198 198 202 202 202 182 182 182 18 18 18
55934- 2 2 6 2 2 6 2 2 6 2 2 6
55935- 2 2 6 2 2 6 2 2 6 2 2 6
55936- 2 2 6 62 62 62 66 66 66 30 30 30
55937- 10 10 10 0 0 0 0 0 0 0 0 0
55938- 0 0 0 0 0 0 0 0 0 0 0 0
55939- 0 0 0 0 0 0 0 0 0 0 0 0
55940- 0 0 0 0 0 0 0 0 0 0 0 0
55941- 0 0 0 0 0 0 0 0 0 0 0 0
55942- 0 0 0 0 0 0 0 0 0 0 0 0
55943- 0 0 0 0 0 0 0 0 0 0 0 0
55944- 0 0 0 0 0 0 0 0 0 0 0 0
55945- 0 0 0 0 0 0 0 0 0 0 0 0
55946- 14 14 14 42 42 42 82 82 82 18 18 18
55947- 2 2 6 2 2 6 2 2 6 10 10 10
55948- 94 94 94 182 182 182 218 218 218 242 242 242
55949-250 250 250 253 253 253 253 253 253 250 250 250
55950-234 234 234 253 253 253 253 253 253 253 253 253
55951-253 253 253 253 253 253 253 253 253 246 246 246
55952-238 238 238 226 226 226 210 210 210 202 202 202
55953-195 195 195 195 195 195 210 210 210 158 158 158
55954- 6 6 6 14 14 14 50 50 50 14 14 14
55955- 2 2 6 2 2 6 2 2 6 2 2 6
55956- 2 2 6 6 6 6 86 86 86 46 46 46
55957- 18 18 18 6 6 6 0 0 0 0 0 0
55958- 0 0 0 0 0 0 0 0 0 0 0 0
55959- 0 0 0 0 0 0 0 0 0 0 0 0
55960- 0 0 0 0 0 0 0 0 0 0 0 0
55961- 0 0 0 0 0 0 0 0 0 0 0 0
55962- 0 0 0 0 0 0 0 0 0 0 0 0
55963- 0 0 0 0 0 0 0 0 0 0 0 0
55964- 0 0 0 0 0 0 0 0 0 0 0 0
55965- 0 0 0 0 0 0 0 0 0 6 6 6
55966- 22 22 22 54 54 54 70 70 70 2 2 6
55967- 2 2 6 10 10 10 2 2 6 22 22 22
55968-166 166 166 231 231 231 250 250 250 253 253 253
55969-253 253 253 253 253 253 253 253 253 250 250 250
55970-242 242 242 253 253 253 253 253 253 253 253 253
55971-253 253 253 253 253 253 253 253 253 253 253 253
55972-253 253 253 253 253 253 253 253 253 246 246 246
55973-231 231 231 206 206 206 198 198 198 226 226 226
55974- 94 94 94 2 2 6 6 6 6 38 38 38
55975- 30 30 30 2 2 6 2 2 6 2 2 6
55976- 2 2 6 2 2 6 62 62 62 66 66 66
55977- 26 26 26 10 10 10 0 0 0 0 0 0
55978- 0 0 0 0 0 0 0 0 0 0 0 0
55979- 0 0 0 0 0 0 0 0 0 0 0 0
55980- 0 0 0 0 0 0 0 0 0 0 0 0
55981- 0 0 0 0 0 0 0 0 0 0 0 0
55982- 0 0 0 0 0 0 0 0 0 0 0 0
55983- 0 0 0 0 0 0 0 0 0 0 0 0
55984- 0 0 0 0 0 0 0 0 0 0 0 0
55985- 0 0 0 0 0 0 0 0 0 10 10 10
55986- 30 30 30 74 74 74 50 50 50 2 2 6
55987- 26 26 26 26 26 26 2 2 6 106 106 106
55988-238 238 238 253 253 253 253 253 253 253 253 253
55989-253 253 253 253 253 253 253 253 253 253 253 253
55990-253 253 253 253 253 253 253 253 253 253 253 253
55991-253 253 253 253 253 253 253 253 253 253 253 253
55992-253 253 253 253 253 253 253 253 253 253 253 253
55993-253 253 253 246 246 246 218 218 218 202 202 202
55994-210 210 210 14 14 14 2 2 6 2 2 6
55995- 30 30 30 22 22 22 2 2 6 2 2 6
55996- 2 2 6 2 2 6 18 18 18 86 86 86
55997- 42 42 42 14 14 14 0 0 0 0 0 0
55998- 0 0 0 0 0 0 0 0 0 0 0 0
55999- 0 0 0 0 0 0 0 0 0 0 0 0
56000- 0 0 0 0 0 0 0 0 0 0 0 0
56001- 0 0 0 0 0 0 0 0 0 0 0 0
56002- 0 0 0 0 0 0 0 0 0 0 0 0
56003- 0 0 0 0 0 0 0 0 0 0 0 0
56004- 0 0 0 0 0 0 0 0 0 0 0 0
56005- 0 0 0 0 0 0 0 0 0 14 14 14
56006- 42 42 42 90 90 90 22 22 22 2 2 6
56007- 42 42 42 2 2 6 18 18 18 218 218 218
56008-253 253 253 253 253 253 253 253 253 253 253 253
56009-253 253 253 253 253 253 253 253 253 253 253 253
56010-253 253 253 253 253 253 253 253 253 253 253 253
56011-253 253 253 253 253 253 253 253 253 253 253 253
56012-253 253 253 253 253 253 253 253 253 253 253 253
56013-253 253 253 253 253 253 250 250 250 221 221 221
56014-218 218 218 101 101 101 2 2 6 14 14 14
56015- 18 18 18 38 38 38 10 10 10 2 2 6
56016- 2 2 6 2 2 6 2 2 6 78 78 78
56017- 58 58 58 22 22 22 6 6 6 0 0 0
56018- 0 0 0 0 0 0 0 0 0 0 0 0
56019- 0 0 0 0 0 0 0 0 0 0 0 0
56020- 0 0 0 0 0 0 0 0 0 0 0 0
56021- 0 0 0 0 0 0 0 0 0 0 0 0
56022- 0 0 0 0 0 0 0 0 0 0 0 0
56023- 0 0 0 0 0 0 0 0 0 0 0 0
56024- 0 0 0 0 0 0 0 0 0 0 0 0
56025- 0 0 0 0 0 0 6 6 6 18 18 18
56026- 54 54 54 82 82 82 2 2 6 26 26 26
56027- 22 22 22 2 2 6 123 123 123 253 253 253
56028-253 253 253 253 253 253 253 253 253 253 253 253
56029-253 253 253 253 253 253 253 253 253 253 253 253
56030-253 253 253 253 253 253 253 253 253 253 253 253
56031-253 253 253 253 253 253 253 253 253 253 253 253
56032-253 253 253 253 253 253 253 253 253 253 253 253
56033-253 253 253 253 253 253 253 253 253 250 250 250
56034-238 238 238 198 198 198 6 6 6 38 38 38
56035- 58 58 58 26 26 26 38 38 38 2 2 6
56036- 2 2 6 2 2 6 2 2 6 46 46 46
56037- 78 78 78 30 30 30 10 10 10 0 0 0
56038- 0 0 0 0 0 0 0 0 0 0 0 0
56039- 0 0 0 0 0 0 0 0 0 0 0 0
56040- 0 0 0 0 0 0 0 0 0 0 0 0
56041- 0 0 0 0 0 0 0 0 0 0 0 0
56042- 0 0 0 0 0 0 0 0 0 0 0 0
56043- 0 0 0 0 0 0 0 0 0 0 0 0
56044- 0 0 0 0 0 0 0 0 0 0 0 0
56045- 0 0 0 0 0 0 10 10 10 30 30 30
56046- 74 74 74 58 58 58 2 2 6 42 42 42
56047- 2 2 6 22 22 22 231 231 231 253 253 253
56048-253 253 253 253 253 253 253 253 253 253 253 253
56049-253 253 253 253 253 253 253 253 253 250 250 250
56050-253 253 253 253 253 253 253 253 253 253 253 253
56051-253 253 253 253 253 253 253 253 253 253 253 253
56052-253 253 253 253 253 253 253 253 253 253 253 253
56053-253 253 253 253 253 253 253 253 253 253 253 253
56054-253 253 253 246 246 246 46 46 46 38 38 38
56055- 42 42 42 14 14 14 38 38 38 14 14 14
56056- 2 2 6 2 2 6 2 2 6 6 6 6
56057- 86 86 86 46 46 46 14 14 14 0 0 0
56058- 0 0 0 0 0 0 0 0 0 0 0 0
56059- 0 0 0 0 0 0 0 0 0 0 0 0
56060- 0 0 0 0 0 0 0 0 0 0 0 0
56061- 0 0 0 0 0 0 0 0 0 0 0 0
56062- 0 0 0 0 0 0 0 0 0 0 0 0
56063- 0 0 0 0 0 0 0 0 0 0 0 0
56064- 0 0 0 0 0 0 0 0 0 0 0 0
56065- 0 0 0 6 6 6 14 14 14 42 42 42
56066- 90 90 90 18 18 18 18 18 18 26 26 26
56067- 2 2 6 116 116 116 253 253 253 253 253 253
56068-253 253 253 253 253 253 253 253 253 253 253 253
56069-253 253 253 253 253 253 250 250 250 238 238 238
56070-253 253 253 253 253 253 253 253 253 253 253 253
56071-253 253 253 253 253 253 253 253 253 253 253 253
56072-253 253 253 253 253 253 253 253 253 253 253 253
56073-253 253 253 253 253 253 253 253 253 253 253 253
56074-253 253 253 253 253 253 94 94 94 6 6 6
56075- 2 2 6 2 2 6 10 10 10 34 34 34
56076- 2 2 6 2 2 6 2 2 6 2 2 6
56077- 74 74 74 58 58 58 22 22 22 6 6 6
56078- 0 0 0 0 0 0 0 0 0 0 0 0
56079- 0 0 0 0 0 0 0 0 0 0 0 0
56080- 0 0 0 0 0 0 0 0 0 0 0 0
56081- 0 0 0 0 0 0 0 0 0 0 0 0
56082- 0 0 0 0 0 0 0 0 0 0 0 0
56083- 0 0 0 0 0 0 0 0 0 0 0 0
56084- 0 0 0 0 0 0 0 0 0 0 0 0
56085- 0 0 0 10 10 10 26 26 26 66 66 66
56086- 82 82 82 2 2 6 38 38 38 6 6 6
56087- 14 14 14 210 210 210 253 253 253 253 253 253
56088-253 253 253 253 253 253 253 253 253 253 253 253
56089-253 253 253 253 253 253 246 246 246 242 242 242
56090-253 253 253 253 253 253 253 253 253 253 253 253
56091-253 253 253 253 253 253 253 253 253 253 253 253
56092-253 253 253 253 253 253 253 253 253 253 253 253
56093-253 253 253 253 253 253 253 253 253 253 253 253
56094-253 253 253 253 253 253 144 144 144 2 2 6
56095- 2 2 6 2 2 6 2 2 6 46 46 46
56096- 2 2 6 2 2 6 2 2 6 2 2 6
56097- 42 42 42 74 74 74 30 30 30 10 10 10
56098- 0 0 0 0 0 0 0 0 0 0 0 0
56099- 0 0 0 0 0 0 0 0 0 0 0 0
56100- 0 0 0 0 0 0 0 0 0 0 0 0
56101- 0 0 0 0 0 0 0 0 0 0 0 0
56102- 0 0 0 0 0 0 0 0 0 0 0 0
56103- 0 0 0 0 0 0 0 0 0 0 0 0
56104- 0 0 0 0 0 0 0 0 0 0 0 0
56105- 6 6 6 14 14 14 42 42 42 90 90 90
56106- 26 26 26 6 6 6 42 42 42 2 2 6
56107- 74 74 74 250 250 250 253 253 253 253 253 253
56108-253 253 253 253 253 253 253 253 253 253 253 253
56109-253 253 253 253 253 253 242 242 242 242 242 242
56110-253 253 253 253 253 253 253 253 253 253 253 253
56111-253 253 253 253 253 253 253 253 253 253 253 253
56112-253 253 253 253 253 253 253 253 253 253 253 253
56113-253 253 253 253 253 253 253 253 253 253 253 253
56114-253 253 253 253 253 253 182 182 182 2 2 6
56115- 2 2 6 2 2 6 2 2 6 46 46 46
56116- 2 2 6 2 2 6 2 2 6 2 2 6
56117- 10 10 10 86 86 86 38 38 38 10 10 10
56118- 0 0 0 0 0 0 0 0 0 0 0 0
56119- 0 0 0 0 0 0 0 0 0 0 0 0
56120- 0 0 0 0 0 0 0 0 0 0 0 0
56121- 0 0 0 0 0 0 0 0 0 0 0 0
56122- 0 0 0 0 0 0 0 0 0 0 0 0
56123- 0 0 0 0 0 0 0 0 0 0 0 0
56124- 0 0 0 0 0 0 0 0 0 0 0 0
56125- 10 10 10 26 26 26 66 66 66 82 82 82
56126- 2 2 6 22 22 22 18 18 18 2 2 6
56127-149 149 149 253 253 253 253 253 253 253 253 253
56128-253 253 253 253 253 253 253 253 253 253 253 253
56129-253 253 253 253 253 253 234 234 234 242 242 242
56130-253 253 253 253 253 253 253 253 253 253 253 253
56131-253 253 253 253 253 253 253 253 253 253 253 253
56132-253 253 253 253 253 253 253 253 253 253 253 253
56133-253 253 253 253 253 253 253 253 253 253 253 253
56134-253 253 253 253 253 253 206 206 206 2 2 6
56135- 2 2 6 2 2 6 2 2 6 38 38 38
56136- 2 2 6 2 2 6 2 2 6 2 2 6
56137- 6 6 6 86 86 86 46 46 46 14 14 14
56138- 0 0 0 0 0 0 0 0 0 0 0 0
56139- 0 0 0 0 0 0 0 0 0 0 0 0
56140- 0 0 0 0 0 0 0 0 0 0 0 0
56141- 0 0 0 0 0 0 0 0 0 0 0 0
56142- 0 0 0 0 0 0 0 0 0 0 0 0
56143- 0 0 0 0 0 0 0 0 0 0 0 0
56144- 0 0 0 0 0 0 0 0 0 6 6 6
56145- 18 18 18 46 46 46 86 86 86 18 18 18
56146- 2 2 6 34 34 34 10 10 10 6 6 6
56147-210 210 210 253 253 253 253 253 253 253 253 253
56148-253 253 253 253 253 253 253 253 253 253 253 253
56149-253 253 253 253 253 253 234 234 234 242 242 242
56150-253 253 253 253 253 253 253 253 253 253 253 253
56151-253 253 253 253 253 253 253 253 253 253 253 253
56152-253 253 253 253 253 253 253 253 253 253 253 253
56153-253 253 253 253 253 253 253 253 253 253 253 253
56154-253 253 253 253 253 253 221 221 221 6 6 6
56155- 2 2 6 2 2 6 6 6 6 30 30 30
56156- 2 2 6 2 2 6 2 2 6 2 2 6
56157- 2 2 6 82 82 82 54 54 54 18 18 18
56158- 6 6 6 0 0 0 0 0 0 0 0 0
56159- 0 0 0 0 0 0 0 0 0 0 0 0
56160- 0 0 0 0 0 0 0 0 0 0 0 0
56161- 0 0 0 0 0 0 0 0 0 0 0 0
56162- 0 0 0 0 0 0 0 0 0 0 0 0
56163- 0 0 0 0 0 0 0 0 0 0 0 0
56164- 0 0 0 0 0 0 0 0 0 10 10 10
56165- 26 26 26 66 66 66 62 62 62 2 2 6
56166- 2 2 6 38 38 38 10 10 10 26 26 26
56167-238 238 238 253 253 253 253 253 253 253 253 253
56168-253 253 253 253 253 253 253 253 253 253 253 253
56169-253 253 253 253 253 253 231 231 231 238 238 238
56170-253 253 253 253 253 253 253 253 253 253 253 253
56171-253 253 253 253 253 253 253 253 253 253 253 253
56172-253 253 253 253 253 253 253 253 253 253 253 253
56173-253 253 253 253 253 253 253 253 253 253 253 253
56174-253 253 253 253 253 253 231 231 231 6 6 6
56175- 2 2 6 2 2 6 10 10 10 30 30 30
56176- 2 2 6 2 2 6 2 2 6 2 2 6
56177- 2 2 6 66 66 66 58 58 58 22 22 22
56178- 6 6 6 0 0 0 0 0 0 0 0 0
56179- 0 0 0 0 0 0 0 0 0 0 0 0
56180- 0 0 0 0 0 0 0 0 0 0 0 0
56181- 0 0 0 0 0 0 0 0 0 0 0 0
56182- 0 0 0 0 0 0 0 0 0 0 0 0
56183- 0 0 0 0 0 0 0 0 0 0 0 0
56184- 0 0 0 0 0 0 0 0 0 10 10 10
56185- 38 38 38 78 78 78 6 6 6 2 2 6
56186- 2 2 6 46 46 46 14 14 14 42 42 42
56187-246 246 246 253 253 253 253 253 253 253 253 253
56188-253 253 253 253 253 253 253 253 253 253 253 253
56189-253 253 253 253 253 253 231 231 231 242 242 242
56190-253 253 253 253 253 253 253 253 253 253 253 253
56191-253 253 253 253 253 253 253 253 253 253 253 253
56192-253 253 253 253 253 253 253 253 253 253 253 253
56193-253 253 253 253 253 253 253 253 253 253 253 253
56194-253 253 253 253 253 253 234 234 234 10 10 10
56195- 2 2 6 2 2 6 22 22 22 14 14 14
56196- 2 2 6 2 2 6 2 2 6 2 2 6
56197- 2 2 6 66 66 66 62 62 62 22 22 22
56198- 6 6 6 0 0 0 0 0 0 0 0 0
56199- 0 0 0 0 0 0 0 0 0 0 0 0
56200- 0 0 0 0 0 0 0 0 0 0 0 0
56201- 0 0 0 0 0 0 0 0 0 0 0 0
56202- 0 0 0 0 0 0 0 0 0 0 0 0
56203- 0 0 0 0 0 0 0 0 0 0 0 0
56204- 0 0 0 0 0 0 6 6 6 18 18 18
56205- 50 50 50 74 74 74 2 2 6 2 2 6
56206- 14 14 14 70 70 70 34 34 34 62 62 62
56207-250 250 250 253 253 253 253 253 253 253 253 253
56208-253 253 253 253 253 253 253 253 253 253 253 253
56209-253 253 253 253 253 253 231 231 231 246 246 246
56210-253 253 253 253 253 253 253 253 253 253 253 253
56211-253 253 253 253 253 253 253 253 253 253 253 253
56212-253 253 253 253 253 253 253 253 253 253 253 253
56213-253 253 253 253 253 253 253 253 253 253 253 253
56214-253 253 253 253 253 253 234 234 234 14 14 14
56215- 2 2 6 2 2 6 30 30 30 2 2 6
56216- 2 2 6 2 2 6 2 2 6 2 2 6
56217- 2 2 6 66 66 66 62 62 62 22 22 22
56218- 6 6 6 0 0 0 0 0 0 0 0 0
56219- 0 0 0 0 0 0 0 0 0 0 0 0
56220- 0 0 0 0 0 0 0 0 0 0 0 0
56221- 0 0 0 0 0 0 0 0 0 0 0 0
56222- 0 0 0 0 0 0 0 0 0 0 0 0
56223- 0 0 0 0 0 0 0 0 0 0 0 0
56224- 0 0 0 0 0 0 6 6 6 18 18 18
56225- 54 54 54 62 62 62 2 2 6 2 2 6
56226- 2 2 6 30 30 30 46 46 46 70 70 70
56227-250 250 250 253 253 253 253 253 253 253 253 253
56228-253 253 253 253 253 253 253 253 253 253 253 253
56229-253 253 253 253 253 253 231 231 231 246 246 246
56230-253 253 253 253 253 253 253 253 253 253 253 253
56231-253 253 253 253 253 253 253 253 253 253 253 253
56232-253 253 253 253 253 253 253 253 253 253 253 253
56233-253 253 253 253 253 253 253 253 253 253 253 253
56234-253 253 253 253 253 253 226 226 226 10 10 10
56235- 2 2 6 6 6 6 30 30 30 2 2 6
56236- 2 2 6 2 2 6 2 2 6 2 2 6
56237- 2 2 6 66 66 66 58 58 58 22 22 22
56238- 6 6 6 0 0 0 0 0 0 0 0 0
56239- 0 0 0 0 0 0 0 0 0 0 0 0
56240- 0 0 0 0 0 0 0 0 0 0 0 0
56241- 0 0 0 0 0 0 0 0 0 0 0 0
56242- 0 0 0 0 0 0 0 0 0 0 0 0
56243- 0 0 0 0 0 0 0 0 0 0 0 0
56244- 0 0 0 0 0 0 6 6 6 22 22 22
56245- 58 58 58 62 62 62 2 2 6 2 2 6
56246- 2 2 6 2 2 6 30 30 30 78 78 78
56247-250 250 250 253 253 253 253 253 253 253 253 253
56248-253 253 253 253 253 253 253 253 253 253 253 253
56249-253 253 253 253 253 253 231 231 231 246 246 246
56250-253 253 253 253 253 253 253 253 253 253 253 253
56251-253 253 253 253 253 253 253 253 253 253 253 253
56252-253 253 253 253 253 253 253 253 253 253 253 253
56253-253 253 253 253 253 253 253 253 253 253 253 253
56254-253 253 253 253 253 253 206 206 206 2 2 6
56255- 22 22 22 34 34 34 18 14 6 22 22 22
56256- 26 26 26 18 18 18 6 6 6 2 2 6
56257- 2 2 6 82 82 82 54 54 54 18 18 18
56258- 6 6 6 0 0 0 0 0 0 0 0 0
56259- 0 0 0 0 0 0 0 0 0 0 0 0
56260- 0 0 0 0 0 0 0 0 0 0 0 0
56261- 0 0 0 0 0 0 0 0 0 0 0 0
56262- 0 0 0 0 0 0 0 0 0 0 0 0
56263- 0 0 0 0 0 0 0 0 0 0 0 0
56264- 0 0 0 0 0 0 6 6 6 26 26 26
56265- 62 62 62 106 106 106 74 54 14 185 133 11
56266-210 162 10 121 92 8 6 6 6 62 62 62
56267-238 238 238 253 253 253 253 253 253 253 253 253
56268-253 253 253 253 253 253 253 253 253 253 253 253
56269-253 253 253 253 253 253 231 231 231 246 246 246
56270-253 253 253 253 253 253 253 253 253 253 253 253
56271-253 253 253 253 253 253 253 253 253 253 253 253
56272-253 253 253 253 253 253 253 253 253 253 253 253
56273-253 253 253 253 253 253 253 253 253 253 253 253
56274-253 253 253 253 253 253 158 158 158 18 18 18
56275- 14 14 14 2 2 6 2 2 6 2 2 6
56276- 6 6 6 18 18 18 66 66 66 38 38 38
56277- 6 6 6 94 94 94 50 50 50 18 18 18
56278- 6 6 6 0 0 0 0 0 0 0 0 0
56279- 0 0 0 0 0 0 0 0 0 0 0 0
56280- 0 0 0 0 0 0 0 0 0 0 0 0
56281- 0 0 0 0 0 0 0 0 0 0 0 0
56282- 0 0 0 0 0 0 0 0 0 0 0 0
56283- 0 0 0 0 0 0 0 0 0 6 6 6
56284- 10 10 10 10 10 10 18 18 18 38 38 38
56285- 78 78 78 142 134 106 216 158 10 242 186 14
56286-246 190 14 246 190 14 156 118 10 10 10 10
56287- 90 90 90 238 238 238 253 253 253 253 253 253
56288-253 253 253 253 253 253 253 253 253 253 253 253
56289-253 253 253 253 253 253 231 231 231 250 250 250
56290-253 253 253 253 253 253 253 253 253 253 253 253
56291-253 253 253 253 253 253 253 253 253 253 253 253
56292-253 253 253 253 253 253 253 253 253 253 253 253
56293-253 253 253 253 253 253 253 253 253 246 230 190
56294-238 204 91 238 204 91 181 142 44 37 26 9
56295- 2 2 6 2 2 6 2 2 6 2 2 6
56296- 2 2 6 2 2 6 38 38 38 46 46 46
56297- 26 26 26 106 106 106 54 54 54 18 18 18
56298- 6 6 6 0 0 0 0 0 0 0 0 0
56299- 0 0 0 0 0 0 0 0 0 0 0 0
56300- 0 0 0 0 0 0 0 0 0 0 0 0
56301- 0 0 0 0 0 0 0 0 0 0 0 0
56302- 0 0 0 0 0 0 0 0 0 0 0 0
56303- 0 0 0 6 6 6 14 14 14 22 22 22
56304- 30 30 30 38 38 38 50 50 50 70 70 70
56305-106 106 106 190 142 34 226 170 11 242 186 14
56306-246 190 14 246 190 14 246 190 14 154 114 10
56307- 6 6 6 74 74 74 226 226 226 253 253 253
56308-253 253 253 253 253 253 253 253 253 253 253 253
56309-253 253 253 253 253 253 231 231 231 250 250 250
56310-253 253 253 253 253 253 253 253 253 253 253 253
56311-253 253 253 253 253 253 253 253 253 253 253 253
56312-253 253 253 253 253 253 253 253 253 253 253 253
56313-253 253 253 253 253 253 253 253 253 228 184 62
56314-241 196 14 241 208 19 232 195 16 38 30 10
56315- 2 2 6 2 2 6 2 2 6 2 2 6
56316- 2 2 6 6 6 6 30 30 30 26 26 26
56317-203 166 17 154 142 90 66 66 66 26 26 26
56318- 6 6 6 0 0 0 0 0 0 0 0 0
56319- 0 0 0 0 0 0 0 0 0 0 0 0
56320- 0 0 0 0 0 0 0 0 0 0 0 0
56321- 0 0 0 0 0 0 0 0 0 0 0 0
56322- 0 0 0 0 0 0 0 0 0 0 0 0
56323- 6 6 6 18 18 18 38 38 38 58 58 58
56324- 78 78 78 86 86 86 101 101 101 123 123 123
56325-175 146 61 210 150 10 234 174 13 246 186 14
56326-246 190 14 246 190 14 246 190 14 238 190 10
56327-102 78 10 2 2 6 46 46 46 198 198 198
56328-253 253 253 253 253 253 253 253 253 253 253 253
56329-253 253 253 253 253 253 234 234 234 242 242 242
56330-253 253 253 253 253 253 253 253 253 253 253 253
56331-253 253 253 253 253 253 253 253 253 253 253 253
56332-253 253 253 253 253 253 253 253 253 253 253 253
56333-253 253 253 253 253 253 253 253 253 224 178 62
56334-242 186 14 241 196 14 210 166 10 22 18 6
56335- 2 2 6 2 2 6 2 2 6 2 2 6
56336- 2 2 6 2 2 6 6 6 6 121 92 8
56337-238 202 15 232 195 16 82 82 82 34 34 34
56338- 10 10 10 0 0 0 0 0 0 0 0 0
56339- 0 0 0 0 0 0 0 0 0 0 0 0
56340- 0 0 0 0 0 0 0 0 0 0 0 0
56341- 0 0 0 0 0 0 0 0 0 0 0 0
56342- 0 0 0 0 0 0 0 0 0 0 0 0
56343- 14 14 14 38 38 38 70 70 70 154 122 46
56344-190 142 34 200 144 11 197 138 11 197 138 11
56345-213 154 11 226 170 11 242 186 14 246 190 14
56346-246 190 14 246 190 14 246 190 14 246 190 14
56347-225 175 15 46 32 6 2 2 6 22 22 22
56348-158 158 158 250 250 250 253 253 253 253 253 253
56349-253 253 253 253 253 253 253 253 253 253 253 253
56350-253 253 253 253 253 253 253 253 253 253 253 253
56351-253 253 253 253 253 253 253 253 253 253 253 253
56352-253 253 253 253 253 253 253 253 253 253 253 253
56353-253 253 253 250 250 250 242 242 242 224 178 62
56354-239 182 13 236 186 11 213 154 11 46 32 6
56355- 2 2 6 2 2 6 2 2 6 2 2 6
56356- 2 2 6 2 2 6 61 42 6 225 175 15
56357-238 190 10 236 186 11 112 100 78 42 42 42
56358- 14 14 14 0 0 0 0 0 0 0 0 0
56359- 0 0 0 0 0 0 0 0 0 0 0 0
56360- 0 0 0 0 0 0 0 0 0 0 0 0
56361- 0 0 0 0 0 0 0 0 0 0 0 0
56362- 0 0 0 0 0 0 0 0 0 6 6 6
56363- 22 22 22 54 54 54 154 122 46 213 154 11
56364-226 170 11 230 174 11 226 170 11 226 170 11
56365-236 178 12 242 186 14 246 190 14 246 190 14
56366-246 190 14 246 190 14 246 190 14 246 190 14
56367-241 196 14 184 144 12 10 10 10 2 2 6
56368- 6 6 6 116 116 116 242 242 242 253 253 253
56369-253 253 253 253 253 253 253 253 253 253 253 253
56370-253 253 253 253 253 253 253 253 253 253 253 253
56371-253 253 253 253 253 253 253 253 253 253 253 253
56372-253 253 253 253 253 253 253 253 253 253 253 253
56373-253 253 253 231 231 231 198 198 198 214 170 54
56374-236 178 12 236 178 12 210 150 10 137 92 6
56375- 18 14 6 2 2 6 2 2 6 2 2 6
56376- 6 6 6 70 47 6 200 144 11 236 178 12
56377-239 182 13 239 182 13 124 112 88 58 58 58
56378- 22 22 22 6 6 6 0 0 0 0 0 0
56379- 0 0 0 0 0 0 0 0 0 0 0 0
56380- 0 0 0 0 0 0 0 0 0 0 0 0
56381- 0 0 0 0 0 0 0 0 0 0 0 0
56382- 0 0 0 0 0 0 0 0 0 10 10 10
56383- 30 30 30 70 70 70 180 133 36 226 170 11
56384-239 182 13 242 186 14 242 186 14 246 186 14
56385-246 190 14 246 190 14 246 190 14 246 190 14
56386-246 190 14 246 190 14 246 190 14 246 190 14
56387-246 190 14 232 195 16 98 70 6 2 2 6
56388- 2 2 6 2 2 6 66 66 66 221 221 221
56389-253 253 253 253 253 253 253 253 253 253 253 253
56390-253 253 253 253 253 253 253 253 253 253 253 253
56391-253 253 253 253 253 253 253 253 253 253 253 253
56392-253 253 253 253 253 253 253 253 253 253 253 253
56393-253 253 253 206 206 206 198 198 198 214 166 58
56394-230 174 11 230 174 11 216 158 10 192 133 9
56395-163 110 8 116 81 8 102 78 10 116 81 8
56396-167 114 7 197 138 11 226 170 11 239 182 13
56397-242 186 14 242 186 14 162 146 94 78 78 78
56398- 34 34 34 14 14 14 6 6 6 0 0 0
56399- 0 0 0 0 0 0 0 0 0 0 0 0
56400- 0 0 0 0 0 0 0 0 0 0 0 0
56401- 0 0 0 0 0 0 0 0 0 0 0 0
56402- 0 0 0 0 0 0 0 0 0 6 6 6
56403- 30 30 30 78 78 78 190 142 34 226 170 11
56404-239 182 13 246 190 14 246 190 14 246 190 14
56405-246 190 14 246 190 14 246 190 14 246 190 14
56406-246 190 14 246 190 14 246 190 14 246 190 14
56407-246 190 14 241 196 14 203 166 17 22 18 6
56408- 2 2 6 2 2 6 2 2 6 38 38 38
56409-218 218 218 253 253 253 253 253 253 253 253 253
56410-253 253 253 253 253 253 253 253 253 253 253 253
56411-253 253 253 253 253 253 253 253 253 253 253 253
56412-253 253 253 253 253 253 253 253 253 253 253 253
56413-250 250 250 206 206 206 198 198 198 202 162 69
56414-226 170 11 236 178 12 224 166 10 210 150 10
56415-200 144 11 197 138 11 192 133 9 197 138 11
56416-210 150 10 226 170 11 242 186 14 246 190 14
56417-246 190 14 246 186 14 225 175 15 124 112 88
56418- 62 62 62 30 30 30 14 14 14 6 6 6
56419- 0 0 0 0 0 0 0 0 0 0 0 0
56420- 0 0 0 0 0 0 0 0 0 0 0 0
56421- 0 0 0 0 0 0 0 0 0 0 0 0
56422- 0 0 0 0 0 0 0 0 0 10 10 10
56423- 30 30 30 78 78 78 174 135 50 224 166 10
56424-239 182 13 246 190 14 246 190 14 246 190 14
56425-246 190 14 246 190 14 246 190 14 246 190 14
56426-246 190 14 246 190 14 246 190 14 246 190 14
56427-246 190 14 246 190 14 241 196 14 139 102 15
56428- 2 2 6 2 2 6 2 2 6 2 2 6
56429- 78 78 78 250 250 250 253 253 253 253 253 253
56430-253 253 253 253 253 253 253 253 253 253 253 253
56431-253 253 253 253 253 253 253 253 253 253 253 253
56432-253 253 253 253 253 253 253 253 253 253 253 253
56433-250 250 250 214 214 214 198 198 198 190 150 46
56434-219 162 10 236 178 12 234 174 13 224 166 10
56435-216 158 10 213 154 11 213 154 11 216 158 10
56436-226 170 11 239 182 13 246 190 14 246 190 14
56437-246 190 14 246 190 14 242 186 14 206 162 42
56438-101 101 101 58 58 58 30 30 30 14 14 14
56439- 6 6 6 0 0 0 0 0 0 0 0 0
56440- 0 0 0 0 0 0 0 0 0 0 0 0
56441- 0 0 0 0 0 0 0 0 0 0 0 0
56442- 0 0 0 0 0 0 0 0 0 10 10 10
56443- 30 30 30 74 74 74 174 135 50 216 158 10
56444-236 178 12 246 190 14 246 190 14 246 190 14
56445-246 190 14 246 190 14 246 190 14 246 190 14
56446-246 190 14 246 190 14 246 190 14 246 190 14
56447-246 190 14 246 190 14 241 196 14 226 184 13
56448- 61 42 6 2 2 6 2 2 6 2 2 6
56449- 22 22 22 238 238 238 253 253 253 253 253 253
56450-253 253 253 253 253 253 253 253 253 253 253 253
56451-253 253 253 253 253 253 253 253 253 253 253 253
56452-253 253 253 253 253 253 253 253 253 253 253 253
56453-253 253 253 226 226 226 187 187 187 180 133 36
56454-216 158 10 236 178 12 239 182 13 236 178 12
56455-230 174 11 226 170 11 226 170 11 230 174 11
56456-236 178 12 242 186 14 246 190 14 246 190 14
56457-246 190 14 246 190 14 246 186 14 239 182 13
56458-206 162 42 106 106 106 66 66 66 34 34 34
56459- 14 14 14 6 6 6 0 0 0 0 0 0
56460- 0 0 0 0 0 0 0 0 0 0 0 0
56461- 0 0 0 0 0 0 0 0 0 0 0 0
56462- 0 0 0 0 0 0 0 0 0 6 6 6
56463- 26 26 26 70 70 70 163 133 67 213 154 11
56464-236 178 12 246 190 14 246 190 14 246 190 14
56465-246 190 14 246 190 14 246 190 14 246 190 14
56466-246 190 14 246 190 14 246 190 14 246 190 14
56467-246 190 14 246 190 14 246 190 14 241 196 14
56468-190 146 13 18 14 6 2 2 6 2 2 6
56469- 46 46 46 246 246 246 253 253 253 253 253 253
56470-253 253 253 253 253 253 253 253 253 253 253 253
56471-253 253 253 253 253 253 253 253 253 253 253 253
56472-253 253 253 253 253 253 253 253 253 253 253 253
56473-253 253 253 221 221 221 86 86 86 156 107 11
56474-216 158 10 236 178 12 242 186 14 246 186 14
56475-242 186 14 239 182 13 239 182 13 242 186 14
56476-242 186 14 246 186 14 246 190 14 246 190 14
56477-246 190 14 246 190 14 246 190 14 246 190 14
56478-242 186 14 225 175 15 142 122 72 66 66 66
56479- 30 30 30 10 10 10 0 0 0 0 0 0
56480- 0 0 0 0 0 0 0 0 0 0 0 0
56481- 0 0 0 0 0 0 0 0 0 0 0 0
56482- 0 0 0 0 0 0 0 0 0 6 6 6
56483- 26 26 26 70 70 70 163 133 67 210 150 10
56484-236 178 12 246 190 14 246 190 14 246 190 14
56485-246 190 14 246 190 14 246 190 14 246 190 14
56486-246 190 14 246 190 14 246 190 14 246 190 14
56487-246 190 14 246 190 14 246 190 14 246 190 14
56488-232 195 16 121 92 8 34 34 34 106 106 106
56489-221 221 221 253 253 253 253 253 253 253 253 253
56490-253 253 253 253 253 253 253 253 253 253 253 253
56491-253 253 253 253 253 253 253 253 253 253 253 253
56492-253 253 253 253 253 253 253 253 253 253 253 253
56493-242 242 242 82 82 82 18 14 6 163 110 8
56494-216 158 10 236 178 12 242 186 14 246 190 14
56495-246 190 14 246 190 14 246 190 14 246 190 14
56496-246 190 14 246 190 14 246 190 14 246 190 14
56497-246 190 14 246 190 14 246 190 14 246 190 14
56498-246 190 14 246 190 14 242 186 14 163 133 67
56499- 46 46 46 18 18 18 6 6 6 0 0 0
56500- 0 0 0 0 0 0 0 0 0 0 0 0
56501- 0 0 0 0 0 0 0 0 0 0 0 0
56502- 0 0 0 0 0 0 0 0 0 10 10 10
56503- 30 30 30 78 78 78 163 133 67 210 150 10
56504-236 178 12 246 186 14 246 190 14 246 190 14
56505-246 190 14 246 190 14 246 190 14 246 190 14
56506-246 190 14 246 190 14 246 190 14 246 190 14
56507-246 190 14 246 190 14 246 190 14 246 190 14
56508-241 196 14 215 174 15 190 178 144 253 253 253
56509-253 253 253 253 253 253 253 253 253 253 253 253
56510-253 253 253 253 253 253 253 253 253 253 253 253
56511-253 253 253 253 253 253 253 253 253 253 253 253
56512-253 253 253 253 253 253 253 253 253 218 218 218
56513- 58 58 58 2 2 6 22 18 6 167 114 7
56514-216 158 10 236 178 12 246 186 14 246 190 14
56515-246 190 14 246 190 14 246 190 14 246 190 14
56516-246 190 14 246 190 14 246 190 14 246 190 14
56517-246 190 14 246 190 14 246 190 14 246 190 14
56518-246 190 14 246 186 14 242 186 14 190 150 46
56519- 54 54 54 22 22 22 6 6 6 0 0 0
56520- 0 0 0 0 0 0 0 0 0 0 0 0
56521- 0 0 0 0 0 0 0 0 0 0 0 0
56522- 0 0 0 0 0 0 0 0 0 14 14 14
56523- 38 38 38 86 86 86 180 133 36 213 154 11
56524-236 178 12 246 186 14 246 190 14 246 190 14
56525-246 190 14 246 190 14 246 190 14 246 190 14
56526-246 190 14 246 190 14 246 190 14 246 190 14
56527-246 190 14 246 190 14 246 190 14 246 190 14
56528-246 190 14 232 195 16 190 146 13 214 214 214
56529-253 253 253 253 253 253 253 253 253 253 253 253
56530-253 253 253 253 253 253 253 253 253 253 253 253
56531-253 253 253 253 253 253 253 253 253 253 253 253
56532-253 253 253 250 250 250 170 170 170 26 26 26
56533- 2 2 6 2 2 6 37 26 9 163 110 8
56534-219 162 10 239 182 13 246 186 14 246 190 14
56535-246 190 14 246 190 14 246 190 14 246 190 14
56536-246 190 14 246 190 14 246 190 14 246 190 14
56537-246 190 14 246 190 14 246 190 14 246 190 14
56538-246 186 14 236 178 12 224 166 10 142 122 72
56539- 46 46 46 18 18 18 6 6 6 0 0 0
56540- 0 0 0 0 0 0 0 0 0 0 0 0
56541- 0 0 0 0 0 0 0 0 0 0 0 0
56542- 0 0 0 0 0 0 6 6 6 18 18 18
56543- 50 50 50 109 106 95 192 133 9 224 166 10
56544-242 186 14 246 190 14 246 190 14 246 190 14
56545-246 190 14 246 190 14 246 190 14 246 190 14
56546-246 190 14 246 190 14 246 190 14 246 190 14
56547-246 190 14 246 190 14 246 190 14 246 190 14
56548-242 186 14 226 184 13 210 162 10 142 110 46
56549-226 226 226 253 253 253 253 253 253 253 253 253
56550-253 253 253 253 253 253 253 253 253 253 253 253
56551-253 253 253 253 253 253 253 253 253 253 253 253
56552-198 198 198 66 66 66 2 2 6 2 2 6
56553- 2 2 6 2 2 6 50 34 6 156 107 11
56554-219 162 10 239 182 13 246 186 14 246 190 14
56555-246 190 14 246 190 14 246 190 14 246 190 14
56556-246 190 14 246 190 14 246 190 14 246 190 14
56557-246 190 14 246 190 14 246 190 14 242 186 14
56558-234 174 13 213 154 11 154 122 46 66 66 66
56559- 30 30 30 10 10 10 0 0 0 0 0 0
56560- 0 0 0 0 0 0 0 0 0 0 0 0
56561- 0 0 0 0 0 0 0 0 0 0 0 0
56562- 0 0 0 0 0 0 6 6 6 22 22 22
56563- 58 58 58 154 121 60 206 145 10 234 174 13
56564-242 186 14 246 186 14 246 190 14 246 190 14
56565-246 190 14 246 190 14 246 190 14 246 190 14
56566-246 190 14 246 190 14 246 190 14 246 190 14
56567-246 190 14 246 190 14 246 190 14 246 190 14
56568-246 186 14 236 178 12 210 162 10 163 110 8
56569- 61 42 6 138 138 138 218 218 218 250 250 250
56570-253 253 253 253 253 253 253 253 253 250 250 250
56571-242 242 242 210 210 210 144 144 144 66 66 66
56572- 6 6 6 2 2 6 2 2 6 2 2 6
56573- 2 2 6 2 2 6 61 42 6 163 110 8
56574-216 158 10 236 178 12 246 190 14 246 190 14
56575-246 190 14 246 190 14 246 190 14 246 190 14
56576-246 190 14 246 190 14 246 190 14 246 190 14
56577-246 190 14 239 182 13 230 174 11 216 158 10
56578-190 142 34 124 112 88 70 70 70 38 38 38
56579- 18 18 18 6 6 6 0 0 0 0 0 0
56580- 0 0 0 0 0 0 0 0 0 0 0 0
56581- 0 0 0 0 0 0 0 0 0 0 0 0
56582- 0 0 0 0 0 0 6 6 6 22 22 22
56583- 62 62 62 168 124 44 206 145 10 224 166 10
56584-236 178 12 239 182 13 242 186 14 242 186 14
56585-246 186 14 246 190 14 246 190 14 246 190 14
56586-246 190 14 246 190 14 246 190 14 246 190 14
56587-246 190 14 246 190 14 246 190 14 246 190 14
56588-246 190 14 236 178 12 216 158 10 175 118 6
56589- 80 54 7 2 2 6 6 6 6 30 30 30
56590- 54 54 54 62 62 62 50 50 50 38 38 38
56591- 14 14 14 2 2 6 2 2 6 2 2 6
56592- 2 2 6 2 2 6 2 2 6 2 2 6
56593- 2 2 6 6 6 6 80 54 7 167 114 7
56594-213 154 11 236 178 12 246 190 14 246 190 14
56595-246 190 14 246 190 14 246 190 14 246 190 14
56596-246 190 14 242 186 14 239 182 13 239 182 13
56597-230 174 11 210 150 10 174 135 50 124 112 88
56598- 82 82 82 54 54 54 34 34 34 18 18 18
56599- 6 6 6 0 0 0 0 0 0 0 0 0
56600- 0 0 0 0 0 0 0 0 0 0 0 0
56601- 0 0 0 0 0 0 0 0 0 0 0 0
56602- 0 0 0 0 0 0 6 6 6 18 18 18
56603- 50 50 50 158 118 36 192 133 9 200 144 11
56604-216 158 10 219 162 10 224 166 10 226 170 11
56605-230 174 11 236 178 12 239 182 13 239 182 13
56606-242 186 14 246 186 14 246 190 14 246 190 14
56607-246 190 14 246 190 14 246 190 14 246 190 14
56608-246 186 14 230 174 11 210 150 10 163 110 8
56609-104 69 6 10 10 10 2 2 6 2 2 6
56610- 2 2 6 2 2 6 2 2 6 2 2 6
56611- 2 2 6 2 2 6 2 2 6 2 2 6
56612- 2 2 6 2 2 6 2 2 6 2 2 6
56613- 2 2 6 6 6 6 91 60 6 167 114 7
56614-206 145 10 230 174 11 242 186 14 246 190 14
56615-246 190 14 246 190 14 246 186 14 242 186 14
56616-239 182 13 230 174 11 224 166 10 213 154 11
56617-180 133 36 124 112 88 86 86 86 58 58 58
56618- 38 38 38 22 22 22 10 10 10 6 6 6
56619- 0 0 0 0 0 0 0 0 0 0 0 0
56620- 0 0 0 0 0 0 0 0 0 0 0 0
56621- 0 0 0 0 0 0 0 0 0 0 0 0
56622- 0 0 0 0 0 0 0 0 0 14 14 14
56623- 34 34 34 70 70 70 138 110 50 158 118 36
56624-167 114 7 180 123 7 192 133 9 197 138 11
56625-200 144 11 206 145 10 213 154 11 219 162 10
56626-224 166 10 230 174 11 239 182 13 242 186 14
56627-246 186 14 246 186 14 246 186 14 246 186 14
56628-239 182 13 216 158 10 185 133 11 152 99 6
56629-104 69 6 18 14 6 2 2 6 2 2 6
56630- 2 2 6 2 2 6 2 2 6 2 2 6
56631- 2 2 6 2 2 6 2 2 6 2 2 6
56632- 2 2 6 2 2 6 2 2 6 2 2 6
56633- 2 2 6 6 6 6 80 54 7 152 99 6
56634-192 133 9 219 162 10 236 178 12 239 182 13
56635-246 186 14 242 186 14 239 182 13 236 178 12
56636-224 166 10 206 145 10 192 133 9 154 121 60
56637- 94 94 94 62 62 62 42 42 42 22 22 22
56638- 14 14 14 6 6 6 0 0 0 0 0 0
56639- 0 0 0 0 0 0 0 0 0 0 0 0
56640- 0 0 0 0 0 0 0 0 0 0 0 0
56641- 0 0 0 0 0 0 0 0 0 0 0 0
56642- 0 0 0 0 0 0 0 0 0 6 6 6
56643- 18 18 18 34 34 34 58 58 58 78 78 78
56644-101 98 89 124 112 88 142 110 46 156 107 11
56645-163 110 8 167 114 7 175 118 6 180 123 7
56646-185 133 11 197 138 11 210 150 10 219 162 10
56647-226 170 11 236 178 12 236 178 12 234 174 13
56648-219 162 10 197 138 11 163 110 8 130 83 6
56649- 91 60 6 10 10 10 2 2 6 2 2 6
56650- 18 18 18 38 38 38 38 38 38 38 38 38
56651- 38 38 38 38 38 38 38 38 38 38 38 38
56652- 38 38 38 38 38 38 26 26 26 2 2 6
56653- 2 2 6 6 6 6 70 47 6 137 92 6
56654-175 118 6 200 144 11 219 162 10 230 174 11
56655-234 174 13 230 174 11 219 162 10 210 150 10
56656-192 133 9 163 110 8 124 112 88 82 82 82
56657- 50 50 50 30 30 30 14 14 14 6 6 6
56658- 0 0 0 0 0 0 0 0 0 0 0 0
56659- 0 0 0 0 0 0 0 0 0 0 0 0
56660- 0 0 0 0 0 0 0 0 0 0 0 0
56661- 0 0 0 0 0 0 0 0 0 0 0 0
56662- 0 0 0 0 0 0 0 0 0 0 0 0
56663- 6 6 6 14 14 14 22 22 22 34 34 34
56664- 42 42 42 58 58 58 74 74 74 86 86 86
56665-101 98 89 122 102 70 130 98 46 121 87 25
56666-137 92 6 152 99 6 163 110 8 180 123 7
56667-185 133 11 197 138 11 206 145 10 200 144 11
56668-180 123 7 156 107 11 130 83 6 104 69 6
56669- 50 34 6 54 54 54 110 110 110 101 98 89
56670- 86 86 86 82 82 82 78 78 78 78 78 78
56671- 78 78 78 78 78 78 78 78 78 78 78 78
56672- 78 78 78 82 82 82 86 86 86 94 94 94
56673-106 106 106 101 101 101 86 66 34 124 80 6
56674-156 107 11 180 123 7 192 133 9 200 144 11
56675-206 145 10 200 144 11 192 133 9 175 118 6
56676-139 102 15 109 106 95 70 70 70 42 42 42
56677- 22 22 22 10 10 10 0 0 0 0 0 0
56678- 0 0 0 0 0 0 0 0 0 0 0 0
56679- 0 0 0 0 0 0 0 0 0 0 0 0
56680- 0 0 0 0 0 0 0 0 0 0 0 0
56681- 0 0 0 0 0 0 0 0 0 0 0 0
56682- 0 0 0 0 0 0 0 0 0 0 0 0
56683- 0 0 0 0 0 0 6 6 6 10 10 10
56684- 14 14 14 22 22 22 30 30 30 38 38 38
56685- 50 50 50 62 62 62 74 74 74 90 90 90
56686-101 98 89 112 100 78 121 87 25 124 80 6
56687-137 92 6 152 99 6 152 99 6 152 99 6
56688-138 86 6 124 80 6 98 70 6 86 66 30
56689-101 98 89 82 82 82 58 58 58 46 46 46
56690- 38 38 38 34 34 34 34 34 34 34 34 34
56691- 34 34 34 34 34 34 34 34 34 34 34 34
56692- 34 34 34 34 34 34 38 38 38 42 42 42
56693- 54 54 54 82 82 82 94 86 76 91 60 6
56694-134 86 6 156 107 11 167 114 7 175 118 6
56695-175 118 6 167 114 7 152 99 6 121 87 25
56696-101 98 89 62 62 62 34 34 34 18 18 18
56697- 6 6 6 0 0 0 0 0 0 0 0 0
56698- 0 0 0 0 0 0 0 0 0 0 0 0
56699- 0 0 0 0 0 0 0 0 0 0 0 0
56700- 0 0 0 0 0 0 0 0 0 0 0 0
56701- 0 0 0 0 0 0 0 0 0 0 0 0
56702- 0 0 0 0 0 0 0 0 0 0 0 0
56703- 0 0 0 0 0 0 0 0 0 0 0 0
56704- 0 0 0 6 6 6 6 6 6 10 10 10
56705- 18 18 18 22 22 22 30 30 30 42 42 42
56706- 50 50 50 66 66 66 86 86 86 101 98 89
56707-106 86 58 98 70 6 104 69 6 104 69 6
56708-104 69 6 91 60 6 82 62 34 90 90 90
56709- 62 62 62 38 38 38 22 22 22 14 14 14
56710- 10 10 10 10 10 10 10 10 10 10 10 10
56711- 10 10 10 10 10 10 6 6 6 10 10 10
56712- 10 10 10 10 10 10 10 10 10 14 14 14
56713- 22 22 22 42 42 42 70 70 70 89 81 66
56714- 80 54 7 104 69 6 124 80 6 137 92 6
56715-134 86 6 116 81 8 100 82 52 86 86 86
56716- 58 58 58 30 30 30 14 14 14 6 6 6
56717- 0 0 0 0 0 0 0 0 0 0 0 0
56718- 0 0 0 0 0 0 0 0 0 0 0 0
56719- 0 0 0 0 0 0 0 0 0 0 0 0
56720- 0 0 0 0 0 0 0 0 0 0 0 0
56721- 0 0 0 0 0 0 0 0 0 0 0 0
56722- 0 0 0 0 0 0 0 0 0 0 0 0
56723- 0 0 0 0 0 0 0 0 0 0 0 0
56724- 0 0 0 0 0 0 0 0 0 0 0 0
56725- 0 0 0 6 6 6 10 10 10 14 14 14
56726- 18 18 18 26 26 26 38 38 38 54 54 54
56727- 70 70 70 86 86 86 94 86 76 89 81 66
56728- 89 81 66 86 86 86 74 74 74 50 50 50
56729- 30 30 30 14 14 14 6 6 6 0 0 0
56730- 0 0 0 0 0 0 0 0 0 0 0 0
56731- 0 0 0 0 0 0 0 0 0 0 0 0
56732- 0 0 0 0 0 0 0 0 0 0 0 0
56733- 6 6 6 18 18 18 34 34 34 58 58 58
56734- 82 82 82 89 81 66 89 81 66 89 81 66
56735- 94 86 66 94 86 76 74 74 74 50 50 50
56736- 26 26 26 14 14 14 6 6 6 0 0 0
56737- 0 0 0 0 0 0 0 0 0 0 0 0
56738- 0 0 0 0 0 0 0 0 0 0 0 0
56739- 0 0 0 0 0 0 0 0 0 0 0 0
56740- 0 0 0 0 0 0 0 0 0 0 0 0
56741- 0 0 0 0 0 0 0 0 0 0 0 0
56742- 0 0 0 0 0 0 0 0 0 0 0 0
56743- 0 0 0 0 0 0 0 0 0 0 0 0
56744- 0 0 0 0 0 0 0 0 0 0 0 0
56745- 0 0 0 0 0 0 0 0 0 0 0 0
56746- 6 6 6 6 6 6 14 14 14 18 18 18
56747- 30 30 30 38 38 38 46 46 46 54 54 54
56748- 50 50 50 42 42 42 30 30 30 18 18 18
56749- 10 10 10 0 0 0 0 0 0 0 0 0
56750- 0 0 0 0 0 0 0 0 0 0 0 0
56751- 0 0 0 0 0 0 0 0 0 0 0 0
56752- 0 0 0 0 0 0 0 0 0 0 0 0
56753- 0 0 0 6 6 6 14 14 14 26 26 26
56754- 38 38 38 50 50 50 58 58 58 58 58 58
56755- 54 54 54 42 42 42 30 30 30 18 18 18
56756- 10 10 10 0 0 0 0 0 0 0 0 0
56757- 0 0 0 0 0 0 0 0 0 0 0 0
56758- 0 0 0 0 0 0 0 0 0 0 0 0
56759- 0 0 0 0 0 0 0 0 0 0 0 0
56760- 0 0 0 0 0 0 0 0 0 0 0 0
56761- 0 0 0 0 0 0 0 0 0 0 0 0
56762- 0 0 0 0 0 0 0 0 0 0 0 0
56763- 0 0 0 0 0 0 0 0 0 0 0 0
56764- 0 0 0 0 0 0 0 0 0 0 0 0
56765- 0 0 0 0 0 0 0 0 0 0 0 0
56766- 0 0 0 0 0 0 0 0 0 6 6 6
56767- 6 6 6 10 10 10 14 14 14 18 18 18
56768- 18 18 18 14 14 14 10 10 10 6 6 6
56769- 0 0 0 0 0 0 0 0 0 0 0 0
56770- 0 0 0 0 0 0 0 0 0 0 0 0
56771- 0 0 0 0 0 0 0 0 0 0 0 0
56772- 0 0 0 0 0 0 0 0 0 0 0 0
56773- 0 0 0 0 0 0 0 0 0 6 6 6
56774- 14 14 14 18 18 18 22 22 22 22 22 22
56775- 18 18 18 14 14 14 10 10 10 6 6 6
56776- 0 0 0 0 0 0 0 0 0 0 0 0
56777- 0 0 0 0 0 0 0 0 0 0 0 0
56778- 0 0 0 0 0 0 0 0 0 0 0 0
56779- 0 0 0 0 0 0 0 0 0 0 0 0
56780- 0 0 0 0 0 0 0 0 0 0 0 0
56781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56794+4 4 4 4 4 4
56795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56808+4 4 4 4 4 4
56809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56822+4 4 4 4 4 4
56823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56836+4 4 4 4 4 4
56837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56850+4 4 4 4 4 4
56851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56864+4 4 4 4 4 4
56865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56869+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
56870+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
56871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56874+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
56875+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
56876+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
56877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56878+4 4 4 4 4 4
56879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56883+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
56884+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
56885+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56888+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
56889+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
56890+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
56891+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56892+4 4 4 4 4 4
56893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56897+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
56898+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
56899+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
56900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56902+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
56903+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
56904+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
56905+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
56906+4 4 4 4 4 4
56907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56910+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
56911+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
56912+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
56913+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
56914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56915+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
56916+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
56917+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
56918+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
56919+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
56920+4 4 4 4 4 4
56921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56924+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
56925+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
56926+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
56927+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
56928+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
56929+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
56930+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
56931+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
56932+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
56933+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
56934+4 4 4 4 4 4
56935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
56938+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
56939+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
56940+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
56941+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
56942+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
56943+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
56944+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
56945+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
56946+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
56947+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
56948+4 4 4 4 4 4
[~950 lines of "+R G B" pixel-triplet rows omitted: the remainder of this hunk is bitmap image data added by the patch (by all appearances a 224-colour logo .ppm), with no changes other than the pixel values.]
diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
index fef20db..d28b1ab 100644
--- a/drivers/xen/xenfs/xenstored.c
+++ b/drivers/xen/xenfs/xenstored.c
@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
 static int xsd_kva_open(struct inode *inode, struct file *file)
 {
 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+ NULL);
+#else
 xen_store_interface);
+#endif
+
 if (!file->private_data)
 return -ENOMEM;
 return 0;
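The hunk above closes a kernel address disclosure: xsd_kva_open() formats the address of xen_store_interface with "%p" into a string userspace can read back, and under GRKERNSEC_HIDESYM the patch substitutes NULL so the string carries no real address. A minimal userspace sketch of why such "%p" output matters (hypothetical names, not kernel code):

#include <stdio.h>

static int xen_store_stub;  /* stand-in for a kernel object */

int main(void)
{
    char buf[32];

    /* same pattern as the kasprintf("0x%p", ...) call above */
    snprintf(buf, sizeof(buf), "0x%p", (void *)&xen_store_stub);

    /* anyone who can read buf learns a live address, exactly the
     * kind of hint that helps defeat layout randomization */
    printf("exposed: %s\n", buf);
    return 0;
}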
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index eb14e05..5156de7 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)

 retval = v9fs_file_write_internal(inode,
 v9inode->writeback_fid,
- (__force const char __user *)buffer,
+ (const char __force_user *)buffer,
 len, &offset, 0);
 if (retval > 0)
 retval = 0;
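This and several later hunks swap a bare "__force ... __user" cast for grsecurity's combined "__force_user" marker. These are checker annotations with no runtime cost; the hardened sparse checking treats a kernel-to-user pointer recast as a distinct, greppable operation. A stubbed sketch of the idea (the empty macros below are illustrative, not the real definitions):

#include <stdio.h>
#include <string.h>

#define __user        /* address-space tag: checker-only, no codegen */
#define __force_user  /* marks a deliberate kernel->user pointer recast */

/* a consumer that nominally expects a userspace pointer */
static long write_path(const char __user *buf, size_t len)
{
    return (long)len;
}

int main(void)
{
    char kbuf[] = "payload";  /* a kernel-side buffer in the real code */

    /* the recast is explicit, so an auditor can find every such site */
    long n = write_path((const char __force_user *)kbuf, strlen(kbuf));
    printf("%ld bytes\n", n);
    return 0;
}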
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 9ee5343..5165e3c 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 void
 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
 {
- char *s = nd_get_link(nd);
+ const char *s = nd_get_link(nd);

 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
 dentry, IS_ERR(s) ? "<error>" : s);
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index c055d56e..a46f4f5 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -106,7 +106,7 @@ config HAVE_AOUT

 config BINFMT_AOUT
 tristate "Kernel support for a.out and ECOFF binaries"
- depends on HAVE_AOUT
+ depends on HAVE_AOUT && BROKEN
 ---help---
 A.out (Assembler.OUTput) is a set of formats for libraries and
 executables used in the earliest versions of UNIX. Linux used
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 8a1d38e..300a14e 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
 struct afs_vnode *vnode;
 struct super_block *sb;
 struct inode *inode;
- static atomic_t afs_autocell_ino;
+ static atomic_unchecked_t afs_autocell_ino;

 _enter("{%x:%u},%*.*s,",
 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
 data.fid.unique = 0;
 data.fid.vnode = 0;

- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
 afs_iget5_autocell_test, afs_iget5_set,
 &data);
 if (!inode) {
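afs_autocell_ino is a pure ID generator, not a reference count, so wraparound is harmless. Under PaX's REFCOUNT hardening, plain atomic_t operations trap on overflow; counters like this one are moved to the atomic_unchecked_t variant to opt out of that instrumentation. A userspace approximation of the distinction (names are illustrative):

#include <stdio.h>
#include <limits.h>

/* unsigned arithmetic: wraparound is well-defined and, for an
 * inode-number generator, benign - the "unchecked" case */
static unsigned int next_ino = UINT_MAX - 1;

static unsigned int alloc_ino(void)
{
    return ++next_ino;
}

int main(void)
{
    printf("%u\n", alloc_ino());  /* 4294967295 */
    printf("%u\n", alloc_ino());  /* wraps to 0: no object lifetime at stake */
    return 0;
}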
diff --git a/fs/aio.c b/fs/aio.c
index c428871..3f3041b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -413,7 +413,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 size += sizeof(struct io_event) * nr_events;

 nr_pages = PFN_UP(size);
- if (nr_pages < 0)
+ if (nr_pages <= 0)
 return -EINVAL;

 file = aio_private_file(ctx, nr_pages);
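The aio hunk tightens the page-count sanity check from "< 0" to "<= 0": if size ends up near ULONG_MAX, the rounding addition inside PFN_UP wraps around and the result is 0 rather than negative, which the old test accepted. Illustrative arithmetic, with userspace stand-ins for the kernel macros:

#include <stdio.h>
#include <limits.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
    unsigned long size = ULONG_MAX - 100;  /* pathological request size */
    int nr_pages = (int)PFN_UP(size);      /* the addition wraps: result is 0 */

    printf("nr_pages = %d\n", nr_pages);
    if (nr_pages <= 0)                     /* the hardened check catches it */
        puts("rejected");
    return 0;
}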
diff --git a/fs/attr.c b/fs/attr.c
index 6530ced..4a827e2 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
 unsigned long limit;

 limit = rlimit(RLIMIT_FSIZE);
+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
 if (limit != RLIM_INFINITY && offset > limit)
 goto out_sig;
 if (offset > inode->i_sb->s_maxbytes)
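gr_learn_resource() is grsecurity's learning hook: it records, before the actual limit check runs, that the current task asked to grow a file to offset bytes under RLIMIT_FSIZE, so the learning tooling can later suggest tight per-role limits. A rough sketch of what such a hook records (the names and output format here are invented for illustration):

#include <stdio.h>

/* stand-in for the real learning-channel writer */
static void learn_resource(const char *task, int res_id,
                           unsigned long requested)
{
    printf("learn: task=%s rlimit=%d requested=%lu\n",
           task, res_id, requested);
}

int main(void)
{
    const int RLIMIT_FSIZE_ID = 1;          /* illustrative constant */
    unsigned long offset = 64UL << 20;      /* a 64 MiB truncate target */

    learn_resource("demo", RLIMIT_FSIZE_ID, offset);
    return 0;
}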
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index aaf96cb..ac7d921 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
 */
 static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
 {
- struct autofs_dev_ioctl tmp;
+ struct autofs_dev_ioctl tmp, *res;

 if (copy_from_user(&tmp, in, sizeof(tmp)))
 return ERR_PTR(-EFAULT);
@@ -106,7 +106,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i
 if (tmp.size > (PATH_MAX + sizeof(tmp)))
 return ERR_PTR(-ENAMETOOLONG);

- return memdup_user(in, tmp.size);
+ res = memdup_user(in, tmp.size);
+ if (!IS_ERR(res))
+ res->size = tmp.size;
+
+ return res;
 }

 static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
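copy_dev_ioctl() validates tmp.size from a first copy_from_user(), then memdup_user() re-reads the whole struct, including the size field, from userspace. A racing thread can change the field between the two reads, leaving the kernel copy internally inconsistent; re-stamping res->size with the validated value closes that double-fetch window. A userspace simulation of the pattern (memcpy stands in for the user copies, and the racing write is injected inline):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct msg { size_t size; char payload[64]; };

static struct msg *fetch_msg(struct msg *shared)
{
    struct msg tmp, *res;

    memcpy(&tmp, shared, sizeof(tmp));   /* first fetch: validate size */
    if (tmp.size > sizeof(tmp))
        return NULL;

    shared->size = 9999;                 /* a racing writer flips the field */

    res = malloc(sizeof(*res));
    if (!res)
        return NULL;
    memcpy(res, shared, sizeof(*res));   /* second fetch re-reads size */
    res->size = tmp.size;                /* the fix: pin the checked value */
    return res;
}

int main(void)
{
    struct msg m = { .size = sizeof(struct msg) };
    struct msg *r = fetch_msg(&m);

    if (r) {
        printf("size used by kernel side: %zu\n", r->size);
        free(r);
    }
    return 0;
}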
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 116fd38..c04182da 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
 {
 unsigned long sigpipe, flags;
 mm_segment_t fs;
- const char *data = (const char *)addr;
+ const char __user *data = (const char __force_user *)addr;
 ssize_t wr = 0;

 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
 return 1;
 }

+#ifdef CONFIG_GRKERNSEC_HIDESYM
+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
+#endif
+
 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 enum autofs_notify notify)
 {
@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,

 /* If this is a direct mount request create a dummy name */
 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+ /* this name does get written to userland via autofs4_write() */
+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
+#else
 qstr.len = sprintf(name, "%p", dentry);
+#endif
 else {
 qstr.len = autofs4_getpath(sbi, dentry, &name);
 if (!qstr.len) {
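Same HIDESYM theme as the xenstored hunk: the dummy mount name used to be the dentry's kernel address via "%p", and that string travels to userland through autofs4_write(). The replacement is a plain sequence number formatted to the same eight-hex-digit width. A minimal sketch of the pointer-free naming scheme:

#include <stdio.h>

static unsigned int dummy_name_id;  /* monotonically increasing id */

int main(void)
{
    char name[16];
    int len;

    /* eight hex digits, like the sprintf("%08x", ...) in the hunk,
     * but carrying no kernel pointer value */
    len = sprintf(name, "%08x", ++dummy_name_id);
    printf("%s (len=%d)\n", name, len);  /* "00000001 (len=8)" */
    return 0;
}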
diff --git a/fs/befs/endian.h b/fs/befs/endian.h
index 2722387..56059b5 100644
--- a/fs/befs/endian.h
+++ b/fs/befs/endian.h
@@ -11,7 +11,7 @@

 #include <asm/byteorder.h>

-static inline u64
+static inline u64 __intentional_overflow(-1)
 fs64_to_cpu(const struct super_block *sb, fs64 n)
 {
 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
 return (__force fs64)cpu_to_be64(n);
 }

-static inline u32
+static inline u32 __intentional_overflow(-1)
 fs32_to_cpu(const struct super_block *sb, fs32 n)
 {
 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
 return (__force fs32)cpu_to_be32(n);
 }

-static inline u16
+static inline u16 __intentional_overflow(-1)
 fs16_to_cpu(const struct super_block *sb, fs16 n)
 {
 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
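__intentional_overflow(-1) is an annotation consumed by the size_overflow GCC plugin: these helpers only reinterpret on-disk byte order, so any apparent arithmetic "overflow" in the swapped value is by design and should not be instrumented. A userspace sketch of the kind of computation the annotated helpers perform:

#include <stdio.h>
#include <stdint.h>

/* explicit byte swap, the work hidden behind the be32/le32 helpers */
static inline uint32_t swab32(uint32_t n)
{
    return ((n & 0x000000ffU) << 24) | ((n & 0x0000ff00U) << 8) |
           ((n & 0x00ff0000U) >> 8)  | ((n & 0xff000000U) >> 24);
}

int main(void)
{
    /* a little-endian on-disk value read on a big-endian host */
    printf("0x%08x\n", swab32(0x12345678));  /* prints 0x78563412 */
    return 0;
}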
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 4c55668..eeae150 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -16,6 +16,7 @@
 #include <linux/string.h>
 #include <linux/fs.h>
 #include <linux/file.h>
+#include <linux/security.h>
 #include <linux/stat.h>
 #include <linux/fcntl.h>
 #include <linux/ptrace.h>
@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
 #endif
 # define START_STACK(u) ((void __user *)u.start_stack)

+ memset(&dump, 0, sizeof(dump));
+
 fs = get_fs();
 set_fs(KERNEL_DS);
 has_dumped = 1;
@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)

 /* If the size of the dump file exceeds the rlimit, then see what would happen
 if we wrote the stack, but not the data area. */
+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
 dump.u_dsize = 0;

 /* Make sure we have enough room to write the stack and data areas. */
+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
 dump.u_ssize = 0;

@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
 rlim = rlimit(RLIMIT_DATA);
 if (rlim >= RLIM_INFINITY)
 rlim = ~0;
+
+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
 if (ex.a_data + ex.a_bss > rlim)
 return -ENOMEM;

@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)

 install_exec_creds(bprm);

+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+ current->mm->pax_flags = 0UL;
+#endif
+
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
+
+#ifdef CONFIG_PAX_EMUTRAMP
+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
+ current->mm->pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+ }
+#endif
+
 if (N_MAGIC(ex) == OMAGIC) {
 unsigned long text_addr, map_size;
 loff_t pos;
@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
 return error;

 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
- PROT_READ | PROT_WRITE | PROT_EXEC,
+ PROT_READ | PROT_WRITE,
 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
 fd_offset + ex.a_text);
 if (error != N_DATADDR(ex))
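Two hardening moves in the a.out loader: the data segment loses PROT_EXEC in its vm_mmap() call, and per-process PaX flags are derived from bits in the a.out header, where a set bit opts that binary out of a protection. A compact sketch of the flag derivation (the F_*/MF_* values below are invented; only the inverted-bit, nested logic mirrors the hunk):

#include <stdio.h>

#define F_PAX_PAGEEXEC  0x01   /* header bit: binary opts out of PAGEEXEC */
#define F_PAX_MPROTECT  0x02   /* header bit: binary opts out of MPROTECT */
#define MF_PAX_PAGEEXEC 0x100  /* per-mm flag: enforce PAGEEXEC */
#define MF_PAX_MPROTECT 0x200  /* per-mm flag: enforce MPROTECT */

static unsigned long derive_pax_flags(unsigned int n_flags)
{
    unsigned long pax = 0;

    /* absence of the opt-out bit turns the protection on; MPROTECT
     * is only considered when PAGEEXEC was not waived, as in the hunk */
    if (!(n_flags & F_PAX_PAGEEXEC)) {
        pax |= MF_PAX_PAGEEXEC;
        if (!(n_flags & F_PAX_MPROTECT))
            pax |= MF_PAX_MPROTECT;
    }
    return pax;
}

int main(void)
{
    printf("0x%lx\n", derive_pax_flags(0));               /* 0x300: both enforced */
    printf("0x%lx\n", derive_pax_flags(F_PAX_PAGEEXEC));  /* 0x0: binary opted out */
    return 0;
}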
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 995986b..dcc4ef2 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -34,6 +34,7 @@
 #include <linux/utsname.h>
 #include <linux/coredump.h>
 #include <linux/sched.h>
+#include <linux/xattr.h>
 #include <asm/uaccess.h>
 #include <asm/param.h>
 #include <asm/page.h>
@@ -47,7 +48,7 @@

 static int load_elf_binary(struct linux_binprm *bprm);
 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
- int, int, unsigned long);
+ int, int, unsigned long) __intentional_overflow(-1);

 #ifdef CONFIG_USELIB
 static int load_elf_library(struct file *);
@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
 #define elf_core_dump NULL
 #endif

+#ifdef CONFIG_PAX_MPROTECT
+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
+#endif
+
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+static void elf_handle_mmap(struct file *file);
+#endif
+
 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
 #else
@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
 .load_binary = load_elf_binary,
 .load_shlib = load_elf_library,
 .core_dump = elf_core_dump,
+
+#ifdef CONFIG_PAX_MPROTECT
+ .handle_mprotect= elf_handle_mprotect,
+#endif
+
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+ .handle_mmap = elf_handle_mmap,
+#endif
+
 .min_coredump = ELF_EXEC_PAGESIZE,
 };

@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {

 static int set_brk(unsigned long start, unsigned long end)
 {
+ unsigned long e = end;
+
 start = ELF_PAGEALIGN(start);
 end = ELF_PAGEALIGN(end);
 if (end > start) {
@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
 if (BAD_ADDR(addr))
 return addr;
 }
- current->mm->start_brk = current->mm->brk = end;
+ current->mm->start_brk = current->mm->brk = e;
 return 0;
 }

@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 elf_addr_t __user *u_rand_bytes;
 const char *k_platform = ELF_PLATFORM;
 const char *k_base_platform = ELF_BASE_PLATFORM;
- unsigned char k_rand_bytes[16];
+ u32 k_rand_bytes[4];
 int items;
 elf_addr_t *elf_info;
 int ei_index = 0;
 const struct cred *cred = current_cred();
 struct vm_area_struct *vma;
+ unsigned long saved_auxv[AT_VECTOR_SIZE];

 /*
 * In some cases (e.g. Hyper-Threading), we want to avoid L1
@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 * Generate 16 random bytes for userspace PRNG seeding.
 */
 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
- u_rand_bytes = (elf_addr_t __user *)
- STACK_ALLOC(p, sizeof(k_rand_bytes));
+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
+ u_rand_bytes = (elf_addr_t __user *) p;
 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
 return -EFAULT;

@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58279 return -EFAULT;
58280 current->mm->env_end = p;
58281
58282+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
58283+
58284 /* Put the elf_info on the stack in the right place. */
58285 sp = (elf_addr_t __user *)envp + 1;
58286- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
58287+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
58288 return -EFAULT;
58289 return 0;
58290 }
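
Besides exporting 16 AT_RANDOM bytes to the new process, the hunk above also folds them back into the kernel's prandom state, XORed with fresh prandom output. A toy sketch of that mixing step; toy_seed() and toy_u32() stand in for prandom_seed() and prandom_u32(), and the rationale in the comment is my reading, not a claim from the patch:

#include <stdint.h>
#include <stdlib.h>

static void toy_seed(uint32_t s) { srand(s); }			/* prandom_seed() stand-in */
static uint32_t toy_u32(void) { return (uint32_t)rand(); }	/* prandom_u32() stand-in */

static void reseed_from(const uint32_t rnd[4])
{
	/* XOR with fresh generator output, so the new state is not a pure
	 * function of bytes the new process gets to see */
	for (int i = 0; i < 4; i++)
		toy_seed(rnd[i] ^ toy_u32());
}

int main(void)
{
	const uint32_t at_random[4] = { 1, 2, 3, 4 };

	reseed_from(at_random);
	return 0;
}
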
58291@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
58292 an ELF header */
58293
58294 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58295- struct file *interpreter, unsigned long *interp_map_addr,
58296+ struct file *interpreter,
58297 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
58298 {
58299 struct elf_phdr *eppnt;
58300- unsigned long load_addr = 0;
58301+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
58302 int load_addr_set = 0;
58303 unsigned long last_bss = 0, elf_bss = 0;
58304- unsigned long error = ~0UL;
58305+ unsigned long error = -EINVAL;
58306 unsigned long total_size;
58307 int i;
58308
58309@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58310 goto out;
58311 }
58312
58313+#ifdef CONFIG_PAX_SEGMEXEC
58314+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
58315+ pax_task_size = SEGMEXEC_TASK_SIZE;
58316+#endif
58317+
58318 eppnt = interp_elf_phdata;
58319 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
58320 if (eppnt->p_type == PT_LOAD) {
58321@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58322 map_addr = elf_map(interpreter, load_addr + vaddr,
58323 eppnt, elf_prot, elf_type, total_size);
58324 total_size = 0;
58325- if (!*interp_map_addr)
58326- *interp_map_addr = map_addr;
58327 error = map_addr;
58328 if (BAD_ADDR(map_addr))
58329 goto out;
58330@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58331 k = load_addr + eppnt->p_vaddr;
58332 if (BAD_ADDR(k) ||
58333 eppnt->p_filesz > eppnt->p_memsz ||
58334- eppnt->p_memsz > TASK_SIZE ||
58335- TASK_SIZE - eppnt->p_memsz < k) {
58336+ eppnt->p_memsz > pax_task_size ||
58337+ pax_task_size - eppnt->p_memsz < k) {
58338 error = -ENOMEM;
58339 goto out;
58340 }
58341@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58342 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
58343
58344 /* Map the last of the bss segment */
58345- error = vm_brk(elf_bss, last_bss - elf_bss);
58346- if (BAD_ADDR(error))
58347- goto out;
58348+ if (last_bss > elf_bss) {
58349+ error = vm_brk(elf_bss, last_bss - elf_bss);
58350+ if (BAD_ADDR(error))
58351+ goto out;
58352+ }
58353 }
58354
58355 error = load_addr;
58356@@ -634,6 +666,336 @@ out:
58357 return error;
58358 }
58359
58360+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58361+#ifdef CONFIG_PAX_SOFTMODE
58362+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
58363+{
58364+ unsigned long pax_flags = 0UL;
58365+
58366+#ifdef CONFIG_PAX_PAGEEXEC
58367+ if (elf_phdata->p_flags & PF_PAGEEXEC)
58368+ pax_flags |= MF_PAX_PAGEEXEC;
58369+#endif
58370+
58371+#ifdef CONFIG_PAX_SEGMEXEC
58372+ if (elf_phdata->p_flags & PF_SEGMEXEC)
58373+ pax_flags |= MF_PAX_SEGMEXEC;
58374+#endif
58375+
58376+#ifdef CONFIG_PAX_EMUTRAMP
58377+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58378+ pax_flags |= MF_PAX_EMUTRAMP;
58379+#endif
58380+
58381+#ifdef CONFIG_PAX_MPROTECT
58382+ if (elf_phdata->p_flags & PF_MPROTECT)
58383+ pax_flags |= MF_PAX_MPROTECT;
58384+#endif
58385+
58386+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58387+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
58388+ pax_flags |= MF_PAX_RANDMMAP;
58389+#endif
58390+
58391+ return pax_flags;
58392+}
58393+#endif
58394+
58395+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
58396+{
58397+ unsigned long pax_flags = 0UL;
58398+
58399+#ifdef CONFIG_PAX_PAGEEXEC
58400+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
58401+ pax_flags |= MF_PAX_PAGEEXEC;
58402+#endif
58403+
58404+#ifdef CONFIG_PAX_SEGMEXEC
58405+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
58406+ pax_flags |= MF_PAX_SEGMEXEC;
58407+#endif
58408+
58409+#ifdef CONFIG_PAX_EMUTRAMP
58410+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
58411+ pax_flags |= MF_PAX_EMUTRAMP;
58412+#endif
58413+
58414+#ifdef CONFIG_PAX_MPROTECT
58415+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
58416+ pax_flags |= MF_PAX_MPROTECT;
58417+#endif
58418+
58419+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58420+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
58421+ pax_flags |= MF_PAX_RANDMMAP;
58422+#endif
58423+
58424+ return pax_flags;
58425+}
58426+#endif
58427+
58428+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58429+#ifdef CONFIG_PAX_SOFTMODE
58430+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
58431+{
58432+ unsigned long pax_flags = 0UL;
58433+
58434+#ifdef CONFIG_PAX_PAGEEXEC
58435+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
58436+ pax_flags |= MF_PAX_PAGEEXEC;
58437+#endif
58438+
58439+#ifdef CONFIG_PAX_SEGMEXEC
58440+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
58441+ pax_flags |= MF_PAX_SEGMEXEC;
58442+#endif
58443+
58444+#ifdef CONFIG_PAX_EMUTRAMP
58445+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58446+ pax_flags |= MF_PAX_EMUTRAMP;
58447+#endif
58448+
58449+#ifdef CONFIG_PAX_MPROTECT
58450+ if (pax_flags_softmode & MF_PAX_MPROTECT)
58451+ pax_flags |= MF_PAX_MPROTECT;
58452+#endif
58453+
58454+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58455+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
58456+ pax_flags |= MF_PAX_RANDMMAP;
58457+#endif
58458+
58459+ return pax_flags;
58460+}
58461+#endif
58462+
58463+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
58464+{
58465+ unsigned long pax_flags = 0UL;
58466+
58467+#ifdef CONFIG_PAX_PAGEEXEC
58468+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
58469+ pax_flags |= MF_PAX_PAGEEXEC;
58470+#endif
58471+
58472+#ifdef CONFIG_PAX_SEGMEXEC
58473+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
58474+ pax_flags |= MF_PAX_SEGMEXEC;
58475+#endif
58476+
58477+#ifdef CONFIG_PAX_EMUTRAMP
58478+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
58479+ pax_flags |= MF_PAX_EMUTRAMP;
58480+#endif
58481+
58482+#ifdef CONFIG_PAX_MPROTECT
58483+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
58484+ pax_flags |= MF_PAX_MPROTECT;
58485+#endif
58486+
58487+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58488+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
58489+ pax_flags |= MF_PAX_RANDMMAP;
58490+#endif
58491+
58492+ return pax_flags;
58493+}
58494+#endif
58495+
58496+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58497+static unsigned long pax_parse_defaults(void)
58498+{
58499+ unsigned long pax_flags = 0UL;
58500+
58501+#ifdef CONFIG_PAX_SOFTMODE
58502+ if (pax_softmode)
58503+ return pax_flags;
58504+#endif
58505+
58506+#ifdef CONFIG_PAX_PAGEEXEC
58507+ pax_flags |= MF_PAX_PAGEEXEC;
58508+#endif
58509+
58510+#ifdef CONFIG_PAX_SEGMEXEC
58511+ pax_flags |= MF_PAX_SEGMEXEC;
58512+#endif
58513+
58514+#ifdef CONFIG_PAX_MPROTECT
58515+ pax_flags |= MF_PAX_MPROTECT;
58516+#endif
58517+
58518+#ifdef CONFIG_PAX_RANDMMAP
58519+ if (randomize_va_space)
58520+ pax_flags |= MF_PAX_RANDMMAP;
58521+#endif
58522+
58523+ return pax_flags;
58524+}
58525+
58526+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
58527+{
58528+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
58529+
58530+#ifdef CONFIG_PAX_EI_PAX
58531+
58532+#ifdef CONFIG_PAX_SOFTMODE
58533+ if (pax_softmode)
58534+ return pax_flags;
58535+#endif
58536+
58537+ pax_flags = 0UL;
58538+
58539+#ifdef CONFIG_PAX_PAGEEXEC
58540+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
58541+ pax_flags |= MF_PAX_PAGEEXEC;
58542+#endif
58543+
58544+#ifdef CONFIG_PAX_SEGMEXEC
58545+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
58546+ pax_flags |= MF_PAX_SEGMEXEC;
58547+#endif
58548+
58549+#ifdef CONFIG_PAX_EMUTRAMP
58550+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
58551+ pax_flags |= MF_PAX_EMUTRAMP;
58552+#endif
58553+
58554+#ifdef CONFIG_PAX_MPROTECT
58555+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
58556+ pax_flags |= MF_PAX_MPROTECT;
58557+#endif
58558+
58559+#ifdef CONFIG_PAX_ASLR
58560+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
58561+ pax_flags |= MF_PAX_RANDMMAP;
58562+#endif
58563+
58564+#endif
58565+
58566+ return pax_flags;
58567+
58568+}
58569+
58570+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
58571+{
58572+
58573+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58574+ unsigned long i;
58575+
58576+ for (i = 0UL; i < elf_ex->e_phnum; i++)
58577+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
58578+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
58579+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
58580+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
58581+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
58582+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
58583+ return PAX_PARSE_FLAGS_FALLBACK;
58584+
58585+#ifdef CONFIG_PAX_SOFTMODE
58586+ if (pax_softmode)
58587+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
58588+ else
58589+#endif
58590+
58591+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
58592+ break;
58593+ }
58594+#endif
58595+
58596+ return PAX_PARSE_FLAGS_FALLBACK;
58597+}
58598+
58599+static unsigned long pax_parse_xattr_pax(struct file * const file)
58600+{
58601+
58602+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58603+ ssize_t xattr_size, i;
58604+ unsigned char xattr_value[sizeof("pemrs") - 1];
58605+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
58606+
58607+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
58608+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
58609+ return PAX_PARSE_FLAGS_FALLBACK;
58610+
58611+ for (i = 0; i < xattr_size; i++)
58612+ switch (xattr_value[i]) {
58613+ default:
58614+ return PAX_PARSE_FLAGS_FALLBACK;
58615+
58616+#define parse_flag(option1, option2, flag) \
58617+ case option1: \
58618+ if (pax_flags_hardmode & MF_PAX_##flag) \
58619+ return PAX_PARSE_FLAGS_FALLBACK;\
58620+ pax_flags_hardmode |= MF_PAX_##flag; \
58621+ break; \
58622+ case option2: \
58623+ if (pax_flags_softmode & MF_PAX_##flag) \
58624+ return PAX_PARSE_FLAGS_FALLBACK;\
58625+ pax_flags_softmode |= MF_PAX_##flag; \
58626+ break;
58627+
58628+ parse_flag('p', 'P', PAGEEXEC);
58629+ parse_flag('e', 'E', EMUTRAMP);
58630+ parse_flag('m', 'M', MPROTECT);
58631+ parse_flag('r', 'R', RANDMMAP);
58632+ parse_flag('s', 'S', SEGMEXEC);
58633+
58634+#undef parse_flag
58635+ }
58636+
58637+ if (pax_flags_hardmode & pax_flags_softmode)
58638+ return PAX_PARSE_FLAGS_FALLBACK;
58639+
58640+#ifdef CONFIG_PAX_SOFTMODE
58641+ if (pax_softmode)
58642+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
58643+ else
58644+#endif
58645+
58646+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
58647+#else
58648+ return PAX_PARSE_FLAGS_FALLBACK;
58649+#endif
58650+
58651+}
58652+
58653+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
58654+{
58655+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
58656+
58657+ pax_flags = pax_parse_defaults();
58658+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
58659+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
58660+ xattr_pax_flags = pax_parse_xattr_pax(file);
58661+
58662+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58663+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58664+ pt_pax_flags != xattr_pax_flags)
58665+ return -EINVAL;
58666+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58667+ pax_flags = xattr_pax_flags;
58668+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58669+ pax_flags = pt_pax_flags;
58670+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58671+ pax_flags = ei_pax_flags;
58672+
58673+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
58674+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58675+ if ((__supported_pte_mask & _PAGE_NX))
58676+ pax_flags &= ~MF_PAX_SEGMEXEC;
58677+ else
58678+ pax_flags &= ~MF_PAX_PAGEEXEC;
58679+ }
58680+#endif
58681+
58682+ if (0 > pax_check_flags(&pax_flags))
58683+ return -EINVAL;
58684+
58685+ current->mm->pax_flags = pax_flags;
58686+ return 0;
58687+}
58688+#endif
58689+
58690 /*
58691 * These are the functions used to load ELF style executables and shared
58692 * libraries. There is no binary dependent code anywhere else.
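
pax_parse_pax_flags() above merges four flag sources. Reading the chain of comparisons: compiled-in defaults are the floor, EI_PAX header bits override them, a PT_PAX_FLAGS program header overrides those, a security xattr overrides everything, and a disagreement between the two explicit on-disk sources (PT_PAX and xattr) is rejected outright. A small self-contained model of just that precedence logic, with FALLBACK meaning "source absent":

#include <stdio.h>

#define FALLBACK (~0UL)		/* models PAX_PARSE_FLAGS_FALLBACK */

static long resolve(unsigned long def, unsigned long ei,
		    unsigned long pt, unsigned long xattr,
		    unsigned long *out)
{
	/* the two explicit on-disk sources must agree if both exist */
	if (pt != FALLBACK && xattr != FALLBACK && pt != xattr)
		return -1;	/* -EINVAL: the exec is refused */
	if (xattr != FALLBACK)
		*out = xattr;
	else if (pt != FALLBACK)
		*out = pt;
	else if (ei != FALLBACK)
		*out = ei;
	else
		*out = def;
	return 0;
}

int main(void)
{
	unsigned long flags;

	if (resolve(0x5, FALLBACK, 0x3, FALLBACK, &flags) == 0)
		printf("PT_PAX wins over defaults: %#lx\n", flags);	/* 0x3 */
	return resolve(0x5, FALLBACK, 0x1, 0x2, &flags) == -1 ? 0 : 1;
}
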
58693@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
58694 {
58695 unsigned long random_variable = 0;
58696
58697+#ifdef CONFIG_PAX_RANDUSTACK
58698+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
58699+ return stack_top - current->mm->delta_stack;
58700+#endif
58701+
58702 if ((current->flags & PF_RANDOMIZE) &&
58703 !(current->personality & ADDR_NO_RANDOMIZE)) {
58704 random_variable = (unsigned long) get_random_int();
58705@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58706 unsigned long load_addr = 0, load_bias = 0;
58707 int load_addr_set = 0;
58708 char * elf_interpreter = NULL;
58709- unsigned long error;
58710+ unsigned long error = 0;
58711 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
58712 unsigned long elf_bss, elf_brk;
58713 int retval, i;
58714@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58715 struct elfhdr interp_elf_ex;
58716 } *loc;
58717 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
58718+ unsigned long pax_task_size;
58719
58720 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
58721 if (!loc) {
58722@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
58723 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
58724 may depend on the personality. */
58725 SET_PERSONALITY2(loc->elf_ex, &arch_state);
58726+
58727+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58728+ current->mm->pax_flags = 0UL;
58729+#endif
58730+
58731+#ifdef CONFIG_PAX_DLRESOLVE
58732+ current->mm->call_dl_resolve = 0UL;
58733+#endif
58734+
58735+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
58736+ current->mm->call_syscall = 0UL;
58737+#endif
58738+
58739+#ifdef CONFIG_PAX_ASLR
58740+ current->mm->delta_mmap = 0UL;
58741+ current->mm->delta_stack = 0UL;
58742+#endif
58743+
58744+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58745+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
58746+ send_sig(SIGKILL, current, 0);
58747+ goto out_free_dentry;
58748+ }
58749+#endif
58750+
58751+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
58752+ pax_set_initial_flags(bprm);
58753+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
58754+ if (pax_set_initial_flags_func)
58755+ (pax_set_initial_flags_func)(bprm);
58756+#endif
58757+
58758+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
58759+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
58760+ current->mm->context.user_cs_limit = PAGE_SIZE;
58761+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
58762+ }
58763+#endif
58764+
58765+#ifdef CONFIG_PAX_SEGMEXEC
58766+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
58767+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
58768+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
58769+ pax_task_size = SEGMEXEC_TASK_SIZE;
58770+ current->mm->def_flags |= VM_NOHUGEPAGE;
58771+ } else
58772+#endif
58773+
58774+ pax_task_size = TASK_SIZE;
58775+
58776+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
58777+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58778+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
58779+ put_cpu();
58780+ }
58781+#endif
58782+
58783+#ifdef CONFIG_PAX_ASLR
58784+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
58785+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
58786+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
58787+ }
58788+#endif
58789+
58790+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
58791+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58792+ executable_stack = EXSTACK_DISABLE_X;
58793+ current->personality &= ~READ_IMPLIES_EXEC;
58794+ } else
58795+#endif
58796+
58797 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
58798 current->personality |= READ_IMPLIES_EXEC;
58799
58800@@ -924,6 +1363,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
58801 #else
58802 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
58803 #endif
58804+
58805+#ifdef CONFIG_PAX_RANDMMAP
58806+ /* PaX: randomize base address at the default exe base if requested */
58807+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
58808+#ifdef CONFIG_SPARC64
58809+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
58810+#else
58811+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
58812+#endif
58813+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
58814+ elf_flags |= MAP_FIXED;
58815+ }
58816+#endif
58817+
58818 }
58819
58820 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
58821@@ -955,9 +1408,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
58822 * allowed task size. Note that p_filesz must always be
58823 * <= p_memsz so it is only necessary to check p_memsz.
58824 */
58825- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
58826- elf_ppnt->p_memsz > TASK_SIZE ||
58827- TASK_SIZE - elf_ppnt->p_memsz < k) {
58828+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
58829+ elf_ppnt->p_memsz > pax_task_size ||
58830+ pax_task_size - elf_ppnt->p_memsz < k) {
58831 /* set_brk can never work. Avoid overflows. */
58832 retval = -EINVAL;
58833 goto out_free_dentry;
58834@@ -993,16 +1446,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
58835 if (retval)
58836 goto out_free_dentry;
58837 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
58838- retval = -EFAULT; /* Nobody gets to see this, but.. */
58839- goto out_free_dentry;
58840+ /*
58841+ * This bss-zeroing can fail if the ELF
58842+ * file specifies odd protections. So
58843+ * we don't check the return value.
58844+ */
58845 }
58846
58847+#ifdef CONFIG_PAX_RANDMMAP
58848+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
58849+ unsigned long start, size, flags;
58850+ vm_flags_t vm_flags;
58851+
58852+ start = ELF_PAGEALIGN(elf_brk);
58853+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
58854+ flags = MAP_FIXED | MAP_PRIVATE;
58855+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
58856+
58857+ down_write(&current->mm->mmap_sem);
58858+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
58859+ retval = -ENOMEM;
58860+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
58861+// if (current->personality & ADDR_NO_RANDOMIZE)
58862+// vm_flags |= VM_READ | VM_MAYREAD;
58863+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
58864+ retval = IS_ERR_VALUE(start) ? start : 0;
58865+ }
58866+ up_write(&current->mm->mmap_sem);
58867+ if (retval == 0)
58868+ retval = set_brk(start + size, start + size + PAGE_SIZE);
58869+ if (retval < 0)
58870+ goto out_free_dentry;
58871+ }
58872+#endif
58873+
58874 if (elf_interpreter) {
58875- unsigned long interp_map_addr = 0;
58876-
58877 elf_entry = load_elf_interp(&loc->interp_elf_ex,
58878 interpreter,
58879- &interp_map_addr,
58880 load_bias, interp_elf_phdata);
58881 if (!IS_ERR((void *)elf_entry)) {
58882 /*
58883@@ -1230,7 +1710,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
58884 * Decide what to dump of a segment, part, all or none.
58885 */
58886 static unsigned long vma_dump_size(struct vm_area_struct *vma,
58887- unsigned long mm_flags)
58888+ unsigned long mm_flags, long signr)
58889 {
58890 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
58891
58892@@ -1268,7 +1748,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
58893 if (vma->vm_file == NULL)
58894 return 0;
58895
58896- if (FILTER(MAPPED_PRIVATE))
58897+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
58898 goto whole;
58899
58900 /*
58901@@ -1475,9 +1955,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
58902 {
58903 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
58904 int i = 0;
58905- do
58906+ do {
58907 i += 2;
58908- while (auxv[i - 2] != AT_NULL);
58909+ } while (auxv[i - 2] != AT_NULL);
58910 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
58911 }
58912
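
The do/while reformat above is purely cosmetic, but the loop it touches is easy to misread: the auxiliary vector is a flat array of (type, value) word pairs terminated by an AT_NULL type, and the index must land just past the terminator pair. A standalone model of the same walk:

#include <stdio.h>

#define AT_NULL 0

static int auxv_words(const unsigned long *auxv)
{
	int i = 0;

	do {
		i += 2;				/* one (type, value) pair */
	} while (auxv[i - 2] != AT_NULL);
	return i;				/* includes the terminator pair */
}

int main(void)
{
	unsigned long auxv[] = { 6, 4096, 25, 0xdead, AT_NULL, 0 };

	printf("%d words\n", auxv_words(auxv));	/* prints 6 */
	return 0;
}
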
58913@@ -1486,7 +1966,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
58914 {
58915 mm_segment_t old_fs = get_fs();
58916 set_fs(KERNEL_DS);
58917- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
58918+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
58919 set_fs(old_fs);
58920 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
58921 }
58922@@ -2206,7 +2686,7 @@ static int elf_core_dump(struct coredump_params *cprm)
58923 vma = next_vma(vma, gate_vma)) {
58924 unsigned long dump_size;
58925
58926- dump_size = vma_dump_size(vma, cprm->mm_flags);
58927+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
58928 vma_filesz[i++] = dump_size;
58929 vma_data_size += dump_size;
58930 }
58931@@ -2314,6 +2794,167 @@ out:
58932
58933 #endif /* CONFIG_ELF_CORE */
58934
58935+#ifdef CONFIG_PAX_MPROTECT
58936+/* PaX: non-PIC ELF libraries need relocations on their executable segments;
58937+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly,
58938+ * we'll remove VM_MAYWRITE for good on RELRO segments.
58939+ *
58940+ * The checks favour ld-linux.so behaviour, which operates on a per-ELF-segment
58941+ * basis, because we want to allow the common case and not the special ones.
58942+ */
58943+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
58944+{
58945+ struct elfhdr elf_h;
58946+ struct elf_phdr elf_p;
58947+ unsigned long i;
58948+ unsigned long oldflags;
58949+ bool is_textrel_rw, is_textrel_rx, is_relro;
58950+
58951+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
58952+ return;
58953+
58954+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
58955+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
58956+
58957+#ifdef CONFIG_PAX_ELFRELOCS
58958+ /* possible TEXTREL */
58959+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
58960+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
58961+#else
58962+ is_textrel_rw = false;
58963+ is_textrel_rx = false;
58964+#endif
58965+
58966+ /* possible RELRO */
58967+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
58968+
58969+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
58970+ return;
58971+
58972+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
58973+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
58974+
58975+#ifdef CONFIG_PAX_ETEXECRELOCS
58976+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
58977+#else
58978+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
58979+#endif
58980+
58981+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
58982+ !elf_check_arch(&elf_h) ||
58983+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
58984+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
58985+ return;
58986+
58987+ for (i = 0UL; i < elf_h.e_phnum; i++) {
58988+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
58989+ return;
58990+ switch (elf_p.p_type) {
58991+ case PT_DYNAMIC:
58992+ if (!is_textrel_rw && !is_textrel_rx)
58993+ continue;
58994+ i = 0UL;
58995+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
58996+ elf_dyn dyn;
58997+
58998+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
58999+ break;
59000+ if (dyn.d_tag == DT_NULL)
59001+ break;
59002+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
59003+ gr_log_textrel(vma);
59004+ if (is_textrel_rw)
59005+ vma->vm_flags |= VM_MAYWRITE;
59006+ else
59007+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
59008+ vma->vm_flags &= ~VM_MAYWRITE;
59009+ break;
59010+ }
59011+ i++;
59012+ }
59013+ is_textrel_rw = false;
59014+ is_textrel_rx = false;
59015+ continue;
59016+
59017+ case PT_GNU_RELRO:
59018+ if (!is_relro)
59019+ continue;
59020+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
59021+ vma->vm_flags &= ~VM_MAYWRITE;
59022+ is_relro = false;
59023+ continue;
59024+
59025+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59026+ case PT_PAX_FLAGS: {
59027+ const char *msg_mprotect = "", *msg_emutramp = "";
59028+ char *buffer_lib, *buffer_exe;
59029+
59030+ if (elf_p.p_flags & PF_NOMPROTECT)
59031+ msg_mprotect = "MPROTECT disabled";
59032+
59033+#ifdef CONFIG_PAX_EMUTRAMP
59034+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
59035+ msg_emutramp = "EMUTRAMP enabled";
59036+#endif
59037+
59038+ if (!msg_mprotect[0] && !msg_emutramp[0])
59039+ continue;
59040+
59041+ if (!printk_ratelimit())
59042+ continue;
59043+
59044+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
59045+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
59046+ if (buffer_lib && buffer_exe) {
59047+ char *path_lib, *path_exe;
59048+
59049+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
59050+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
59051+
59052+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
59053+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
59054+
59055+ }
59056+ free_page((unsigned long)buffer_exe);
59057+ free_page((unsigned long)buffer_lib);
59058+ continue;
59059+ }
59060+#endif
59061+
59062+ }
59063+ }
59064+}
59065+#endif
59066+
59067+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59068+
59069+extern int grsec_enable_log_rwxmaps;
59070+
59071+static void elf_handle_mmap(struct file *file)
59072+{
59073+ struct elfhdr elf_h;
59074+ struct elf_phdr elf_p;
59075+ unsigned long i;
59076+
59077+ if (!grsec_enable_log_rwxmaps)
59078+ return;
59079+
59080+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59081+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59082+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
59083+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59084+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59085+ return;
59086+
59087+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59088+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59089+ return;
59090+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
59091+ gr_log_ptgnustack(file);
59092+ }
59093+}
59094+#endif
59095+
59096 static int __init init_elf_binfmt(void)
59097 {
59098 register_binfmt(&elf_format);
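
Within elf_handle_mprotect() above, the PT_GNU_RELRO case drops VM_MAYWRITE only when the program header exactly covers the vma being mprotected: same starting file page, same page-rounded length. A reduced model of that matching test, with simplified stand-in types (not the kernel's):

#include <stdbool.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

struct vma  { unsigned long vm_start, vm_end, vm_pgoff; };
struct phdr { unsigned long p_offset, p_memsz; };

static bool relro_covers_vma(const struct phdr *p, const struct vma *v)
{
	/* the segment's first file page must be the vma's pgoff, and its
	 * page-aligned size must span the whole vma */
	return (p->p_offset >> PAGE_SHIFT) == v->vm_pgoff &&
	       PAGE_ALIGN(p->p_memsz) == v->vm_end - v->vm_start;
}

int main(void)
{
	struct phdr p = { .p_offset = 0x2000, .p_memsz = 0x1234 };
	struct vma  v = { .vm_start = 0x400000, .vm_end = 0x402000, .vm_pgoff = 2 };

	return relro_covers_vma(&p, &v) ? 0 : 1;
}
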
59099diff --git a/fs/block_dev.c b/fs/block_dev.c
59100index b48c41b..e070416 100644
59101--- a/fs/block_dev.c
59102+++ b/fs/block_dev.c
59103@@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
59104 else if (bdev->bd_contains == bdev)
59105 return true; /* is a whole device which isn't held */
59106
59107- else if (whole->bd_holder == bd_may_claim)
59108+ else if (whole->bd_holder == (void *)bd_may_claim)
59109 return true; /* is a partition of a device that is being partitioned */
59110 else if (whole->bd_holder != NULL)
59111 return false; /* is a partition of a held device */
59112diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
59113index f54511d..58acdec 100644
59114--- a/fs/btrfs/ctree.c
59115+++ b/fs/btrfs/ctree.c
59116@@ -1173,9 +1173,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
59117 free_extent_buffer(buf);
59118 add_root_to_dirty_list(root);
59119 } else {
59120- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
59121- parent_start = parent->start;
59122- else
59123+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
59124+ if (parent)
59125+ parent_start = parent->start;
59126+ else
59127+ parent_start = 0;
59128+ } else
59129 parent_start = 0;
59130
59131 WARN_ON(trans->transid != btrfs_header_generation(parent));
59132diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
59133index de4e70f..b41dc45 100644
59134--- a/fs/btrfs/delayed-inode.c
59135+++ b/fs/btrfs/delayed-inode.c
59136@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
59137
59138 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
59139 {
59140- int seq = atomic_inc_return(&delayed_root->items_seq);
59141+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
59142 if ((atomic_dec_return(&delayed_root->items) <
59143 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
59144 waitqueue_active(&delayed_root->wait))
59145@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
59146
59147 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
59148 {
59149- int val = atomic_read(&delayed_root->items_seq);
59150+ int val = atomic_read_unchecked(&delayed_root->items_seq);
59151
59152 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
59153 return 1;
59154@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
59155 int seq;
59156 int ret;
59157
59158- seq = atomic_read(&delayed_root->items_seq);
59159+ seq = atomic_read_unchecked(&delayed_root->items_seq);
59160
59161 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
59162 if (ret)
59163diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
59164index f70119f..ab5894d 100644
59165--- a/fs/btrfs/delayed-inode.h
59166+++ b/fs/btrfs/delayed-inode.h
59167@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
59168 */
59169 struct list_head prepare_list;
59170 atomic_t items; /* for delayed items */
59171- atomic_t items_seq; /* for delayed items */
59172+ atomic_unchecked_t items_seq; /* for delayed items */
59173 int nodes; /* for delayed nodes */
59174 wait_queue_head_t wait;
59175 };
59176@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
59177 struct btrfs_delayed_root *delayed_root)
59178 {
59179 atomic_set(&delayed_root->items, 0);
59180- atomic_set(&delayed_root->items_seq, 0);
59181+ atomic_set_unchecked(&delayed_root->items_seq, 0);
59182 delayed_root->nodes = 0;
59183 spin_lock_init(&delayed_root->lock);
59184 init_waitqueue_head(&delayed_root->wait);
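
items_seq above is typical of the atomic_t to atomic_unchecked_t conversions throughout this patch: under PAX_REFCOUNT, ordinary atomic increments trap on overflow to stop refcount exploits, so counters that may legitimately wrap (sequence numbers, statistics) are moved to an unchecked variant. A sketch of what the unchecked fallback amounts to when the hardening is off; the C11 stand-ins below are illustrative, not the kernel's definitions:

#include <stdatomic.h>

/* illustrative fallback only: when the refcount hardening is off, the
 * unchecked type is just the plain atomic counter */
typedef struct { atomic_int counter; } atomic_unchecked_t;

static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	/* no overflow trap: wrapping is acceptable for a sequence counter */
	return atomic_fetch_add(&v->counter, 1) + 1;
}

int main(void)
{
	atomic_unchecked_t items_seq = { 0 };

	return atomic_inc_return_unchecked(&items_seq) == 1 ? 0 : 1;
}
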
59185diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
59186index d49fe8a..2e11037 100644
59187--- a/fs/btrfs/ioctl.c
59188+++ b/fs/btrfs/ioctl.c
59189@@ -3925,9 +3925,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
59190 for (i = 0; i < num_types; i++) {
59191 struct btrfs_space_info *tmp;
59192
59193+ /* Don't copy in more than we allocated */
59194 if (!slot_count)
59195 break;
59196
59197+ slot_count--;
59198+
59199 info = NULL;
59200 rcu_read_lock();
59201 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
59202@@ -3949,10 +3952,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
59203 memcpy(dest, &space, sizeof(space));
59204 dest++;
59205 space_args.total_spaces++;
59206- slot_count--;
59207 }
59208- if (!slot_count)
59209- break;
59210 }
59211 up_read(&info->groups_sem);
59212 }
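
The reordering above in btrfs_ioctl_space_info() claims an output slot before the RCU walk fills it, instead of decrementing after the copy inside the inner loop, so the function can never emit more entries than the user buffer was sized for. A minimal model of the claim-before-write shape, with stand-ins for the space-info walk:

#include <stdio.h>

/* claim-before-write: dest_slots is decremented when a slot is reserved,
 * not after the entry is produced deep inside a nested walk */
static int fill(int *dest, int dest_slots, int num_types)
{
	int written = 0;

	for (int t = 0; t < num_types; t++) {
		if (!dest_slots)
			break;			/* user buffer is full */
		dest_slots--;			/* reserve the slot up front */
		dest[written++] = t;		/* the rcu walk would fill it here */
	}
	return written;
}

int main(void)
{
	int buf[2];

	return fill(buf, sizeof(buf) / sizeof(buf[0]), 5) == 2 ? 0 : 1;
}
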
59213diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
59214index 6f49b28..483410f 100644
59215--- a/fs/btrfs/super.c
59216+++ b/fs/btrfs/super.c
59217@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
59218 function, line, errstr);
59219 return;
59220 }
59221- ACCESS_ONCE(trans->transaction->aborted) = errno;
59222+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
59223 /* Wake up anybody who may be waiting on this transaction */
59224 wake_up(&root->fs_info->transaction_wait);
59225 wake_up(&root->fs_info->transaction_blocked_wait);
59226diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
59227index 92db3f6..898a561 100644
59228--- a/fs/btrfs/sysfs.c
59229+++ b/fs/btrfs/sysfs.c
59230@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
59231 for (set = 0; set < FEAT_MAX; set++) {
59232 int i;
59233 struct attribute *attrs[2];
59234- struct attribute_group agroup = {
59235+ attribute_group_no_const agroup = {
59236 .name = "features",
59237 .attrs = attrs,
59238 };
59239diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
59240index 2299bfd..4098e72 100644
59241--- a/fs/btrfs/tests/free-space-tests.c
59242+++ b/fs/btrfs/tests/free-space-tests.c
59243@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59244 * extent entry.
59245 */
59246 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
59247- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59248+ pax_open_kernel();
59249+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59250+ pax_close_kernel();
59251
59252 /*
59253 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
59254@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59255 if (ret)
59256 return ret;
59257
59258- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59259+ pax_open_kernel();
59260+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59261+ pax_close_kernel();
59262 __btrfs_remove_free_space_cache(cache->free_space_ctl);
59263
59264 return 0;
59265diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
59266index f78e9dc..069ab24 100644
59267--- a/fs/btrfs/tree-log.c
59268+++ b/fs/btrfs/tree-log.c
59269@@ -1010,7 +1010,7 @@ again:
59270 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
59271
59272 while (cur_offset < item_size) {
59273- extref = (struct btrfs_inode_extref *)base + cur_offset;
59274+ extref = (struct btrfs_inode_extref *)(base + cur_offset);
59275
59276 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
59277
59278diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
59279index 154990c..d0cf699 100644
59280--- a/fs/btrfs/tree-log.h
59281+++ b/fs/btrfs/tree-log.h
59282@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
59283 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
59284 struct btrfs_trans_handle *trans)
59285 {
59286- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
59287+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
59288 }
59289
59290 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
59291diff --git a/fs/buffer.c b/fs/buffer.c
59292index 20805db..2e8fc69 100644
59293--- a/fs/buffer.c
59294+++ b/fs/buffer.c
59295@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
59296 bh_cachep = kmem_cache_create("buffer_head",
59297 sizeof(struct buffer_head), 0,
59298 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
59299- SLAB_MEM_SPREAD),
59300+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
59301 NULL);
59302
59303 /*
59304diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
59305index fbb08e9..0fda764 100644
59306--- a/fs/cachefiles/bind.c
59307+++ b/fs/cachefiles/bind.c
59308@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
59309 args);
59310
59311 /* start by checking things over */
59312- ASSERT(cache->fstop_percent >= 0 &&
59313- cache->fstop_percent < cache->fcull_percent &&
59314+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
59315 cache->fcull_percent < cache->frun_percent &&
59316 cache->frun_percent < 100);
59317
59318- ASSERT(cache->bstop_percent >= 0 &&
59319- cache->bstop_percent < cache->bcull_percent &&
59320+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
59321 cache->bcull_percent < cache->brun_percent &&
59322 cache->brun_percent < 100);
59323
59324diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
59325index ce1b115..4a6852c 100644
59326--- a/fs/cachefiles/daemon.c
59327+++ b/fs/cachefiles/daemon.c
59328@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
59329 if (n > buflen)
59330 return -EMSGSIZE;
59331
59332- if (copy_to_user(_buffer, buffer, n) != 0)
59333+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
59334 return -EFAULT;
59335
59336 return n;
59337@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
59338 if (test_bit(CACHEFILES_DEAD, &cache->flags))
59339 return -EIO;
59340
59341- if (datalen < 0 || datalen > PAGE_SIZE - 1)
59342+ if (datalen > PAGE_SIZE - 1)
59343 return -EOPNOTSUPP;
59344
59345 /* drag the command string into the kernel so we can parse it */
59346@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
59347 if (args[0] != '%' || args[1] != '\0')
59348 return -EINVAL;
59349
59350- if (fstop < 0 || fstop >= cache->fcull_percent)
59351+ if (fstop >= cache->fcull_percent)
59352 return cachefiles_daemon_range_error(cache, args);
59353
59354 cache->fstop_percent = fstop;
59355@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
59356 if (args[0] != '%' || args[1] != '\0')
59357 return -EINVAL;
59358
59359- if (bstop < 0 || bstop >= cache->bcull_percent)
59360+ if (bstop >= cache->bcull_percent)
59361 return cachefiles_daemon_range_error(cache, args);
59362
59363 cache->bstop_percent = bstop;
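
The dropped checks above are dead code once the percentage fields and datalen are unsigned: a "< 0" test on an unsigned value can never be true, so only the upper bounds remain, and daemon_read additionally gains an explicit bound against its stack buffer. A small illustration of the same guard style; types and names here are stand-ins:

#include <stddef.h>
#include <string.h>

/* with size_t lengths, "datalen < 0" is unconditionally false, so the only
 * meaningful guard is the upper bound against the destination buffer */
static int copy_cmd(char *dst, size_t dstlen, const char *src, size_t datalen)
{
	if (datalen > dstlen - 1)
		return -1;		/* -EOPNOTSUPP in the kernel */
	memcpy(dst, src, datalen);
	dst[datalen] = '\0';
	return 0;
}

int main(void)
{
	char buf[16];

	return copy_cmd(buf, sizeof(buf), "cull", 4);
}
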
59364diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
59365index 8c52472..c4e3a69 100644
59366--- a/fs/cachefiles/internal.h
59367+++ b/fs/cachefiles/internal.h
59368@@ -66,7 +66,7 @@ struct cachefiles_cache {
59369 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
59370 struct rb_root active_nodes; /* active nodes (can't be culled) */
59371 rwlock_t active_lock; /* lock for active_nodes */
59372- atomic_t gravecounter; /* graveyard uniquifier */
59373+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
59374 unsigned frun_percent; /* when to stop culling (% files) */
59375 unsigned fcull_percent; /* when to start culling (% files) */
59376 unsigned fstop_percent; /* when to stop allocating (% files) */
59377@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
59378 * proc.c
59379 */
59380 #ifdef CONFIG_CACHEFILES_HISTOGRAM
59381-extern atomic_t cachefiles_lookup_histogram[HZ];
59382-extern atomic_t cachefiles_mkdir_histogram[HZ];
59383-extern atomic_t cachefiles_create_histogram[HZ];
59384+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59385+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59386+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
59387
59388 extern int __init cachefiles_proc_init(void);
59389 extern void cachefiles_proc_cleanup(void);
59390 static inline
59391-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
59392+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
59393 {
59394 unsigned long jif = jiffies - start_jif;
59395 if (jif >= HZ)
59396 jif = HZ - 1;
59397- atomic_inc(&histogram[jif]);
59398+ atomic_inc_unchecked(&histogram[jif]);
59399 }
59400
59401 #else
59402diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
59403index 7f8e83f..8951aa4 100644
59404--- a/fs/cachefiles/namei.c
59405+++ b/fs/cachefiles/namei.c
59406@@ -309,7 +309,7 @@ try_again:
59407 /* first step is to make up a grave dentry in the graveyard */
59408 sprintf(nbuffer, "%08x%08x",
59409 (uint32_t) get_seconds(),
59410- (uint32_t) atomic_inc_return(&cache->gravecounter));
59411+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
59412
59413 /* do the multiway lock magic */
59414 trap = lock_rename(cache->graveyard, dir);
59415diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
59416index eccd339..4c1d995 100644
59417--- a/fs/cachefiles/proc.c
59418+++ b/fs/cachefiles/proc.c
59419@@ -14,9 +14,9 @@
59420 #include <linux/seq_file.h>
59421 #include "internal.h"
59422
59423-atomic_t cachefiles_lookup_histogram[HZ];
59424-atomic_t cachefiles_mkdir_histogram[HZ];
59425-atomic_t cachefiles_create_histogram[HZ];
59426+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59427+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59428+atomic_unchecked_t cachefiles_create_histogram[HZ];
59429
59430 /*
59431 * display the latency histogram
59432@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
59433 return 0;
59434 default:
59435 index = (unsigned long) v - 3;
59436- x = atomic_read(&cachefiles_lookup_histogram[index]);
59437- y = atomic_read(&cachefiles_mkdir_histogram[index]);
59438- z = atomic_read(&cachefiles_create_histogram[index]);
59439+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
59440+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
59441+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
59442 if (x == 0 && y == 0 && z == 0)
59443 return 0;
59444
59445diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
59446index c241603..56bae60 100644
59447--- a/fs/ceph/dir.c
59448+++ b/fs/ceph/dir.c
59449@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
59450 struct dentry *dentry, *last;
59451 struct ceph_dentry_info *di;
59452 int err = 0;
59453+ char d_name[DNAME_INLINE_LEN];
59454+ const unsigned char *name;
59455
59456 /* claim ref on last dentry we returned */
59457 last = fi->dentry;
59458@@ -192,7 +194,12 @@ more:
59459
59460 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
59461 dentry, dentry, dentry->d_inode);
59462- if (!dir_emit(ctx, dentry->d_name.name,
59463+ name = dentry->d_name.name;
59464+ if (name == dentry->d_iname) {
59465+ memcpy(d_name, name, dentry->d_name.len);
59466+ name = d_name;
59467+ }
59468+ if (!dir_emit(ctx, name,
59469 dentry->d_name.len,
59470 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
59471 dentry->d_inode->i_mode >> 12)) {
59472@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
59473 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
59474 struct ceph_mds_client *mdsc = fsc->mdsc;
59475 unsigned frag = fpos_frag(ctx->pos);
59476- int off = fpos_off(ctx->pos);
59477+ unsigned int off = fpos_off(ctx->pos);
59478 int err;
59479 u32 ftype;
59480 struct ceph_mds_reply_info_parsed *rinfo;
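
The ceph readdir change above copies short names out of the dentry before calling dir_emit(): when d_name.name points at the dentry's inline buffer (d_iname), a concurrent rename can rewrite it mid-emit, so a stack snapshot is taken first. A userspace model of that snapshot-before-use pattern, with simplified stand-in types:

#include <stdio.h>
#include <string.h>

#define DNAME_INLINE_LEN 32

/* simplified stand-in for struct dentry: name may alias the inline buffer */
struct dentry {
	const char *name;
	unsigned int len;
	char iname[DNAME_INLINE_LEN];
};

static void emit_name(const struct dentry *d)
{
	char d_name[DNAME_INLINE_LEN];
	const char *name = d->name;

	if (name == d->iname) {		/* inline name: snapshot it first */
		memcpy(d_name, name, d->len);
		name = d_name;
	}
	printf("%.*s\n", (int)d->len, name);
}

int main(void)
{
	struct dentry d = { .len = 5 };

	memcpy(d.iname, "hello", 5);
	d.name = d.iname;
	emit_name(&d);
	return 0;
}
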
59481diff --git a/fs/ceph/super.c b/fs/ceph/super.c
59482index 50f06cd..c7eba3e 100644
59483--- a/fs/ceph/super.c
59484+++ b/fs/ceph/super.c
59485@@ -896,7 +896,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
59486 /*
59487 * construct our own bdi so we can control readahead, etc.
59488 */
59489-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
59490+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
59491
59492 static int ceph_register_bdi(struct super_block *sb,
59493 struct ceph_fs_client *fsc)
59494@@ -913,7 +913,7 @@ static int ceph_register_bdi(struct super_block *sb,
59495 default_backing_dev_info.ra_pages;
59496
59497 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
59498- atomic_long_inc_return(&bdi_seq));
59499+ atomic_long_inc_return_unchecked(&bdi_seq));
59500 if (!err)
59501 sb->s_bdi = &fsc->backing_dev_info;
59502 return err;
59503diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
59504index 7febcf2..62a5721 100644
59505--- a/fs/cifs/cifs_debug.c
59506+++ b/fs/cifs/cifs_debug.c
59507@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59508
59509 if (strtobool(&c, &bv) == 0) {
59510 #ifdef CONFIG_CIFS_STATS2
59511- atomic_set(&totBufAllocCount, 0);
59512- atomic_set(&totSmBufAllocCount, 0);
59513+ atomic_set_unchecked(&totBufAllocCount, 0);
59514+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59515 #endif /* CONFIG_CIFS_STATS2 */
59516 spin_lock(&cifs_tcp_ses_lock);
59517 list_for_each(tmp1, &cifs_tcp_ses_list) {
59518@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59519 tcon = list_entry(tmp3,
59520 struct cifs_tcon,
59521 tcon_list);
59522- atomic_set(&tcon->num_smbs_sent, 0);
59523+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
59524 if (server->ops->clear_stats)
59525 server->ops->clear_stats(tcon);
59526 }
59527@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59528 smBufAllocCount.counter, cifs_min_small);
59529 #ifdef CONFIG_CIFS_STATS2
59530 seq_printf(m, "Total Large %d Small %d Allocations\n",
59531- atomic_read(&totBufAllocCount),
59532- atomic_read(&totSmBufAllocCount));
59533+ atomic_read_unchecked(&totBufAllocCount),
59534+ atomic_read_unchecked(&totSmBufAllocCount));
59535 #endif /* CONFIG_CIFS_STATS2 */
59536
59537 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
59538@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59539 if (tcon->need_reconnect)
59540 seq_puts(m, "\tDISCONNECTED ");
59541 seq_printf(m, "\nSMBs: %d",
59542- atomic_read(&tcon->num_smbs_sent));
59543+ atomic_read_unchecked(&tcon->num_smbs_sent));
59544 if (server->ops->print_stats)
59545 server->ops->print_stats(m, tcon);
59546 }
59547diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
59548index d72fe37..ded5511 100644
59549--- a/fs/cifs/cifsfs.c
59550+++ b/fs/cifs/cifsfs.c
59551@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
59552 */
59553 cifs_req_cachep = kmem_cache_create("cifs_request",
59554 CIFSMaxBufSize + max_hdr_size, 0,
59555- SLAB_HWCACHE_ALIGN, NULL);
59556+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
59557 if (cifs_req_cachep == NULL)
59558 return -ENOMEM;
59559
59560@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
59561 efficient to alloc 1 per page off the slab compared to 17K (5page)
59562 alloc of large cifs buffers even when page debugging is on */
59563 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
59564- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
59565+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
59566 NULL);
59567 if (cifs_sm_req_cachep == NULL) {
59568 mempool_destroy(cifs_req_poolp);
59569@@ -1204,8 +1204,8 @@ init_cifs(void)
59570 atomic_set(&bufAllocCount, 0);
59571 atomic_set(&smBufAllocCount, 0);
59572 #ifdef CONFIG_CIFS_STATS2
59573- atomic_set(&totBufAllocCount, 0);
59574- atomic_set(&totSmBufAllocCount, 0);
59575+ atomic_set_unchecked(&totBufAllocCount, 0);
59576+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59577 #endif /* CONFIG_CIFS_STATS2 */
59578
59579 atomic_set(&midCount, 0);
59580diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
59581index 22b289a..bbbba08 100644
59582--- a/fs/cifs/cifsglob.h
59583+++ b/fs/cifs/cifsglob.h
59584@@ -823,35 +823,35 @@ struct cifs_tcon {
59585 __u16 Flags; /* optional support bits */
59586 enum statusEnum tidStatus;
59587 #ifdef CONFIG_CIFS_STATS
59588- atomic_t num_smbs_sent;
59589+ atomic_unchecked_t num_smbs_sent;
59590 union {
59591 struct {
59592- atomic_t num_writes;
59593- atomic_t num_reads;
59594- atomic_t num_flushes;
59595- atomic_t num_oplock_brks;
59596- atomic_t num_opens;
59597- atomic_t num_closes;
59598- atomic_t num_deletes;
59599- atomic_t num_mkdirs;
59600- atomic_t num_posixopens;
59601- atomic_t num_posixmkdirs;
59602- atomic_t num_rmdirs;
59603- atomic_t num_renames;
59604- atomic_t num_t2renames;
59605- atomic_t num_ffirst;
59606- atomic_t num_fnext;
59607- atomic_t num_fclose;
59608- atomic_t num_hardlinks;
59609- atomic_t num_symlinks;
59610- atomic_t num_locks;
59611- atomic_t num_acl_get;
59612- atomic_t num_acl_set;
59613+ atomic_unchecked_t num_writes;
59614+ atomic_unchecked_t num_reads;
59615+ atomic_unchecked_t num_flushes;
59616+ atomic_unchecked_t num_oplock_brks;
59617+ atomic_unchecked_t num_opens;
59618+ atomic_unchecked_t num_closes;
59619+ atomic_unchecked_t num_deletes;
59620+ atomic_unchecked_t num_mkdirs;
59621+ atomic_unchecked_t num_posixopens;
59622+ atomic_unchecked_t num_posixmkdirs;
59623+ atomic_unchecked_t num_rmdirs;
59624+ atomic_unchecked_t num_renames;
59625+ atomic_unchecked_t num_t2renames;
59626+ atomic_unchecked_t num_ffirst;
59627+ atomic_unchecked_t num_fnext;
59628+ atomic_unchecked_t num_fclose;
59629+ atomic_unchecked_t num_hardlinks;
59630+ atomic_unchecked_t num_symlinks;
59631+ atomic_unchecked_t num_locks;
59632+ atomic_unchecked_t num_acl_get;
59633+ atomic_unchecked_t num_acl_set;
59634 } cifs_stats;
59635 #ifdef CONFIG_CIFS_SMB2
59636 struct {
59637- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59638- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59639+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59640+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59641 } smb2_stats;
59642 #endif /* CONFIG_CIFS_SMB2 */
59643 } stats;
59644@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
59645 }
59646
59647 #ifdef CONFIG_CIFS_STATS
59648-#define cifs_stats_inc atomic_inc
59649+#define cifs_stats_inc atomic_inc_unchecked
59650
59651 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
59652 unsigned int bytes)
59653@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
59654 /* Various Debug counters */
59655 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
59656 #ifdef CONFIG_CIFS_STATS2
59657-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
59658-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
59659+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
59660+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
59661 #endif
59662 GLOBAL_EXTERN atomic_t smBufAllocCount;
59663 GLOBAL_EXTERN atomic_t midCount;
59664diff --git a/fs/cifs/file.c b/fs/cifs/file.c
59665index 74f1287..7ef0237 100644
59666--- a/fs/cifs/file.c
59667+++ b/fs/cifs/file.c
59668@@ -2060,10 +2060,14 @@ static int cifs_writepages(struct address_space *mapping,
59669 index = mapping->writeback_index; /* Start from prev offset */
59670 end = -1;
59671 } else {
59672- index = wbc->range_start >> PAGE_CACHE_SHIFT;
59673- end = wbc->range_end >> PAGE_CACHE_SHIFT;
59674- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
59675+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
59676 range_whole = true;
59677+ index = 0;
59678+ end = ULONG_MAX;
59679+ } else {
59680+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
59681+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
59682+ }
59683 scanned = true;
59684 }
59685 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
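
The cifs_writepages() fix above decides whether the request covers the whole file before deriving page indexes, so the whole-file case scans index 0 through ULONG_MAX rather than indexes computed from the byte range, which can truncate when shifted into an unsigned long. A reduced model of the corrected setup:

#include <limits.h>
#include <stdbool.h>

#define PAGE_CACHE_SHIFT 12

static void pick_range(long long start, long long end_byte,
		       unsigned long *index, unsigned long *end, bool *whole)
{
	if (start == 0 && end_byte == LLONG_MAX) {
		*whole = true;		/* whole file: walk every page index */
		*index = 0;
		*end   = ULONG_MAX;
	} else {
		*whole = false;
		*index = (unsigned long)(start >> PAGE_CACHE_SHIFT);
		*end   = (unsigned long)(end_byte >> PAGE_CACHE_SHIFT);
	}
}

int main(void)
{
	unsigned long i, e;
	bool w;

	pick_range(0, LLONG_MAX, &i, &e, &w);
	return (w && i == 0 && e == ULONG_MAX) ? 0 : 1;
}
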
59686diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
59687index 3379463..3af418a 100644
59688--- a/fs/cifs/misc.c
59689+++ b/fs/cifs/misc.c
59690@@ -170,7 +170,7 @@ cifs_buf_get(void)
59691 memset(ret_buf, 0, buf_size + 3);
59692 atomic_inc(&bufAllocCount);
59693 #ifdef CONFIG_CIFS_STATS2
59694- atomic_inc(&totBufAllocCount);
59695+ atomic_inc_unchecked(&totBufAllocCount);
59696 #endif /* CONFIG_CIFS_STATS2 */
59697 }
59698
59699@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
59700 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
59701 atomic_inc(&smBufAllocCount);
59702 #ifdef CONFIG_CIFS_STATS2
59703- atomic_inc(&totSmBufAllocCount);
59704+ atomic_inc_unchecked(&totSmBufAllocCount);
59705 #endif /* CONFIG_CIFS_STATS2 */
59706
59707 }
59708diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
59709index d297903..1cb7516 100644
59710--- a/fs/cifs/smb1ops.c
59711+++ b/fs/cifs/smb1ops.c
59712@@ -622,27 +622,27 @@ static void
59713 cifs_clear_stats(struct cifs_tcon *tcon)
59714 {
59715 #ifdef CONFIG_CIFS_STATS
59716- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
59717- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
59718- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
59719- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59720- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
59721- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
59722- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59723- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
59724- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
59725- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
59726- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
59727- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
59728- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
59729- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
59730- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
59731- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
59732- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
59733- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
59734- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
59735- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
59736- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
59737+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
59738+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
59739+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
59740+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59741+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
59742+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
59743+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59744+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
59745+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
59746+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
59747+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
59748+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
59749+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
59750+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
59751+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
59752+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
59753+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
59754+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
59755+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
59756+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
59757+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
59758 #endif
59759 }
59760
59761@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59762 {
59763 #ifdef CONFIG_CIFS_STATS
59764 seq_printf(m, " Oplocks breaks: %d",
59765- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
59766+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
59767 seq_printf(m, "\nReads: %d Bytes: %llu",
59768- atomic_read(&tcon->stats.cifs_stats.num_reads),
59769+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
59770 (long long)(tcon->bytes_read));
59771 seq_printf(m, "\nWrites: %d Bytes: %llu",
59772- atomic_read(&tcon->stats.cifs_stats.num_writes),
59773+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
59774 (long long)(tcon->bytes_written));
59775 seq_printf(m, "\nFlushes: %d",
59776- atomic_read(&tcon->stats.cifs_stats.num_flushes));
59777+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
59778 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
59779- atomic_read(&tcon->stats.cifs_stats.num_locks),
59780- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
59781- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
59782+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
59783+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
59784+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
59785 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
59786- atomic_read(&tcon->stats.cifs_stats.num_opens),
59787- atomic_read(&tcon->stats.cifs_stats.num_closes),
59788- atomic_read(&tcon->stats.cifs_stats.num_deletes));
59789+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
59790+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
59791+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
59792 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
59793- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
59794- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
59795+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
59796+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
59797 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
59798- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
59799- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
59800+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
59801+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
59802 seq_printf(m, "\nRenames: %d T2 Renames %d",
59803- atomic_read(&tcon->stats.cifs_stats.num_renames),
59804- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
59805+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
59806+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
59807 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
59808- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
59809- atomic_read(&tcon->stats.cifs_stats.num_fnext),
59810- atomic_read(&tcon->stats.cifs_stats.num_fclose));
59811+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
59812+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
59813+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
59814 #endif
59815 }
59816
59817diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
59818index 96b5d40..e5db0c1 100644
59819--- a/fs/cifs/smb2ops.c
59820+++ b/fs/cifs/smb2ops.c
59821@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
59822 #ifdef CONFIG_CIFS_STATS
59823 int i;
59824 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
59825- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59826- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59827+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59828+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59829 }
59830 #endif
59831 }
59832@@ -459,65 +459,65 @@ static void
59833 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59834 {
59835 #ifdef CONFIG_CIFS_STATS
59836- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59837- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59838+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59839+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59840 seq_printf(m, "\nNegotiates: %d sent %d failed",
59841- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
59842- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
59843+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
59844+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
59845 seq_printf(m, "\nSessionSetups: %d sent %d failed",
59846- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
59847- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
59848+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
59849+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
59850 seq_printf(m, "\nLogoffs: %d sent %d failed",
59851- atomic_read(&sent[SMB2_LOGOFF_HE]),
59852- atomic_read(&failed[SMB2_LOGOFF_HE]));
59853+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
59854+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
59855 seq_printf(m, "\nTreeConnects: %d sent %d failed",
59856- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
59857- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
59858+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
59859+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
59860 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
59861- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
59862- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
59863+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
59864+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
59865 seq_printf(m, "\nCreates: %d sent %d failed",
59866- atomic_read(&sent[SMB2_CREATE_HE]),
59867- atomic_read(&failed[SMB2_CREATE_HE]));
59868+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
59869+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
59870 seq_printf(m, "\nCloses: %d sent %d failed",
59871- atomic_read(&sent[SMB2_CLOSE_HE]),
59872- atomic_read(&failed[SMB2_CLOSE_HE]));
59873+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
59874+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
59875 seq_printf(m, "\nFlushes: %d sent %d failed",
59876- atomic_read(&sent[SMB2_FLUSH_HE]),
59877- atomic_read(&failed[SMB2_FLUSH_HE]));
59878+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
59879+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
59880 seq_printf(m, "\nReads: %d sent %d failed",
59881- atomic_read(&sent[SMB2_READ_HE]),
59882- atomic_read(&failed[SMB2_READ_HE]));
59883+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
59884+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
59885 seq_printf(m, "\nWrites: %d sent %d failed",
59886- atomic_read(&sent[SMB2_WRITE_HE]),
59887- atomic_read(&failed[SMB2_WRITE_HE]));
59888+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
59889+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
59890 seq_printf(m, "\nLocks: %d sent %d failed",
59891- atomic_read(&sent[SMB2_LOCK_HE]),
59892- atomic_read(&failed[SMB2_LOCK_HE]));
59893+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
59894+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
59895 seq_printf(m, "\nIOCTLs: %d sent %d failed",
59896- atomic_read(&sent[SMB2_IOCTL_HE]),
59897- atomic_read(&failed[SMB2_IOCTL_HE]));
59898+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
59899+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
59900 seq_printf(m, "\nCancels: %d sent %d failed",
59901- atomic_read(&sent[SMB2_CANCEL_HE]),
59902- atomic_read(&failed[SMB2_CANCEL_HE]));
59903+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
59904+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
59905 seq_printf(m, "\nEchos: %d sent %d failed",
59906- atomic_read(&sent[SMB2_ECHO_HE]),
59907- atomic_read(&failed[SMB2_ECHO_HE]));
59908+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
59909+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
59910 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
59911- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
59912- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
59913+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
59914+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
59915 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
59916- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
59917- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
59918+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
59919+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
59920 seq_printf(m, "\nQueryInfos: %d sent %d failed",
59921- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
59922- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
59923+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
59924+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
59925 seq_printf(m, "\nSetInfos: %d sent %d failed",
59926- atomic_read(&sent[SMB2_SET_INFO_HE]),
59927- atomic_read(&failed[SMB2_SET_INFO_HE]));
59928+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
59929+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
59930 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
59931- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
59932- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
59933+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
59934+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
59935 #endif
59936 }
59937
59938diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
59939index 3417340..b942390 100644
59940--- a/fs/cifs/smb2pdu.c
59941+++ b/fs/cifs/smb2pdu.c
59942@@ -2144,8 +2144,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
59943 default:
59944 cifs_dbg(VFS, "info level %u isn't supported\n",
59945 srch_inf->info_level);
59946- rc = -EINVAL;
59947- goto qdir_exit;
59948+ return -EINVAL;
59949 }
59950
59951 req->FileIndex = cpu_to_le32(index);
59952diff --git a/fs/coda/cache.c b/fs/coda/cache.c
59953index 46ee6f2..89a9e7f 100644
59954--- a/fs/coda/cache.c
59955+++ b/fs/coda/cache.c
59956@@ -24,7 +24,7 @@
59957 #include "coda_linux.h"
59958 #include "coda_cache.h"
59959
59960-static atomic_t permission_epoch = ATOMIC_INIT(0);
59961+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
59962
59963 /* replace or extend an acl cache hit */
59964 void coda_cache_enter(struct inode *inode, int mask)
59965@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
59966 struct coda_inode_info *cii = ITOC(inode);
59967
59968 spin_lock(&cii->c_lock);
59969- cii->c_cached_epoch = atomic_read(&permission_epoch);
59970+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
59971 if (!uid_eq(cii->c_uid, current_fsuid())) {
59972 cii->c_uid = current_fsuid();
59973 cii->c_cached_perm = mask;
59974@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
59975 {
59976 struct coda_inode_info *cii = ITOC(inode);
59977 spin_lock(&cii->c_lock);
59978- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
59979+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
59980 spin_unlock(&cii->c_lock);
59981 }
59982
59983 /* remove all acl caches */
59984 void coda_cache_clear_all(struct super_block *sb)
59985 {
59986- atomic_inc(&permission_epoch);
59987+ atomic_inc_unchecked(&permission_epoch);
59988 }
59989
59990
59991@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
59992 spin_lock(&cii->c_lock);
59993 hit = (mask & cii->c_cached_perm) == mask &&
59994 uid_eq(cii->c_uid, current_fsuid()) &&
59995- cii->c_cached_epoch == atomic_read(&permission_epoch);
59996+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
59997 spin_unlock(&cii->c_lock);
59998
59999 return hit;
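The coda hunk keeps the epoch-invalidation idiom intact while switching permission_epoch to the unchecked type: the counter is only ever compared for equality, so wraparound is harmless and an overflow trap would be a false positive. The idiom itself, sketched in portable C with illustrative names:

#include <stdatomic.h>

static atomic_int cache_epoch;	/* one global generation counter */

struct cached_perm {
	int epoch;	/* generation observed when the entry was filled */
	int mask;	/* cached permission bits */
};

static void cache_fill(struct cached_perm *c, int mask)
{
	c->mask = mask;
	c->epoch = atomic_load(&cache_epoch);
}

static int cache_hit(const struct cached_perm *c, int mask)
{
	/* stale entries fail the equality test and force re-validation */
	return (c->mask & mask) == mask &&
	       c->epoch == atomic_load(&cache_epoch);
}

static void cache_clear_all(void)
{
	atomic_fetch_add(&cache_epoch, 1);	/* O(1) invalidation of all entries */
}

coda_cache_clear_inode's "epoch - 1" assignment above is the single-entry version of the same trick: it backdates one entry instead of advancing the world.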
60000diff --git a/fs/compat.c b/fs/compat.c
60001index 6fd272d..dd34ba2 100644
60002--- a/fs/compat.c
60003+++ b/fs/compat.c
60004@@ -54,7 +54,7 @@
60005 #include <asm/ioctls.h>
60006 #include "internal.h"
60007
60008-int compat_log = 1;
60009+int compat_log = 0;
60010
60011 int compat_printk(const char *fmt, ...)
60012 {
60013@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
60014
60015 set_fs(KERNEL_DS);
60016 /* The __user pointer cast is valid because of the set_fs() */
60017- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
60018+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
60019 set_fs(oldfs);
60020 /* truncating is ok because it's a user address */
60021 if (!ret)
60022@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
60023 goto out;
60024
60025 ret = -EINVAL;
60026- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
60027+ if (nr_segs > UIO_MAXIOV)
60028 goto out;
60029 if (nr_segs > fast_segs) {
60030 ret = -ENOMEM;
60031@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
60032 struct compat_readdir_callback {
60033 struct dir_context ctx;
60034 struct compat_old_linux_dirent __user *dirent;
60035+ struct file * file;
60036 int result;
60037 };
60038
60039@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
60040 buf->result = -EOVERFLOW;
60041 return -EOVERFLOW;
60042 }
60043+
60044+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60045+ return 0;
60046+
60047 buf->result++;
60048 dirent = buf->dirent;
60049 if (!access_ok(VERIFY_WRITE, dirent,
60050@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60051 if (!f.file)
60052 return -EBADF;
60053
60054+ buf.file = f.file;
60055 error = iterate_dir(f.file, &buf.ctx);
60056 if (buf.result)
60057 error = buf.result;
60058@@ -913,6 +919,7 @@ struct compat_getdents_callback {
60059 struct dir_context ctx;
60060 struct compat_linux_dirent __user *current_dir;
60061 struct compat_linux_dirent __user *previous;
60062+ struct file * file;
60063 int count;
60064 int error;
60065 };
60066@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
60067 buf->error = -EOVERFLOW;
60068 return -EOVERFLOW;
60069 }
60070+
60071+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60072+ return 0;
60073+
60074 dirent = buf->previous;
60075 if (dirent) {
60076 if (__put_user(offset, &dirent->d_off))
60077@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
60078 if (!f.file)
60079 return -EBADF;
60080
60081+ buf.file = f.file;
60082 error = iterate_dir(f.file, &buf.ctx);
60083 if (error >= 0)
60084 error = buf.error;
60085@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
60086 struct dir_context ctx;
60087 struct linux_dirent64 __user *current_dir;
60088 struct linux_dirent64 __user *previous;
60089+ struct file * file;
60090 int count;
60091 int error;
60092 };
60093@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
60094 buf->error = -EINVAL; /* only used if we fail.. */
60095 if (reclen > buf->count)
60096 return -EINVAL;
60097+
60098+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60099+ return 0;
60100+
60101 dirent = buf->previous;
60102
60103 if (dirent) {
60104@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60105 if (!f.file)
60106 return -EBADF;
60107
60108+ buf.file = f.file;
60109 error = iterate_dir(f.file, &buf.ctx);
60110 if (error >= 0)
60111 error = buf.error;
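All three compat readdir hunks above apply one pattern: the syscall stashes the struct file into the callback context, and each fill function asks gr_acl_handle_filldir() whether the entry may be shown, returning 0 to skip hidden entries without aborting the directory walk. A generic sketch of a filtering fill callback; entry_visible() is a hypothetical stand-in for the grsecurity policy hook:

/* context threaded from the syscall into the per-entry callback */
struct filter_ctx {
	const void *file;	/* stashed before iterate_dir(), like buf.file */
	int (*emit)(const char *name, int namlen);
};

/* hypothetical policy hook standing in for gr_acl_handle_filldir() */
static int entry_visible(const void *file, const char *name, int namlen)
{
	(void)file; (void)name; (void)namlen;
	return 1;
}

static int filldir_filtered(struct filter_ctx *ctx, const char *name,
			    int namlen)
{
	if (!entry_visible(ctx->file, name, namlen))
		return 0;	/* skip silently; the walk continues */
	return ctx->emit(name, namlen);
}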
60112diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
60113index 4d24d17..4f8c09e 100644
60114--- a/fs/compat_binfmt_elf.c
60115+++ b/fs/compat_binfmt_elf.c
60116@@ -30,11 +30,13 @@
60117 #undef elf_phdr
60118 #undef elf_shdr
60119 #undef elf_note
60120+#undef elf_dyn
60121 #undef elf_addr_t
60122 #define elfhdr elf32_hdr
60123 #define elf_phdr elf32_phdr
60124 #define elf_shdr elf32_shdr
60125 #define elf_note elf32_note
60126+#define elf_dyn Elf32_Dyn
60127 #define elf_addr_t Elf32_Addr
60128
60129 /*
60130diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
60131index afec645..9c65620 100644
60132--- a/fs/compat_ioctl.c
60133+++ b/fs/compat_ioctl.c
60134@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
60135 return -EFAULT;
60136 if (__get_user(udata, &ss32->iomem_base))
60137 return -EFAULT;
60138- ss.iomem_base = compat_ptr(udata);
60139+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
60140 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
60141 __get_user(ss.port_high, &ss32->port_high))
60142 return -EFAULT;
60143@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
60144 for (i = 0; i < nmsgs; i++) {
60145 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
60146 return -EFAULT;
60147- if (get_user(datap, &umsgs[i].buf) ||
60148- put_user(compat_ptr(datap), &tmsgs[i].buf))
60149+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
60150+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
60151 return -EFAULT;
60152 }
60153 return sys_ioctl(fd, cmd, (unsigned long)tdata);
60154@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
60155 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
60156 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
60157 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
60158- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
60159+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
60160 return -EFAULT;
60161
60162 return ioctl_preallocate(file, p);
60163@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
60164 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
60165 {
60166 unsigned int a, b;
60167- a = *(unsigned int *)p;
60168- b = *(unsigned int *)q;
60169+ a = *(const unsigned int *)p;
60170+ b = *(const unsigned int *)q;
60171 if (a > b)
60172 return 1;
60173 if (a < b)
60174diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
60175index c9c298b..544d100 100644
60176--- a/fs/configfs/dir.c
60177+++ b/fs/configfs/dir.c
60178@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60179 }
60180 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
60181 struct configfs_dirent *next;
60182- const char *name;
60183+ const unsigned char * name;
60184+ char d_name[sizeof(next->s_dentry->d_iname)];
60185 int len;
60186 struct inode *inode = NULL;
60187
60188@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60189 continue;
60190
60191 name = configfs_get_name(next);
60192- len = strlen(name);
60193+ if (next->s_dentry && name == next->s_dentry->d_iname) {
60194+ len = next->s_dentry->d_name.len;
60195+ memcpy(d_name, name, len);
60196+ name = d_name;
60197+ } else
60198+ len = strlen(name);
60199
60200 /*
60201 * We'll have a dentry and an inode for
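The configfs hunk above closes a small race in readdir: when a dentry's name is stored inline (d_iname), a concurrent rename can rewrite the bytes while strlen() is still walking them, so the fix reads the recorded length once and snapshots the bytes into a stack buffer before use. A self-contained sketch of the same defensive copy; the struct fields are stand-ins for the dentry fields the hunk touches:

#include <string.h>

struct entry {
	char inline_name[32];	/* like d_iname: mutable under rename */
	unsigned int name_len;	/* like d_name.len: the recorded length */
	const char *name;	/* points into inline_name for short names */
};

/* assumes, as the kernel does, that name_len < sizeof(inline_name) */
static unsigned int stable_name(const struct entry *e, char buf[32])
{
	if (e->name == e->inline_name) {
		unsigned int len = e->name_len;	/* read the length once */
		memcpy(buf, e->name, len);	/* private, stable copy */
		return len;
	}
	return (unsigned int)strlen(e->name);	/* out-of-line names are stable */
}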
60202diff --git a/fs/coredump.c b/fs/coredump.c
60203index b5c86ff..0dac262 100644
60204--- a/fs/coredump.c
60205+++ b/fs/coredump.c
60206@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
60207 struct pipe_inode_info *pipe = file->private_data;
60208
60209 pipe_lock(pipe);
60210- pipe->readers++;
60211- pipe->writers--;
60212+ atomic_inc(&pipe->readers);
60213+ atomic_dec(&pipe->writers);
60214 wake_up_interruptible_sync(&pipe->wait);
60215 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60216 pipe_unlock(pipe);
60217@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
60218 * We actually want wait_event_freezable() but then we need
60219 * to clear TIF_SIGPENDING and improve dump_interrupted().
60220 */
60221- wait_event_interruptible(pipe->wait, pipe->readers == 1);
60222+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
60223
60224 pipe_lock(pipe);
60225- pipe->readers--;
60226- pipe->writers++;
60227+ atomic_dec(&pipe->readers);
60228+ atomic_inc(&pipe->writers);
60229 pipe_unlock(pipe);
60230 }
60231
60232@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
60233 struct files_struct *displaced;
60234 bool need_nonrelative = false;
60235 bool core_dumped = false;
60236- static atomic_t core_dump_count = ATOMIC_INIT(0);
60237+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
60238+ long signr = siginfo->si_signo;
60239+ int dumpable;
60240 struct coredump_params cprm = {
60241 .siginfo = siginfo,
60242 .regs = signal_pt_regs(),
60243@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
60244 .mm_flags = mm->flags,
60245 };
60246
60247- audit_core_dumps(siginfo->si_signo);
60248+ audit_core_dumps(signr);
60249+
60250+ dumpable = __get_dumpable(cprm.mm_flags);
60251+
60252+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
60253+ gr_handle_brute_attach(dumpable);
60254
60255 binfmt = mm->binfmt;
60256 if (!binfmt || !binfmt->core_dump)
60257 goto fail;
60258- if (!__get_dumpable(cprm.mm_flags))
60259+ if (!dumpable)
60260 goto fail;
60261
60262 cred = prepare_creds();
60263@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
60264 need_nonrelative = true;
60265 }
60266
60267- retval = coredump_wait(siginfo->si_signo, &core_state);
60268+ retval = coredump_wait(signr, &core_state);
60269 if (retval < 0)
60270 goto fail_creds;
60271
60272@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
60273 }
60274 cprm.limit = RLIM_INFINITY;
60275
60276- dump_count = atomic_inc_return(&core_dump_count);
60277+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
60278 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
60279 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
60280 task_tgid_vnr(current), current->comm);
60281@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
60282 } else {
60283 struct inode *inode;
60284
60285+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
60286+
60287 if (cprm.limit < binfmt->min_coredump)
60288 goto fail_unlock;
60289
60290@@ -681,7 +690,7 @@ close_fail:
60291 filp_close(cprm.file, NULL);
60292 fail_dropcount:
60293 if (ispipe)
60294- atomic_dec(&core_dump_count);
60295+ atomic_dec_unchecked(&core_dump_count);
60296 fail_unlock:
60297 kfree(cn.corename);
60298 coredump_finish(mm, core_dumped);
60299@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
60300 struct file *file = cprm->file;
60301 loff_t pos = file->f_pos;
60302 ssize_t n;
60303+
60304+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
60305 if (cprm->written + nr > cprm->limit)
60306 return 0;
60307 while (nr) {
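Besides converting the pipe reader/writer counts and the static core_dump_count to atomic forms, the coredump hunks leave the core_pipe_limit logic itself alone; that logic is a compact bounded-concurrency idiom: reserve a slot with an atomic increment-and-return, compare against the limit, and release the slot on every exit path. A userspace model of the idiom:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int dump_count;

/* try to take one of `limit` concurrent slots (limit 0 = unlimited) */
static bool dump_slot_get(int limit)
{
	int n = atomic_fetch_add(&dump_count, 1) + 1;	/* inc-and-return */
	if (limit && n > limit) {
		atomic_fetch_sub(&dump_count, 1);	/* back out on refusal */
		return false;
	}
	return true;
}

static void dump_slot_put(void)
{
	atomic_fetch_sub(&dump_count, 1);	/* the fail_dropcount path */
}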
60308diff --git a/fs/dcache.c b/fs/dcache.c
60309index e368d4f..b40ba59 100644
60310--- a/fs/dcache.c
60311+++ b/fs/dcache.c
60312@@ -508,7 +508,7 @@ static void __dentry_kill(struct dentry *dentry)
60313 * dentry_iput drops the locks, at which point nobody (except
60314 * transient RCU lookups) can reach this dentry.
60315 */
60316- BUG_ON((int)dentry->d_lockref.count > 0);
60317+ BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0);
60318 this_cpu_dec(nr_dentry);
60319 if (dentry->d_op && dentry->d_op->d_release)
60320 dentry->d_op->d_release(dentry);
60321@@ -561,7 +561,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
60322 struct dentry *parent = dentry->d_parent;
60323 if (IS_ROOT(dentry))
60324 return NULL;
60325- if (unlikely((int)dentry->d_lockref.count < 0))
60326+ if (unlikely((int)__lockref_read(&dentry->d_lockref) < 0))
60327 return NULL;
60328 if (likely(spin_trylock(&parent->d_lock)))
60329 return parent;
60330@@ -638,7 +638,7 @@ repeat:
60331 dentry->d_flags |= DCACHE_REFERENCED;
60332 dentry_lru_add(dentry);
60333
60334- dentry->d_lockref.count--;
60335+ __lockref_dec(&dentry->d_lockref);
60336 spin_unlock(&dentry->d_lock);
60337 return;
60338
60339@@ -653,7 +653,7 @@ EXPORT_SYMBOL(dput);
60340 /* This must be called with d_lock held */
60341 static inline void __dget_dlock(struct dentry *dentry)
60342 {
60343- dentry->d_lockref.count++;
60344+ __lockref_inc(&dentry->d_lockref);
60345 }
60346
60347 static inline void __dget(struct dentry *dentry)
60348@@ -694,8 +694,8 @@ repeat:
60349 goto repeat;
60350 }
60351 rcu_read_unlock();
60352- BUG_ON(!ret->d_lockref.count);
60353- ret->d_lockref.count++;
60354+ BUG_ON(!__lockref_read(&ret->d_lockref));
60355+ __lockref_inc(&ret->d_lockref);
60356 spin_unlock(&ret->d_lock);
60357 return ret;
60358 }
60359@@ -773,9 +773,9 @@ restart:
60360 spin_lock(&inode->i_lock);
60361 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
60362 spin_lock(&dentry->d_lock);
60363- if (!dentry->d_lockref.count) {
60364+ if (!__lockref_read(&dentry->d_lockref)) {
60365 struct dentry *parent = lock_parent(dentry);
60366- if (likely(!dentry->d_lockref.count)) {
60367+ if (likely(!__lockref_read(&dentry->d_lockref))) {
60368 __dentry_kill(dentry);
60369 dput(parent);
60370 goto restart;
60371@@ -810,7 +810,7 @@ static void shrink_dentry_list(struct list_head *list)
60372 * We found an inuse dentry which was not removed from
60373 * the LRU because of laziness during lookup. Do not free it.
60374 */
60375- if ((int)dentry->d_lockref.count > 0) {
60376+ if ((int)__lockref_read(&dentry->d_lockref) > 0) {
60377 spin_unlock(&dentry->d_lock);
60378 if (parent)
60379 spin_unlock(&parent->d_lock);
60380@@ -848,8 +848,8 @@ static void shrink_dentry_list(struct list_head *list)
60381 dentry = parent;
60382 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
60383 parent = lock_parent(dentry);
60384- if (dentry->d_lockref.count != 1) {
60385- dentry->d_lockref.count--;
60386+ if (__lockref_read(&dentry->d_lockref) != 1) {
60387+ __lockref_dec(&dentry->d_lockref);
60388 spin_unlock(&dentry->d_lock);
60389 if (parent)
60390 spin_unlock(&parent->d_lock);
60391@@ -889,7 +889,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
60392 * counts, just remove them from the LRU. Otherwise give them
60393 * another pass through the LRU.
60394 */
60395- if (dentry->d_lockref.count) {
60396+ if (__lockref_read(&dentry->d_lockref) > 0) {
60397 d_lru_isolate(dentry);
60398 spin_unlock(&dentry->d_lock);
60399 return LRU_REMOVED;
60400@@ -1225,7 +1225,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
60401 } else {
60402 if (dentry->d_flags & DCACHE_LRU_LIST)
60403 d_lru_del(dentry);
60404- if (!dentry->d_lockref.count) {
60405+ if (!__lockref_read(&dentry->d_lockref)) {
60406 d_shrink_add(dentry, &data->dispose);
60407 data->found++;
60408 }
60409@@ -1273,7 +1273,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60410 return D_WALK_CONTINUE;
60411
60412 /* root with refcount 1 is fine */
60413- if (dentry == _data && dentry->d_lockref.count == 1)
60414+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
60415 return D_WALK_CONTINUE;
60416
60417 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
60418@@ -1282,7 +1282,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60419 dentry->d_inode ?
60420 dentry->d_inode->i_ino : 0UL,
60421 dentry,
60422- dentry->d_lockref.count,
60423+ __lockref_read(&dentry->d_lockref),
60424 dentry->d_sb->s_type->name,
60425 dentry->d_sb->s_id);
60426 WARN_ON(1);
60427@@ -1423,7 +1423,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60428 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
60429 if (name->len > DNAME_INLINE_LEN-1) {
60430 size_t size = offsetof(struct external_name, name[1]);
60431- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
60432+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
60433 if (!p) {
60434 kmem_cache_free(dentry_cache, dentry);
60435 return NULL;
60436@@ -1443,7 +1443,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60437 smp_wmb();
60438 dentry->d_name.name = dname;
60439
60440- dentry->d_lockref.count = 1;
60441+ __lockref_set(&dentry->d_lockref, 1);
60442 dentry->d_flags = 0;
60443 spin_lock_init(&dentry->d_lock);
60444 seqcount_init(&dentry->d_seq);
60445@@ -1452,6 +1452,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60446 dentry->d_sb = sb;
60447 dentry->d_op = NULL;
60448 dentry->d_fsdata = NULL;
60449+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
60450+ atomic_set(&dentry->chroot_refcnt, 0);
60451+#endif
60452 INIT_HLIST_BL_NODE(&dentry->d_hash);
60453 INIT_LIST_HEAD(&dentry->d_lru);
60454 INIT_LIST_HEAD(&dentry->d_subdirs);
60455@@ -2151,7 +2154,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
60456 goto next;
60457 }
60458
60459- dentry->d_lockref.count++;
60460+ __lockref_inc(&dentry->d_lockref);
60461 found = dentry;
60462 spin_unlock(&dentry->d_lock);
60463 break;
60464@@ -2250,7 +2253,7 @@ again:
60465 spin_lock(&dentry->d_lock);
60466 inode = dentry->d_inode;
60467 isdir = S_ISDIR(inode->i_mode);
60468- if (dentry->d_lockref.count == 1) {
60469+ if (__lockref_read(&dentry->d_lockref) == 1) {
60470 if (!spin_trylock(&inode->i_lock)) {
60471 spin_unlock(&dentry->d_lock);
60472 cpu_relax();
60473@@ -3203,7 +3206,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
60474
60475 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
60476 dentry->d_flags |= DCACHE_GENOCIDE;
60477- dentry->d_lockref.count--;
60478+ __lockref_dec(&dentry->d_lockref);
60479 }
60480 }
60481 return D_WALK_CONTINUE;
60482@@ -3319,7 +3322,8 @@ void __init vfs_caches_init(unsigned long mempages)
60483 mempages -= reserve;
60484
60485 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
60486- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
60487+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
60488+ SLAB_NO_SANITIZE, NULL);
60489
60490 dcache_init();
60491 inode_init();
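Every dcache hunk above funnels what used to be a direct d_lockref.count access through __lockref_read/__lockref_set/__lockref_inc/__lockref_dec. Centralizing the accesses in one wrapper layer is what allows the patch to change the count's representation (for instance to an overflow-checked atomic) without revisiting each call site. One plausible shape for the wrappers, over a stand-in struct; the patch's real definitions live elsewhere and may differ:

struct lockref_model {	/* stand-in for the kernel's struct lockref */
	int count;
};

static inline int __lockref_read(const struct lockref_model *l)
{
	return l->count;
}

static inline void __lockref_set(struct lockref_model *l, int v)
{
	l->count = v;
}

static inline void __lockref_inc(struct lockref_model *l)
{
	l->count++;
}

static inline void __lockref_dec(struct lockref_model *l)
{
	l->count--;
}

Note that in the shrink_dentry_list hunk the caller is dropping a reference it holds, which is why the wrapped operation there is __lockref_dec.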
60492diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
60493index 05f2960..780f4f8 100644
60494--- a/fs/debugfs/inode.c
60495+++ b/fs/debugfs/inode.c
60496@@ -246,10 +246,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
60497 return 0;
60498 }
60499
60500+static void debugfs_evict_inode(struct inode *inode)
60501+{
60502+ truncate_inode_pages_final(&inode->i_data);
60503+ clear_inode(inode);
60504+ if (S_ISLNK(inode->i_mode))
60505+ kfree(inode->i_private);
60506+}
60507+
60508 static const struct super_operations debugfs_super_operations = {
60509 .statfs = simple_statfs,
60510 .remount_fs = debugfs_remount,
60511 .show_options = debugfs_show_options,
60512+ .evict_inode = debugfs_evict_inode,
60513 };
60514
60515 static int debug_fill_super(struct super_block *sb, void *data, int silent)
60516@@ -416,7 +425,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
60517 */
60518 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
60519 {
60520+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60521+ return __create_file(name, S_IFDIR | S_IRWXU,
60522+#else
60523 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
60524+#endif
60525 parent, NULL, NULL);
60526 }
60527 EXPORT_SYMBOL_GPL(debugfs_create_dir);
60528@@ -466,23 +479,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
60529 int ret = 0;
60530
60531 if (debugfs_positive(dentry)) {
60532- if (dentry->d_inode) {
60533- dget(dentry);
60534- switch (dentry->d_inode->i_mode & S_IFMT) {
60535- case S_IFDIR:
60536- ret = simple_rmdir(parent->d_inode, dentry);
60537- break;
60538- case S_IFLNK:
60539- kfree(dentry->d_inode->i_private);
60540- /* fall through */
60541- default:
60542- simple_unlink(parent->d_inode, dentry);
60543- break;
60544- }
60545- if (!ret)
60546- d_delete(dentry);
60547- dput(dentry);
60548- }
60549+ dget(dentry);
60550+ if (S_ISDIR(dentry->d_inode->i_mode))
60551+ ret = simple_rmdir(parent->d_inode, dentry);
60552+ else
60553+ simple_unlink(parent->d_inode, dentry);
60554+ if (!ret)
60555+ d_delete(dentry);
60556+ dput(dentry);
60557 }
60558 return ret;
60559 }
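The debugfs hunks move the freeing of a symlink's i_private payload out of __debugfs_remove and into a new evict_inode super operation, so the payload is owned by the inode and freed exactly once, when the inode itself is torn down, rather than at unlink time while the inode may still be open. The ownership rule in miniature (illustrative, not the kernel API):

#include <stdlib.h>

struct node {
	int is_link;
	void *payload;	/* like i_private for a debugfs symlink */
};

/* teardown hook: the one and only place the payload is released */
static void node_evict(struct node *n)
{
	if (n->is_link)
		free(n->payload);
}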
60560diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
60561index 1686dc2..9611c50 100644
60562--- a/fs/ecryptfs/inode.c
60563+++ b/fs/ecryptfs/inode.c
60564@@ -664,7 +664,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
60565 old_fs = get_fs();
60566 set_fs(get_ds());
60567 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
60568- (char __user *)lower_buf,
60569+ (char __force_user *)lower_buf,
60570 PATH_MAX);
60571 set_fs(old_fs);
60572 if (rc < 0)
60573diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
60574index e4141f2..d8263e8 100644
60575--- a/fs/ecryptfs/miscdev.c
60576+++ b/fs/ecryptfs/miscdev.c
60577@@ -304,7 +304,7 @@ check_list:
60578 goto out_unlock_msg_ctx;
60579 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
60580 if (msg_ctx->msg) {
60581- if (copy_to_user(&buf[i], packet_length, packet_length_size))
60582+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
60583 goto out_unlock_msg_ctx;
60584 i += packet_length_size;
60585 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
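The miscdev hunk hardens ecryptfs_miscdev_read by checking the derived packet_length_size against the size of the on-stack packet_length buffer before the copy_to_user, so a miscomputed length can no longer read past the buffer. The general shape of the check, with hypothetical names:

#include <stddef.h>
#include <string.h>

struct packet {
	char hdr[8];	/* fixed on-stack header buffer */
};

/* copy a derived number of header bytes only after bounds-checking it
 * against the real buffer size, as the hunk's added test does */
static int emit_header(char *dst, const struct packet *p, size_t hdr_len)
{
	if (hdr_len > sizeof(p->hdr))
		return -1;	/* refuse rather than over-read */
	memcpy(dst, p->hdr, hdr_len);
	return 0;
}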
60586diff --git a/fs/exec.c b/fs/exec.c
60587index ad8798e..5f872c9 100644
60588--- a/fs/exec.c
60589+++ b/fs/exec.c
60590@@ -56,8 +56,20 @@
60591 #include <linux/pipe_fs_i.h>
60592 #include <linux/oom.h>
60593 #include <linux/compat.h>
60594+#include <linux/random.h>
60595+#include <linux/seq_file.h>
60596+#include <linux/coredump.h>
60597+#include <linux/mman.h>
60598+
60599+#ifdef CONFIG_PAX_REFCOUNT
60600+#include <linux/kallsyms.h>
60601+#include <linux/kdebug.h>
60602+#endif
60603+
60604+#include <trace/events/fs.h>
60605
60606 #include <asm/uaccess.h>
60607+#include <asm/sections.h>
60608 #include <asm/mmu_context.h>
60609 #include <asm/tlb.h>
60610
60611@@ -66,19 +78,34 @@
60612
60613 #include <trace/events/sched.h>
60614
60615+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60616+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
60617+{
60618+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
60619+}
60620+#endif
60621+
60622+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
60623+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60624+EXPORT_SYMBOL(pax_set_initial_flags_func);
60625+#endif
60626+
60627 int suid_dumpable = 0;
60628
60629 static LIST_HEAD(formats);
60630 static DEFINE_RWLOCK(binfmt_lock);
60631
60632+extern int gr_process_kernel_exec_ban(void);
60633+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
60634+
60635 void __register_binfmt(struct linux_binfmt * fmt, int insert)
60636 {
60637 BUG_ON(!fmt);
60638 if (WARN_ON(!fmt->load_binary))
60639 return;
60640 write_lock(&binfmt_lock);
60641- insert ? list_add(&fmt->lh, &formats) :
60642- list_add_tail(&fmt->lh, &formats);
60643+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
60644+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
60645 write_unlock(&binfmt_lock);
60646 }
60647
60648@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
60649 void unregister_binfmt(struct linux_binfmt * fmt)
60650 {
60651 write_lock(&binfmt_lock);
60652- list_del(&fmt->lh);
60653+ pax_list_del((struct list_head *)&fmt->lh);
60654 write_unlock(&binfmt_lock);
60655 }
60656
60657@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60658 int write)
60659 {
60660 struct page *page;
60661- int ret;
60662
60663-#ifdef CONFIG_STACK_GROWSUP
60664- if (write) {
60665- ret = expand_downwards(bprm->vma, pos);
60666- if (ret < 0)
60667- return NULL;
60668- }
60669-#endif
60670- ret = get_user_pages(current, bprm->mm, pos,
60671- 1, write, 1, &page, NULL);
60672- if (ret <= 0)
60673+ if (0 > expand_downwards(bprm->vma, pos))
60674+ return NULL;
60675+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
60676 return NULL;
60677
60678 if (write) {
60679@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60680 if (size <= ARG_MAX)
60681 return page;
60682
60683+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60684+ // only allow 512KB for argv+env on suid/sgid binaries
60685+ // to prevent easy ASLR exhaustion
60686+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
60687+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
60688+ (size > (512 * 1024))) {
60689+ put_page(page);
60690+ return NULL;
60691+ }
60692+#endif
60693+
60694 /*
60695 * Limit to 1/4-th the stack size for the argv+env strings.
60696 * This ensures that:
60697@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60698 vma->vm_end = STACK_TOP_MAX;
60699 vma->vm_start = vma->vm_end - PAGE_SIZE;
60700 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
60701+
60702+#ifdef CONFIG_PAX_SEGMEXEC
60703+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
60704+#endif
60705+
60706 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
60707 INIT_LIST_HEAD(&vma->anon_vma_chain);
60708
60709@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60710 arch_bprm_mm_init(mm, vma);
60711 up_write(&mm->mmap_sem);
60712 bprm->p = vma->vm_end - sizeof(void *);
60713+
60714+#ifdef CONFIG_PAX_RANDUSTACK
60715+ if (randomize_va_space)
60716+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
60717+#endif
60718+
60719 return 0;
60720 err:
60721 up_write(&mm->mmap_sem);
60722@@ -396,7 +437,7 @@ struct user_arg_ptr {
60723 } ptr;
60724 };
60725
60726-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60727+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60728 {
60729 const char __user *native;
60730
60731@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60732 compat_uptr_t compat;
60733
60734 if (get_user(compat, argv.ptr.compat + nr))
60735- return ERR_PTR(-EFAULT);
60736+ return (const char __force_user *)ERR_PTR(-EFAULT);
60737
60738 return compat_ptr(compat);
60739 }
60740 #endif
60741
60742 if (get_user(native, argv.ptr.native + nr))
60743- return ERR_PTR(-EFAULT);
60744+ return (const char __force_user *)ERR_PTR(-EFAULT);
60745
60746 return native;
60747 }
60748@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
60749 if (!p)
60750 break;
60751
60752- if (IS_ERR(p))
60753+ if (IS_ERR((const char __force_kernel *)p))
60754 return -EFAULT;
60755
60756 if (i >= max)
60757@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
60758
60759 ret = -EFAULT;
60760 str = get_user_arg_ptr(argv, argc);
60761- if (IS_ERR(str))
60762+ if (IS_ERR((const char __force_kernel *)str))
60763 goto out;
60764
60765 len = strnlen_user(str, MAX_ARG_STRLEN);
60766@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
60767 int r;
60768 mm_segment_t oldfs = get_fs();
60769 struct user_arg_ptr argv = {
60770- .ptr.native = (const char __user *const __user *)__argv,
60771+ .ptr.native = (const char __user * const __force_user *)__argv,
60772 };
60773
60774 set_fs(KERNEL_DS);
60775@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60776 unsigned long new_end = old_end - shift;
60777 struct mmu_gather tlb;
60778
60779- BUG_ON(new_start > new_end);
60780+ if (new_start >= new_end || new_start < mmap_min_addr)
60781+ return -ENOMEM;
60782
60783 /*
60784 * ensure there are no vmas between where we want to go
60785@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60786 if (vma != find_vma(mm, new_start))
60787 return -EFAULT;
60788
60789+#ifdef CONFIG_PAX_SEGMEXEC
60790+ BUG_ON(pax_find_mirror_vma(vma));
60791+#endif
60792+
60793 /*
60794 * cover the whole range: [new_start, old_end)
60795 */
60796@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60797 stack_top = arch_align_stack(stack_top);
60798 stack_top = PAGE_ALIGN(stack_top);
60799
60800- if (unlikely(stack_top < mmap_min_addr) ||
60801- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
60802- return -ENOMEM;
60803-
60804 stack_shift = vma->vm_end - stack_top;
60805
60806 bprm->p -= stack_shift;
60807@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
60808 bprm->exec -= stack_shift;
60809
60810 down_write(&mm->mmap_sem);
60811+
60812+ /* Move stack pages down in memory. */
60813+ if (stack_shift) {
60814+ ret = shift_arg_pages(vma, stack_shift);
60815+ if (ret)
60816+ goto out_unlock;
60817+ }
60818+
60819 vm_flags = VM_STACK_FLAGS;
60820
60821+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
60822+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60823+ vm_flags &= ~VM_EXEC;
60824+
60825+#ifdef CONFIG_PAX_MPROTECT
60826+ if (mm->pax_flags & MF_PAX_MPROTECT)
60827+ vm_flags &= ~VM_MAYEXEC;
60828+#endif
60829+
60830+ }
60831+#endif
60832+
60833 /*
60834 * Adjust stack execute permissions; explicitly enable for
60835 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
60836@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60837 goto out_unlock;
60838 BUG_ON(prev != vma);
60839
60840- /* Move stack pages down in memory. */
60841- if (stack_shift) {
60842- ret = shift_arg_pages(vma, stack_shift);
60843- if (ret)
60844- goto out_unlock;
60845- }
60846-
60847 /* mprotect_fixup is overkill to remove the temporary stack flags */
60848 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
60849
60850@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
60851 #endif
60852 current->mm->start_stack = bprm->p;
60853 ret = expand_stack(vma, stack_base);
60854+
60855+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
60856+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
60857+ unsigned long size;
60858+ vm_flags_t vm_flags;
60859+
60860+ size = STACK_TOP - vma->vm_end;
60861+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
60862+
60863+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
60864+
60865+#ifdef CONFIG_X86
60866+ if (!ret) {
60867+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
60868+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
60869+ }
60870+#endif
60871+
60872+ }
60873+#endif
60874+
60875 if (ret)
60876 ret = -EFAULT;
60877
60878@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
60879 if (err)
60880 goto exit;
60881
60882- if (name->name[0] != '\0')
60883+ if (name->name[0] != '\0') {
60884 fsnotify_open(file);
60885+ trace_open_exec(name->name);
60886+ }
60887
60888 out:
60889 return file;
60890@@ -809,7 +887,7 @@ int kernel_read(struct file *file, loff_t offset,
60891 old_fs = get_fs();
60892 set_fs(get_ds());
60893 /* The cast to a user pointer is valid due to the set_fs() */
60894- result = vfs_read(file, (void __user *)addr, count, &pos);
60895+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
60896 set_fs(old_fs);
60897 return result;
60898 }
60899@@ -854,6 +932,7 @@ static int exec_mmap(struct mm_struct *mm)
60900 tsk->mm = mm;
60901 tsk->active_mm = mm;
60902 activate_mm(active_mm, mm);
60903+ populate_stack();
60904 tsk->mm->vmacache_seqnum = 0;
60905 vmacache_flush(tsk);
60906 task_unlock(tsk);
60907@@ -1252,7 +1331,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
60908 }
60909 rcu_read_unlock();
60910
60911- if (p->fs->users > n_fs)
60912+ if (atomic_read(&p->fs->users) > n_fs)
60913 bprm->unsafe |= LSM_UNSAFE_SHARE;
60914 else
60915 p->fs->in_exec = 1;
60916@@ -1433,6 +1512,31 @@ static int exec_binprm(struct linux_binprm *bprm)
60917 return ret;
60918 }
60919
60920+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60921+static DEFINE_PER_CPU(u64, exec_counter);
60922+static int __init init_exec_counters(void)
60923+{
60924+ unsigned int cpu;
60925+
60926+ for_each_possible_cpu(cpu) {
60927+ per_cpu(exec_counter, cpu) = (u64)cpu;
60928+ }
60929+
60930+ return 0;
60931+}
60932+early_initcall(init_exec_counters);
60933+static inline void increment_exec_counter(void)
60934+{
60935+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
60936+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
60937+}
60938+#else
60939+static inline void increment_exec_counter(void) {}
60940+#endif
60941+
60942+extern void gr_handle_exec_args(struct linux_binprm *bprm,
60943+ struct user_arg_ptr argv);
60944+
60945 /*
60946 * sys_execve() executes a new program.
60947 */
60948@@ -1441,6 +1545,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60949 struct user_arg_ptr envp,
60950 int flags)
60951 {
60952+#ifdef CONFIG_GRKERNSEC
60953+ struct file *old_exec_file;
60954+ struct acl_subject_label *old_acl;
60955+ struct rlimit old_rlim[RLIM_NLIMITS];
60956+#endif
60957 char *pathbuf = NULL;
60958 struct linux_binprm *bprm;
60959 struct file *file;
60960@@ -1450,6 +1559,8 @@ static int do_execveat_common(int fd, struct filename *filename,
60961 if (IS_ERR(filename))
60962 return PTR_ERR(filename);
60963
60964+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
60965+
60966 /*
60967 * We move the actual failure in case of RLIMIT_NPROC excess from
60968 * set*uid() to execve() because too many poorly written programs
60969@@ -1487,6 +1598,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60970 if (IS_ERR(file))
60971 goto out_unmark;
60972
60973+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
60974+ retval = -EPERM;
60975+ goto out_unmark;
60976+ }
60977+
60978 sched_exec();
60979
60980 bprm->file = file;
60981@@ -1513,6 +1629,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60982 }
60983 bprm->interp = bprm->filename;
60984
60985+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
60986+ retval = -EACCES;
60987+ goto out_unmark;
60988+ }
60989+
60990 retval = bprm_mm_init(bprm);
60991 if (retval)
60992 goto out_unmark;
60993@@ -1529,24 +1650,70 @@ static int do_execveat_common(int fd, struct filename *filename,
60994 if (retval < 0)
60995 goto out;
60996
60997+#ifdef CONFIG_GRKERNSEC
60998+ old_acl = current->acl;
60999+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
61000+ old_exec_file = current->exec_file;
61001+ get_file(file);
61002+ current->exec_file = file;
61003+#endif
61004+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61005+ /* limit suid stack to 8MB
61006+ * we saved the old limits above and will restore them if this exec fails
61007+ */
61008+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
61009+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
61010+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
61011+#endif
61012+
61013+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
61014+ retval = -EPERM;
61015+ goto out_fail;
61016+ }
61017+
61018+ if (!gr_tpe_allow(file)) {
61019+ retval = -EACCES;
61020+ goto out_fail;
61021+ }
61022+
61023+ if (gr_check_crash_exec(file)) {
61024+ retval = -EACCES;
61025+ goto out_fail;
61026+ }
61027+
61028+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
61029+ bprm->unsafe);
61030+ if (retval < 0)
61031+ goto out_fail;
61032+
61033 retval = copy_strings_kernel(1, &bprm->filename, bprm);
61034 if (retval < 0)
61035- goto out;
61036+ goto out_fail;
61037
61038 bprm->exec = bprm->p;
61039 retval = copy_strings(bprm->envc, envp, bprm);
61040 if (retval < 0)
61041- goto out;
61042+ goto out_fail;
61043
61044 retval = copy_strings(bprm->argc, argv, bprm);
61045 if (retval < 0)
61046- goto out;
61047+ goto out_fail;
61048+
61049+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
61050+
61051+ gr_handle_exec_args(bprm, argv);
61052
61053 retval = exec_binprm(bprm);
61054 if (retval < 0)
61055- goto out;
61056+ goto out_fail;
61057+#ifdef CONFIG_GRKERNSEC
61058+ if (old_exec_file)
61059+ fput(old_exec_file);
61060+#endif
61061
61062 /* execve succeeded */
61063+
61064+ increment_exec_counter();
61065 current->fs->in_exec = 0;
61066 current->in_execve = 0;
61067 acct_update_integrals(current);
61068@@ -1558,6 +1725,14 @@ static int do_execveat_common(int fd, struct filename *filename,
61069 put_files_struct(displaced);
61070 return retval;
61071
61072+out_fail:
61073+#ifdef CONFIG_GRKERNSEC
61074+ current->acl = old_acl;
61075+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
61076+ fput(current->exec_file);
61077+ current->exec_file = old_exec_file;
61078+#endif
61079+
61080 out:
61081 if (bprm->mm) {
61082 acct_arg_size(bprm, 0);
61083@@ -1704,3 +1879,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
61084 argv, envp, flags);
61085 }
61086 #endif
61087+
61088+int pax_check_flags(unsigned long *flags)
61089+{
61090+ int retval = 0;
61091+
61092+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
61093+ if (*flags & MF_PAX_SEGMEXEC)
61094+ {
61095+ *flags &= ~MF_PAX_SEGMEXEC;
61096+ retval = -EINVAL;
61097+ }
61098+#endif
61099+
61100+ if ((*flags & MF_PAX_PAGEEXEC)
61101+
61102+#ifdef CONFIG_PAX_PAGEEXEC
61103+ && (*flags & MF_PAX_SEGMEXEC)
61104+#endif
61105+
61106+ )
61107+ {
61108+ *flags &= ~MF_PAX_PAGEEXEC;
61109+ retval = -EINVAL;
61110+ }
61111+
61112+ if ((*flags & MF_PAX_MPROTECT)
61113+
61114+#ifdef CONFIG_PAX_MPROTECT
61115+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61116+#endif
61117+
61118+ )
61119+ {
61120+ *flags &= ~MF_PAX_MPROTECT;
61121+ retval = -EINVAL;
61122+ }
61123+
61124+ if ((*flags & MF_PAX_EMUTRAMP)
61125+
61126+#ifdef CONFIG_PAX_EMUTRAMP
61127+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61128+#endif
61129+
61130+ )
61131+ {
61132+ *flags &= ~MF_PAX_EMUTRAMP;
61133+ retval = -EINVAL;
61134+ }
61135+
61136+ return retval;
61137+}
61138+
61139+EXPORT_SYMBOL(pax_check_flags);
61140+
61141+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61142+char *pax_get_path(const struct path *path, char *buf, int buflen)
61143+{
61144+ char *pathname = d_path(path, buf, buflen);
61145+
61146+ if (IS_ERR(pathname))
61147+ goto toolong;
61148+
61149+ pathname = mangle_path(buf, pathname, "\t\n\\");
61150+ if (!pathname)
61151+ goto toolong;
61152+
61153+ *pathname = 0;
61154+ return buf;
61155+
61156+toolong:
61157+ return "<path too long>";
61158+}
61159+EXPORT_SYMBOL(pax_get_path);
61160+
61161+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
61162+{
61163+ struct task_struct *tsk = current;
61164+ struct mm_struct *mm = current->mm;
61165+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
61166+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
61167+ char *path_exec = NULL;
61168+ char *path_fault = NULL;
61169+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
61170+ siginfo_t info = { };
61171+
61172+ if (buffer_exec && buffer_fault) {
61173+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
61174+
61175+ down_read(&mm->mmap_sem);
61176+ vma = mm->mmap;
61177+ while (vma && (!vma_exec || !vma_fault)) {
61178+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
61179+ vma_exec = vma;
61180+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
61181+ vma_fault = vma;
61182+ vma = vma->vm_next;
61183+ }
61184+ if (vma_exec)
61185+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
61186+ if (vma_fault) {
61187+ start = vma_fault->vm_start;
61188+ end = vma_fault->vm_end;
61189+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
61190+ if (vma_fault->vm_file)
61191+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
61192+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
61193+ path_fault = "<heap>";
61194+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
61195+ path_fault = "<stack>";
61196+ else
61197+ path_fault = "<anonymous mapping>";
61198+ }
61199+ up_read(&mm->mmap_sem);
61200+ }
61201+ if (tsk->signal->curr_ip)
61202+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
61203+ else
61204+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
61205+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
61206+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
61207+ free_page((unsigned long)buffer_exec);
61208+ free_page((unsigned long)buffer_fault);
61209+ pax_report_insns(regs, pc, sp);
61210+ info.si_signo = SIGKILL;
61211+ info.si_errno = 0;
61212+ info.si_code = SI_KERNEL;
61213+ info.si_pid = 0;
61214+ info.si_uid = 0;
61215+ do_coredump(&info);
61216+}
61217+#endif
61218+
61219+#ifdef CONFIG_PAX_REFCOUNT
61220+void pax_report_refcount_overflow(struct pt_regs *regs)
61221+{
61222+ if (current->signal->curr_ip)
61223+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
61224+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
61225+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61226+ else
61227+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
61228+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61229+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
61230+ preempt_disable();
61231+ show_regs(regs);
61232+ preempt_enable();
61233+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
61234+}
61235+#endif
61236+
61237+#ifdef CONFIG_PAX_USERCOPY
61238+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
61239+static noinline int check_stack_object(const void *obj, unsigned long len)
61240+{
61241+ const void * const stack = task_stack_page(current);
61242+ const void * const stackend = stack + THREAD_SIZE;
61243+
61244+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61245+ const void *frame = NULL;
61246+ const void *oldframe;
61247+#endif
61248+
61249+ if (obj + len < obj)
61250+ return -1;
61251+
61252+ if (obj + len <= stack || stackend <= obj)
61253+ return 0;
61254+
61255+ if (obj < stack || stackend < obj + len)
61256+ return -1;
61257+
61258+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61259+ oldframe = __builtin_frame_address(1);
61260+ if (oldframe)
61261+ frame = __builtin_frame_address(2);
61262+ /*
61263+ low ----------------------------------------------> high
61264+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
61265+ ^----------------^
61266+ allow copies only within here
61267+ */
61268+ while (stack <= frame && frame < stackend) {
61269+ /* if obj + len extends past the last frame, this
61270+ check won't pass and the next frame will be 0,
61271+ causing us to bail out and correctly report
61272+ the copy as invalid
61273+ */
61274+ if (obj + len <= frame)
61275+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
61276+ oldframe = frame;
61277+ frame = *(const void * const *)frame;
61278+ }
61279+ return -1;
61280+#else
61281+ return 1;
61282+#endif
61283+}
61284+
61285+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
61286+{
61287+ if (current->signal->curr_ip)
61288+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61289+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61290+ else
61291+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61292+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61293+ dump_stack();
61294+ gr_handle_kernel_exploit();
61295+ do_group_exit(SIGKILL);
61296+}
61297+#endif
61298+
61299+#ifdef CONFIG_PAX_USERCOPY
61300+
61301+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
61302+{
61303+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61304+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
61305+#ifdef CONFIG_MODULES
61306+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
61307+#else
61308+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
61309+#endif
61310+
61311+#else
61312+ unsigned long textlow = (unsigned long)_stext;
61313+ unsigned long texthigh = (unsigned long)_etext;
61314+
61315+#ifdef CONFIG_X86_64
61316+ /* check against linear mapping as well */
61317+ if (high > (unsigned long)__va(__pa(textlow)) &&
61318+ low < (unsigned long)__va(__pa(texthigh)))
61319+ return true;
61320+#endif
61321+
61322+#endif
61323+
61324+ if (high <= textlow || low >= texthigh)
61325+ return false;
61326+ else
61327+ return true;
61328+}
61329+#endif
61330+
61331+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
61332+{
61333+#ifdef CONFIG_PAX_USERCOPY
61334+ const char *type;
61335+#endif
61336+
61337+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
61338+ unsigned long stackstart = (unsigned long)task_stack_page(current);
61339+ unsigned long currentsp = (unsigned long)&stackstart;
61340+ if (unlikely((currentsp < stackstart + 512 ||
61341+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
61342+ BUG();
61343+#endif
61344+
61345+#ifndef CONFIG_PAX_USERCOPY_DEBUG
61346+ if (const_size)
61347+ return;
61348+#endif
61349+
61350+#ifdef CONFIG_PAX_USERCOPY
61351+ if (!n)
61352+ return;
61353+
61354+ type = check_heap_object(ptr, n);
61355+ if (!type) {
61356+ int ret = check_stack_object(ptr, n);
61357+ if (ret == 1 || ret == 2)
61358+ return;
61359+ if (ret == 0) {
61360+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
61361+ type = "<kernel text>";
61362+ else
61363+ return;
61364+ } else
61365+ type = "<process stack>";
61366+ }
61367+
61368+ pax_report_usercopy(ptr, n, to_user, type);
61369+#endif
61370+
61371+}
61372+EXPORT_SYMBOL(__check_object_size);
61373+
61374+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
61375+void pax_track_stack(void)
61376+{
61377+ unsigned long sp = (unsigned long)&sp;
61378+ if (sp < current_thread_info()->lowest_stack &&
61379+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
61380+ current_thread_info()->lowest_stack = sp;
61381+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
61382+ BUG();
61383+}
61384+EXPORT_SYMBOL(pax_track_stack);
61385+#endif
61386+
61387+#ifdef CONFIG_PAX_SIZE_OVERFLOW
61388+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
61389+{
61390+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s\n", func, file, line, ssa_name);
61391+ dump_stack();
61392+ do_group_exit(SIGKILL);
61393+}
61394+EXPORT_SYMBOL(report_size_overflow);
61395+#endif
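
The usercopy validation above is mostly interval arithmetic against the task's stack bounds. Below is a minimal user-space sketch of the same classification, assuming flat unsigned addresses in place of kernel pointers and omitting the x86 frame-pointer walk; the bounds and names are illustrative only, not the kernel's.

#include <stdio.h>

/* Sketch of check_stack_object()'s non-frame-walk path.
 * Returns: 0 = object entirely off the stack, 1 = fully on it,
 * -1 = wrap-around or partial overlap (an error). */
static int classify(unsigned long obj, unsigned long len,
                    unsigned long stack, unsigned long stackend)
{
	if (obj + len < obj)
		return -1;		/* pointer arithmetic wrapped */
	if (obj + len <= stack || stackend <= obj)
		return 0;		/* no overlap with the stack at all */
	if (obj < stack || stackend < obj + len)
		return -1;		/* straddles a stack boundary */
	return 1;			/* fully inside */
}

int main(void)
{
	unsigned long stack = 0x1000, stackend = 0x2000;

	printf("%d\n", classify(0x1100, 0x080, stack, stackend));	/* 1 */
	printf("%d\n", classify(0x0ff0, 0x040, stack, stackend));	/* -1 */
	printf("%d\n", classify(0x3000, 0x008, stack, stackend));	/* 0 */
	return 0;
}
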
61396diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
61397index 9f9992b..8b59411 100644
61398--- a/fs/ext2/balloc.c
61399+++ b/fs/ext2/balloc.c
61400@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
61401
61402 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61403 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61404- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61405+ if (free_blocks < root_blocks + 1 &&
61406 !uid_eq(sbi->s_resuid, current_fsuid()) &&
61407 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61408- !in_group_p (sbi->s_resgid))) {
61409+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61410 return 0;
61411 }
61412 return 1;
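
This hunk (and its ext3/ext4 counterparts below) reorders the reserved-blocks test so the privileged check is evaluated last, and swaps capable() for capable_nolog(), a grsecurity variant that presumably skips audit logging on this routine path. With short-circuit evaluation, the capability check, which can log and set PF_SUPERPRIV, now only runs once the cheap uid/gid tests have already failed. A stand-alone illustration of the ordering, with all names below being stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* Models capable(): may log or set task flags as a side effect. */
static bool priv_check_with_side_effects(void)
{
	puts("privilege check ran (side effects happen here)");
	return true;
}

/* Models the cheap, side-effect-free reserved-owner test. */
static bool is_reserved_owner(int uid)
{
	return uid == 0;
}

static bool may_use_reserved(int uid)
{
	/* Cheap test first; || short-circuits, so the privileged
	 * check only executes when the owner test fails. */
	return is_reserved_owner(uid) || priv_check_with_side_effects();
}

int main(void)
{
	printf("root:  %d\n", may_use_reserved(0));	/* no side effect */
	printf("other: %d\n", may_use_reserved(1000));	/* check runs */
	return 0;
}
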
61413diff --git a/fs/ext2/super.c b/fs/ext2/super.c
61414index ae55fdd..5e64c27 100644
61415--- a/fs/ext2/super.c
61416+++ b/fs/ext2/super.c
61417@@ -268,10 +268,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
61418 #ifdef CONFIG_EXT2_FS_XATTR
61419 if (test_opt(sb, XATTR_USER))
61420 seq_puts(seq, ",user_xattr");
61421- if (!test_opt(sb, XATTR_USER) &&
61422- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
61423+ if (!test_opt(sb, XATTR_USER))
61424 seq_puts(seq, ",nouser_xattr");
61425- }
61426 #endif
61427
61428 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61429@@ -850,8 +848,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
61430 if (def_mount_opts & EXT2_DEFM_UID16)
61431 set_opt(sbi->s_mount_opt, NO_UID32);
61432 #ifdef CONFIG_EXT2_FS_XATTR
61433- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
61434- set_opt(sbi->s_mount_opt, XATTR_USER);
61435+ /* always enable user xattrs */
61436+ set_opt(sbi->s_mount_opt, XATTR_USER);
61437 #endif
61438 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61439 if (def_mount_opts & EXT2_DEFM_ACL)
61440diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
61441index 9142614..97484fa 100644
61442--- a/fs/ext2/xattr.c
61443+++ b/fs/ext2/xattr.c
61444@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
61445 struct buffer_head *bh = NULL;
61446 struct ext2_xattr_entry *entry;
61447 char *end;
61448- size_t rest = buffer_size;
61449+ size_t rest = buffer_size, total_size = 0;
61450 int error;
61451
61452 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
61453@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
61454 buffer += size;
61455 }
61456 rest -= size;
61457+ total_size += size;
61458 }
61459 }
61460- error = buffer_size - rest; /* total size */
61461+ error = total_size;
61462
61463 cleanup:
61464 brelse(bh);
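
The listing rewrite here (repeated for ext3 and ext4 below) returns an explicitly accumulated total_size instead of buffer_size - rest. The two are numerically equal, but during a size-only pass (NULL buffer, buffer_size == 0) rest underflows its unsigned type on every entry and the correct result is only recovered through modular wrap-around, which is presumably what the size-overflow instrumentation elsewhere in this patch would flag. A quick demonstration of the difference:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* sizes[] stands in for the xattr entry name lengths. */
	size_t sizes[] = { 5, 9, 3 };
	size_t buffer_size = 0;			/* measuring pass: no buffer */
	size_t rest = buffer_size, total_size = 0;
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		rest -= sizes[i];		/* underflows: rest is unsigned */
		total_size += sizes[i];		/* never wraps here */
	}
	/* Both print 17, but the first only via wrap-around. */
	printf("buffer_size - rest = %zu\n", buffer_size - rest);
	printf("total_size         = %zu\n", total_size);
	return 0;
}
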
61465diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
61466index 158b5d4..2432610 100644
61467--- a/fs/ext3/balloc.c
61468+++ b/fs/ext3/balloc.c
61469@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
61470
61471 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61472 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61473- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61474+ if (free_blocks < root_blocks + 1 &&
61475 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
61476 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61477- !in_group_p (sbi->s_resgid))) {
61478+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61479 return 0;
61480 }
61481 return 1;
61482diff --git a/fs/ext3/super.c b/fs/ext3/super.c
61483index 9b4e7d7..048d025 100644
61484--- a/fs/ext3/super.c
61485+++ b/fs/ext3/super.c
61486@@ -653,10 +653,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
61487 #ifdef CONFIG_EXT3_FS_XATTR
61488 if (test_opt(sb, XATTR_USER))
61489 seq_puts(seq, ",user_xattr");
61490- if (!test_opt(sb, XATTR_USER) &&
61491- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
61492+ if (!test_opt(sb, XATTR_USER))
61493 seq_puts(seq, ",nouser_xattr");
61494- }
61495 #endif
61496 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61497 if (test_opt(sb, POSIX_ACL))
61498@@ -1758,8 +1756,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
61499 if (def_mount_opts & EXT3_DEFM_UID16)
61500 set_opt(sbi->s_mount_opt, NO_UID32);
61501 #ifdef CONFIG_EXT3_FS_XATTR
61502- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
61503- set_opt(sbi->s_mount_opt, XATTR_USER);
61504+ /* always enable user xattrs */
61505+ set_opt(sbi->s_mount_opt, XATTR_USER);
61506 #endif
61507 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61508 if (def_mount_opts & EXT3_DEFM_ACL)
61509diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
61510index c6874be..f8a6ae8 100644
61511--- a/fs/ext3/xattr.c
61512+++ b/fs/ext3/xattr.c
61513@@ -330,7 +330,7 @@ static int
61514 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61515 char *buffer, size_t buffer_size)
61516 {
61517- size_t rest = buffer_size;
61518+ size_t rest = buffer_size, total_size = 0;
61519
61520 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
61521 const struct xattr_handler *handler =
61522@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61523 buffer += size;
61524 }
61525 rest -= size;
61526+ total_size += size;
61527 }
61528 }
61529- return buffer_size - rest;
61530+ return total_size;
61531 }
61532
61533 static int
61534diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
61535index 83a6f49..d4e4d03 100644
61536--- a/fs/ext4/balloc.c
61537+++ b/fs/ext4/balloc.c
61538@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
61539 /* Hm, nope. Are (enough) root reserved clusters available? */
61540 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
61541 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
61542- capable(CAP_SYS_RESOURCE) ||
61543- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
61544+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
61545+ capable_nolog(CAP_SYS_RESOURCE)) {
61546
61547 if (free_clusters >= (nclusters + dirty_clusters +
61548 resv_clusters))
61549diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
61550index a75fba6..8235fca 100644
61551--- a/fs/ext4/ext4.h
61552+++ b/fs/ext4/ext4.h
61553@@ -1274,19 +1274,19 @@ struct ext4_sb_info {
61554 unsigned long s_mb_last_start;
61555
61556 /* stats for buddy allocator */
61557- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
61558- atomic_t s_bal_success; /* we found long enough chunks */
61559- atomic_t s_bal_allocated; /* in blocks */
61560- atomic_t s_bal_ex_scanned; /* total extents scanned */
61561- atomic_t s_bal_goals; /* goal hits */
61562- atomic_t s_bal_breaks; /* too long searches */
61563- atomic_t s_bal_2orders; /* 2^order hits */
61564+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
61565+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
61566+ atomic_unchecked_t s_bal_allocated; /* in blocks */
61567+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
61568+ atomic_unchecked_t s_bal_goals; /* goal hits */
61569+ atomic_unchecked_t s_bal_breaks; /* too long searches */
61570+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
61571 spinlock_t s_bal_lock;
61572 unsigned long s_mb_buddies_generated;
61573 unsigned long long s_mb_generation_time;
61574- atomic_t s_mb_lost_chunks;
61575- atomic_t s_mb_preallocated;
61576- atomic_t s_mb_discarded;
61577+ atomic_unchecked_t s_mb_lost_chunks;
61578+ atomic_unchecked_t s_mb_preallocated;
61579+ atomic_unchecked_t s_mb_discarded;
61580 atomic_t s_lock_busy;
61581
61582 /* locality groups */
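
This header change, together with the mballoc.c hunks that follow, moves pure statistics counters from atomic_t to atomic_unchecked_t. Under PAX_REFCOUNT, ordinary atomic_t increments are instrumented to detect overflow; counters whose wrap-around is harmless are switched to the unchecked type so they keep plain modular behaviour. A user-space analogue of that split, with atomicity elided for brevity and the types below being models rather than the kernel's:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int v; } counter_checked;		/* refcount-like */
typedef struct { unsigned int v; } counter_unchecked;	/* stats-like */

static void inc_checked(counter_checked *c)
{
	if (c->v == INT_MAX) {		/* would overflow: treat as fatal */
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	c->v++;
}

static void inc_unchecked(counter_unchecked *c)
{
	c->v++;				/* stats counter: wrapping is fine */
}

int main(void)
{
	counter_unchecked stats = { UINT_MAX };
	counter_checked ref = { INT_MAX };

	inc_unchecked(&stats);		/* wraps to 0, by design */
	printf("stats counter wrapped to %u\n", stats.v);

	inc_checked(&ref);		/* reports the overflow and aborts */
	return 0;
}
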
61583diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
61584index 8d1e602..abf497b 100644
61585--- a/fs/ext4/mballoc.c
61586+++ b/fs/ext4/mballoc.c
61587@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
61588 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
61589
61590 if (EXT4_SB(sb)->s_mb_stats)
61591- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
61592+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
61593
61594 break;
61595 }
61596@@ -2211,7 +2211,7 @@ repeat:
61597 ac->ac_status = AC_STATUS_CONTINUE;
61598 ac->ac_flags |= EXT4_MB_HINT_FIRST;
61599 cr = 3;
61600- atomic_inc(&sbi->s_mb_lost_chunks);
61601+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
61602 goto repeat;
61603 }
61604 }
61605@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
61606 if (sbi->s_mb_stats) {
61607 ext4_msg(sb, KERN_INFO,
61608 "mballoc: %u blocks %u reqs (%u success)",
61609- atomic_read(&sbi->s_bal_allocated),
61610- atomic_read(&sbi->s_bal_reqs),
61611- atomic_read(&sbi->s_bal_success));
61612+ atomic_read_unchecked(&sbi->s_bal_allocated),
61613+ atomic_read_unchecked(&sbi->s_bal_reqs),
61614+ atomic_read_unchecked(&sbi->s_bal_success));
61615 ext4_msg(sb, KERN_INFO,
61616 "mballoc: %u extents scanned, %u goal hits, "
61617 "%u 2^N hits, %u breaks, %u lost",
61618- atomic_read(&sbi->s_bal_ex_scanned),
61619- atomic_read(&sbi->s_bal_goals),
61620- atomic_read(&sbi->s_bal_2orders),
61621- atomic_read(&sbi->s_bal_breaks),
61622- atomic_read(&sbi->s_mb_lost_chunks));
61623+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
61624+ atomic_read_unchecked(&sbi->s_bal_goals),
61625+ atomic_read_unchecked(&sbi->s_bal_2orders),
61626+ atomic_read_unchecked(&sbi->s_bal_breaks),
61627+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
61628 ext4_msg(sb, KERN_INFO,
61629 "mballoc: %lu generated and it took %Lu",
61630 sbi->s_mb_buddies_generated,
61631 sbi->s_mb_generation_time);
61632 ext4_msg(sb, KERN_INFO,
61633 "mballoc: %u preallocated, %u discarded",
61634- atomic_read(&sbi->s_mb_preallocated),
61635- atomic_read(&sbi->s_mb_discarded));
61636+ atomic_read_unchecked(&sbi->s_mb_preallocated),
61637+ atomic_read_unchecked(&sbi->s_mb_discarded));
61638 }
61639
61640 free_percpu(sbi->s_locality_groups);
61641@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
61642 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
61643
61644 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
61645- atomic_inc(&sbi->s_bal_reqs);
61646- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61647+ atomic_inc_unchecked(&sbi->s_bal_reqs);
61648+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61649 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
61650- atomic_inc(&sbi->s_bal_success);
61651- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
61652+ atomic_inc_unchecked(&sbi->s_bal_success);
61653+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
61654 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
61655 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
61656- atomic_inc(&sbi->s_bal_goals);
61657+ atomic_inc_unchecked(&sbi->s_bal_goals);
61658 if (ac->ac_found > sbi->s_mb_max_to_scan)
61659- atomic_inc(&sbi->s_bal_breaks);
61660+ atomic_inc_unchecked(&sbi->s_bal_breaks);
61661 }
61662
61663 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
61664@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
61665 trace_ext4_mb_new_inode_pa(ac, pa);
61666
61667 ext4_mb_use_inode_pa(ac, pa);
61668- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
61669+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
61670
61671 ei = EXT4_I(ac->ac_inode);
61672 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61673@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
61674 trace_ext4_mb_new_group_pa(ac, pa);
61675
61676 ext4_mb_use_group_pa(ac, pa);
61677- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61678+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61679
61680 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61681 lg = ac->ac_lg;
61682@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
61683 * from the bitmap and continue.
61684 */
61685 }
61686- atomic_add(free, &sbi->s_mb_discarded);
61687+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
61688
61689 return err;
61690 }
61691@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
61692 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
61693 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
61694 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
61695- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61696+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61697 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
61698
61699 return 0;
61700diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
61701index 8313ca3..8a37d08 100644
61702--- a/fs/ext4/mmp.c
61703+++ b/fs/ext4/mmp.c
61704@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
61705 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
61706 const char *function, unsigned int line, const char *msg)
61707 {
61708- __ext4_warning(sb, function, line, msg);
61709+ __ext4_warning(sb, function, line, "%s", msg);
61710 __ext4_warning(sb, function, line,
61711 "MMP failure info: last update time: %llu, last update "
61712 "node: %s, last update device: %s\n",
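
The mmp.c hunk is a standard format-string fix: msg is built elsewhere and may contain '%', so it must travel as data under a fixed "%s" format rather than as the format itself. A self-contained illustration, where warn() is only a stand-in for __ext4_warning():

#include <stdarg.h>
#include <stdio.h>

/* Minimal vararg logger standing in for __ext4_warning(). */
static void warn(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	const char *msg = "device 100% full";	/* note the stray '%' */

	/* warn(msg); is unsafe: the '%' in msg is parsed as a conversion
	 * and reads a nonexistent vararg, which is undefined behaviour
	 * and a classic information-leak primitive. */

	warn("%s\n", msg);	/* safe: msg is plain data */
	return 0;
}
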
61713diff --git a/fs/ext4/super.c b/fs/ext4/super.c
61714index fc29b2c..6c8b255 100644
61715--- a/fs/ext4/super.c
61716+++ b/fs/ext4/super.c
61717@@ -1252,7 +1252,7 @@ static ext4_fsblk_t get_sb_block(void **data)
61718 }
61719
61720 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
61721-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61722+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61723 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
61724
61725 #ifdef CONFIG_QUOTA
61726@@ -2440,7 +2440,7 @@ struct ext4_attr {
61727 int offset;
61728 int deprecated_val;
61729 } u;
61730-};
61731+} __do_const;
61732
61733 static int parse_strtoull(const char *buf,
61734 unsigned long long max, unsigned long long *value)
61735diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
61736index 1e09fc7..0400dd4 100644
61737--- a/fs/ext4/xattr.c
61738+++ b/fs/ext4/xattr.c
61739@@ -399,7 +399,7 @@ static int
61740 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61741 char *buffer, size_t buffer_size)
61742 {
61743- size_t rest = buffer_size;
61744+ size_t rest = buffer_size, total_size = 0;
61745
61746 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
61747 const struct xattr_handler *handler =
61748@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61749 buffer += size;
61750 }
61751 rest -= size;
61752+ total_size += size;
61753 }
61754 }
61755- return buffer_size - rest;
61756+ return total_size;
61757 }
61758
61759 static int
61760diff --git a/fs/fcntl.c b/fs/fcntl.c
61761index ee85cd4..9dd0d20 100644
61762--- a/fs/fcntl.c
61763+++ b/fs/fcntl.c
61764@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
61765 int force)
61766 {
61767 security_file_set_fowner(filp);
61768+ if (gr_handle_chroot_fowner(pid, type))
61769+ return;
61770+ if (gr_check_protected_task_fowner(pid, type))
61771+ return;
61772 f_modown(filp, pid, type, force);
61773 }
61774 EXPORT_SYMBOL(__f_setown);
61775diff --git a/fs/fhandle.c b/fs/fhandle.c
61776index 999ff5c..2281df9 100644
61777--- a/fs/fhandle.c
61778+++ b/fs/fhandle.c
61779@@ -8,6 +8,7 @@
61780 #include <linux/fs_struct.h>
61781 #include <linux/fsnotify.h>
61782 #include <linux/personality.h>
61783+#include <linux/grsecurity.h>
61784 #include <asm/uaccess.h>
61785 #include "internal.h"
61786 #include "mount.h"
61787@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
61788 } else
61789 retval = 0;
61790 /* copy the mount id */
61791- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
61792- sizeof(*mnt_id)) ||
61793+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
61794 copy_to_user(ufh, handle,
61795 sizeof(struct file_handle) + handle_bytes))
61796 retval = -EFAULT;
61797@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
61798 * the directory. Ideally we would like CAP_DAC_SEARCH.
61799 * But we don't have that
61800 */
61801- if (!capable(CAP_DAC_READ_SEARCH)) {
61802+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
61803 retval = -EPERM;
61804 goto out_err;
61805 }
61806@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
61807 goto out_err;
61808 }
61809 /* copy the full handle */
61810- if (copy_from_user(handle, ufh,
61811- sizeof(struct file_handle) +
61812+ *handle = f_handle;
61813+ if (copy_from_user(&handle->f_handle,
61814+ &ufh->f_handle,
61815 f_handle.handle_bytes)) {
61816 retval = -EFAULT;
61817 goto out_handle;
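
The handle_to_path() change closes a double-fetch window: handle_bytes was already copied in and validated once, so copying the whole struct file_handle from userspace a second time would let a racing thread enlarge handle_bytes between the check and the fetch. The fix reuses the validated kernel copy of the header and copies only the payload. A user-space model of the pattern, where fetch_from_user() and the struct layout are stand-ins for the kernel interfaces:

#include <stdio.h>
#include <string.h>

struct handle {
	unsigned int handle_bytes;
	unsigned char f_handle[128];
};

/* Stands in for copy_from_user(); a racing thread could be mutating
 * the source buffer between calls. */
static void fetch_from_user(void *dst, const void *usrc, size_t n)
{
	memcpy(dst, usrc, n);
}

int main(void)
{
	struct handle user_copy = { .handle_bytes = 16 };
	struct handle kern;
	unsigned int validated;

	/* First fetch: read and validate the header exactly once. */
	fetch_from_user(&validated, &user_copy.handle_bytes, sizeof(validated));
	if (validated > sizeof(kern.f_handle))
		return 1;

	/* Fixed pattern: reuse the validated header and fetch only the
	 * payload. Re-fetching the whole struct would re-read
	 * handle_bytes, which may have grown past the validated bound. */
	kern.handle_bytes = validated;
	fetch_from_user(kern.f_handle, user_copy.f_handle, validated);

	printf("copied %u payload bytes\n", kern.handle_bytes);
	return 0;
}
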
61818diff --git a/fs/file.c b/fs/file.c
61819index ee738ea..f6c1562 100644
61820--- a/fs/file.c
61821+++ b/fs/file.c
61822@@ -16,6 +16,7 @@
61823 #include <linux/slab.h>
61824 #include <linux/vmalloc.h>
61825 #include <linux/file.h>
61826+#include <linux/security.h>
61827 #include <linux/fdtable.h>
61828 #include <linux/bitops.h>
61829 #include <linux/interrupt.h>
61830@@ -139,7 +140,7 @@ out:
61831 * Return <0 error code on error; 1 on successful completion.
61832 * The files->file_lock should be held on entry, and will be held on exit.
61833 */
61834-static int expand_fdtable(struct files_struct *files, int nr)
61835+static int expand_fdtable(struct files_struct *files, unsigned int nr)
61836 __releases(files->file_lock)
61837 __acquires(files->file_lock)
61838 {
61839@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
61840 * expanded and execution may have blocked.
61841 * The files->file_lock should be held on entry, and will be held on exit.
61842 */
61843-static int expand_files(struct files_struct *files, int nr)
61844+static int expand_files(struct files_struct *files, unsigned int nr)
61845 {
61846 struct fdtable *fdt;
61847
61848@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
61849 if (!file)
61850 return __close_fd(files, fd);
61851
61852+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
61853 if (fd >= rlimit(RLIMIT_NOFILE))
61854 return -EBADF;
61855
61856@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
61857 if (unlikely(oldfd == newfd))
61858 return -EINVAL;
61859
61860+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
61861 if (newfd >= rlimit(RLIMIT_NOFILE))
61862 return -EBADF;
61863
61864@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
61865 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
61866 {
61867 int err;
61868+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
61869 if (from >= rlimit(RLIMIT_NOFILE))
61870 return -EINVAL;
61871 err = alloc_fd(from, flags);
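
Alongside the grsecurity learning hooks, expand_fdtable() and expand_files() switch nr from int to unsigned int: a negative count compares below any signed limit, while the same bit pattern taken unsigned is enormous and fails the bound check as intended. A compact demonstration, with MAX_FDS as an arbitrary stand-in for the rlimit:

#include <stdio.h>

#define MAX_FDS 1024u

static int admits_signed(int nr)
{
	return nr < (int)MAX_FDS;	/* -1 slips under the limit */
}

static int admits_unsigned(unsigned int nr)
{
	return nr < MAX_FDS;		/* (unsigned)-1 is rejected */
}

int main(void)
{
	int evil = -1;

	printf("signed   check admits -1: %d\n", admits_signed(evil));
	printf("unsigned check admits -1: %d\n",
	       admits_unsigned((unsigned int)evil));
	return 0;
}
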
61872diff --git a/fs/filesystems.c b/fs/filesystems.c
61873index 5797d45..7d7d79a 100644
61874--- a/fs/filesystems.c
61875+++ b/fs/filesystems.c
61876@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
61877 int len = dot ? dot - name : strlen(name);
61878
61879 fs = __get_fs_type(name, len);
61880+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61881+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
61882+#else
61883 if (!fs && (request_module("fs-%.*s", len, name) == 0))
61884+#endif
61885 fs = __get_fs_type(name, len);
61886
61887 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
61888diff --git a/fs/fs_struct.c b/fs/fs_struct.c
61889index 7dca743..2f2786d 100644
61890--- a/fs/fs_struct.c
61891+++ b/fs/fs_struct.c
61892@@ -4,6 +4,7 @@
61893 #include <linux/path.h>
61894 #include <linux/slab.h>
61895 #include <linux/fs_struct.h>
61896+#include <linux/grsecurity.h>
61897 #include "internal.h"
61898
61899 /*
61900@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
61901 struct path old_root;
61902
61903 path_get(path);
61904+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
61905 spin_lock(&fs->lock);
61906 write_seqcount_begin(&fs->seq);
61907 old_root = fs->root;
61908 fs->root = *path;
61909+ gr_set_chroot_entries(current, path);
61910 write_seqcount_end(&fs->seq);
61911 spin_unlock(&fs->lock);
61912- if (old_root.dentry)
61913+ if (old_root.dentry) {
61914+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
61915 path_put(&old_root);
61916+ }
61917 }
61918
61919 /*
61920@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
61921 int hits = 0;
61922 spin_lock(&fs->lock);
61923 write_seqcount_begin(&fs->seq);
61924+ /* this root replacement is only done by pivot_root,
61925+ leave grsec's chroot tagging alone for this task
61926+ so that a pivoted root isn't treated as a chroot
61927+ */
61928 hits += replace_path(&fs->root, old_root, new_root);
61929 hits += replace_path(&fs->pwd, old_root, new_root);
61930 write_seqcount_end(&fs->seq);
61931@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
61932
61933 void free_fs_struct(struct fs_struct *fs)
61934 {
61935+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
61936 path_put(&fs->root);
61937 path_put(&fs->pwd);
61938 kmem_cache_free(fs_cachep, fs);
61939@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
61940 task_lock(tsk);
61941 spin_lock(&fs->lock);
61942 tsk->fs = NULL;
61943- kill = !--fs->users;
61944+ gr_clear_chroot_entries(tsk);
61945+ kill = !atomic_dec_return(&fs->users);
61946 spin_unlock(&fs->lock);
61947 task_unlock(tsk);
61948 if (kill)
61949@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
61950 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
61951 /* We don't need to lock fs - think why ;-) */
61952 if (fs) {
61953- fs->users = 1;
61954+ atomic_set(&fs->users, 1);
61955 fs->in_exec = 0;
61956 spin_lock_init(&fs->lock);
61957 seqcount_init(&fs->seq);
61958@@ -121,6 +132,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
61959 spin_lock(&old->lock);
61960 fs->root = old->root;
61961 path_get(&fs->root);
61962+ /* instead of calling gr_set_chroot_entries here,
61963+ we call it from every caller of this function
61964+ */
61965 fs->pwd = old->pwd;
61966 path_get(&fs->pwd);
61967 spin_unlock(&old->lock);
61968@@ -139,8 +153,9 @@ int unshare_fs_struct(void)
61969
61970 task_lock(current);
61971 spin_lock(&fs->lock);
61972- kill = !--fs->users;
61973+ kill = !atomic_dec_return(&fs->users);
61974 current->fs = new_fs;
61975+ gr_set_chroot_entries(current, &new_fs->root);
61976 spin_unlock(&fs->lock);
61977 task_unlock(current);
61978
61979@@ -153,13 +168,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
61980
61981 int current_umask(void)
61982 {
61983- return current->fs->umask;
61984+ return current->fs->umask | gr_acl_umask();
61985 }
61986 EXPORT_SYMBOL(current_umask);
61987
61988 /* to be mentioned only in INIT_TASK */
61989 struct fs_struct init_fs = {
61990- .users = 1,
61991+ .users = ATOMIC_INIT(1),
61992 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
61993 .seq = SEQCNT_ZERO(init_fs.seq),
61994 .umask = 0022,
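
The fs_struct changes convert users from a plain integer guarded by fs->lock into an atomic_t, with kill = !atomic_dec_return(&fs->users) freeing the structure only when the last reference drops, presumably so the added chroot accounting can take and release references without always holding the spinlock. A sketch of that "free on last put" idiom, using C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fs_struct {
	atomic_int users;
	/* ... root, pwd, umask ... */
};

static struct fs_struct *fs_alloc(void)
{
	struct fs_struct *fs = malloc(sizeof(*fs));

	if (!fs)
		abort();
	atomic_init(&fs->users, 1);
	return fs;
}

static void fs_get(struct fs_struct *fs)
{
	atomic_fetch_add(&fs->users, 1);
}

static void fs_put(struct fs_struct *fs)
{
	/* fetch_sub returns the old value; old == 1 means this was the
	 * last reference, so it is now safe to free. */
	if (atomic_fetch_sub(&fs->users, 1) == 1) {
		puts("last user gone, freeing");
		free(fs);
	}
}

int main(void)
{
	struct fs_struct *fs = fs_alloc();

	fs_get(fs);	/* second user, e.g. a cloned task */
	fs_put(fs);	/* still referenced */
	fs_put(fs);	/* last put: frees */
	return 0;
}
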
61995diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
61996index 89acec7..a575262 100644
61997--- a/fs/fscache/cookie.c
61998+++ b/fs/fscache/cookie.c
61999@@ -19,7 +19,7 @@
62000
62001 struct kmem_cache *fscache_cookie_jar;
62002
62003-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
62004+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
62005
62006 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
62007 static int fscache_alloc_object(struct fscache_cache *cache,
62008@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
62009 parent ? (char *) parent->def->name : "<no-parent>",
62010 def->name, netfs_data, enable);
62011
62012- fscache_stat(&fscache_n_acquires);
62013+ fscache_stat_unchecked(&fscache_n_acquires);
62014
62015 /* if there's no parent cookie, then we don't create one here either */
62016 if (!parent) {
62017- fscache_stat(&fscache_n_acquires_null);
62018+ fscache_stat_unchecked(&fscache_n_acquires_null);
62019 _leave(" [no parent]");
62020 return NULL;
62021 }
62022@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62023 /* allocate and initialise a cookie */
62024 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
62025 if (!cookie) {
62026- fscache_stat(&fscache_n_acquires_oom);
62027+ fscache_stat_unchecked(&fscache_n_acquires_oom);
62028 _leave(" [ENOMEM]");
62029 return NULL;
62030 }
62031@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
62032
62033 switch (cookie->def->type) {
62034 case FSCACHE_COOKIE_TYPE_INDEX:
62035- fscache_stat(&fscache_n_cookie_index);
62036+ fscache_stat_unchecked(&fscache_n_cookie_index);
62037 break;
62038 case FSCACHE_COOKIE_TYPE_DATAFILE:
62039- fscache_stat(&fscache_n_cookie_data);
62040+ fscache_stat_unchecked(&fscache_n_cookie_data);
62041 break;
62042 default:
62043- fscache_stat(&fscache_n_cookie_special);
62044+ fscache_stat_unchecked(&fscache_n_cookie_special);
62045 break;
62046 }
62047
62048@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62049 } else {
62050 atomic_dec(&parent->n_children);
62051 __fscache_cookie_put(cookie);
62052- fscache_stat(&fscache_n_acquires_nobufs);
62053+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
62054 _leave(" = NULL");
62055 return NULL;
62056 }
62057@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62058 }
62059 }
62060
62061- fscache_stat(&fscache_n_acquires_ok);
62062+ fscache_stat_unchecked(&fscache_n_acquires_ok);
62063 _leave(" = %p", cookie);
62064 return cookie;
62065 }
62066@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
62067 cache = fscache_select_cache_for_object(cookie->parent);
62068 if (!cache) {
62069 up_read(&fscache_addremove_sem);
62070- fscache_stat(&fscache_n_acquires_no_cache);
62071+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
62072 _leave(" = -ENOMEDIUM [no cache]");
62073 return -ENOMEDIUM;
62074 }
62075@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
62076 object = cache->ops->alloc_object(cache, cookie);
62077 fscache_stat_d(&fscache_n_cop_alloc_object);
62078 if (IS_ERR(object)) {
62079- fscache_stat(&fscache_n_object_no_alloc);
62080+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
62081 ret = PTR_ERR(object);
62082 goto error;
62083 }
62084
62085- fscache_stat(&fscache_n_object_alloc);
62086+ fscache_stat_unchecked(&fscache_n_object_alloc);
62087
62088- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
62089+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
62090
62091 _debug("ALLOC OBJ%x: %s {%lx}",
62092 object->debug_id, cookie->def->name, object->events);
62093@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
62094
62095 _enter("{%s}", cookie->def->name);
62096
62097- fscache_stat(&fscache_n_invalidates);
62098+ fscache_stat_unchecked(&fscache_n_invalidates);
62099
62100 /* Only permit invalidation of data files. Invalidating an index will
62101 * require the caller to release all its attachments to the tree rooted
62102@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
62103 {
62104 struct fscache_object *object;
62105
62106- fscache_stat(&fscache_n_updates);
62107+ fscache_stat_unchecked(&fscache_n_updates);
62108
62109 if (!cookie) {
62110- fscache_stat(&fscache_n_updates_null);
62111+ fscache_stat_unchecked(&fscache_n_updates_null);
62112 _leave(" [no cookie]");
62113 return;
62114 }
62115@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
62116 */
62117 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
62118 {
62119- fscache_stat(&fscache_n_relinquishes);
62120+ fscache_stat_unchecked(&fscache_n_relinquishes);
62121 if (retire)
62122- fscache_stat(&fscache_n_relinquishes_retire);
62123+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
62124
62125 if (!cookie) {
62126- fscache_stat(&fscache_n_relinquishes_null);
62127+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
62128 _leave(" [no cookie]");
62129 return;
62130 }
62131@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
62132 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
62133 goto inconsistent;
62134
62135- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
62136+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62137
62138 __fscache_use_cookie(cookie);
62139 if (fscache_submit_op(object, op) < 0)
62140diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
62141index 7872a62..d91b19f 100644
62142--- a/fs/fscache/internal.h
62143+++ b/fs/fscache/internal.h
62144@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
62145 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
62146 extern int fscache_wait_for_operation_activation(struct fscache_object *,
62147 struct fscache_operation *,
62148- atomic_t *,
62149- atomic_t *,
62150+ atomic_unchecked_t *,
62151+ atomic_unchecked_t *,
62152 void (*)(struct fscache_operation *));
62153 extern void fscache_invalidate_writes(struct fscache_cookie *);
62154
62155@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
62156 * stats.c
62157 */
62158 #ifdef CONFIG_FSCACHE_STATS
62159-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62160-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62161+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62162+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62163
62164-extern atomic_t fscache_n_op_pend;
62165-extern atomic_t fscache_n_op_run;
62166-extern atomic_t fscache_n_op_enqueue;
62167-extern atomic_t fscache_n_op_deferred_release;
62168-extern atomic_t fscache_n_op_release;
62169-extern atomic_t fscache_n_op_gc;
62170-extern atomic_t fscache_n_op_cancelled;
62171-extern atomic_t fscache_n_op_rejected;
62172+extern atomic_unchecked_t fscache_n_op_pend;
62173+extern atomic_unchecked_t fscache_n_op_run;
62174+extern atomic_unchecked_t fscache_n_op_enqueue;
62175+extern atomic_unchecked_t fscache_n_op_deferred_release;
62176+extern atomic_unchecked_t fscache_n_op_release;
62177+extern atomic_unchecked_t fscache_n_op_gc;
62178+extern atomic_unchecked_t fscache_n_op_cancelled;
62179+extern atomic_unchecked_t fscache_n_op_rejected;
62180
62181-extern atomic_t fscache_n_attr_changed;
62182-extern atomic_t fscache_n_attr_changed_ok;
62183-extern atomic_t fscache_n_attr_changed_nobufs;
62184-extern atomic_t fscache_n_attr_changed_nomem;
62185-extern atomic_t fscache_n_attr_changed_calls;
62186+extern atomic_unchecked_t fscache_n_attr_changed;
62187+extern atomic_unchecked_t fscache_n_attr_changed_ok;
62188+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
62189+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
62190+extern atomic_unchecked_t fscache_n_attr_changed_calls;
62191
62192-extern atomic_t fscache_n_allocs;
62193-extern atomic_t fscache_n_allocs_ok;
62194-extern atomic_t fscache_n_allocs_wait;
62195-extern atomic_t fscache_n_allocs_nobufs;
62196-extern atomic_t fscache_n_allocs_intr;
62197-extern atomic_t fscache_n_allocs_object_dead;
62198-extern atomic_t fscache_n_alloc_ops;
62199-extern atomic_t fscache_n_alloc_op_waits;
62200+extern atomic_unchecked_t fscache_n_allocs;
62201+extern atomic_unchecked_t fscache_n_allocs_ok;
62202+extern atomic_unchecked_t fscache_n_allocs_wait;
62203+extern atomic_unchecked_t fscache_n_allocs_nobufs;
62204+extern atomic_unchecked_t fscache_n_allocs_intr;
62205+extern atomic_unchecked_t fscache_n_allocs_object_dead;
62206+extern atomic_unchecked_t fscache_n_alloc_ops;
62207+extern atomic_unchecked_t fscache_n_alloc_op_waits;
62208
62209-extern atomic_t fscache_n_retrievals;
62210-extern atomic_t fscache_n_retrievals_ok;
62211-extern atomic_t fscache_n_retrievals_wait;
62212-extern atomic_t fscache_n_retrievals_nodata;
62213-extern atomic_t fscache_n_retrievals_nobufs;
62214-extern atomic_t fscache_n_retrievals_intr;
62215-extern atomic_t fscache_n_retrievals_nomem;
62216-extern atomic_t fscache_n_retrievals_object_dead;
62217-extern atomic_t fscache_n_retrieval_ops;
62218-extern atomic_t fscache_n_retrieval_op_waits;
62219+extern atomic_unchecked_t fscache_n_retrievals;
62220+extern atomic_unchecked_t fscache_n_retrievals_ok;
62221+extern atomic_unchecked_t fscache_n_retrievals_wait;
62222+extern atomic_unchecked_t fscache_n_retrievals_nodata;
62223+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
62224+extern atomic_unchecked_t fscache_n_retrievals_intr;
62225+extern atomic_unchecked_t fscache_n_retrievals_nomem;
62226+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
62227+extern atomic_unchecked_t fscache_n_retrieval_ops;
62228+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
62229
62230-extern atomic_t fscache_n_stores;
62231-extern atomic_t fscache_n_stores_ok;
62232-extern atomic_t fscache_n_stores_again;
62233-extern atomic_t fscache_n_stores_nobufs;
62234-extern atomic_t fscache_n_stores_oom;
62235-extern atomic_t fscache_n_store_ops;
62236-extern atomic_t fscache_n_store_calls;
62237-extern atomic_t fscache_n_store_pages;
62238-extern atomic_t fscache_n_store_radix_deletes;
62239-extern atomic_t fscache_n_store_pages_over_limit;
62240+extern atomic_unchecked_t fscache_n_stores;
62241+extern atomic_unchecked_t fscache_n_stores_ok;
62242+extern atomic_unchecked_t fscache_n_stores_again;
62243+extern atomic_unchecked_t fscache_n_stores_nobufs;
62244+extern atomic_unchecked_t fscache_n_stores_oom;
62245+extern atomic_unchecked_t fscache_n_store_ops;
62246+extern atomic_unchecked_t fscache_n_store_calls;
62247+extern atomic_unchecked_t fscache_n_store_pages;
62248+extern atomic_unchecked_t fscache_n_store_radix_deletes;
62249+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
62250
62251-extern atomic_t fscache_n_store_vmscan_not_storing;
62252-extern atomic_t fscache_n_store_vmscan_gone;
62253-extern atomic_t fscache_n_store_vmscan_busy;
62254-extern atomic_t fscache_n_store_vmscan_cancelled;
62255-extern atomic_t fscache_n_store_vmscan_wait;
62256+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
62257+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
62258+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
62259+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
62260+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
62261
62262-extern atomic_t fscache_n_marks;
62263-extern atomic_t fscache_n_uncaches;
62264+extern atomic_unchecked_t fscache_n_marks;
62265+extern atomic_unchecked_t fscache_n_uncaches;
62266
62267-extern atomic_t fscache_n_acquires;
62268-extern atomic_t fscache_n_acquires_null;
62269-extern atomic_t fscache_n_acquires_no_cache;
62270-extern atomic_t fscache_n_acquires_ok;
62271-extern atomic_t fscache_n_acquires_nobufs;
62272-extern atomic_t fscache_n_acquires_oom;
62273+extern atomic_unchecked_t fscache_n_acquires;
62274+extern atomic_unchecked_t fscache_n_acquires_null;
62275+extern atomic_unchecked_t fscache_n_acquires_no_cache;
62276+extern atomic_unchecked_t fscache_n_acquires_ok;
62277+extern atomic_unchecked_t fscache_n_acquires_nobufs;
62278+extern atomic_unchecked_t fscache_n_acquires_oom;
62279
62280-extern atomic_t fscache_n_invalidates;
62281-extern atomic_t fscache_n_invalidates_run;
62282+extern atomic_unchecked_t fscache_n_invalidates;
62283+extern atomic_unchecked_t fscache_n_invalidates_run;
62284
62285-extern atomic_t fscache_n_updates;
62286-extern atomic_t fscache_n_updates_null;
62287-extern atomic_t fscache_n_updates_run;
62288+extern atomic_unchecked_t fscache_n_updates;
62289+extern atomic_unchecked_t fscache_n_updates_null;
62290+extern atomic_unchecked_t fscache_n_updates_run;
62291
62292-extern atomic_t fscache_n_relinquishes;
62293-extern atomic_t fscache_n_relinquishes_null;
62294-extern atomic_t fscache_n_relinquishes_waitcrt;
62295-extern atomic_t fscache_n_relinquishes_retire;
62296+extern atomic_unchecked_t fscache_n_relinquishes;
62297+extern atomic_unchecked_t fscache_n_relinquishes_null;
62298+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
62299+extern atomic_unchecked_t fscache_n_relinquishes_retire;
62300
62301-extern atomic_t fscache_n_cookie_index;
62302-extern atomic_t fscache_n_cookie_data;
62303-extern atomic_t fscache_n_cookie_special;
62304+extern atomic_unchecked_t fscache_n_cookie_index;
62305+extern atomic_unchecked_t fscache_n_cookie_data;
62306+extern atomic_unchecked_t fscache_n_cookie_special;
62307
62308-extern atomic_t fscache_n_object_alloc;
62309-extern atomic_t fscache_n_object_no_alloc;
62310-extern atomic_t fscache_n_object_lookups;
62311-extern atomic_t fscache_n_object_lookups_negative;
62312-extern atomic_t fscache_n_object_lookups_positive;
62313-extern atomic_t fscache_n_object_lookups_timed_out;
62314-extern atomic_t fscache_n_object_created;
62315-extern atomic_t fscache_n_object_avail;
62316-extern atomic_t fscache_n_object_dead;
62317+extern atomic_unchecked_t fscache_n_object_alloc;
62318+extern atomic_unchecked_t fscache_n_object_no_alloc;
62319+extern atomic_unchecked_t fscache_n_object_lookups;
62320+extern atomic_unchecked_t fscache_n_object_lookups_negative;
62321+extern atomic_unchecked_t fscache_n_object_lookups_positive;
62322+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
62323+extern atomic_unchecked_t fscache_n_object_created;
62324+extern atomic_unchecked_t fscache_n_object_avail;
62325+extern atomic_unchecked_t fscache_n_object_dead;
62326
62327-extern atomic_t fscache_n_checkaux_none;
62328-extern atomic_t fscache_n_checkaux_okay;
62329-extern atomic_t fscache_n_checkaux_update;
62330-extern atomic_t fscache_n_checkaux_obsolete;
62331+extern atomic_unchecked_t fscache_n_checkaux_none;
62332+extern atomic_unchecked_t fscache_n_checkaux_okay;
62333+extern atomic_unchecked_t fscache_n_checkaux_update;
62334+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
62335
62336 extern atomic_t fscache_n_cop_alloc_object;
62337 extern atomic_t fscache_n_cop_lookup_object;
62338@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
62339 atomic_inc(stat);
62340 }
62341
62342+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
62343+{
62344+ atomic_inc_unchecked(stat);
62345+}
62346+
62347 static inline void fscache_stat_d(atomic_t *stat)
62348 {
62349 atomic_dec(stat);
62350@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
62351
62352 #define __fscache_stat(stat) (NULL)
62353 #define fscache_stat(stat) do {} while (0)
62354+#define fscache_stat_unchecked(stat) do {} while (0)
62355 #define fscache_stat_d(stat) do {} while (0)
62356 #endif
62357
62358diff --git a/fs/fscache/object.c b/fs/fscache/object.c
62359index da032da..0076ce7 100644
62360--- a/fs/fscache/object.c
62361+++ b/fs/fscache/object.c
62362@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62363 _debug("LOOKUP \"%s\" in \"%s\"",
62364 cookie->def->name, object->cache->tag->name);
62365
62366- fscache_stat(&fscache_n_object_lookups);
62367+ fscache_stat_unchecked(&fscache_n_object_lookups);
62368 fscache_stat(&fscache_n_cop_lookup_object);
62369 ret = object->cache->ops->lookup_object(object);
62370 fscache_stat_d(&fscache_n_cop_lookup_object);
62371@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62372 if (ret == -ETIMEDOUT) {
62373 /* probably stuck behind another object, so move this one to
62374 * the back of the queue */
62375- fscache_stat(&fscache_n_object_lookups_timed_out);
62376+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
62377 _leave(" [timeout]");
62378 return NO_TRANSIT;
62379 }
62380@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
62381 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
62382
62383 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62384- fscache_stat(&fscache_n_object_lookups_negative);
62385+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
62386
62387 /* Allow write requests to begin stacking up and read requests to begin
62388 * returning ENODATA.
62389@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
62390 /* if we were still looking up, then we must have a positive lookup
62391 * result, in which case there may be data available */
62392 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62393- fscache_stat(&fscache_n_object_lookups_positive);
62394+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
62395
62396 /* We do (presumably) have data */
62397 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
62398@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
62399 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
62400 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
62401 } else {
62402- fscache_stat(&fscache_n_object_created);
62403+ fscache_stat_unchecked(&fscache_n_object_created);
62404 }
62405
62406 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
62407@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
62408 fscache_stat_d(&fscache_n_cop_lookup_complete);
62409
62410 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
62411- fscache_stat(&fscache_n_object_avail);
62412+ fscache_stat_unchecked(&fscache_n_object_avail);
62413
62414 _leave("");
62415 return transit_to(JUMPSTART_DEPS);
62416@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
62417
62418 /* this just shifts the object release to the work processor */
62419 fscache_put_object(object);
62420- fscache_stat(&fscache_n_object_dead);
62421+ fscache_stat_unchecked(&fscache_n_object_dead);
62422
62423 _leave("");
62424 return transit_to(OBJECT_DEAD);
62425@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62426 enum fscache_checkaux result;
62427
62428 if (!object->cookie->def->check_aux) {
62429- fscache_stat(&fscache_n_checkaux_none);
62430+ fscache_stat_unchecked(&fscache_n_checkaux_none);
62431 return FSCACHE_CHECKAUX_OKAY;
62432 }
62433
62434@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62435 switch (result) {
62436 /* entry okay as is */
62437 case FSCACHE_CHECKAUX_OKAY:
62438- fscache_stat(&fscache_n_checkaux_okay);
62439+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
62440 break;
62441
62442 /* entry requires update */
62443 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
62444- fscache_stat(&fscache_n_checkaux_update);
62445+ fscache_stat_unchecked(&fscache_n_checkaux_update);
62446 break;
62447
62448 /* entry requires deletion */
62449 case FSCACHE_CHECKAUX_OBSOLETE:
62450- fscache_stat(&fscache_n_checkaux_obsolete);
62451+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
62452 break;
62453
62454 default:
62455@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
62456 {
62457 const struct fscache_state *s;
62458
62459- fscache_stat(&fscache_n_invalidates_run);
62460+ fscache_stat_unchecked(&fscache_n_invalidates_run);
62461 fscache_stat(&fscache_n_cop_invalidate_object);
62462 s = _fscache_invalidate_object(object, event);
62463 fscache_stat_d(&fscache_n_cop_invalidate_object);
62464@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
62465 {
62466 _enter("{OBJ%x},%d", object->debug_id, event);
62467
62468- fscache_stat(&fscache_n_updates_run);
62469+ fscache_stat_unchecked(&fscache_n_updates_run);
62470 fscache_stat(&fscache_n_cop_update_object);
62471 object->cache->ops->update_object(object);
62472 fscache_stat_d(&fscache_n_cop_update_object);
62473diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
62474index e7b87a0..a85d47a 100644
62475--- a/fs/fscache/operation.c
62476+++ b/fs/fscache/operation.c
62477@@ -17,7 +17,7 @@
62478 #include <linux/slab.h>
62479 #include "internal.h"
62480
62481-atomic_t fscache_op_debug_id;
62482+atomic_unchecked_t fscache_op_debug_id;
62483 EXPORT_SYMBOL(fscache_op_debug_id);
62484
62485 /**
62486@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
62487 ASSERTCMP(atomic_read(&op->usage), >, 0);
62488 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
62489
62490- fscache_stat(&fscache_n_op_enqueue);
62491+ fscache_stat_unchecked(&fscache_n_op_enqueue);
62492 switch (op->flags & FSCACHE_OP_TYPE) {
62493 case FSCACHE_OP_ASYNC:
62494 _debug("queue async");
62495@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
62496 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
62497 if (op->processor)
62498 fscache_enqueue_operation(op);
62499- fscache_stat(&fscache_n_op_run);
62500+ fscache_stat_unchecked(&fscache_n_op_run);
62501 }
62502
62503 /*
62504@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62505 if (object->n_in_progress > 0) {
62506 atomic_inc(&op->usage);
62507 list_add_tail(&op->pend_link, &object->pending_ops);
62508- fscache_stat(&fscache_n_op_pend);
62509+ fscache_stat_unchecked(&fscache_n_op_pend);
62510 } else if (!list_empty(&object->pending_ops)) {
62511 atomic_inc(&op->usage);
62512 list_add_tail(&op->pend_link, &object->pending_ops);
62513- fscache_stat(&fscache_n_op_pend);
62514+ fscache_stat_unchecked(&fscache_n_op_pend);
62515 fscache_start_operations(object);
62516 } else {
62517 ASSERTCMP(object->n_in_progress, ==, 0);
62518@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62519 object->n_exclusive++; /* reads and writes must wait */
62520 atomic_inc(&op->usage);
62521 list_add_tail(&op->pend_link, &object->pending_ops);
62522- fscache_stat(&fscache_n_op_pend);
62523+ fscache_stat_unchecked(&fscache_n_op_pend);
62524 ret = 0;
62525 } else {
62526 /* If we're in any other state, there must have been an I/O
62527@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
62528 if (object->n_exclusive > 0) {
62529 atomic_inc(&op->usage);
62530 list_add_tail(&op->pend_link, &object->pending_ops);
62531- fscache_stat(&fscache_n_op_pend);
62532+ fscache_stat_unchecked(&fscache_n_op_pend);
62533 } else if (!list_empty(&object->pending_ops)) {
62534 atomic_inc(&op->usage);
62535 list_add_tail(&op->pend_link, &object->pending_ops);
62536- fscache_stat(&fscache_n_op_pend);
62537+ fscache_stat_unchecked(&fscache_n_op_pend);
62538 fscache_start_operations(object);
62539 } else {
62540 ASSERTCMP(object->n_exclusive, ==, 0);
62541@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
62542 object->n_ops++;
62543 atomic_inc(&op->usage);
62544 list_add_tail(&op->pend_link, &object->pending_ops);
62545- fscache_stat(&fscache_n_op_pend);
62546+ fscache_stat_unchecked(&fscache_n_op_pend);
62547 ret = 0;
62548 } else if (fscache_object_is_dying(object)) {
62549- fscache_stat(&fscache_n_op_rejected);
62550+ fscache_stat_unchecked(&fscache_n_op_rejected);
62551 op->state = FSCACHE_OP_ST_CANCELLED;
62552 ret = -ENOBUFS;
62553 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
62554@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
62555 ret = -EBUSY;
62556 if (op->state == FSCACHE_OP_ST_PENDING) {
62557 ASSERT(!list_empty(&op->pend_link));
62558- fscache_stat(&fscache_n_op_cancelled);
62559+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62560 list_del_init(&op->pend_link);
62561 if (do_cancel)
62562 do_cancel(op);
62563@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
62564 while (!list_empty(&object->pending_ops)) {
62565 op = list_entry(object->pending_ops.next,
62566 struct fscache_operation, pend_link);
62567- fscache_stat(&fscache_n_op_cancelled);
62568+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62569 list_del_init(&op->pend_link);
62570
62571 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
62572@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
62573 op->state, ==, FSCACHE_OP_ST_CANCELLED);
62574 op->state = FSCACHE_OP_ST_DEAD;
62575
62576- fscache_stat(&fscache_n_op_release);
62577+ fscache_stat_unchecked(&fscache_n_op_release);
62578
62579 if (op->release) {
62580 op->release(op);
62581@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
62582 * lock, and defer it otherwise */
62583 if (!spin_trylock(&object->lock)) {
62584 _debug("defer put");
62585- fscache_stat(&fscache_n_op_deferred_release);
62586+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
62587
62588 cache = object->cache;
62589 spin_lock(&cache->op_gc_list_lock);
62590@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
62591
62592 _debug("GC DEFERRED REL OBJ%x OP%x",
62593 object->debug_id, op->debug_id);
62594- fscache_stat(&fscache_n_op_gc);
62595+ fscache_stat_unchecked(&fscache_n_op_gc);
62596
62597 ASSERTCMP(atomic_read(&op->usage), ==, 0);
62598 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
62599diff --git a/fs/fscache/page.c b/fs/fscache/page.c
62600index de33b3f..8be4d29 100644
62601--- a/fs/fscache/page.c
62602+++ b/fs/fscache/page.c
62603@@ -74,7 +74,7 @@ try_again:
62604 val = radix_tree_lookup(&cookie->stores, page->index);
62605 if (!val) {
62606 rcu_read_unlock();
62607- fscache_stat(&fscache_n_store_vmscan_not_storing);
62608+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
62609 __fscache_uncache_page(cookie, page);
62610 return true;
62611 }
62612@@ -104,11 +104,11 @@ try_again:
62613 spin_unlock(&cookie->stores_lock);
62614
62615 if (xpage) {
62616- fscache_stat(&fscache_n_store_vmscan_cancelled);
62617- fscache_stat(&fscache_n_store_radix_deletes);
62618+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
62619+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62620 ASSERTCMP(xpage, ==, page);
62621 } else {
62622- fscache_stat(&fscache_n_store_vmscan_gone);
62623+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
62624 }
62625
62626 wake_up_bit(&cookie->flags, 0);
62627@@ -123,11 +123,11 @@ page_busy:
62628 * sleeping on memory allocation, so we may need to impose a timeout
62629 * too. */
62630 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
62631- fscache_stat(&fscache_n_store_vmscan_busy);
62632+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
62633 return false;
62634 }
62635
62636- fscache_stat(&fscache_n_store_vmscan_wait);
62637+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
62638 if (!release_page_wait_timeout(cookie, page))
62639 _debug("fscache writeout timeout page: %p{%lx}",
62640 page, page->index);
62641@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
62642 FSCACHE_COOKIE_STORING_TAG);
62643 if (!radix_tree_tag_get(&cookie->stores, page->index,
62644 FSCACHE_COOKIE_PENDING_TAG)) {
62645- fscache_stat(&fscache_n_store_radix_deletes);
62646+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62647 xpage = radix_tree_delete(&cookie->stores, page->index);
62648 }
62649 spin_unlock(&cookie->stores_lock);
62650@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
62651
62652 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
62653
62654- fscache_stat(&fscache_n_attr_changed_calls);
62655+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
62656
62657 if (fscache_object_is_active(object)) {
62658 fscache_stat(&fscache_n_cop_attr_changed);
62659@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62660
62661 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62662
62663- fscache_stat(&fscache_n_attr_changed);
62664+ fscache_stat_unchecked(&fscache_n_attr_changed);
62665
62666 op = kzalloc(sizeof(*op), GFP_KERNEL);
62667 if (!op) {
62668- fscache_stat(&fscache_n_attr_changed_nomem);
62669+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
62670 _leave(" = -ENOMEM");
62671 return -ENOMEM;
62672 }
62673@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62674 if (fscache_submit_exclusive_op(object, op) < 0)
62675 goto nobufs_dec;
62676 spin_unlock(&cookie->lock);
62677- fscache_stat(&fscache_n_attr_changed_ok);
62678+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
62679 fscache_put_operation(op);
62680 _leave(" = 0");
62681 return 0;
62682@@ -242,7 +242,7 @@ nobufs:
62683 kfree(op);
62684 if (wake_cookie)
62685 __fscache_wake_unused_cookie(cookie);
62686- fscache_stat(&fscache_n_attr_changed_nobufs);
62687+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
62688 _leave(" = %d", -ENOBUFS);
62689 return -ENOBUFS;
62690 }
62691@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
62692 /* allocate a retrieval operation and attempt to submit it */
62693 op = kzalloc(sizeof(*op), GFP_NOIO);
62694 if (!op) {
62695- fscache_stat(&fscache_n_retrievals_nomem);
62696+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62697 return NULL;
62698 }
62699
62700@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
62701 return 0;
62702 }
62703
62704- fscache_stat(&fscache_n_retrievals_wait);
62705+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
62706
62707 jif = jiffies;
62708 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
62709 TASK_INTERRUPTIBLE) != 0) {
62710- fscache_stat(&fscache_n_retrievals_intr);
62711+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62712 _leave(" = -ERESTARTSYS");
62713 return -ERESTARTSYS;
62714 }
62715@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
62716 */
62717 int fscache_wait_for_operation_activation(struct fscache_object *object,
62718 struct fscache_operation *op,
62719- atomic_t *stat_op_waits,
62720- atomic_t *stat_object_dead,
62721+ atomic_unchecked_t *stat_op_waits,
62722+ atomic_unchecked_t *stat_object_dead,
62723 void (*do_cancel)(struct fscache_operation *))
62724 {
62725 int ret;
62726@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62727
62728 _debug(">>> WT");
62729 if (stat_op_waits)
62730- fscache_stat(stat_op_waits);
62731+ fscache_stat_unchecked(stat_op_waits);
62732 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
62733 TASK_INTERRUPTIBLE) != 0) {
62734 ret = fscache_cancel_op(op, do_cancel);
62735@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62736 check_if_dead:
62737 if (op->state == FSCACHE_OP_ST_CANCELLED) {
62738 if (stat_object_dead)
62739- fscache_stat(stat_object_dead);
62740+ fscache_stat_unchecked(stat_object_dead);
62741 _leave(" = -ENOBUFS [cancelled]");
62742 return -ENOBUFS;
62743 }
62744@@ -381,7 +381,7 @@ check_if_dead:
62745 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
62746 fscache_cancel_op(op, do_cancel);
62747 if (stat_object_dead)
62748- fscache_stat(stat_object_dead);
62749+ fscache_stat_unchecked(stat_object_dead);
62750 return -ENOBUFS;
62751 }
62752 return 0;
62753@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62754
62755 _enter("%p,%p,,,", cookie, page);
62756
62757- fscache_stat(&fscache_n_retrievals);
62758+ fscache_stat_unchecked(&fscache_n_retrievals);
62759
62760 if (hlist_empty(&cookie->backing_objects))
62761 goto nobufs;
62762@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62763 goto nobufs_unlock_dec;
62764 spin_unlock(&cookie->lock);
62765
62766- fscache_stat(&fscache_n_retrieval_ops);
62767+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
62768
62769 /* pin the netfs read context in case we need to do the actual netfs
62770 * read because we've encountered a cache read failure */
62771@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62772
62773 error:
62774 if (ret == -ENOMEM)
62775- fscache_stat(&fscache_n_retrievals_nomem);
62776+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62777 else if (ret == -ERESTARTSYS)
62778- fscache_stat(&fscache_n_retrievals_intr);
62779+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62780 else if (ret == -ENODATA)
62781- fscache_stat(&fscache_n_retrievals_nodata);
62782+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
62783 else if (ret < 0)
62784- fscache_stat(&fscache_n_retrievals_nobufs);
62785+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62786 else
62787- fscache_stat(&fscache_n_retrievals_ok);
62788+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
62789
62790 fscache_put_retrieval(op);
62791 _leave(" = %d", ret);
62792@@ -505,7 +505,7 @@ nobufs_unlock:
62793 __fscache_wake_unused_cookie(cookie);
62794 kfree(op);
62795 nobufs:
62796- fscache_stat(&fscache_n_retrievals_nobufs);
62797+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62798 _leave(" = -ENOBUFS");
62799 return -ENOBUFS;
62800 }
62801@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62802
62803 _enter("%p,,%d,,,", cookie, *nr_pages);
62804
62805- fscache_stat(&fscache_n_retrievals);
62806+ fscache_stat_unchecked(&fscache_n_retrievals);
62807
62808 if (hlist_empty(&cookie->backing_objects))
62809 goto nobufs;
62810@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62811 goto nobufs_unlock_dec;
62812 spin_unlock(&cookie->lock);
62813
62814- fscache_stat(&fscache_n_retrieval_ops);
62815+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
62816
62817 /* pin the netfs read context in case we need to do the actual netfs
62818 * read because we've encountered a cache read failure */
62819@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62820
62821 error:
62822 if (ret == -ENOMEM)
62823- fscache_stat(&fscache_n_retrievals_nomem);
62824+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62825 else if (ret == -ERESTARTSYS)
62826- fscache_stat(&fscache_n_retrievals_intr);
62827+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62828 else if (ret == -ENODATA)
62829- fscache_stat(&fscache_n_retrievals_nodata);
62830+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
62831 else if (ret < 0)
62832- fscache_stat(&fscache_n_retrievals_nobufs);
62833+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62834 else
62835- fscache_stat(&fscache_n_retrievals_ok);
62836+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
62837
62838 fscache_put_retrieval(op);
62839 _leave(" = %d", ret);
62840@@ -636,7 +636,7 @@ nobufs_unlock:
62841 if (wake_cookie)
62842 __fscache_wake_unused_cookie(cookie);
62843 nobufs:
62844- fscache_stat(&fscache_n_retrievals_nobufs);
62845+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62846 _leave(" = -ENOBUFS");
62847 return -ENOBUFS;
62848 }
62849@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62850
62851 _enter("%p,%p,,,", cookie, page);
62852
62853- fscache_stat(&fscache_n_allocs);
62854+ fscache_stat_unchecked(&fscache_n_allocs);
62855
62856 if (hlist_empty(&cookie->backing_objects))
62857 goto nobufs;
62858@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62859 goto nobufs_unlock_dec;
62860 spin_unlock(&cookie->lock);
62861
62862- fscache_stat(&fscache_n_alloc_ops);
62863+ fscache_stat_unchecked(&fscache_n_alloc_ops);
62864
62865 ret = fscache_wait_for_operation_activation(
62866 object, &op->op,
62867@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62868
62869 error:
62870 if (ret == -ERESTARTSYS)
62871- fscache_stat(&fscache_n_allocs_intr);
62872+ fscache_stat_unchecked(&fscache_n_allocs_intr);
62873 else if (ret < 0)
62874- fscache_stat(&fscache_n_allocs_nobufs);
62875+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
62876 else
62877- fscache_stat(&fscache_n_allocs_ok);
62878+ fscache_stat_unchecked(&fscache_n_allocs_ok);
62879
62880 fscache_put_retrieval(op);
62881 _leave(" = %d", ret);
62882@@ -730,7 +730,7 @@ nobufs_unlock:
62883 if (wake_cookie)
62884 __fscache_wake_unused_cookie(cookie);
62885 nobufs:
62886- fscache_stat(&fscache_n_allocs_nobufs);
62887+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
62888 _leave(" = -ENOBUFS");
62889 return -ENOBUFS;
62890 }
62891@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62892
62893 spin_lock(&cookie->stores_lock);
62894
62895- fscache_stat(&fscache_n_store_calls);
62896+ fscache_stat_unchecked(&fscache_n_store_calls);
62897
62898 /* find a page to store */
62899 page = NULL;
62900@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62901 page = results[0];
62902 _debug("gang %d [%lx]", n, page->index);
62903 if (page->index > op->store_limit) {
62904- fscache_stat(&fscache_n_store_pages_over_limit);
62905+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
62906 goto superseded;
62907 }
62908
62909@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62910 spin_unlock(&cookie->stores_lock);
62911 spin_unlock(&object->lock);
62912
62913- fscache_stat(&fscache_n_store_pages);
62914+ fscache_stat_unchecked(&fscache_n_store_pages);
62915 fscache_stat(&fscache_n_cop_write_page);
62916 ret = object->cache->ops->write_page(op, page);
62917 fscache_stat_d(&fscache_n_cop_write_page);
62918@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62919 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62920 ASSERT(PageFsCache(page));
62921
62922- fscache_stat(&fscache_n_stores);
62923+ fscache_stat_unchecked(&fscache_n_stores);
62924
62925 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
62926 _leave(" = -ENOBUFS [invalidating]");
62927@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62928 spin_unlock(&cookie->stores_lock);
62929 spin_unlock(&object->lock);
62930
62931- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
62932+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62933 op->store_limit = object->store_limit;
62934
62935 __fscache_use_cookie(cookie);
62936@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62937
62938 spin_unlock(&cookie->lock);
62939 radix_tree_preload_end();
62940- fscache_stat(&fscache_n_store_ops);
62941- fscache_stat(&fscache_n_stores_ok);
62942+ fscache_stat_unchecked(&fscache_n_store_ops);
62943+ fscache_stat_unchecked(&fscache_n_stores_ok);
62944
62945 /* the work queue now carries its own ref on the object */
62946 fscache_put_operation(&op->op);
62947@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62948 return 0;
62949
62950 already_queued:
62951- fscache_stat(&fscache_n_stores_again);
62952+ fscache_stat_unchecked(&fscache_n_stores_again);
62953 already_pending:
62954 spin_unlock(&cookie->stores_lock);
62955 spin_unlock(&object->lock);
62956 spin_unlock(&cookie->lock);
62957 radix_tree_preload_end();
62958 kfree(op);
62959- fscache_stat(&fscache_n_stores_ok);
62960+ fscache_stat_unchecked(&fscache_n_stores_ok);
62961 _leave(" = 0");
62962 return 0;
62963
62964@@ -1039,14 +1039,14 @@ nobufs:
62965 kfree(op);
62966 if (wake_cookie)
62967 __fscache_wake_unused_cookie(cookie);
62968- fscache_stat(&fscache_n_stores_nobufs);
62969+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
62970 _leave(" = -ENOBUFS");
62971 return -ENOBUFS;
62972
62973 nomem_free:
62974 kfree(op);
62975 nomem:
62976- fscache_stat(&fscache_n_stores_oom);
62977+ fscache_stat_unchecked(&fscache_n_stores_oom);
62978 _leave(" = -ENOMEM");
62979 return -ENOMEM;
62980 }
62981@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
62982 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62983 ASSERTCMP(page, !=, NULL);
62984
62985- fscache_stat(&fscache_n_uncaches);
62986+ fscache_stat_unchecked(&fscache_n_uncaches);
62987
62988 /* cache withdrawal may beat us to it */
62989 if (!PageFsCache(page))
62990@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
62991 struct fscache_cookie *cookie = op->op.object->cookie;
62992
62993 #ifdef CONFIG_FSCACHE_STATS
62994- atomic_inc(&fscache_n_marks);
62995+ atomic_inc_unchecked(&fscache_n_marks);
62996 #endif
62997
62998 _debug("- mark %p{%lx}", page, page->index);
62999diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
63000index 40d13c7..ddf52b9 100644
63001--- a/fs/fscache/stats.c
63002+++ b/fs/fscache/stats.c
63003@@ -18,99 +18,99 @@
63004 /*
63005 * operation counters
63006 */
63007-atomic_t fscache_n_op_pend;
63008-atomic_t fscache_n_op_run;
63009-atomic_t fscache_n_op_enqueue;
63010-atomic_t fscache_n_op_requeue;
63011-atomic_t fscache_n_op_deferred_release;
63012-atomic_t fscache_n_op_release;
63013-atomic_t fscache_n_op_gc;
63014-atomic_t fscache_n_op_cancelled;
63015-atomic_t fscache_n_op_rejected;
63016+atomic_unchecked_t fscache_n_op_pend;
63017+atomic_unchecked_t fscache_n_op_run;
63018+atomic_unchecked_t fscache_n_op_enqueue;
63019+atomic_unchecked_t fscache_n_op_requeue;
63020+atomic_unchecked_t fscache_n_op_deferred_release;
63021+atomic_unchecked_t fscache_n_op_release;
63022+atomic_unchecked_t fscache_n_op_gc;
63023+atomic_unchecked_t fscache_n_op_cancelled;
63024+atomic_unchecked_t fscache_n_op_rejected;
63025
63026-atomic_t fscache_n_attr_changed;
63027-atomic_t fscache_n_attr_changed_ok;
63028-atomic_t fscache_n_attr_changed_nobufs;
63029-atomic_t fscache_n_attr_changed_nomem;
63030-atomic_t fscache_n_attr_changed_calls;
63031+atomic_unchecked_t fscache_n_attr_changed;
63032+atomic_unchecked_t fscache_n_attr_changed_ok;
63033+atomic_unchecked_t fscache_n_attr_changed_nobufs;
63034+atomic_unchecked_t fscache_n_attr_changed_nomem;
63035+atomic_unchecked_t fscache_n_attr_changed_calls;
63036
63037-atomic_t fscache_n_allocs;
63038-atomic_t fscache_n_allocs_ok;
63039-atomic_t fscache_n_allocs_wait;
63040-atomic_t fscache_n_allocs_nobufs;
63041-atomic_t fscache_n_allocs_intr;
63042-atomic_t fscache_n_allocs_object_dead;
63043-atomic_t fscache_n_alloc_ops;
63044-atomic_t fscache_n_alloc_op_waits;
63045+atomic_unchecked_t fscache_n_allocs;
63046+atomic_unchecked_t fscache_n_allocs_ok;
63047+atomic_unchecked_t fscache_n_allocs_wait;
63048+atomic_unchecked_t fscache_n_allocs_nobufs;
63049+atomic_unchecked_t fscache_n_allocs_intr;
63050+atomic_unchecked_t fscache_n_allocs_object_dead;
63051+atomic_unchecked_t fscache_n_alloc_ops;
63052+atomic_unchecked_t fscache_n_alloc_op_waits;
63053
63054-atomic_t fscache_n_retrievals;
63055-atomic_t fscache_n_retrievals_ok;
63056-atomic_t fscache_n_retrievals_wait;
63057-atomic_t fscache_n_retrievals_nodata;
63058-atomic_t fscache_n_retrievals_nobufs;
63059-atomic_t fscache_n_retrievals_intr;
63060-atomic_t fscache_n_retrievals_nomem;
63061-atomic_t fscache_n_retrievals_object_dead;
63062-atomic_t fscache_n_retrieval_ops;
63063-atomic_t fscache_n_retrieval_op_waits;
63064+atomic_unchecked_t fscache_n_retrievals;
63065+atomic_unchecked_t fscache_n_retrievals_ok;
63066+atomic_unchecked_t fscache_n_retrievals_wait;
63067+atomic_unchecked_t fscache_n_retrievals_nodata;
63068+atomic_unchecked_t fscache_n_retrievals_nobufs;
63069+atomic_unchecked_t fscache_n_retrievals_intr;
63070+atomic_unchecked_t fscache_n_retrievals_nomem;
63071+atomic_unchecked_t fscache_n_retrievals_object_dead;
63072+atomic_unchecked_t fscache_n_retrieval_ops;
63073+atomic_unchecked_t fscache_n_retrieval_op_waits;
63074
63075-atomic_t fscache_n_stores;
63076-atomic_t fscache_n_stores_ok;
63077-atomic_t fscache_n_stores_again;
63078-atomic_t fscache_n_stores_nobufs;
63079-atomic_t fscache_n_stores_oom;
63080-atomic_t fscache_n_store_ops;
63081-atomic_t fscache_n_store_calls;
63082-atomic_t fscache_n_store_pages;
63083-atomic_t fscache_n_store_radix_deletes;
63084-atomic_t fscache_n_store_pages_over_limit;
63085+atomic_unchecked_t fscache_n_stores;
63086+atomic_unchecked_t fscache_n_stores_ok;
63087+atomic_unchecked_t fscache_n_stores_again;
63088+atomic_unchecked_t fscache_n_stores_nobufs;
63089+atomic_unchecked_t fscache_n_stores_oom;
63090+atomic_unchecked_t fscache_n_store_ops;
63091+atomic_unchecked_t fscache_n_store_calls;
63092+atomic_unchecked_t fscache_n_store_pages;
63093+atomic_unchecked_t fscache_n_store_radix_deletes;
63094+atomic_unchecked_t fscache_n_store_pages_over_limit;
63095
63096-atomic_t fscache_n_store_vmscan_not_storing;
63097-atomic_t fscache_n_store_vmscan_gone;
63098-atomic_t fscache_n_store_vmscan_busy;
63099-atomic_t fscache_n_store_vmscan_cancelled;
63100-atomic_t fscache_n_store_vmscan_wait;
63101+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63102+atomic_unchecked_t fscache_n_store_vmscan_gone;
63103+atomic_unchecked_t fscache_n_store_vmscan_busy;
63104+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63105+atomic_unchecked_t fscache_n_store_vmscan_wait;
63106
63107-atomic_t fscache_n_marks;
63108-atomic_t fscache_n_uncaches;
63109+atomic_unchecked_t fscache_n_marks;
63110+atomic_unchecked_t fscache_n_uncaches;
63111
63112-atomic_t fscache_n_acquires;
63113-atomic_t fscache_n_acquires_null;
63114-atomic_t fscache_n_acquires_no_cache;
63115-atomic_t fscache_n_acquires_ok;
63116-atomic_t fscache_n_acquires_nobufs;
63117-atomic_t fscache_n_acquires_oom;
63118+atomic_unchecked_t fscache_n_acquires;
63119+atomic_unchecked_t fscache_n_acquires_null;
63120+atomic_unchecked_t fscache_n_acquires_no_cache;
63121+atomic_unchecked_t fscache_n_acquires_ok;
63122+atomic_unchecked_t fscache_n_acquires_nobufs;
63123+atomic_unchecked_t fscache_n_acquires_oom;
63124
63125-atomic_t fscache_n_invalidates;
63126-atomic_t fscache_n_invalidates_run;
63127+atomic_unchecked_t fscache_n_invalidates;
63128+atomic_unchecked_t fscache_n_invalidates_run;
63129
63130-atomic_t fscache_n_updates;
63131-atomic_t fscache_n_updates_null;
63132-atomic_t fscache_n_updates_run;
63133+atomic_unchecked_t fscache_n_updates;
63134+atomic_unchecked_t fscache_n_updates_null;
63135+atomic_unchecked_t fscache_n_updates_run;
63136
63137-atomic_t fscache_n_relinquishes;
63138-atomic_t fscache_n_relinquishes_null;
63139-atomic_t fscache_n_relinquishes_waitcrt;
63140-atomic_t fscache_n_relinquishes_retire;
63141+atomic_unchecked_t fscache_n_relinquishes;
63142+atomic_unchecked_t fscache_n_relinquishes_null;
63143+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63144+atomic_unchecked_t fscache_n_relinquishes_retire;
63145
63146-atomic_t fscache_n_cookie_index;
63147-atomic_t fscache_n_cookie_data;
63148-atomic_t fscache_n_cookie_special;
63149+atomic_unchecked_t fscache_n_cookie_index;
63150+atomic_unchecked_t fscache_n_cookie_data;
63151+atomic_unchecked_t fscache_n_cookie_special;
63152
63153-atomic_t fscache_n_object_alloc;
63154-atomic_t fscache_n_object_no_alloc;
63155-atomic_t fscache_n_object_lookups;
63156-atomic_t fscache_n_object_lookups_negative;
63157-atomic_t fscache_n_object_lookups_positive;
63158-atomic_t fscache_n_object_lookups_timed_out;
63159-atomic_t fscache_n_object_created;
63160-atomic_t fscache_n_object_avail;
63161-atomic_t fscache_n_object_dead;
63162+atomic_unchecked_t fscache_n_object_alloc;
63163+atomic_unchecked_t fscache_n_object_no_alloc;
63164+atomic_unchecked_t fscache_n_object_lookups;
63165+atomic_unchecked_t fscache_n_object_lookups_negative;
63166+atomic_unchecked_t fscache_n_object_lookups_positive;
63167+atomic_unchecked_t fscache_n_object_lookups_timed_out;
63168+atomic_unchecked_t fscache_n_object_created;
63169+atomic_unchecked_t fscache_n_object_avail;
63170+atomic_unchecked_t fscache_n_object_dead;
63171
63172-atomic_t fscache_n_checkaux_none;
63173-atomic_t fscache_n_checkaux_okay;
63174-atomic_t fscache_n_checkaux_update;
63175-atomic_t fscache_n_checkaux_obsolete;
63176+atomic_unchecked_t fscache_n_checkaux_none;
63177+atomic_unchecked_t fscache_n_checkaux_okay;
63178+atomic_unchecked_t fscache_n_checkaux_update;
63179+atomic_unchecked_t fscache_n_checkaux_obsolete;
63180
63181 atomic_t fscache_n_cop_alloc_object;
63182 atomic_t fscache_n_cop_lookup_object;
63183@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
63184 seq_puts(m, "FS-Cache statistics\n");
63185
63186 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
63187- atomic_read(&fscache_n_cookie_index),
63188- atomic_read(&fscache_n_cookie_data),
63189- atomic_read(&fscache_n_cookie_special));
63190+ atomic_read_unchecked(&fscache_n_cookie_index),
63191+ atomic_read_unchecked(&fscache_n_cookie_data),
63192+ atomic_read_unchecked(&fscache_n_cookie_special));
63193
63194 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
63195- atomic_read(&fscache_n_object_alloc),
63196- atomic_read(&fscache_n_object_no_alloc),
63197- atomic_read(&fscache_n_object_avail),
63198- atomic_read(&fscache_n_object_dead));
63199+ atomic_read_unchecked(&fscache_n_object_alloc),
63200+ atomic_read_unchecked(&fscache_n_object_no_alloc),
63201+ atomic_read_unchecked(&fscache_n_object_avail),
63202+ atomic_read_unchecked(&fscache_n_object_dead));
63203 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
63204- atomic_read(&fscache_n_checkaux_none),
63205- atomic_read(&fscache_n_checkaux_okay),
63206- atomic_read(&fscache_n_checkaux_update),
63207- atomic_read(&fscache_n_checkaux_obsolete));
63208+ atomic_read_unchecked(&fscache_n_checkaux_none),
63209+ atomic_read_unchecked(&fscache_n_checkaux_okay),
63210+ atomic_read_unchecked(&fscache_n_checkaux_update),
63211+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
63212
63213 seq_printf(m, "Pages : mrk=%u unc=%u\n",
63214- atomic_read(&fscache_n_marks),
63215- atomic_read(&fscache_n_uncaches));
63216+ atomic_read_unchecked(&fscache_n_marks),
63217+ atomic_read_unchecked(&fscache_n_uncaches));
63218
63219 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
63220 " oom=%u\n",
63221- atomic_read(&fscache_n_acquires),
63222- atomic_read(&fscache_n_acquires_null),
63223- atomic_read(&fscache_n_acquires_no_cache),
63224- atomic_read(&fscache_n_acquires_ok),
63225- atomic_read(&fscache_n_acquires_nobufs),
63226- atomic_read(&fscache_n_acquires_oom));
63227+ atomic_read_unchecked(&fscache_n_acquires),
63228+ atomic_read_unchecked(&fscache_n_acquires_null),
63229+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
63230+ atomic_read_unchecked(&fscache_n_acquires_ok),
63231+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
63232+ atomic_read_unchecked(&fscache_n_acquires_oom));
63233
63234 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
63235- atomic_read(&fscache_n_object_lookups),
63236- atomic_read(&fscache_n_object_lookups_negative),
63237- atomic_read(&fscache_n_object_lookups_positive),
63238- atomic_read(&fscache_n_object_created),
63239- atomic_read(&fscache_n_object_lookups_timed_out));
63240+ atomic_read_unchecked(&fscache_n_object_lookups),
63241+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
63242+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
63243+ atomic_read_unchecked(&fscache_n_object_created),
63244+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
63245
63246 seq_printf(m, "Invals : n=%u run=%u\n",
63247- atomic_read(&fscache_n_invalidates),
63248- atomic_read(&fscache_n_invalidates_run));
63249+ atomic_read_unchecked(&fscache_n_invalidates),
63250+ atomic_read_unchecked(&fscache_n_invalidates_run));
63251
63252 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
63253- atomic_read(&fscache_n_updates),
63254- atomic_read(&fscache_n_updates_null),
63255- atomic_read(&fscache_n_updates_run));
63256+ atomic_read_unchecked(&fscache_n_updates),
63257+ atomic_read_unchecked(&fscache_n_updates_null),
63258+ atomic_read_unchecked(&fscache_n_updates_run));
63259
63260 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
63261- atomic_read(&fscache_n_relinquishes),
63262- atomic_read(&fscache_n_relinquishes_null),
63263- atomic_read(&fscache_n_relinquishes_waitcrt),
63264- atomic_read(&fscache_n_relinquishes_retire));
63265+ atomic_read_unchecked(&fscache_n_relinquishes),
63266+ atomic_read_unchecked(&fscache_n_relinquishes_null),
63267+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
63268+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
63269
63270 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
63271- atomic_read(&fscache_n_attr_changed),
63272- atomic_read(&fscache_n_attr_changed_ok),
63273- atomic_read(&fscache_n_attr_changed_nobufs),
63274- atomic_read(&fscache_n_attr_changed_nomem),
63275- atomic_read(&fscache_n_attr_changed_calls));
63276+ atomic_read_unchecked(&fscache_n_attr_changed),
63277+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
63278+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
63279+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
63280+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
63281
63282 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
63283- atomic_read(&fscache_n_allocs),
63284- atomic_read(&fscache_n_allocs_ok),
63285- atomic_read(&fscache_n_allocs_wait),
63286- atomic_read(&fscache_n_allocs_nobufs),
63287- atomic_read(&fscache_n_allocs_intr));
63288+ atomic_read_unchecked(&fscache_n_allocs),
63289+ atomic_read_unchecked(&fscache_n_allocs_ok),
63290+ atomic_read_unchecked(&fscache_n_allocs_wait),
63291+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
63292+ atomic_read_unchecked(&fscache_n_allocs_intr));
63293 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
63294- atomic_read(&fscache_n_alloc_ops),
63295- atomic_read(&fscache_n_alloc_op_waits),
63296- atomic_read(&fscache_n_allocs_object_dead));
63297+ atomic_read_unchecked(&fscache_n_alloc_ops),
63298+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
63299+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
63300
63301 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
63302 " int=%u oom=%u\n",
63303- atomic_read(&fscache_n_retrievals),
63304- atomic_read(&fscache_n_retrievals_ok),
63305- atomic_read(&fscache_n_retrievals_wait),
63306- atomic_read(&fscache_n_retrievals_nodata),
63307- atomic_read(&fscache_n_retrievals_nobufs),
63308- atomic_read(&fscache_n_retrievals_intr),
63309- atomic_read(&fscache_n_retrievals_nomem));
63310+ atomic_read_unchecked(&fscache_n_retrievals),
63311+ atomic_read_unchecked(&fscache_n_retrievals_ok),
63312+ atomic_read_unchecked(&fscache_n_retrievals_wait),
63313+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
63314+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
63315+ atomic_read_unchecked(&fscache_n_retrievals_intr),
63316+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
63317 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
63318- atomic_read(&fscache_n_retrieval_ops),
63319- atomic_read(&fscache_n_retrieval_op_waits),
63320- atomic_read(&fscache_n_retrievals_object_dead));
63321+ atomic_read_unchecked(&fscache_n_retrieval_ops),
63322+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
63323+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
63324
63325 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
63326- atomic_read(&fscache_n_stores),
63327- atomic_read(&fscache_n_stores_ok),
63328- atomic_read(&fscache_n_stores_again),
63329- atomic_read(&fscache_n_stores_nobufs),
63330- atomic_read(&fscache_n_stores_oom));
63331+ atomic_read_unchecked(&fscache_n_stores),
63332+ atomic_read_unchecked(&fscache_n_stores_ok),
63333+ atomic_read_unchecked(&fscache_n_stores_again),
63334+ atomic_read_unchecked(&fscache_n_stores_nobufs),
63335+ atomic_read_unchecked(&fscache_n_stores_oom));
63336 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
63337- atomic_read(&fscache_n_store_ops),
63338- atomic_read(&fscache_n_store_calls),
63339- atomic_read(&fscache_n_store_pages),
63340- atomic_read(&fscache_n_store_radix_deletes),
63341- atomic_read(&fscache_n_store_pages_over_limit));
63342+ atomic_read_unchecked(&fscache_n_store_ops),
63343+ atomic_read_unchecked(&fscache_n_store_calls),
63344+ atomic_read_unchecked(&fscache_n_store_pages),
63345+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
63346+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
63347
63348 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
63349- atomic_read(&fscache_n_store_vmscan_not_storing),
63350- atomic_read(&fscache_n_store_vmscan_gone),
63351- atomic_read(&fscache_n_store_vmscan_busy),
63352- atomic_read(&fscache_n_store_vmscan_cancelled),
63353- atomic_read(&fscache_n_store_vmscan_wait));
63354+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
63355+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
63356+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
63357+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
63358+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
63359
63360 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
63361- atomic_read(&fscache_n_op_pend),
63362- atomic_read(&fscache_n_op_run),
63363- atomic_read(&fscache_n_op_enqueue),
63364- atomic_read(&fscache_n_op_cancelled),
63365- atomic_read(&fscache_n_op_rejected));
63366+ atomic_read_unchecked(&fscache_n_op_pend),
63367+ atomic_read_unchecked(&fscache_n_op_run),
63368+ atomic_read_unchecked(&fscache_n_op_enqueue),
63369+ atomic_read_unchecked(&fscache_n_op_cancelled),
63370+ atomic_read_unchecked(&fscache_n_op_rejected));
63371 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
63372- atomic_read(&fscache_n_op_deferred_release),
63373- atomic_read(&fscache_n_op_release),
63374- atomic_read(&fscache_n_op_gc));
63375+ atomic_read_unchecked(&fscache_n_op_deferred_release),
63376+ atomic_read_unchecked(&fscache_n_op_release),
63377+ atomic_read_unchecked(&fscache_n_op_gc));
63378
63379 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
63380 atomic_read(&fscache_n_cop_alloc_object),
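[Annotation] The hunks above and in fs/fscache/page.c convert every FS-Cache statistics counter from atomic_t to atomic_unchecked_t (and the matching reads/increments to their _unchecked forms). Under PaX's REFCOUNT hardening, plain atomic_t arithmetic is instrumented to catch reference-counter overflow; counters that are pure statistics are moved to the unchecked variant so harmless wraparound is not flagged, while the fscache_n_cop_* operation counters are left as checked atomic_t. A minimal userspace model of the distinction, assuming stand-in types and a simplified trap-on-overflow (the instrumented kernel primitive behaves differently in detail):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { atomic_int counter; } atomic_t;           /* checked */
typedef struct { atomic_int counter; } atomic_unchecked_t; /* exempt  */

static void atomic_inc(atomic_t *v)
{
	/* instrumented variant: overflow is treated as a refcount bug */
	if (atomic_fetch_add(&v->counter, 1) == INT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	atomic_fetch_add(&v->counter, 1); /* wraparound is tolerated */
}

int main(void)
{
	atomic_t refcnt;
	atomic_unchecked_t n_stores;

	atomic_init(&refcnt.counter, 0);
	atomic_init(&n_stores.counter, INT_MAX);

	atomic_inc(&refcnt);             /* normal use: no trap */
	atomic_inc_unchecked(&n_stores); /* a statistic may wrap freely */
	printf("stat wrapped to %d\n", atomic_load(&n_stores.counter));
	return 0;
}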
63381diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
63382index 28d0c7a..04816b7 100644
63383--- a/fs/fuse/cuse.c
63384+++ b/fs/fuse/cuse.c
63385@@ -611,10 +611,12 @@ static int __init cuse_init(void)
63386 INIT_LIST_HEAD(&cuse_conntbl[i]);
63387
63388 /* inherit and extend fuse_dev_operations */
63389- cuse_channel_fops = fuse_dev_operations;
63390- cuse_channel_fops.owner = THIS_MODULE;
63391- cuse_channel_fops.open = cuse_channel_open;
63392- cuse_channel_fops.release = cuse_channel_release;
63393+ pax_open_kernel();
63394+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
63395+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
63396+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
63397+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
63398+ pax_close_kernel();
63399
63400 cuse_class = class_create(THIS_MODULE, "cuse");
63401 if (IS_ERR(cuse_class))
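[Annotation] The cuse_init() hunk above is a consequence of grsecurity constifying file_operations: cuse_channel_fops lives in read-only memory, so its fields cannot be assigned directly, and the copy/patch sequence is wrapped in pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection. A rough userspace analogue, assuming mprotect() as a stand-in for the real mechanism (toggling CR0.WP or page-table permissions):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct file_ops { void (*open)(void); void (*release)(void); };

static void cuse_open(void) { puts("cuse open"); }

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	/* a table that is read-only in steady state, like a const ops struct */
	struct file_ops *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
				    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct file_ops template = { 0 };

	if (ops == MAP_FAILED)
		return 1;
	memcpy(ops, &template, sizeof(*ops));
	mprotect(ops, pg, PROT_READ);              /* "constify" it */

	mprotect(ops, pg, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
	ops->open = cuse_open;                     /* patch one slot */
	mprotect(ops, pg, PROT_READ);              /* pax_close_kernel() */

	ops->open();
	return 0;
}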
63402diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
63403index ed19a7d..91e9a4c 100644
63404--- a/fs/fuse/dev.c
63405+++ b/fs/fuse/dev.c
63406@@ -1394,7 +1394,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63407 ret = 0;
63408 pipe_lock(pipe);
63409
63410- if (!pipe->readers) {
63411+ if (!atomic_read(&pipe->readers)) {
63412 send_sig(SIGPIPE, current, 0);
63413 if (!ret)
63414 ret = -EPIPE;
63415@@ -1423,7 +1423,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63416 page_nr++;
63417 ret += buf->len;
63418
63419- if (pipe->files)
63420+ if (atomic_read(&pipe->files))
63421 do_wakeup = 1;
63422 }
63423
63424diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
63425index 08e7b1a..d91c6ee 100644
63426--- a/fs/fuse/dir.c
63427+++ b/fs/fuse/dir.c
63428@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
63429 return link;
63430 }
63431
63432-static void free_link(char *link)
63433+static void free_link(const char *link)
63434 {
63435 if (!IS_ERR(link))
63436 free_page((unsigned long) link);
63437diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
63438index fd62cae..3494dfa 100644
63439--- a/fs/hostfs/hostfs_kern.c
63440+++ b/fs/hostfs/hostfs_kern.c
63441@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
63442
63443 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
63444 {
63445- char *s = nd_get_link(nd);
63446+ const char *s = nd_get_link(nd);
63447 if (!IS_ERR(s))
63448 __putname(s);
63449 }
63450diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
63451index 5eba47f..d353c22 100644
63452--- a/fs/hugetlbfs/inode.c
63453+++ b/fs/hugetlbfs/inode.c
63454@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63455 struct mm_struct *mm = current->mm;
63456 struct vm_area_struct *vma;
63457 struct hstate *h = hstate_file(file);
63458+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
63459 struct vm_unmapped_area_info info;
63460
63461 if (len & ~huge_page_mask(h))
63462@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63463 return addr;
63464 }
63465
63466+#ifdef CONFIG_PAX_RANDMMAP
63467+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
63468+#endif
63469+
63470 if (addr) {
63471 addr = ALIGN(addr, huge_page_size(h));
63472 vma = find_vma(mm, addr);
63473- if (TASK_SIZE - len >= addr &&
63474- (!vma || addr + len <= vma->vm_start))
63475+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
63476 return addr;
63477 }
63478
63479 info.flags = 0;
63480 info.length = len;
63481 info.low_limit = TASK_UNMAPPED_BASE;
63482+
63483+#ifdef CONFIG_PAX_RANDMMAP
63484+ if (mm->pax_flags & MF_PAX_RANDMMAP)
63485+ info.low_limit += mm->delta_mmap;
63486+#endif
63487+
63488 info.high_limit = TASK_SIZE;
63489 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
63490 info.align_offset = 0;
63491@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
63492 };
63493 MODULE_ALIAS_FS("hugetlbfs");
63494
63495-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63496+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63497
63498 static int can_do_hugetlb_shm(void)
63499 {
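[Annotation] The hugetlb_get_unmapped_area() hunk threads in two PAX_RANDMMAP behaviours: a caller-supplied address hint is honoured only when randomization is off, the search floor (info.low_limit) is shifted by the per-mm random delta when it is on, and the plain "no overlapping VMA" test becomes check_heap_stack_gap(), which also enforces a guard gap against the neighbouring mapping. A much-simplified userspace sketch of both adjustments; gap_ok() is a hypothetical stand-in for check_heap_stack_gap() and the constants are arbitrary:

#include <stdbool.h>
#include <stdio.h>

#define TASK_UNMAPPED_BASE 0x100000000UL

struct vma { unsigned long start, end; };

/* hypothetical stand-in for check_heap_stack_gap() */
static bool gap_ok(const struct vma *next, unsigned long addr,
		   unsigned long len, unsigned long gap)
{
	return !next || addr + len + gap <= next->start;
}

static unsigned long pick_low_limit(bool randmmap, unsigned long delta_mmap)
{
	unsigned long low = TASK_UNMAPPED_BASE;

	if (randmmap)
		low += delta_mmap; /* randomized search floor */
	return low;
}

int main(void)
{
	struct vma next = { 0x200000000UL, 0x200010000UL };
	unsigned long base = pick_low_limit(true, 0x7f000UL);

	printf("search floor %#lx, gap check: %s\n", base,
	       gap_ok(&next, base, 0x20000UL, 0x10000UL) ? "ok" : "rejected");
	return 0;
}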
63500diff --git a/fs/inode.c b/fs/inode.c
63501index aa149e7..46f1f65 100644
63502--- a/fs/inode.c
63503+++ b/fs/inode.c
63504@@ -842,16 +842,20 @@ unsigned int get_next_ino(void)
63505 unsigned int *p = &get_cpu_var(last_ino);
63506 unsigned int res = *p;
63507
63508+start:
63509+
63510 #ifdef CONFIG_SMP
63511 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
63512- static atomic_t shared_last_ino;
63513- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
63514+ static atomic_unchecked_t shared_last_ino;
63515+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
63516
63517 res = next - LAST_INO_BATCH;
63518 }
63519 #endif
63520
63521- *p = ++res;
63522+ if (unlikely(!++res))
63523+ goto start; /* never zero */
63524+ *p = res;
63525 put_cpu_var(last_ino);
63526 return res;
63527 }
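[Annotation] The get_next_ino() hunk does two things: the shared batch counter becomes atomic_unchecked_t, since inode-number wraparound is expected and REFCOUNT checking would only yield false positives, and a wrap to zero is skipped so no inode is ever handed i_ino == 0. A single-threaded sketch of the skip-zero logic, assuming the per-CPU and atomic machinery is collapsed into plain variables:

#include <stdio.h>

#define LAST_INO_BATCH 1024

static unsigned int shared_last_ino; /* models the shared batch source */
static unsigned int last_ino;        /* models one CPU's private cursor */

static unsigned int get_next_ino(void)
{
	unsigned int res = last_ino;

start:
	if ((res & (LAST_INO_BATCH - 1)) == 0) { /* batch exhausted */
		shared_last_ino += LAST_INO_BATCH;
		res = shared_last_ino - LAST_INO_BATCH;
	}
	if (!++res)
		goto start; /* wrapped: refill rather than return 0 */
	last_ino = res;
	return res;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("ino %u\n", get_next_ino());
	return 0;
}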
63528diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
63529index 4a6cf28..d3a29d3 100644
63530--- a/fs/jffs2/erase.c
63531+++ b/fs/jffs2/erase.c
63532@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
63533 struct jffs2_unknown_node marker = {
63534 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
63535 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63536- .totlen = cpu_to_je32(c->cleanmarker_size)
63537+ .totlen = cpu_to_je32(c->cleanmarker_size),
63538+ .hdr_crc = cpu_to_je32(0)
63539 };
63540
63541 jffs2_prealloc_raw_node_refs(c, jeb, 1);
63542diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
63543index 09ed551..45684f8 100644
63544--- a/fs/jffs2/wbuf.c
63545+++ b/fs/jffs2/wbuf.c
63546@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
63547 {
63548 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
63549 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63550- .totlen = constant_cpu_to_je32(8)
63551+ .totlen = constant_cpu_to_je32(8),
63552+ .hdr_crc = constant_cpu_to_je32(0)
63553 };
63554
63555 /*
63556diff --git a/fs/jfs/super.c b/fs/jfs/super.c
63557index 16c3a95..e9cb75d 100644
63558--- a/fs/jfs/super.c
63559+++ b/fs/jfs/super.c
63560@@ -902,7 +902,7 @@ static int __init init_jfs_fs(void)
63561
63562 jfs_inode_cachep =
63563 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
63564- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
63565+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
63566 init_once);
63567 if (jfs_inode_cachep == NULL)
63568 return -ENOMEM;
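[Annotation] The jfs hunk adds SLAB_USERCOPY to the jfs_ip cache. Under PAX_USERCOPY, copies between a slab object and user space are refused unless the cache is explicitly flagged and the copy stays inside one object; the flag is presumably needed here because parts of struct jfs_inode_info are copied directly to or from user space. A toy model of the check, where usercopy_ok() is a hypothetical stand-in and the real validation is considerably more involved:

#include <stdbool.h>
#include <stdio.h>

#define SLAB_USERCOPY 0x1U

struct kmem_cache { const char *name; unsigned int size, flags; };

/* hypothetical stand-in for the PAX_USERCOPY slab check */
static bool usercopy_ok(const struct kmem_cache *c,
			unsigned long off, unsigned long n)
{
	return (c->flags & SLAB_USERCOPY) && off + n <= c->size;
}

int main(void)
{
	struct kmem_cache jfs_ip = { "jfs_ip", 2048, SLAB_USERCOPY };

	printf("in-bounds copy: %s\n",
	       usercopy_ok(&jfs_ip, 0, 64) ? "allowed" : "denied");
	printf("overflowing copy: %s\n",
	       usercopy_ok(&jfs_ip, 2040, 64) ? "allowed" : "denied");
	return 0;
}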
63569diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
63570index 2d881b3..fe1ac77 100644
63571--- a/fs/kernfs/dir.c
63572+++ b/fs/kernfs/dir.c
63573@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
63574 *
63575 * Returns 31 bit hash of ns + name (so it fits in an off_t )
63576 */
63577-static unsigned int kernfs_name_hash(const char *name, const void *ns)
63578+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
63579 {
63580 unsigned long hash = init_name_hash();
63581 unsigned int len = strlen(name);
63582@@ -833,6 +833,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
63583 ret = scops->mkdir(parent, dentry->d_name.name, mode);
63584
63585 kernfs_put_active(parent);
63586+
63587+ if (!ret) {
63588+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
63589+ ret = PTR_ERR_OR_ZERO(dentry_ret);
63590+ }
63591+
63592 return ret;
63593 }
63594
63595diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
63596index ddc9f96..fce1040 100644
63597--- a/fs/kernfs/file.c
63598+++ b/fs/kernfs/file.c
63599@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
63600
63601 struct kernfs_open_node {
63602 atomic_t refcnt;
63603- atomic_t event;
63604+ atomic_unchecked_t event;
63605 wait_queue_head_t poll;
63606 struct list_head files; /* goes through kernfs_open_file.list */
63607 };
63608@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
63609 {
63610 struct kernfs_open_file *of = sf->private;
63611
63612- of->event = atomic_read(&of->kn->attr.open->event);
63613+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
63614
63615 return of->kn->attr.ops->seq_show(sf, v);
63616 }
63617@@ -384,12 +384,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
63618 return ret;
63619 }
63620
63621-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63622- void *buf, int len, int write)
63623+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63624+ void *buf, size_t len, int write)
63625 {
63626 struct file *file = vma->vm_file;
63627 struct kernfs_open_file *of = kernfs_of(file);
63628- int ret;
63629+ ssize_t ret;
63630
63631 if (!of->vm_ops)
63632 return -EINVAL;
63633@@ -568,7 +568,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
63634 return -ENOMEM;
63635
63636 atomic_set(&new_on->refcnt, 0);
63637- atomic_set(&new_on->event, 1);
63638+ atomic_set_unchecked(&new_on->event, 1);
63639 init_waitqueue_head(&new_on->poll);
63640 INIT_LIST_HEAD(&new_on->files);
63641 goto retry;
63642@@ -792,7 +792,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
63643
63644 kernfs_put_active(kn);
63645
63646- if (of->event != atomic_read(&on->event))
63647+ if (of->event != atomic_read_unchecked(&on->event))
63648 goto trigger;
63649
63650 return DEFAULT_POLLMASK;
63651@@ -823,7 +823,7 @@ repeat:
63652
63653 on = kn->attr.open;
63654 if (on) {
63655- atomic_inc(&on->event);
63656+ atomic_inc_unchecked(&on->event);
63657 wake_up_interruptible(&on->poll);
63658 }
63659
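[Annotation] The kernfs_vma_access() change above is one instance of a pattern applied throughout this patch: ->access()-style callbacks are widened from int lengths to size_t/ssize_t so large values are neither truncated at the call boundary nor lost to the size_overflow instrumentation. A small LP64-only illustration of the truncation being avoided (the explicit cast in the first call mimics what an int parameter would do implicitly):

#include <stdio.h>
#include <sys/types.h>

static int access_int(int len) { return len; }
static ssize_t access_ssize(size_t len) { return (ssize_t)len; }

int main(void)
{
	size_t big = 0x100000001UL; /* > INT_MAX, assumes 64-bit size_t */

	printf("int callback sees %d\n", access_int((int)big));
	printf("ssize_t callback sees %zd\n", access_ssize(big));
	return 0;
}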
63660diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
63661index 8a19889..4c3069a 100644
63662--- a/fs/kernfs/symlink.c
63663+++ b/fs/kernfs/symlink.c
63664@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
63665 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
63666 void *cookie)
63667 {
63668- char *page = nd_get_link(nd);
63669+ const char *page = nd_get_link(nd);
63670 if (!IS_ERR(page))
63671 free_page((unsigned long)page);
63672 }
63673diff --git a/fs/libfs.c b/fs/libfs.c
63674index 005843c..06c4191 100644
63675--- a/fs/libfs.c
63676+++ b/fs/libfs.c
63677@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63678
63679 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
63680 struct dentry *next = list_entry(p, struct dentry, d_child);
63681+ char d_name[sizeof(next->d_iname)];
63682+ const unsigned char *name;
63683+
63684 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
63685 if (!simple_positive(next)) {
63686 spin_unlock(&next->d_lock);
63687@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63688
63689 spin_unlock(&next->d_lock);
63690 spin_unlock(&dentry->d_lock);
63691- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
63692+ name = next->d_name.name;
63693+ if (name == next->d_iname) {
63694+ memcpy(d_name, name, next->d_name.len);
63695+ name = d_name;
63696+ }
63697+ if (!dir_emit(ctx, name, next->d_name.len,
63698 next->d_inode->i_ino, dt_type(next->d_inode)))
63699 return 0;
63700 spin_lock(&dentry->d_lock);
63701@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
63702 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
63703 void *cookie)
63704 {
63705- char *s = nd_get_link(nd);
63706+ const char *s = nd_get_link(nd);
63707 if (!IS_ERR(s))
63708 kfree(s);
63709 }
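[Annotation] The dcache_readdir() hunk snapshots short dentry names, those stored in the inline d_iname array, into an on-stack buffer before calling dir_emit(). dir_emit() copies the name to user space and may block, and by that point d_lock has been dropped, so a concurrent rename could reuse the inline storage while it is being read; externally allocated names keep a stable pointer and need no copy. A compact sketch of the snapshot-before-blocking-call pattern, with types simplified:

#include <stdio.h>
#include <string.h>

#define DNAME_INLINE_LEN 32

struct dentry {
	char d_iname[DNAME_INLINE_LEN]; /* inline storage, reused on rename */
	const char *d_name;             /* may point into d_iname */
	size_t d_len;
};

/* dir_emit() stand-in: may block, so the name must be stable memory */
static int emit(const char *name, size_t len)
{
	return printf("%.*s\n", (int)len, name) >= 0;
}

static int emit_one(const struct dentry *next)
{
	char d_name[DNAME_INLINE_LEN];
	const char *name = next->d_name;

	if (name == next->d_iname) {    /* short name: snapshot it */
		memcpy(d_name, name, next->d_len);
		name = d_name;
	}
	return emit(name, next->d_len); /* safe even if *next mutates */
}

int main(void)
{
	struct dentry d = { "hello", d.d_iname, 5 };

	return !emit_one(&d);
}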
63710diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
63711index acd3947..1f896e2 100644
63712--- a/fs/lockd/clntproc.c
63713+++ b/fs/lockd/clntproc.c
63714@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
63715 /*
63716 * Cookie counter for NLM requests
63717 */
63718-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
63719+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
63720
63721 void nlmclnt_next_cookie(struct nlm_cookie *c)
63722 {
63723- u32 cookie = atomic_inc_return(&nlm_cookie);
63724+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
63725
63726 memcpy(c->data, &cookie, 4);
63727 c->len=4;
63728diff --git a/fs/locks.c b/fs/locks.c
63729index 59e2f90..bd69071 100644
63730--- a/fs/locks.c
63731+++ b/fs/locks.c
63732@@ -2374,7 +2374,7 @@ void locks_remove_file(struct file *filp)
63733 locks_remove_posix(filp, filp);
63734
63735 if (filp->f_op->flock) {
63736- struct file_lock fl = {
63737+ struct file_lock flock = {
63738 .fl_owner = filp,
63739 .fl_pid = current->tgid,
63740 .fl_file = filp,
63741@@ -2382,9 +2382,9 @@ void locks_remove_file(struct file *filp)
63742 .fl_type = F_UNLCK,
63743 .fl_end = OFFSET_MAX,
63744 };
63745- filp->f_op->flock(filp, F_SETLKW, &fl);
63746- if (fl.fl_ops && fl.fl_ops->fl_release_private)
63747- fl.fl_ops->fl_release_private(&fl);
63748+ filp->f_op->flock(filp, F_SETLKW, &flock);
63749+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
63750+ flock.fl_ops->fl_release_private(&flock);
63751 }
63752
63753 spin_lock(&inode->i_lock);
63754diff --git a/fs/mount.h b/fs/mount.h
63755index 0ad6f76..a04c146 100644
63756--- a/fs/mount.h
63757+++ b/fs/mount.h
63758@@ -12,7 +12,7 @@ struct mnt_namespace {
63759 u64 seq; /* Sequence number to prevent loops */
63760 wait_queue_head_t poll;
63761 u64 event;
63762-};
63763+} __randomize_layout;
63764
63765 struct mnt_pcp {
63766 int mnt_count;
63767@@ -63,7 +63,7 @@ struct mount {
63768 int mnt_expiry_mark; /* true if marked for expiry */
63769 struct hlist_head mnt_pins;
63770 struct path mnt_ex_mountpoint;
63771-};
63772+} __randomize_layout;
63773
63774 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
63775
63776diff --git a/fs/namei.c b/fs/namei.c
63777index bc35b02..7ed1f1d 100644
63778--- a/fs/namei.c
63779+++ b/fs/namei.c
63780@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
63781 if (ret != -EACCES)
63782 return ret;
63783
63784+#ifdef CONFIG_GRKERNSEC
63785+ /* we'll block if we have to log due to a denied capability use */
63786+ if (mask & MAY_NOT_BLOCK)
63787+ return -ECHILD;
63788+#endif
63789+
63790 if (S_ISDIR(inode->i_mode)) {
63791 /* DACs are overridable for directories */
63792- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63793- return 0;
63794 if (!(mask & MAY_WRITE))
63795- if (capable_wrt_inode_uidgid(inode,
63796- CAP_DAC_READ_SEARCH))
63797+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
63798+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63799 return 0;
63800+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63801+ return 0;
63802 return -EACCES;
63803 }
63804 /*
63805+ * Searching includes executable on directories, else just read.
63806+ */
63807+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
63808+ if (mask == MAY_READ)
63809+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
63810+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63811+ return 0;
63812+
63813+ /*
63814 * Read/write DACs are always overridable.
63815 * Executable DACs are overridable when there is
63816 * at least one exec bit set.
63817@@ -350,14 +365,6 @@ int generic_permission(struct inode *inode, int mask)
63818 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63819 return 0;
63820
63821- /*
63822- * Searching includes executable on directories, else just read.
63823- */
63824- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
63825- if (mask == MAY_READ)
63826- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63827- return 0;
63828-
63829 return -EACCES;
63830 }
63831 EXPORT_SYMBOL(generic_permission);
63832@@ -497,7 +504,7 @@ struct nameidata {
63833 int last_type;
63834 unsigned depth;
63835 struct file *base;
63836- char *saved_names[MAX_NESTED_LINKS + 1];
63837+ const char *saved_names[MAX_NESTED_LINKS + 1];
63838 };
63839
63840 /*
63841@@ -708,13 +715,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
63842 nd->flags |= LOOKUP_JUMPED;
63843 }
63844
63845-void nd_set_link(struct nameidata *nd, char *path)
63846+void nd_set_link(struct nameidata *nd, const char *path)
63847 {
63848 nd->saved_names[nd->depth] = path;
63849 }
63850 EXPORT_SYMBOL(nd_set_link);
63851
63852-char *nd_get_link(struct nameidata *nd)
63853+const char *nd_get_link(const struct nameidata *nd)
63854 {
63855 return nd->saved_names[nd->depth];
63856 }
63857@@ -849,7 +856,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
63858 {
63859 struct dentry *dentry = link->dentry;
63860 int error;
63861- char *s;
63862+ const char *s;
63863
63864 BUG_ON(nd->flags & LOOKUP_RCU);
63865
63866@@ -870,6 +877,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
63867 if (error)
63868 goto out_put_nd_path;
63869
63870+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
63871+ dentry->d_inode, dentry, nd->path.mnt)) {
63872+ error = -EACCES;
63873+ goto out_put_nd_path;
63874+ }
63875+
63876 nd->last_type = LAST_BIND;
63877 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
63878 error = PTR_ERR(*p);
63879@@ -1633,6 +1646,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
63880 if (res)
63881 break;
63882 res = walk_component(nd, path, LOOKUP_FOLLOW);
63883+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
63884+ res = -EACCES;
63885 put_link(nd, &link, cookie);
63886 } while (res > 0);
63887
63888@@ -1705,7 +1720,7 @@ EXPORT_SYMBOL(full_name_hash);
63889 static inline u64 hash_name(const char *name)
63890 {
63891 unsigned long a, b, adata, bdata, mask, hash, len;
63892- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
63893+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
63894
63895 hash = a = 0;
63896 len = -sizeof(unsigned long);
63897@@ -2000,6 +2015,8 @@ static int path_lookupat(int dfd, const char *name,
63898 if (err)
63899 break;
63900 err = lookup_last(nd, &path);
63901+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
63902+ err = -EACCES;
63903 put_link(nd, &link, cookie);
63904 }
63905 }
63906@@ -2007,6 +2024,13 @@ static int path_lookupat(int dfd, const char *name,
63907 if (!err)
63908 err = complete_walk(nd);
63909
63910+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
63911+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
63912+ path_put(&nd->path);
63913+ err = -ENOENT;
63914+ }
63915+ }
63916+
63917 if (!err && nd->flags & LOOKUP_DIRECTORY) {
63918 if (!d_can_lookup(nd->path.dentry)) {
63919 path_put(&nd->path);
63920@@ -2028,8 +2052,15 @@ static int filename_lookup(int dfd, struct filename *name,
63921 retval = path_lookupat(dfd, name->name,
63922 flags | LOOKUP_REVAL, nd);
63923
63924- if (likely(!retval))
63925+ if (likely(!retval)) {
63926 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
63927+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
63928+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
63929+ path_put(&nd->path);
63930+ return -ENOENT;
63931+ }
63932+ }
63933+ }
63934 return retval;
63935 }
63936
63937@@ -2595,6 +2626,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
63938 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
63939 return -EPERM;
63940
63941+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
63942+ return -EPERM;
63943+ if (gr_handle_rawio(inode))
63944+ return -EPERM;
63945+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
63946+ return -EACCES;
63947+
63948 return 0;
63949 }
63950
63951@@ -2826,7 +2864,7 @@ looked_up:
63952 * cleared otherwise prior to returning.
63953 */
63954 static int lookup_open(struct nameidata *nd, struct path *path,
63955- struct file *file,
63956+ struct path *link, struct file *file,
63957 const struct open_flags *op,
63958 bool got_write, int *opened)
63959 {
63960@@ -2861,6 +2899,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
63961 /* Negative dentry, just create the file */
63962 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
63963 umode_t mode = op->mode;
63964+
63965+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
63966+ error = -EACCES;
63967+ goto out_dput;
63968+ }
63969+
63970+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
63971+ error = -EACCES;
63972+ goto out_dput;
63973+ }
63974+
63975 if (!IS_POSIXACL(dir->d_inode))
63976 mode &= ~current_umask();
63977 /*
63978@@ -2882,6 +2931,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
63979 nd->flags & LOOKUP_EXCL);
63980 if (error)
63981 goto out_dput;
63982+ else
63983+ gr_handle_create(dentry, nd->path.mnt);
63984 }
63985 out_no_open:
63986 path->dentry = dentry;
63987@@ -2896,7 +2947,7 @@ out_dput:
63988 /*
63989 * Handle the last step of open()
63990 */
63991-static int do_last(struct nameidata *nd, struct path *path,
63992+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
63993 struct file *file, const struct open_flags *op,
63994 int *opened, struct filename *name)
63995 {
63996@@ -2946,6 +2997,15 @@ static int do_last(struct nameidata *nd, struct path *path,
63997 if (error)
63998 return error;
63999
64000+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
64001+ error = -ENOENT;
64002+ goto out;
64003+ }
64004+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64005+ error = -EACCES;
64006+ goto out;
64007+ }
64008+
64009 audit_inode(name, dir, LOOKUP_PARENT);
64010 error = -EISDIR;
64011 /* trailing slashes? */
64012@@ -2965,7 +3025,7 @@ retry_lookup:
64013 */
64014 }
64015 mutex_lock(&dir->d_inode->i_mutex);
64016- error = lookup_open(nd, path, file, op, got_write, opened);
64017+ error = lookup_open(nd, path, link, file, op, got_write, opened);
64018 mutex_unlock(&dir->d_inode->i_mutex);
64019
64020 if (error <= 0) {
64021@@ -2989,11 +3049,28 @@ retry_lookup:
64022 goto finish_open_created;
64023 }
64024
64025+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
64026+ error = -ENOENT;
64027+ goto exit_dput;
64028+ }
64029+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
64030+ error = -EACCES;
64031+ goto exit_dput;
64032+ }
64033+
64034 /*
64035 * create/update audit record if it already exists.
64036 */
64037- if (d_is_positive(path->dentry))
64038+ if (d_is_positive(path->dentry)) {
64039+ /* only check if O_CREAT is specified, all other checks need to go
64040+ into may_open */
64041+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
64042+ error = -EACCES;
64043+ goto exit_dput;
64044+ }
64045+
64046 audit_inode(name, path->dentry, 0);
64047+ }
64048
64049 /*
64050 * If atomic_open() acquired write access it is dropped now due to
64051@@ -3034,6 +3111,11 @@ finish_lookup:
64052 }
64053 }
64054 BUG_ON(inode != path->dentry->d_inode);
64055+ /* if we're resolving a symlink to another symlink */
64056+ if (link && gr_handle_symlink_owner(link, inode)) {
64057+ error = -EACCES;
64058+ goto out;
64059+ }
64060 return 1;
64061 }
64062
64063@@ -3053,7 +3135,18 @@ finish_open:
64064 path_put(&save_parent);
64065 return error;
64066 }
64067+
64068+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64069+ error = -ENOENT;
64070+ goto out;
64071+ }
64072+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64073+ error = -EACCES;
64074+ goto out;
64075+ }
64076+
64077 audit_inode(name, nd->path.dentry, 0);
64078+
64079 error = -EISDIR;
64080 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
64081 goto out;
64082@@ -3214,7 +3307,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64083 if (unlikely(error))
64084 goto out;
64085
64086- error = do_last(nd, &path, file, op, &opened, pathname);
64087+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
64088 while (unlikely(error > 0)) { /* trailing symlink */
64089 struct path link = path;
64090 void *cookie;
64091@@ -3232,7 +3325,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64092 error = follow_link(&link, nd, &cookie);
64093 if (unlikely(error))
64094 break;
64095- error = do_last(nd, &path, file, op, &opened, pathname);
64096+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
64097 put_link(nd, &link, cookie);
64098 }
64099 out:
64100@@ -3329,9 +3422,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
64101 goto unlock;
64102
64103 error = -EEXIST;
64104- if (d_is_positive(dentry))
64105+ if (d_is_positive(dentry)) {
64106+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
64107+ error = -ENOENT;
64108 goto fail;
64109-
64110+ }
64111 /*
64112 * Special case - lookup gave negative, but... we had foo/bar/
64113 * From the vfs_mknod() POV we just have a negative dentry -
64114@@ -3383,6 +3478,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
64115 }
64116 EXPORT_SYMBOL(user_path_create);
64117
64118+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
64119+{
64120+ struct filename *tmp = getname(pathname);
64121+ struct dentry *res;
64122+ if (IS_ERR(tmp))
64123+ return ERR_CAST(tmp);
64124+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
64125+ if (IS_ERR(res))
64126+ putname(tmp);
64127+ else
64128+ *to = tmp;
64129+ return res;
64130+}
64131+
64132 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
64133 {
64134 int error = may_create(dir, dentry);
64135@@ -3446,6 +3555,17 @@ retry:
64136
64137 if (!IS_POSIXACL(path.dentry->d_inode))
64138 mode &= ~current_umask();
64139+
64140+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
64141+ error = -EPERM;
64142+ goto out;
64143+ }
64144+
64145+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
64146+ error = -EACCES;
64147+ goto out;
64148+ }
64149+
64150 error = security_path_mknod(&path, dentry, mode, dev);
64151 if (error)
64152 goto out;
64153@@ -3461,6 +3581,8 @@ retry:
64154 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
64155 break;
64156 }
64157+ if (!error)
64158+ gr_handle_create(dentry, path.mnt);
64159 out:
64160 done_path_create(&path, dentry);
64161 if (retry_estale(error, lookup_flags)) {
64162@@ -3515,9 +3637,16 @@ retry:
64163
64164 if (!IS_POSIXACL(path.dentry->d_inode))
64165 mode &= ~current_umask();
64166+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
64167+ error = -EACCES;
64168+ goto out;
64169+ }
64170 error = security_path_mkdir(&path, dentry, mode);
64171 if (!error)
64172 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
64173+ if (!error)
64174+ gr_handle_create(dentry, path.mnt);
64175+out:
64176 done_path_create(&path, dentry);
64177 if (retry_estale(error, lookup_flags)) {
64178 lookup_flags |= LOOKUP_REVAL;
64179@@ -3601,6 +3730,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
64180 struct filename *name;
64181 struct dentry *dentry;
64182 struct nameidata nd;
64183+ u64 saved_ino = 0;
64184+ dev_t saved_dev = 0;
64185 unsigned int lookup_flags = 0;
64186 retry:
64187 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64188@@ -3633,10 +3764,21 @@ retry:
64189 error = -ENOENT;
64190 goto exit3;
64191 }
64192+
64193+ saved_ino = gr_get_ino_from_dentry(dentry);
64194+ saved_dev = gr_get_dev_from_dentry(dentry);
64195+
64196+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
64197+ error = -EACCES;
64198+ goto exit3;
64199+ }
64200+
64201 error = security_path_rmdir(&nd.path, dentry);
64202 if (error)
64203 goto exit3;
64204 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
64205+ if (!error && (saved_dev || saved_ino))
64206+ gr_handle_delete(saved_ino, saved_dev);
64207 exit3:
64208 dput(dentry);
64209 exit2:
64210@@ -3729,6 +3871,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
64211 struct nameidata nd;
64212 struct inode *inode = NULL;
64213 struct inode *delegated_inode = NULL;
64214+ u64 saved_ino = 0;
64215+ dev_t saved_dev = 0;
64216 unsigned int lookup_flags = 0;
64217 retry:
64218 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64219@@ -3755,10 +3899,22 @@ retry_deleg:
64220 if (d_is_negative(dentry))
64221 goto slashes;
64222 ihold(inode);
64223+
64224+ if (inode->i_nlink <= 1) {
64225+ saved_ino = gr_get_ino_from_dentry(dentry);
64226+ saved_dev = gr_get_dev_from_dentry(dentry);
64227+ }
64228+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
64229+ error = -EACCES;
64230+ goto exit2;
64231+ }
64232+
64233 error = security_path_unlink(&nd.path, dentry);
64234 if (error)
64235 goto exit2;
64236 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
64237+ if (!error && (saved_ino || saved_dev))
64238+ gr_handle_delete(saved_ino, saved_dev);
64239 exit2:
64240 dput(dentry);
64241 }
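
The rmdir and unlink hunks above share one pattern: grsecurity snapshots the victim's inode number and device (gr_get_ino_from_dentry()/gr_get_dev_from_dentry()) before calling into the VFS, then logs the deletion only if the operation succeeded, since the object may be gone afterwards; for unlink the snapshot is taken only when i_nlink <= 1, i.e. when this really is the last link. A rough userspace sketch of the capture-before, log-after-success ordering (the kernel version has no race here because it reads from a dentry it already holds; this stat()+unlink() pair is only a model):

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* Sketch of the saved_ino/saved_dev idiom: capture identifying state before
 * the destructive call (afterwards the object is gone), log only on success. */
static int unlink_and_log(const char *path)
{
	struct stat st;
	unsigned long long saved_ino = 0, saved_dev = 0;

	if (stat(path, &st) == 0) {
		saved_ino = st.st_ino;
		saved_dev = st.st_dev;
	}
	if (unlink(path) != 0)
		return -1;
	if (saved_ino || saved_dev)
		fprintf(stderr, "deleted ino %llu on dev %llu\n",
			saved_ino, saved_dev);
	return 0;
}

int main(int argc, char **argv)
{
	return argc > 1 ? unlink_and_log(argv[1]) : 0;
}
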
64242@@ -3847,9 +4003,17 @@ retry:
64243 if (IS_ERR(dentry))
64244 goto out_putname;
64245
64246+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
64247+ error = -EACCES;
64248+ goto out;
64249+ }
64250+
64251 error = security_path_symlink(&path, dentry, from->name);
64252 if (!error)
64253 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
64254+ if (!error)
64255+ gr_handle_create(dentry, path.mnt);
64256+out:
64257 done_path_create(&path, dentry);
64258 if (retry_estale(error, lookup_flags)) {
64259 lookup_flags |= LOOKUP_REVAL;
64260@@ -3953,6 +4117,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
64261 struct dentry *new_dentry;
64262 struct path old_path, new_path;
64263 struct inode *delegated_inode = NULL;
64264+ struct filename *to = NULL;
64265 int how = 0;
64266 int error;
64267
64268@@ -3976,7 +4141,7 @@ retry:
64269 if (error)
64270 return error;
64271
64272- new_dentry = user_path_create(newdfd, newname, &new_path,
64273+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
64274 (how & LOOKUP_REVAL));
64275 error = PTR_ERR(new_dentry);
64276 if (IS_ERR(new_dentry))
64277@@ -3988,11 +4153,28 @@ retry:
64278 error = may_linkat(&old_path);
64279 if (unlikely(error))
64280 goto out_dput;
64281+
64282+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
64283+ old_path.dentry->d_inode,
64284+ old_path.dentry->d_inode->i_mode, to)) {
64285+ error = -EACCES;
64286+ goto out_dput;
64287+ }
64288+
64289+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
64290+ old_path.dentry, old_path.mnt, to)) {
64291+ error = -EACCES;
64292+ goto out_dput;
64293+ }
64294+
64295 error = security_path_link(old_path.dentry, &new_path, new_dentry);
64296 if (error)
64297 goto out_dput;
64298 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
64299+ if (!error)
64300+ gr_handle_create(new_dentry, new_path.mnt);
64301 out_dput:
64302+ putname(to);
64303 done_path_create(&new_path, new_dentry);
64304 if (delegated_inode) {
64305 error = break_deleg_wait(&delegated_inode);
64306@@ -4308,6 +4490,20 @@ retry_deleg:
64307 if (new_dentry == trap)
64308 goto exit5;
64309
64310+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
64311+		/* return -EXDEV so that 'mv' falls back to its
64312+		 * copy-and-delete method, for usability
64313+		 */
64314+ error = -EXDEV;
64315+ goto exit5;
64316+ }
64317+
64318+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
64319+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
64320+ to, flags);
64321+ if (error)
64322+ goto exit5;
64323+
64324 error = security_path_rename(&oldnd.path, old_dentry,
64325 &newnd.path, new_dentry, flags);
64326 if (error)
64327@@ -4315,6 +4511,9 @@ retry_deleg:
64328 error = vfs_rename(old_dir->d_inode, old_dentry,
64329 new_dir->d_inode, new_dentry,
64330 &delegated_inode, flags);
64331+ if (!error)
64332+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
64333+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
64334 exit5:
64335 dput(new_dentry);
64336 exit4:
64337@@ -4371,14 +4570,24 @@ EXPORT_SYMBOL(vfs_whiteout);
64338
64339 int readlink_copy(char __user *buffer, int buflen, const char *link)
64340 {
64341+ char tmpbuf[64];
64342+ const char *newlink;
64343 int len = PTR_ERR(link);
64344+
64345 if (IS_ERR(link))
64346 goto out;
64347
64348 len = strlen(link);
64349 if (len > (unsigned) buflen)
64350 len = buflen;
64351- if (copy_to_user(buffer, link, len))
64352+
64353+ if (len < sizeof(tmpbuf)) {
64354+ memcpy(tmpbuf, link, len);
64355+ newlink = tmpbuf;
64356+ } else
64357+ newlink = link;
64358+
64359+ if (copy_to_user(buffer, newlink, len))
64360 len = -EFAULT;
64361 out:
64362 return len;
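
The readlink_copy() change above stages short link targets in a 64-byte stack buffer before the copy_to_user(), falling back to the original pointer for longer targets. The patch carries no comment, but the plausible reading is that the copy-out source becomes a small fixed-size local object rather than a pointer into inode-owned storage, which interacts better with PaX's USERCOPY object-size checking. A userspace model, with memcpy() standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

/* Model of the readlink_copy() bounce buffer: short payloads are staged in a
 * small stack buffer so the copy source is a fixed-size local object. */
static int copy_link(char *dst, size_t buflen, const char *link)
{
	char tmpbuf[64];
	const char *src = link;
	size_t len = strlen(link);

	if (len > buflen)
		len = buflen;
	if (len < sizeof(tmpbuf)) {
		memcpy(tmpbuf, link, len);
		src = tmpbuf;
	}
	memcpy(dst, src, len);		/* copy_to_user() stand-in */
	return (int)len;
}

int main(void)
{
	char out[128] = { 0 };
	int n = copy_link(out, sizeof(out) - 1, "/tmp/target");

	printf("%d bytes: %.*s\n", n, n, out);
	return 0;
}
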
64363diff --git a/fs/namespace.c b/fs/namespace.c
64364index cd1e968..e64ff16 100644
64365--- a/fs/namespace.c
64366+++ b/fs/namespace.c
64367@@ -1448,6 +1448,9 @@ static int do_umount(struct mount *mnt, int flags)
64368 if (!(sb->s_flags & MS_RDONLY))
64369 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
64370 up_write(&sb->s_umount);
64371+
64372+ gr_log_remount(mnt->mnt_devname, retval);
64373+
64374 return retval;
64375 }
64376
64377@@ -1470,6 +1473,9 @@ static int do_umount(struct mount *mnt, int flags)
64378 }
64379 unlock_mount_hash();
64380 namespace_unlock();
64381+
64382+ gr_log_unmount(mnt->mnt_devname, retval);
64383+
64384 return retval;
64385 }
64386
64387@@ -1520,7 +1526,7 @@ static inline bool may_mount(void)
64388 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
64389 */
64390
64391-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
64392+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
64393 {
64394 struct path path;
64395 struct mount *mnt;
64396@@ -1565,7 +1571,7 @@ out:
64397 /*
64398 * The 2.0 compatible umount. No flags.
64399 */
64400-SYSCALL_DEFINE1(oldumount, char __user *, name)
64401+SYSCALL_DEFINE1(oldumount, const char __user *, name)
64402 {
64403 return sys_umount(name, 0);
64404 }
64405@@ -2631,6 +2637,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64406 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
64407 MS_STRICTATIME);
64408
64409+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
64410+ retval = -EPERM;
64411+ goto dput_out;
64412+ }
64413+
64414+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
64415+ retval = -EPERM;
64416+ goto dput_out;
64417+ }
64418+
64419 if (flags & MS_REMOUNT)
64420 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
64421 data_page);
64422@@ -2644,7 +2660,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64423 retval = do_new_mount(&path, type_page, flags, mnt_flags,
64424 dev_name, data_page);
64425 dput_out:
64426+ gr_log_mount(dev_name, &path, retval);
64427+
64428 path_put(&path);
64429+
64430 return retval;
64431 }
64432
64433@@ -2662,7 +2681,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
64434 * number incrementing at 10Ghz will take 12,427 years to wrap which
64435 * is effectively never, so we can ignore the possibility.
64436 */
64437-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
64438+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
64439
64440 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64441 {
64442@@ -2678,7 +2697,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64443 return ERR_PTR(ret);
64444 }
64445 new_ns->ns.ops = &mntns_operations;
64446- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
64447+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
64448 atomic_set(&new_ns->count, 1);
64449 new_ns->root = NULL;
64450 INIT_LIST_HEAD(&new_ns->list);
64451@@ -2688,7 +2707,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64452 return new_ns;
64453 }
64454
64455-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64456+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64457 struct user_namespace *user_ns, struct fs_struct *new_fs)
64458 {
64459 struct mnt_namespace *new_ns;
64460@@ -2809,8 +2828,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
64461 }
64462 EXPORT_SYMBOL(mount_subtree);
64463
64464-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
64465- char __user *, type, unsigned long, flags, void __user *, data)
64466+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
64467+ const char __user *, type, unsigned long, flags, void __user *, data)
64468 {
64469 int ret;
64470 char *kernel_type;
64471@@ -2916,6 +2935,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
64472 if (error)
64473 goto out2;
64474
64475+ if (gr_handle_chroot_pivot()) {
64476+ error = -EPERM;
64477+ goto out2;
64478+ }
64479+
64480 get_fs_root(current->fs, &root);
64481 old_mp = lock_mount(&old);
64482 error = PTR_ERR(old_mp);
64483@@ -3190,7 +3214,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
64484 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
64485 return -EPERM;
64486
64487- if (fs->users != 1)
64488+ if (atomic_read(&fs->users) != 1)
64489 return -EINVAL;
64490
64491 get_mnt_ns(mnt_ns);
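
Several conversions above and below (mnt_ns_seq here, nfs_attr_generation_counter and the fsnotify cookie further down) swap atomic types for their _unchecked variants. Under PaX's REFCOUNT feature the ordinary atomic ops are instrumented to detect overflow; _unchecked opts a counter out of that where wraparound is provably harmless, exactly the argument the mnt_ns_seq comment makes. A C11 model of the checked/unchecked split (the detection here is a simplification of what REFCOUNT actually does; assumes a positive increment):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Default increment detects a wrap and aborts; the _unchecked variant is
 * reserved for counters, like mnt_ns_seq, where wrap is harmless. */
static long add_return_checked(atomic_long *v, long i)
{
	long old = atomic_fetch_add(v, i);	/* C11 atomics wrap, no UB */

	if (old > LONG_MAX - i)
		abort();			/* overflow: refuse to continue */
	return old + i;
}

static long add_return_unchecked(atomic_long *v, long i)
{
	return atomic_fetch_add(v, i) + i;	/* wrap silently allowed */
}

int main(void)
{
	atomic_long seq = 1;

	printf("seq=%ld\n", add_return_unchecked(&seq, 1));	/* 2 */
	printf("seq=%ld\n", add_return_checked(&seq, 1));	/* 3 */
	return 0;
}
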
64492diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
64493index 02f8d09..a5c25d1 100644
64494--- a/fs/nfs/callback_xdr.c
64495+++ b/fs/nfs/callback_xdr.c
64496@@ -51,7 +51,7 @@ struct callback_op {
64497 callback_decode_arg_t decode_args;
64498 callback_encode_res_t encode_res;
64499 long res_maxsize;
64500-};
64501+} __do_const;
64502
64503 static struct callback_op callback_ops[];
64504
64505diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
64506index 2211f6b..30d0950 100644
64507--- a/fs/nfs/inode.c
64508+++ b/fs/nfs/inode.c
64509@@ -1234,16 +1234,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
64510 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
64511 }
64512
64513-static atomic_long_t nfs_attr_generation_counter;
64514+static atomic_long_unchecked_t nfs_attr_generation_counter;
64515
64516 static unsigned long nfs_read_attr_generation_counter(void)
64517 {
64518- return atomic_long_read(&nfs_attr_generation_counter);
64519+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
64520 }
64521
64522 unsigned long nfs_inc_attr_generation_counter(void)
64523 {
64524- return atomic_long_inc_return(&nfs_attr_generation_counter);
64525+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
64526 }
64527
64528 void nfs_fattr_init(struct nfs_fattr *fattr)
64529diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
64530index ac71d13..a2e590a 100644
64531--- a/fs/nfsd/nfs4proc.c
64532+++ b/fs/nfsd/nfs4proc.c
64533@@ -1237,7 +1237,7 @@ struct nfsd4_operation {
64534 nfsd4op_rsize op_rsize_bop;
64535 stateid_getter op_get_currentstateid;
64536 stateid_setter op_set_currentstateid;
64537-};
64538+} __do_const;
64539
64540 static struct nfsd4_operation nfsd4_ops[];
64541
64542diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
64543index 15f7b73..00e230b 100644
64544--- a/fs/nfsd/nfs4xdr.c
64545+++ b/fs/nfsd/nfs4xdr.c
64546@@ -1560,7 +1560,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
64547
64548 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
64549
64550-static nfsd4_dec nfsd4_dec_ops[] = {
64551+static const nfsd4_dec nfsd4_dec_ops[] = {
64552 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
64553 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
64554 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
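
Three hunks above harden NFS dispatch tables: struct callback_op and struct nfsd4_operation gain __do_const (as we understand it, a grsecurity marker consumed by the constify gcc plugin that forces instances into read-only memory), and nfsd4_dec_ops becomes const outright. Either way the function-pointer arrays land in .rodata, out of reach of a stray or attacker-controlled write. Plain const is enough to get that effect in ordinary C:

#include <stdio.h>

/* Once the dispatch array is const (or __do_const in grsecurity), it is
 * placed in .rodata and the function pointers cannot be retargeted. */
typedef int (*op_fn)(int);

static int op_read(int a)  { return a + 1; }
static int op_write(int a) { return a - 1; }

static const op_fn ops[] = { op_read, op_write };

int main(void)
{
	/* ops[0] = op_write;	<- now a compile-time error */
	printf("%d\n", ops[0](41));
	return 0;
}
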
64555diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
64556index 83a9694..6b7f928 100644
64557--- a/fs/nfsd/nfscache.c
64558+++ b/fs/nfsd/nfscache.c
64559@@ -537,7 +537,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64560 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
64561 u32 hash;
64562 struct nfsd_drc_bucket *b;
64563- int len;
64564+ long len;
64565 size_t bufsize = 0;
64566
64567 if (!rp)
64568@@ -546,11 +546,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64569 hash = nfsd_cache_hash(rp->c_xid);
64570 b = &drc_hashtbl[hash];
64571
64572- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
64573- len >>= 2;
64574+ if (statp) {
64575+ len = (char*)statp - (char*)resv->iov_base;
64576+ len = resv->iov_len - len;
64577+ len >>= 2;
64578+ }
64579
64580 /* Don't cache excessive amounts of data and XDR failures */
64581- if (!statp || len > (256 >> 2)) {
64582+ if (!statp || len > (256 >> 2) || len < 0) {
64583 nfsd_reply_cache_free(b, rp);
64584 return;
64585 }
64586@@ -558,7 +561,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64587 switch (cachetype) {
64588 case RC_REPLSTAT:
64589 if (len != 1)
64590- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
64591+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
64592 rp->c_replstat = *statp;
64593 break;
64594 case RC_REPLBUFF:
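
The nfsd_cache_update() hunk above fixes fragile length arithmetic: len was an int computed from iov_len minus a pointer difference, which can go negative and, at int width, truncate. The patch widens it to long, computes it only when statp is non-NULL, and rejects negative values before caching. A standalone model of why the sign check matters (arithmetic right shift of a negative value is assumed, as the kernel code also assumes):

#include <stdio.h>

/* Length math derived from pointer differences must stay in a signed, wide
 * type so a bogus reply is caught as len < 0 instead of wrapping into a
 * huge unsigned value. */
static int cacheable_len(long iov_len, long consumed)
{
	long len = iov_len - consumed;	/* can legitimately go negative */

	len >>= 2;			/* bytes -> XDR words */
	if (len > (256 >> 2) || len < 0)
		return -1;		/* don't cache */
	return (int)len;
}

int main(void)
{
	printf("%d\n", cacheable_len(64, 8));	/* 14 words, cacheable */
	printf("%d\n", cacheable_len(64, 80));	/* negative: rejected */
	return 0;
}
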
64595diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
64596index 5685c67..73029ef 100644
64597--- a/fs/nfsd/vfs.c
64598+++ b/fs/nfsd/vfs.c
64599@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
64600
64601 oldfs = get_fs();
64602 set_fs(KERNEL_DS);
64603- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
64604+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
64605 set_fs(oldfs);
64606 return nfsd_finish_read(file, count, host_err);
64607 }
64608@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
64609
64610 /* Write the data. */
64611 oldfs = get_fs(); set_fs(KERNEL_DS);
64612- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
64613+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
64614 set_fs(oldfs);
64615 if (host_err < 0)
64616 goto out_nfserr;
64617@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
64618 */
64619
64620 oldfs = get_fs(); set_fs(KERNEL_DS);
64621- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
64622+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
64623 set_fs(oldfs);
64624
64625 if (host_err < 0)
64626diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
64627index 52ccd34..7a6b202 100644
64628--- a/fs/nls/nls_base.c
64629+++ b/fs/nls/nls_base.c
64630@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
64631
64632 int __register_nls(struct nls_table *nls, struct module *owner)
64633 {
64634- struct nls_table ** tmp = &tables;
64635+ struct nls_table *tmp = tables;
64636
64637 if (nls->next)
64638 return -EBUSY;
64639
64640- nls->owner = owner;
64641+ pax_open_kernel();
64642+ *(void **)&nls->owner = owner;
64643+ pax_close_kernel();
64644 spin_lock(&nls_lock);
64645- while (*tmp) {
64646- if (nls == *tmp) {
64647+ while (tmp) {
64648+ if (nls == tmp) {
64649 spin_unlock(&nls_lock);
64650 return -EBUSY;
64651 }
64652- tmp = &(*tmp)->next;
64653+ tmp = tmp->next;
64654 }
64655- nls->next = tables;
64656+ pax_open_kernel();
64657+ *(struct nls_table **)&nls->next = tables;
64658+ pax_close_kernel();
64659 tables = nls;
64660 spin_unlock(&nls_lock);
64661 return 0;
64662@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
64663
64664 int unregister_nls(struct nls_table * nls)
64665 {
64666- struct nls_table ** tmp = &tables;
64667+ struct nls_table * const * tmp = &tables;
64668
64669 spin_lock(&nls_lock);
64670 while (*tmp) {
64671 if (nls == *tmp) {
64672- *tmp = nls->next;
64673+ pax_open_kernel();
64674+ *(struct nls_table **)tmp = nls->next;
64675+ pax_close_kernel();
64676 spin_unlock(&nls_lock);
64677 return 0;
64678 }
64679@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
64680 return -EINVAL;
64681 }
64682
64683-static struct nls_table *find_nls(char *charset)
64684+static struct nls_table *find_nls(const char *charset)
64685 {
64686 struct nls_table *nls;
64687 spin_lock(&nls_lock);
64688@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
64689 return nls;
64690 }
64691
64692-struct nls_table *load_nls(char *charset)
64693+struct nls_table *load_nls(const char *charset)
64694 {
64695 return try_then_request_module(find_nls(charset), "nls_%s", charset);
64696 }
64697diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
64698index 162b3f1..6076a7c 100644
64699--- a/fs/nls/nls_euc-jp.c
64700+++ b/fs/nls/nls_euc-jp.c
64701@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
64702 p_nls = load_nls("cp932");
64703
64704 if (p_nls) {
64705- table.charset2upper = p_nls->charset2upper;
64706- table.charset2lower = p_nls->charset2lower;
64707+ pax_open_kernel();
64708+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64709+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64710+ pax_close_kernel();
64711 return register_nls(&table);
64712 }
64713
64714diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
64715index a80a741..7b96e1b 100644
64716--- a/fs/nls/nls_koi8-ru.c
64717+++ b/fs/nls/nls_koi8-ru.c
64718@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
64719 p_nls = load_nls("koi8-u");
64720
64721 if (p_nls) {
64722- table.charset2upper = p_nls->charset2upper;
64723- table.charset2lower = p_nls->charset2lower;
64724+ pax_open_kernel();
64725+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64726+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64727+ pax_close_kernel();
64728 return register_nls(&table);
64729 }
64730
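
The NLS hunks above route writes to nls->owner, nls->next, and the charset2upper/charset2lower pointers through pax_open_kernel()/pax_close_kernel(). With these structures constified they live in read-only memory, so PaX briefly lifts kernel write protection (e.g. the CR0.WP toggle on x86) around each legitimate update. A userspace model of the open/close window using mprotect() (illustrative only; the kernel does not use mprotect for this):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Data normally lives in a read-only mapping and is made writable only for
 * the duration of a legitimate update. */
static void open_window(void *p)  { mprotect(p, getpagesize(), PROT_READ | PROT_WRITE); }
static void close_window(void *p) { mprotect(p, getpagesize(), PROT_READ); }

int main(void)
{
	char *page = mmap(NULL, getpagesize(), PROT_READ,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (page == MAP_FAILED)
		return 1;

	open_window(page);
	strcpy(page, "module owner");	/* the brief writable window */
	close_window(page);

	printf("%s\n", page);		/* still readable; writes now fault */
	return 0;
}
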
64731diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
64732index bff8567..83281c6 100644
64733--- a/fs/notify/fanotify/fanotify_user.c
64734+++ b/fs/notify/fanotify/fanotify_user.c
64735@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
64736
64737 fd = fanotify_event_metadata.fd;
64738 ret = -EFAULT;
64739- if (copy_to_user(buf, &fanotify_event_metadata,
64740- fanotify_event_metadata.event_len))
64741+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
64742+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
64743 goto out_close_fd;
64744
64745 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
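
The fanotify_user.c change above distrusts the event_len field embedded in the very structure being copied out: if a bug or corruption inflated it beyond sizeof(fanotify_event_metadata), copy_to_user() would leak adjacent kernel stack. The added bound rejects that case outright. A minimal model of the clamp-before-copy rule (struct layout and names are illustrative):

#include <stdio.h>
#include <string.h>

struct event_metadata {
	unsigned int event_len;
	char payload[28];
};

/* Never trust a length field inside the object about to be copied out;
 * check it against sizeof() first so it cannot over-read the source. */
static int copy_event(char *dst, size_t dstlen, const struct event_metadata *ev)
{
	if (ev->event_len > sizeof(*ev) || ev->event_len > dstlen)
		return -1;		/* reject instead of over-reading */
	memcpy(dst, ev, ev->event_len);	/* copy_to_user() stand-in */
	return 0;
}

int main(void)
{
	struct event_metadata ev = { .event_len = sizeof(ev) };
	char out[64];

	printf("ok=%d\n", copy_event(out, sizeof(out), &ev) == 0);
	ev.event_len = 4096;		/* corrupted length is now rejected */
	printf("ok=%d\n", copy_event(out, sizeof(out), &ev) == 0);
	return 0;
}
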
64746diff --git a/fs/notify/notification.c b/fs/notify/notification.c
64747index a95d8e0..a91a5fd 100644
64748--- a/fs/notify/notification.c
64749+++ b/fs/notify/notification.c
64750@@ -48,7 +48,7 @@
64751 #include <linux/fsnotify_backend.h>
64752 #include "fsnotify.h"
64753
64754-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64755+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64756
64757 /**
64758 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
64759@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64760 */
64761 u32 fsnotify_get_cookie(void)
64762 {
64763- return atomic_inc_return(&fsnotify_sync_cookie);
64764+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
64765 }
64766 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
64767
64768diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
64769index 9e38daf..5727cae 100644
64770--- a/fs/ntfs/dir.c
64771+++ b/fs/ntfs/dir.c
64772@@ -1310,7 +1310,7 @@ find_next_index_buffer:
64773 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
64774 ~(s64)(ndir->itype.index.block_size - 1)));
64775 /* Bounds checks. */
64776- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
64777+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
64778 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
64779 "inode 0x%lx or driver bug.", vdir->i_ino);
64780 goto err_out;
64781diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
64782index 643faa4..ef9027e 100644
64783--- a/fs/ntfs/file.c
64784+++ b/fs/ntfs/file.c
64785@@ -1280,7 +1280,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
64786 char *addr;
64787 size_t total = 0;
64788 unsigned len;
64789- int left;
64790+ unsigned left;
64791
64792 do {
64793 len = PAGE_CACHE_SIZE - ofs;
64794diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
64795index 9e1e112..241a52a 100644
64796--- a/fs/ntfs/super.c
64797+++ b/fs/ntfs/super.c
64798@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64799 if (!silent)
64800 ntfs_error(sb, "Primary boot sector is invalid.");
64801 } else if (!silent)
64802- ntfs_error(sb, read_err_str, "primary");
64803+ ntfs_error(sb, read_err_str, "%s", "primary");
64804 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
64805 if (bh_primary)
64806 brelse(bh_primary);
64807@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64808 goto hotfix_primary_boot_sector;
64809 brelse(bh_backup);
64810 } else if (!silent)
64811- ntfs_error(sb, read_err_str, "backup");
64812+ ntfs_error(sb, read_err_str, "%s", "backup");
64813 /* Try to read NT3.51- backup boot sector. */
64814 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
64815 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
64816@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64817 "sector.");
64818 brelse(bh_backup);
64819 } else if (!silent)
64820- ntfs_error(sb, read_err_str, "backup");
64821+ ntfs_error(sb, read_err_str, "%s", "backup");
64822 /* We failed. Cleanup and return. */
64823 if (bh_primary)
64824 brelse(bh_primary);
64825diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
64826index 0440134..d52c93a 100644
64827--- a/fs/ocfs2/localalloc.c
64828+++ b/fs/ocfs2/localalloc.c
64829@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
64830 goto bail;
64831 }
64832
64833- atomic_inc(&osb->alloc_stats.moves);
64834+ atomic_inc_unchecked(&osb->alloc_stats.moves);
64835
64836 bail:
64837 if (handle)
64838diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
64839index 7d6b7d0..5fb529a 100644
64840--- a/fs/ocfs2/ocfs2.h
64841+++ b/fs/ocfs2/ocfs2.h
64842@@ -242,11 +242,11 @@ enum ocfs2_vol_state
64843
64844 struct ocfs2_alloc_stats
64845 {
64846- atomic_t moves;
64847- atomic_t local_data;
64848- atomic_t bitmap_data;
64849- atomic_t bg_allocs;
64850- atomic_t bg_extends;
64851+ atomic_unchecked_t moves;
64852+ atomic_unchecked_t local_data;
64853+ atomic_unchecked_t bitmap_data;
64854+ atomic_unchecked_t bg_allocs;
64855+ atomic_unchecked_t bg_extends;
64856 };
64857
64858 enum ocfs2_local_alloc_state
64859diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
64860index 0cb889a..6a26b24 100644
64861--- a/fs/ocfs2/suballoc.c
64862+++ b/fs/ocfs2/suballoc.c
64863@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
64864 mlog_errno(status);
64865 goto bail;
64866 }
64867- atomic_inc(&osb->alloc_stats.bg_extends);
64868+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
64869
64870 /* You should never ask for this much metadata */
64871 BUG_ON(bits_wanted >
64872@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
64873 mlog_errno(status);
64874 goto bail;
64875 }
64876- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64877+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64878
64879 *suballoc_loc = res.sr_bg_blkno;
64880 *suballoc_bit_start = res.sr_bit_offset;
64881@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
64882 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
64883 res->sr_bits);
64884
64885- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64886+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64887
64888 BUG_ON(res->sr_bits != 1);
64889
64890@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
64891 mlog_errno(status);
64892 goto bail;
64893 }
64894- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64895+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64896
64897 BUG_ON(res.sr_bits != 1);
64898
64899@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
64900 cluster_start,
64901 num_clusters);
64902 if (!status)
64903- atomic_inc(&osb->alloc_stats.local_data);
64904+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
64905 } else {
64906 if (min_clusters > (osb->bitmap_cpg - 1)) {
64907 /* The only paths asking for contiguousness
64908@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
64909 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
64910 res.sr_bg_blkno,
64911 res.sr_bit_offset);
64912- atomic_inc(&osb->alloc_stats.bitmap_data);
64913+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
64914 *num_clusters = res.sr_bits;
64915 }
64916 }
64917diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
64918index 8372317..ec86e79 100644
64919--- a/fs/ocfs2/super.c
64920+++ b/fs/ocfs2/super.c
64921@@ -306,11 +306,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
64922 "%10s => GlobalAllocs: %d LocalAllocs: %d "
64923 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
64924 "Stats",
64925- atomic_read(&osb->alloc_stats.bitmap_data),
64926- atomic_read(&osb->alloc_stats.local_data),
64927- atomic_read(&osb->alloc_stats.bg_allocs),
64928- atomic_read(&osb->alloc_stats.moves),
64929- atomic_read(&osb->alloc_stats.bg_extends));
64930+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
64931+ atomic_read_unchecked(&osb->alloc_stats.local_data),
64932+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
64933+ atomic_read_unchecked(&osb->alloc_stats.moves),
64934+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
64935
64936 out += snprintf(buf + out, len - out,
64937 "%10s => State: %u Descriptor: %llu Size: %u bits "
64938@@ -2113,11 +2113,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
64939
64940 mutex_init(&osb->system_file_mutex);
64941
64942- atomic_set(&osb->alloc_stats.moves, 0);
64943- atomic_set(&osb->alloc_stats.local_data, 0);
64944- atomic_set(&osb->alloc_stats.bitmap_data, 0);
64945- atomic_set(&osb->alloc_stats.bg_allocs, 0);
64946- atomic_set(&osb->alloc_stats.bg_extends, 0);
64947+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
64948+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
64949+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
64950+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
64951+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
64952
64953 /* Copy the blockcheck stats from the superblock probe */
64954 osb->osb_ecc_stats = *stats;
64955diff --git a/fs/open.c b/fs/open.c
64956index 813be03..781941d 100644
64957--- a/fs/open.c
64958+++ b/fs/open.c
64959@@ -32,6 +32,8 @@
64960 #include <linux/dnotify.h>
64961 #include <linux/compat.h>
64962
64963+#define CREATE_TRACE_POINTS
64964+#include <trace/events/fs.h>
64965 #include "internal.h"
64966
64967 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
64968@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
64969 error = locks_verify_truncate(inode, NULL, length);
64970 if (!error)
64971 error = security_path_truncate(path);
64972+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
64973+ error = -EACCES;
64974 if (!error)
64975 error = do_truncate(path->dentry, length, 0, NULL);
64976
64977@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
64978 error = locks_verify_truncate(inode, f.file, length);
64979 if (!error)
64980 error = security_path_truncate(&f.file->f_path);
64981+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
64982+ error = -EACCES;
64983 if (!error)
64984 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
64985 sb_end_write(inode->i_sb);
64986@@ -392,6 +398,9 @@ retry:
64987 if (__mnt_is_readonly(path.mnt))
64988 res = -EROFS;
64989
64990+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
64991+ res = -EACCES;
64992+
64993 out_path_release:
64994 path_put(&path);
64995 if (retry_estale(res, lookup_flags)) {
64996@@ -423,6 +432,8 @@ retry:
64997 if (error)
64998 goto dput_and_out;
64999
65000+ gr_log_chdir(path.dentry, path.mnt);
65001+
65002 set_fs_pwd(current->fs, &path);
65003
65004 dput_and_out:
65005@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
65006 goto out_putf;
65007
65008 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
65009+
65010+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
65011+ error = -EPERM;
65012+
65013+ if (!error)
65014+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
65015+
65016 if (!error)
65017 set_fs_pwd(current->fs, &f.file->f_path);
65018 out_putf:
65019@@ -481,7 +499,13 @@ retry:
65020 if (error)
65021 goto dput_and_out;
65022
65023+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
65024+ goto dput_and_out;
65025+
65026 set_fs_root(current->fs, &path);
65027+
65028+ gr_handle_chroot_chdir(&path);
65029+
65030 error = 0;
65031 dput_and_out:
65032 path_put(&path);
65033@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
65034 return error;
65035 retry_deleg:
65036 mutex_lock(&inode->i_mutex);
65037+
65038+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
65039+ error = -EACCES;
65040+ goto out_unlock;
65041+ }
65042+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
65043+ error = -EACCES;
65044+ goto out_unlock;
65045+ }
65046+
65047 error = security_path_chmod(path, mode);
65048 if (error)
65049 goto out_unlock;
65050@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
65051 uid = make_kuid(current_user_ns(), user);
65052 gid = make_kgid(current_user_ns(), group);
65053
65054+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
65055+ return -EACCES;
65056+
65057 newattrs.ia_valid = ATTR_CTIME;
65058 if (user != (uid_t) -1) {
65059 if (!uid_valid(uid))
65060@@ -1014,6 +1051,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
65061 } else {
65062 fsnotify_open(f);
65063 fd_install(fd, f);
65064+ trace_do_sys_open(tmp->name, flags, mode);
65065 }
65066 }
65067 putname(tmp);
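
The fs/open.c hunks above follow one consistent shape: each grsecurity check runs only when everything before it passed (if (!error && !gr_...) error = -EACCES;), so the DAC check, the LSM hook, and the RBAC check are effectively ANDed together, the first refusal is preserved, and a later layer can only deny, never grant. A compact model of that ordering (the three predicates are stand-ins):

#include <errno.h>
#include <stdio.h>

/* Each layer runs only if everything before it passed; the first refusal
 * is kept, and later layers can deny but never grant. */
static int dac_ok(void)  { return 1; }
static int lsm_ok(void)  { return 1; }
static int rbac_ok(void) { return 0; }	/* the extra grsecurity layer says no */

static int truncate_model(void)
{
	int error = 0;

	if (!dac_ok())
		error = -EPERM;
	if (!error && !lsm_ok())	/* security_path_truncate() */
		error = -EACCES;
	if (!error && !rbac_ok())	/* gr_acl_handle_truncate() */
		error = -EACCES;
	return error;
}

int main(void)
{
	printf("result: %d\n", truncate_model());	/* -13 (-EACCES) */
	return 0;
}
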
65068diff --git a/fs/pipe.c b/fs/pipe.c
65069index 21981e5..3d5f55c 100644
65070--- a/fs/pipe.c
65071+++ b/fs/pipe.c
65072@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
65073
65074 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
65075 {
65076- if (pipe->files)
65077+ if (atomic_read(&pipe->files))
65078 mutex_lock_nested(&pipe->mutex, subclass);
65079 }
65080
65081@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
65082
65083 void pipe_unlock(struct pipe_inode_info *pipe)
65084 {
65085- if (pipe->files)
65086+ if (atomic_read(&pipe->files))
65087 mutex_unlock(&pipe->mutex);
65088 }
65089 EXPORT_SYMBOL(pipe_unlock);
65090@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
65091 }
65092 if (bufs) /* More to do? */
65093 continue;
65094- if (!pipe->writers)
65095+ if (!atomic_read(&pipe->writers))
65096 break;
65097- if (!pipe->waiting_writers) {
65098+ if (!atomic_read(&pipe->waiting_writers)) {
65099 /* syscall merging: Usually we must not sleep
65100 * if O_NONBLOCK is set, or if we got some data.
65101 * But if a writer sleeps in kernel space, then
65102@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65103
65104 __pipe_lock(pipe);
65105
65106- if (!pipe->readers) {
65107+ if (!atomic_read(&pipe->readers)) {
65108 send_sig(SIGPIPE, current, 0);
65109 ret = -EPIPE;
65110 goto out;
65111@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65112 for (;;) {
65113 int bufs;
65114
65115- if (!pipe->readers) {
65116+ if (!atomic_read(&pipe->readers)) {
65117 send_sig(SIGPIPE, current, 0);
65118 if (!ret)
65119 ret = -EPIPE;
65120@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65121 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65122 do_wakeup = 0;
65123 }
65124- pipe->waiting_writers++;
65125+ atomic_inc(&pipe->waiting_writers);
65126 pipe_wait(pipe);
65127- pipe->waiting_writers--;
65128+ atomic_dec(&pipe->waiting_writers);
65129 }
65130 out:
65131 __pipe_unlock(pipe);
65132@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65133 mask = 0;
65134 if (filp->f_mode & FMODE_READ) {
65135 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
65136- if (!pipe->writers && filp->f_version != pipe->w_counter)
65137+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
65138 mask |= POLLHUP;
65139 }
65140
65141@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65142 * Most Unices do not set POLLERR for FIFOs but on Linux they
65143 * behave exactly like pipes for poll().
65144 */
65145- if (!pipe->readers)
65146+ if (!atomic_read(&pipe->readers))
65147 mask |= POLLERR;
65148 }
65149
65150@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
65151 int kill = 0;
65152
65153 spin_lock(&inode->i_lock);
65154- if (!--pipe->files) {
65155+ if (atomic_dec_and_test(&pipe->files)) {
65156 inode->i_pipe = NULL;
65157 kill = 1;
65158 }
65159@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
65160
65161 __pipe_lock(pipe);
65162 if (file->f_mode & FMODE_READ)
65163- pipe->readers--;
65164+ atomic_dec(&pipe->readers);
65165 if (file->f_mode & FMODE_WRITE)
65166- pipe->writers--;
65167+ atomic_dec(&pipe->writers);
65168
65169- if (pipe->readers || pipe->writers) {
65170+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
65171 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
65172 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65173 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
65174@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
65175 kfree(pipe);
65176 }
65177
65178-static struct vfsmount *pipe_mnt __read_mostly;
65179+struct vfsmount *pipe_mnt __read_mostly;
65180
65181 /*
65182 * pipefs_dname() is called from d_path().
65183@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
65184 goto fail_iput;
65185
65186 inode->i_pipe = pipe;
65187- pipe->files = 2;
65188- pipe->readers = pipe->writers = 1;
65189+ atomic_set(&pipe->files, 2);
65190+ atomic_set(&pipe->readers, 1);
65191+ atomic_set(&pipe->writers, 1);
65192 inode->i_fop = &pipefifo_fops;
65193
65194 /*
65195@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
65196 spin_lock(&inode->i_lock);
65197 if (inode->i_pipe) {
65198 pipe = inode->i_pipe;
65199- pipe->files++;
65200+ atomic_inc(&pipe->files);
65201 spin_unlock(&inode->i_lock);
65202 } else {
65203 spin_unlock(&inode->i_lock);
65204 pipe = alloc_pipe_info();
65205 if (!pipe)
65206 return -ENOMEM;
65207- pipe->files = 1;
65208+ atomic_set(&pipe->files, 1);
65209 spin_lock(&inode->i_lock);
65210 if (unlikely(inode->i_pipe)) {
65211- inode->i_pipe->files++;
65212+ atomic_inc(&inode->i_pipe->files);
65213 spin_unlock(&inode->i_lock);
65214 free_pipe_info(pipe);
65215 pipe = inode->i_pipe;
65216@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
65217 * opened, even when there is no process writing the FIFO.
65218 */
65219 pipe->r_counter++;
65220- if (pipe->readers++ == 0)
65221+ if (atomic_inc_return(&pipe->readers) == 1)
65222 wake_up_partner(pipe);
65223
65224- if (!is_pipe && !pipe->writers) {
65225+ if (!is_pipe && !atomic_read(&pipe->writers)) {
65226 if ((filp->f_flags & O_NONBLOCK)) {
65227 /* suppress POLLHUP until we have
65228 * seen a writer */
65229@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
65230 * errno=ENXIO when there is no process reading the FIFO.
65231 */
65232 ret = -ENXIO;
65233- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
65234+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
65235 goto err;
65236
65237 pipe->w_counter++;
65238- if (!pipe->writers++)
65239+ if (atomic_inc_return(&pipe->writers) == 1)
65240 wake_up_partner(pipe);
65241
65242- if (!is_pipe && !pipe->readers) {
65243+ if (!is_pipe && !atomic_read(&pipe->readers)) {
65244 if (wait_for_partner(pipe, &pipe->r_counter))
65245 goto err_wr;
65246 }
65247@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
65248 * the process can at least talk to itself.
65249 */
65250
65251- pipe->readers++;
65252- pipe->writers++;
65253+ atomic_inc(&pipe->readers);
65254+ atomic_inc(&pipe->writers);
65255 pipe->r_counter++;
65256 pipe->w_counter++;
65257- if (pipe->readers == 1 || pipe->writers == 1)
65258+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
65259 wake_up_partner(pipe);
65260 break;
65261
65262@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
65263 return 0;
65264
65265 err_rd:
65266- if (!--pipe->readers)
65267+ if (atomic_dec_and_test(&pipe->readers))
65268 wake_up_interruptible(&pipe->wait);
65269 ret = -ERESTARTSYS;
65270 goto err;
65271
65272 err_wr:
65273- if (!--pipe->writers)
65274+ if (atomic_dec_and_test(&pipe->writers))
65275 wake_up_interruptible(&pipe->wait);
65276 ret = -ERESTARTSYS;
65277 goto err;
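
The pipe.c conversion above turns the plain int fields readers, writers, waiting_writers, and files into atomic_t, making the bare ++/-- in fifo_open() and pipe_release() safe against racing paths; note how 'pipe->readers++ == 0' becomes 'atomic_inc_return(&pipe->readers) == 1', which tests the same "first reader" condition on the post-increment value. The C11 equivalent of that rewrite:

#include <stdatomic.h>
#include <stdio.h>

/* A plain counter++ is a read-modify-write that can lose updates under
 * concurrency; fetch_add(..) + 1 is the direct analogue of
 * atomic_inc_return() and makes each transition indivisible. */
static atomic_int readers;

static int first_reader(void)
{
	/* was: (readers++ == 0)  ->  now: (inc_return == 1) */
	return atomic_fetch_add(&readers, 1) + 1 == 1;
}

int main(void)
{
	printf("first? %d\n", first_reader());	/* 1 */
	printf("first? %d\n", first_reader());	/* 0 */
	return 0;
}
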
65278diff --git a/fs/posix_acl.c b/fs/posix_acl.c
65279index 0855f77..6787d50 100644
65280--- a/fs/posix_acl.c
65281+++ b/fs/posix_acl.c
65282@@ -20,6 +20,7 @@
65283 #include <linux/xattr.h>
65284 #include <linux/export.h>
65285 #include <linux/user_namespace.h>
65286+#include <linux/grsecurity.h>
65287
65288 struct posix_acl **acl_by_type(struct inode *inode, int type)
65289 {
65290@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
65291 }
65292 }
65293 if (mode_p)
65294- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65295+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65296 return not_equiv;
65297 }
65298 EXPORT_SYMBOL(posix_acl_equiv_mode);
65299@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
65300 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
65301 }
65302
65303- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65304+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65305 return not_equiv;
65306 }
65307
65308@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
65309 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
65310 int err = -ENOMEM;
65311 if (clone) {
65312+ *mode_p &= ~gr_acl_umask();
65313+
65314 err = posix_acl_create_masq(clone, mode_p);
65315 if (err < 0) {
65316 posix_acl_release(clone);
65317@@ -659,11 +662,12 @@ struct posix_acl *
65318 posix_acl_from_xattr(struct user_namespace *user_ns,
65319 const void *value, size_t size)
65320 {
65321- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
65322- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
65323+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
65324+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
65325 int count;
65326 struct posix_acl *acl;
65327 struct posix_acl_entry *acl_e;
65328+ umode_t umask = gr_acl_umask();
65329
65330 if (!value)
65331 return NULL;
65332@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65333
65334 switch(acl_e->e_tag) {
65335 case ACL_USER_OBJ:
65336+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65337+ break;
65338 case ACL_GROUP_OBJ:
65339 case ACL_MASK:
65340+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65341+ break;
65342 case ACL_OTHER:
65343+ acl_e->e_perm &= ~(umask & S_IRWXO);
65344 break;
65345
65346 case ACL_USER:
65347+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65348 acl_e->e_uid =
65349 make_kuid(user_ns,
65350 le32_to_cpu(entry->e_id));
65351@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65352 goto fail;
65353 break;
65354 case ACL_GROUP:
65355+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65356 acl_e->e_gid =
65357 make_kgid(user_ns,
65358 le32_to_cpu(entry->e_id));
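
In posix_acl.c above, gr_acl_umask() (grsecurity's per-role umask) is folded into every mode and ACL-entry computation: the user, group, and other portions of the umask are shifted down to align with the 3-bit e_perm field of the matching entry class. A worked example of that bit alignment, with 027 as a stand-in umask:

#include <stdio.h>
#include <sys/stat.h>

/* The three rwx classes of the umask are shifted so they line up with the
 * 3-bit e_perm field of the matching ACL entry class. */
int main(void)
{
	unsigned int umask_bits = 027;	/* stand-in for gr_acl_umask() */
	unsigned int user_perm  = 07;	/* rwx */
	unsigned int group_perm = 07;
	unsigned int other_perm = 07;

	user_perm  &= ~((umask_bits & S_IRWXU) >> 6);
	group_perm &= ~((umask_bits & S_IRWXG) >> 3);
	other_perm &= ~(umask_bits & S_IRWXO);

	/* 027 clears nothing for user, write for group, everything for other */
	printf("user %o group %o other %o\n", user_perm, group_perm, other_perm);
	return 0;
}
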
65359diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
65360index 2183fcf..3c32a98 100644
65361--- a/fs/proc/Kconfig
65362+++ b/fs/proc/Kconfig
65363@@ -30,7 +30,7 @@ config PROC_FS
65364
65365 config PROC_KCORE
65366 bool "/proc/kcore support" if !ARM
65367- depends on PROC_FS && MMU
65368+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
65369 help
65370 Provides a virtual ELF core file of the live kernel. This can
65371 be read with gdb and other ELF tools. No modifications can be
65372@@ -38,8 +38,8 @@ config PROC_KCORE
65373
65374 config PROC_VMCORE
65375 bool "/proc/vmcore support"
65376- depends on PROC_FS && CRASH_DUMP
65377- default y
65378+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
65379+ default n
65380 help
65381 Exports the dump image of crashed kernel in ELF format.
65382
65383@@ -63,8 +63,8 @@ config PROC_SYSCTL
65384 limited in memory.
65385
65386 config PROC_PAGE_MONITOR
65387- default y
65388- depends on PROC_FS && MMU
65389+ default n
65390+ depends on PROC_FS && MMU && !GRKERNSEC
65391 bool "Enable /proc page monitoring" if EXPERT
65392 help
65393 Various /proc files exist to monitor process memory utilization:
65394diff --git a/fs/proc/array.c b/fs/proc/array.c
65395index bd117d0..e6872d7 100644
65396--- a/fs/proc/array.c
65397+++ b/fs/proc/array.c
65398@@ -60,6 +60,7 @@
65399 #include <linux/tty.h>
65400 #include <linux/string.h>
65401 #include <linux/mman.h>
65402+#include <linux/grsecurity.h>
65403 #include <linux/proc_fs.h>
65404 #include <linux/ioport.h>
65405 #include <linux/uaccess.h>
65406@@ -344,6 +345,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
65407 seq_putc(m, '\n');
65408 }
65409
65410+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65411+static inline void task_pax(struct seq_file *m, struct task_struct *p)
65412+{
65413+ if (p->mm)
65414+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
65415+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
65416+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
65417+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
65418+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
65419+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
65420+ else
65421+ seq_printf(m, "PaX:\t-----\n");
65422+}
65423+#endif
65424+
65425 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65426 struct pid *pid, struct task_struct *task)
65427 {
65428@@ -362,9 +378,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65429 task_cpus_allowed(m, task);
65430 cpuset_task_status_allowed(m, task);
65431 task_context_switch_counts(m, task);
65432+
65433+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65434+ task_pax(m, task);
65435+#endif
65436+
65437+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
65438+ task_grsec_rbac(m, task);
65439+#endif
65440+
65441 return 0;
65442 }
65443
65444+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65445+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65446+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65447+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65448+#endif
65449+
65450 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65451 struct pid *pid, struct task_struct *task, int whole)
65452 {
65453@@ -386,6 +417,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65454 char tcomm[sizeof(task->comm)];
65455 unsigned long flags;
65456
65457+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65458+ if (current->exec_id != m->exec_id) {
65459+ gr_log_badprocpid("stat");
65460+ return 0;
65461+ }
65462+#endif
65463+
65464 state = *get_task_state(task);
65465 vsize = eip = esp = 0;
65466 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65467@@ -456,6 +494,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65468 gtime = task_gtime(task);
65469 }
65470
65471+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65472+ if (PAX_RAND_FLAGS(mm)) {
65473+ eip = 0;
65474+ esp = 0;
65475+ wchan = 0;
65476+ }
65477+#endif
65478+#ifdef CONFIG_GRKERNSEC_HIDESYM
65479+ wchan = 0;
65480+ eip =0;
65481+ esp =0;
65482+#endif
65483+
65484 /* scale priority and nice values from timeslices to -20..20 */
65485 /* to make it look like a "normal" Unix priority/nice value */
65486 priority = task_prio(task);
65487@@ -487,9 +538,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65488 seq_put_decimal_ull(m, ' ', vsize);
65489 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
65490 seq_put_decimal_ull(m, ' ', rsslim);
65491+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65492+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
65493+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
65494+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
65495+#else
65496 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
65497 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
65498 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
65499+#endif
65500 seq_put_decimal_ull(m, ' ', esp);
65501 seq_put_decimal_ull(m, ' ', eip);
65502 /* The signal information here is obsolete.
65503@@ -511,7 +568,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65504 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
65505 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
65506
65507- if (mm && permitted) {
65508+ if (mm && permitted
65509+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65510+ && !PAX_RAND_FLAGS(mm)
65511+#endif
65512+ ) {
65513 seq_put_decimal_ull(m, ' ', mm->start_data);
65514 seq_put_decimal_ull(m, ' ', mm->end_data);
65515 seq_put_decimal_ull(m, ' ', mm->start_brk);
65516@@ -549,8 +610,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65517 struct pid *pid, struct task_struct *task)
65518 {
65519 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
65520- struct mm_struct *mm = get_task_mm(task);
65521+ struct mm_struct *mm;
65522
65523+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65524+ if (current->exec_id != m->exec_id) {
65525+ gr_log_badprocpid("statm");
65526+ return 0;
65527+ }
65528+#endif
65529+ mm = get_task_mm(task);
65530 if (mm) {
65531 size = task_statm(mm, &shared, &text, &data, &resident);
65532 mmput(mm);
65533@@ -573,6 +641,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65534 return 0;
65535 }
65536
65537+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65538+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
65539+{
65540+ unsigned long flags;
65541+ u32 curr_ip = 0;
65542+
65543+ if (lock_task_sighand(task, &flags)) {
65544+ curr_ip = task->signal->curr_ip;
65545+ unlock_task_sighand(task, &flags);
65546+ }
65547+ return seq_printf(m, "%pI4\n", &curr_ip);
65548+}
65549+#endif
65550+
65551 #ifdef CONFIG_CHECKPOINT_RESTORE
65552 static struct pid *
65553 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
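
The /proc changes above add two recurring guards: PAX_RAND_FLAGS(mm) hides addresses for tasks whose layout is randomized (eip, esp, wchan and the code/stack bounds are zeroed or masked), and the exec_id comparison makes a /proc reader bail out once the target task has exec'd, so a descriptor opened against one program image cannot read state belonging to the next (gr_log_badprocpid() records the attempt). A toy model of the generation check (exec_id here is a plain counter; in the patch it lives in task_struct and seq_file):

#include <stdio.h>

/* Each open /proc file remembers the generation of the task image it was
 * opened against; readers reject a mismatch, so a pre-exec fd cannot read
 * post-exec state. */
struct task  { unsigned long long exec_id; };
struct pfile { unsigned long long exec_id; };

static int proc_read(const struct pfile *f, const struct task *t)
{
	if (f->exec_id != t->exec_id) {
		fprintf(stderr, "stale reader rejected\n");
		return 0;		/* the patch returns 0 and logs */
	}
	/* ... emit the requested data ... */
	return 1;
}

int main(void)
{
	struct task t = { .exec_id = 1 };
	struct pfile f = { .exec_id = t.exec_id };	/* opened now */

	t.exec_id++;					/* task exec()s */
	return proc_read(&f, &t) ? 1 : 0;
}
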
65554diff --git a/fs/proc/base.c b/fs/proc/base.c
65555index 3f3d7ae..68de109 100644
65556--- a/fs/proc/base.c
65557+++ b/fs/proc/base.c
65558@@ -113,6 +113,14 @@ struct pid_entry {
65559 union proc_op op;
65560 };
65561
65562+struct getdents_callback {
65563+ struct linux_dirent __user * current_dir;
65564+ struct linux_dirent __user * previous;
65565+ struct file * file;
65566+ int count;
65567+ int error;
65568+};
65569+
65570 #define NOD(NAME, MODE, IOP, FOP, OP) { \
65571 .name = (NAME), \
65572 .len = sizeof(NAME) - 1, \
65573@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
65574 return 0;
65575 }
65576
65577+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65578+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65579+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65580+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65581+#endif
65582+
65583 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65584 struct pid *pid, struct task_struct *task)
65585 {
65586 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
65587 if (mm && !IS_ERR(mm)) {
65588 unsigned int nwords = 0;
65589+
65590+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65591+ /* allow if we're currently ptracing this task */
65592+ if (PAX_RAND_FLAGS(mm) &&
65593+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
65594+ mmput(mm);
65595+ return 0;
65596+ }
65597+#endif
65598+
65599 do {
65600 nwords += 2;
65601 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
65602@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65603 }
65604
65605
65606-#ifdef CONFIG_KALLSYMS
65607+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65608 /*
65609 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
65610 * Returns the resolved symbol. If that fails, simply return the address.
65611@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
65612 mutex_unlock(&task->signal->cred_guard_mutex);
65613 }
65614
65615-#ifdef CONFIG_STACKTRACE
65616+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65617
65618 #define MAX_STACK_TRACE_DEPTH 64
65619
65620@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
65621 return 0;
65622 }
65623
65624-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65625+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65626 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65627 struct pid *pid, struct task_struct *task)
65628 {
65629@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65630 /************************************************************************/
65631
65632 /* permission checks */
65633-static int proc_fd_access_allowed(struct inode *inode)
65634+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
65635 {
65636 struct task_struct *task;
65637 int allowed = 0;
65638@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
65639 */
65640 task = get_proc_task(inode);
65641 if (task) {
65642- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65643+ if (log)
65644+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65645+ else
65646+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65647 put_task_struct(task);
65648 }
65649 return allowed;
65650@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
65651 struct task_struct *task,
65652 int hide_pid_min)
65653 {
65654+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65655+ return false;
65656+
65657+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65658+ rcu_read_lock();
65659+ {
65660+ const struct cred *tmpcred = current_cred();
65661+ const struct cred *cred = __task_cred(task);
65662+
65663+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
65664+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65665+ || in_group_p(grsec_proc_gid)
65666+#endif
65667+ ) {
65668+ rcu_read_unlock();
65669+ return true;
65670+ }
65671+ }
65672+ rcu_read_unlock();
65673+
65674+ if (!pid->hide_pid)
65675+ return false;
65676+#endif
65677+
65678 if (pid->hide_pid < hide_pid_min)
65679 return true;
65680 if (in_group_p(pid->pid_gid))
65681 return true;
65682+
65683 return ptrace_may_access(task, PTRACE_MODE_READ);
65684 }
65685
65686@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
65687 put_task_struct(task);
65688
65689 if (!has_perms) {
65690+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65691+ {
65692+#else
65693 if (pid->hide_pid == 2) {
65694+#endif
65695 /*
65696 * Let's make getdents(), stat(), and open()
65697 * consistent with each other. If a process
65698@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
65699
65700 if (task) {
65701 mm = mm_access(task, mode);
65702+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
65703+ mmput(mm);
65704+ mm = ERR_PTR(-EPERM);
65705+ }
65706 put_task_struct(task);
65707
65708 if (!IS_ERR_OR_NULL(mm)) {
65709@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
65710 return PTR_ERR(mm);
65711
65712 file->private_data = mm;
65713+
65714+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65715+ file->f_version = current->exec_id;
65716+#endif
65717+
65718 return 0;
65719 }
65720
65721@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
65722 ssize_t copied;
65723 char *page;
65724
65725+#ifdef CONFIG_GRKERNSEC
65726+ if (write)
65727+ return -EPERM;
65728+#endif
65729+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65730+ if (file->f_version != current->exec_id) {
65731+ gr_log_badprocpid("mem");
65732+ return 0;
65733+ }
65734+#endif
65735+
65736 if (!mm)
65737 return 0;
65738
65739@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
65740 goto free;
65741
65742 while (count > 0) {
65743- int this_len = min_t(int, count, PAGE_SIZE);
65744+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
65745
65746 if (write && copy_from_user(page, buf, this_len)) {
65747 copied = -EFAULT;
65748@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
65749 if (!mm)
65750 return 0;
65751
65752+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65753+ if (file->f_version != current->exec_id) {
65754+ gr_log_badprocpid("environ");
65755+ return 0;
65756+ }
65757+#endif
65758+
65759 page = (char *)__get_free_page(GFP_TEMPORARY);
65760 if (!page)
65761 return -ENOMEM;
65762@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
65763 goto free;
65764 while (count > 0) {
65765 size_t this_len, max_len;
65766- int retval;
65767+ ssize_t retval;
65768
65769 if (src >= (mm->env_end - mm->env_start))
65770 break;
65771@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
65772 int error = -EACCES;
65773
65774 /* Are we allowed to snoop on the tasks file descriptors? */
65775- if (!proc_fd_access_allowed(inode))
65776+ if (!proc_fd_access_allowed(inode, 0))
65777 goto out;
65778
65779 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
65780@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
65781 struct path path;
65782
65783 /* Are we allowed to snoop on the tasks file descriptors? */
65784- if (!proc_fd_access_allowed(inode))
65785- goto out;
65786+ /* logging this is needed for learning on chromium to work properly,
65787+ but we don't want to flood the logs from 'ps' which does a readlink
65788+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
65789+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
65790+ */
65791+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
65792+ if (!proc_fd_access_allowed(inode,0))
65793+ goto out;
65794+ } else {
65795+ if (!proc_fd_access_allowed(inode,1))
65796+ goto out;
65797+ }
65798
65799 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
65800 if (error)
65801@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
65802 rcu_read_lock();
65803 cred = __task_cred(task);
65804 inode->i_uid = cred->euid;
65805+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65806+ inode->i_gid = grsec_proc_gid;
65807+#else
65808 inode->i_gid = cred->egid;
65809+#endif
65810 rcu_read_unlock();
65811 }
65812 security_task_to_inode(task, inode);
65813@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
65814 return -ENOENT;
65815 }
65816 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
65817+#ifdef CONFIG_GRKERNSEC_PROC_USER
65818+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
65819+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65820+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
65821+#endif
65822 task_dumpable(task)) {
65823 cred = __task_cred(task);
65824 stat->uid = cred->euid;
65825+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65826+ stat->gid = grsec_proc_gid;
65827+#else
65828 stat->gid = cred->egid;
65829+#endif
65830 }
65831 }
65832 rcu_read_unlock();
65833@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
65834
65835 if (task) {
65836 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
65837+#ifdef CONFIG_GRKERNSEC_PROC_USER
65838+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
65839+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65840+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
65841+#endif
65842 task_dumpable(task)) {
65843 rcu_read_lock();
65844 cred = __task_cred(task);
65845 inode->i_uid = cred->euid;
65846+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65847+ inode->i_gid = grsec_proc_gid;
65848+#else
65849 inode->i_gid = cred->egid;
65850+#endif
65851 rcu_read_unlock();
65852 } else {
65853 inode->i_uid = GLOBAL_ROOT_UID;
65854@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
65855 if (!task)
65856 goto out_no_task;
65857
65858+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65859+ goto out;
65860+
65861 /*
65862 * Yes, it does not scale. And it should not. Don't add
65863 * new entries into /proc/<tgid>/ without very good reasons.
65864@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
65865 if (!task)
65866 return -ENOENT;
65867
65868+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65869+ goto out;
65870+
65871 if (!dir_emit_dots(file, ctx))
65872 goto out;
65873
65874@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
65875 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
65876 #endif
65877 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
65878-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65879+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65880 ONE("syscall", S_IRUSR, proc_pid_syscall),
65881 #endif
65882 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
65883@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
65884 #ifdef CONFIG_SECURITY
65885 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
65886 #endif
65887-#ifdef CONFIG_KALLSYMS
65888+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65889 ONE("wchan", S_IRUGO, proc_pid_wchan),
65890 #endif
65891-#ifdef CONFIG_STACKTRACE
65892+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65893 ONE("stack", S_IRUSR, proc_pid_stack),
65894 #endif
65895 #ifdef CONFIG_SCHEDSTATS
65896@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
65897 #ifdef CONFIG_HARDWALL
65898 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
65899 #endif
65900+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65901+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
65902+#endif
65903 #ifdef CONFIG_USER_NS
65904 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
65905 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
65906@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
65907 if (!inode)
65908 goto out;
65909
65910+#ifdef CONFIG_GRKERNSEC_PROC_USER
65911+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
65912+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65913+ inode->i_gid = grsec_proc_gid;
65914+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
65915+#else
65916 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
65917+#endif
65918 inode->i_op = &proc_tgid_base_inode_operations;
65919 inode->i_fop = &proc_tgid_base_operations;
65920 inode->i_flags|=S_IMMUTABLE;
65921@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
65922 if (!task)
65923 goto out;
65924
65925+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65926+ goto out_put_task;
65927+
65928 result = proc_pid_instantiate(dir, dentry, task, NULL);
65929+out_put_task:
65930 put_task_struct(task);
65931 out:
65932 return ERR_PTR(result);
65933@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
65934 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
65935 #endif
65936 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
65937-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65938+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65939 ONE("syscall", S_IRUSR, proc_pid_syscall),
65940 #endif
65941 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
65942@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
65943 #ifdef CONFIG_SECURITY
65944 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
65945 #endif
65946-#ifdef CONFIG_KALLSYMS
65947+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65948 ONE("wchan", S_IRUGO, proc_pid_wchan),
65949 #endif
65950-#ifdef CONFIG_STACKTRACE
65951+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65952 ONE("stack", S_IRUSR, proc_pid_stack),
65953 #endif
65954 #ifdef CONFIG_SCHEDSTATS
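
The base.c hunks above change how /proc/<pid> directories are instantiated: under GRKERNSEC_PROC_USER they become mode 0500, under GRKERNSEC_PROC_USERGROUP mode 0550 with the group forced to grsec_proc_gid, and getattr/revalidate keep reporting the same restricted ownership. A minimal userspace sketch for observing the effect, assuming a kernel built with one of these options; nothing below is part of the patch itself:

    /* Prints the mode and ownership of /proc/1.  Expected results:
     * stock kernel: 0555; GRKERNSEC_PROC_USER: 0500;
     * GRKERNSEC_PROC_USERGROUP: 0550 with gid == grsec_proc_gid. */
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/stat.h>

    int main(void)
    {
        struct stat st;

        if (stat("/proc/1", &st) != 0) {
            perror("stat /proc/1");
            return 1;
        }
        printf("mode %04o uid %d gid %d\n",
               (unsigned)(st.st_mode & 07777),
               (int)st.st_uid, (int)st.st_gid);
        return 0;
    }
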
65955diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
65956index cbd82df..c0407d2 100644
65957--- a/fs/proc/cmdline.c
65958+++ b/fs/proc/cmdline.c
65959@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
65960
65961 static int __init proc_cmdline_init(void)
65962 {
65963+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65964+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
65965+#else
65966 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
65967+#endif
65968 return 0;
65969 }
65970 fs_initcall(proc_cmdline_init);
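
proc_create_grsec() used above is supplied elsewhere in this patch; it registers the entry like proc_create() but with restricted permissions and ownership. A rough sketch of the idea, with a hypothetical name (the patch's real implementation differs in detail, and grsec_proc_gid comes from the patch):

    #include <linux/proc_fs.h>
    #include <linux/uidgid.h>

    static struct proc_dir_entry *proc_create_restricted_sketch(
            const char *name, umode_t mode, struct proc_dir_entry *parent,
            const struct file_operations *fops)
    {
        struct proc_dir_entry *pde;

    #if defined(CONFIG_GRKERNSEC_PROC_USER)
        pde = proc_create(name, S_IRUSR, parent, fops);
    #elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
        pde = proc_create(name, S_IRUSR | S_IRGRP, parent, fops);
        if (pde)        /* grsec_proc_gid is defined by this patch */
            proc_set_user(pde, GLOBAL_ROOT_UID, grsec_proc_gid);
    #else
        pde = proc_create(name, mode, parent, fops);
    #endif
        return pde;
    }
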
65971diff --git a/fs/proc/devices.c b/fs/proc/devices.c
65972index 50493ed..248166b 100644
65973--- a/fs/proc/devices.c
65974+++ b/fs/proc/devices.c
65975@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
65976
65977 static int __init proc_devices_init(void)
65978 {
65979+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65980+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
65981+#else
65982 proc_create("devices", 0, NULL, &proc_devinfo_operations);
65983+#endif
65984 return 0;
65985 }
65986 fs_initcall(proc_devices_init);
65987diff --git a/fs/proc/fd.c b/fs/proc/fd.c
65988index 8e5ad83..1f07a8c 100644
65989--- a/fs/proc/fd.c
65990+++ b/fs/proc/fd.c
65991@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
65992 if (!task)
65993 return -ENOENT;
65994
65995- files = get_files_struct(task);
65996+ if (!gr_acl_handle_procpidmem(task))
65997+ files = get_files_struct(task);
65998 put_task_struct(task);
65999
66000 if (files) {
66001@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
66002 */
66003 int proc_fd_permission(struct inode *inode, int mask)
66004 {
66005+ struct task_struct *task;
66006 int rv = generic_permission(inode, mask);
66007- if (rv == 0)
66008- return 0;
66009+
66010 if (task_tgid(current) == proc_pid(inode))
66011 rv = 0;
66012+
66013+ task = get_proc_task(inode);
66014+ if (task == NULL)
66015+ return rv;
66016+
66017+ if (gr_acl_handle_procpidmem(task))
66018+ rv = -EACCES;
66019+
66020+ put_task_struct(task);
66021+
66022 return rv;
66023 }
66024
66025diff --git a/fs/proc/generic.c b/fs/proc/generic.c
66026index 7fea132..2923577 100644
66027--- a/fs/proc/generic.c
66028+++ b/fs/proc/generic.c
66029@@ -23,6 +23,7 @@
66030 #include <linux/bitops.h>
66031 #include <linux/spinlock.h>
66032 #include <linux/completion.h>
66033+#include <linux/grsecurity.h>
66034 #include <asm/uaccess.h>
66035
66036 #include "internal.h"
66037@@ -265,6 +266,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
66038 return proc_lookup_de(PDE(dir), dir, dentry);
66039 }
66040
66041+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
66042+ unsigned int flags)
66043+{
66044+ if (gr_proc_is_restricted())
66045+ return ERR_PTR(-EACCES);
66046+
66047+ return proc_lookup_de(PDE(dir), dir, dentry);
66048+}
66049+
66050 /*
66051 * This returns non-zero if at EOF, so that the /proc
66052 * root directory can use this and check if it should
66053@@ -322,6 +332,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
66054 return proc_readdir_de(PDE(inode), file, ctx);
66055 }
66056
66057+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
66058+{
66059+ struct inode *inode = file_inode(file);
66060+
66061+ if (gr_proc_is_restricted())
66062+ return -EACCES;
66063+
66064+ return proc_readdir_de(PDE(inode), file, ctx);
66065+}
66066+
66067 /*
66068 * These are the generic /proc directory operations. They
66069 * use the in-memory "struct proc_dir_entry" tree to parse
66070@@ -333,6 +353,12 @@ static const struct file_operations proc_dir_operations = {
66071 .iterate = proc_readdir,
66072 };
66073
66074+static const struct file_operations proc_dir_restricted_operations = {
66075+ .llseek = generic_file_llseek,
66076+ .read = generic_read_dir,
66077+ .iterate = proc_readdir_restrict,
66078+};
66079+
66080 /*
66081 * proc directories can do almost nothing..
66082 */
66083@@ -342,6 +368,12 @@ static const struct inode_operations proc_dir_inode_operations = {
66084 .setattr = proc_notify_change,
66085 };
66086
66087+static const struct inode_operations proc_dir_restricted_inode_operations = {
66088+ .lookup = proc_lookup_restrict,
66089+ .getattr = proc_getattr,
66090+ .setattr = proc_notify_change,
66091+};
66092+
66093 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
66094 {
66095 int ret;
66096@@ -351,8 +383,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
66097 return ret;
66098
66099 if (S_ISDIR(dp->mode)) {
66100- dp->proc_fops = &proc_dir_operations;
66101- dp->proc_iops = &proc_dir_inode_operations;
66102+ if (dp->restricted) {
66103+ dp->proc_fops = &proc_dir_restricted_operations;
66104+ dp->proc_iops = &proc_dir_restricted_inode_operations;
66105+ } else {
66106+ dp->proc_fops = &proc_dir_operations;
66107+ dp->proc_iops = &proc_dir_inode_operations;
66108+ }
66109 dir->nlink++;
66110 } else if (S_ISLNK(dp->mode)) {
66111 dp->proc_iops = &proc_link_inode_operations;
66112@@ -465,6 +502,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
66113 }
66114 EXPORT_SYMBOL_GPL(proc_mkdir_data);
66115
66116+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
66117+ struct proc_dir_entry *parent, void *data)
66118+{
66119+ struct proc_dir_entry *ent;
66120+
66121+ if (mode == 0)
66122+ mode = S_IRUGO | S_IXUGO;
66123+
66124+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
66125+ if (ent) {
66126+ ent->data = data;
66127+ ent->restricted = 1;
66128+ if (proc_register(parent, ent) < 0) {
66129+ kfree(ent);
66130+ ent = NULL;
66131+ }
66132+ }
66133+ return ent;
66134+}
66135+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
66136+
66137 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
66138 struct proc_dir_entry *parent)
66139 {
66140@@ -479,6 +537,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
66141 }
66142 EXPORT_SYMBOL(proc_mkdir);
66143
66144+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
66145+ struct proc_dir_entry *parent)
66146+{
66147+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
66148+}
66149+EXPORT_SYMBOL(proc_mkdir_restrict);
66150+
66151 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
66152 struct proc_dir_entry *parent,
66153 const struct file_operations *proc_fops,
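
proc_mkdir_restrict() and proc_mkdir_data_restrict() added above mark a directory so that lookup and readdir on it fail with -EACCES whenever gr_proc_is_restricted() flags the caller. A usage sketch as a trivial module (hypothetical example name, otherwise the stock proc API plus the patch's exported helper):

    #include <linux/module.h>
    #include <linux/proc_fs.h>

    static struct proc_dir_entry *example_dir;

    static int __init example_init(void)
    {
        /* browsing this directory is denied to restricted callers */
        example_dir = proc_mkdir_restrict("example_restricted", NULL);
        return example_dir ? 0 : -ENOMEM;
    }

    static void __exit example_exit(void)
    {
        proc_remove(example_dir);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
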
66154diff --git a/fs/proc/inode.c b/fs/proc/inode.c
66155index 8420a2f..7b98f00 100644
66156--- a/fs/proc/inode.c
66157+++ b/fs/proc/inode.c
66158@@ -23,11 +23,17 @@
66159 #include <linux/slab.h>
66160 #include <linux/mount.h>
66161 #include <linux/magic.h>
66162+#include <linux/grsecurity.h>
66163
66164 #include <asm/uaccess.h>
66165
66166 #include "internal.h"
66167
66168+#ifdef CONFIG_PROC_SYSCTL
66169+extern const struct inode_operations proc_sys_inode_operations;
66170+extern const struct inode_operations proc_sys_dir_operations;
66171+#endif
66172+
66173 static void proc_evict_inode(struct inode *inode)
66174 {
66175 struct proc_dir_entry *de;
66176@@ -48,6 +54,13 @@ static void proc_evict_inode(struct inode *inode)
66177 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
66178 sysctl_head_put(head);
66179 }
66180+
66181+#ifdef CONFIG_PROC_SYSCTL
66182+ if (inode->i_op == &proc_sys_inode_operations ||
66183+ inode->i_op == &proc_sys_dir_operations)
66184+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
66185+#endif
66186+
66187 }
66188
66189 static struct kmem_cache * proc_inode_cachep;
66190@@ -405,7 +418,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
66191 if (de->mode) {
66192 inode->i_mode = de->mode;
66193 inode->i_uid = de->uid;
66194+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66195+ inode->i_gid = grsec_proc_gid;
66196+#else
66197 inode->i_gid = de->gid;
66198+#endif
66199 }
66200 if (de->size)
66201 inode->i_size = de->size;
66202diff --git a/fs/proc/internal.h b/fs/proc/internal.h
66203index 6fcdba5..d08b8f1 100644
66204--- a/fs/proc/internal.h
66205+++ b/fs/proc/internal.h
66206@@ -47,9 +47,10 @@ struct proc_dir_entry {
66207 struct completion *pde_unload_completion;
66208 struct list_head pde_openers; /* who did ->open, but not ->release */
66209 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
66210+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
66211 u8 namelen;
66212 char name[];
66213-};
66214+} __randomize_layout;
66215
66216 union proc_op {
66217 int (*proc_get_link)(struct dentry *, struct path *);
66218@@ -67,7 +68,7 @@ struct proc_inode {
66219 struct ctl_table *sysctl_entry;
66220 const struct proc_ns_operations *ns_ops;
66221 struct inode vfs_inode;
66222-};
66223+} __randomize_layout;
66224
66225 /*
66226 * General functions
66227@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
66228 struct pid *, struct task_struct *);
66229 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
66230 struct pid *, struct task_struct *);
66231+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66232+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
66233+ struct pid *, struct task_struct *);
66234+#endif
66235
66236 /*
66237 * base.c
66238@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
66239 * generic.c
66240 */
66241 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
66242+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
66243 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
66244 struct dentry *);
66245 extern int proc_readdir(struct file *, struct dir_context *);
66246+extern int proc_readdir_restrict(struct file *, struct dir_context *);
66247 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
66248
66249 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
66250diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
66251index a352d57..cb94a5c 100644
66252--- a/fs/proc/interrupts.c
66253+++ b/fs/proc/interrupts.c
66254@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
66255
66256 static int __init proc_interrupts_init(void)
66257 {
66258+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66259+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
66260+#else
66261 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
66262+#endif
66263 return 0;
66264 }
66265 fs_initcall(proc_interrupts_init);
66266diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
66267index 91a4e64..cb007c0 100644
66268--- a/fs/proc/kcore.c
66269+++ b/fs/proc/kcore.c
66270@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66271 * the addresses in the elf_phdr on our list.
66272 */
66273 start = kc_offset_to_vaddr(*fpos - elf_buflen);
66274- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
66275+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
66276+ if (tsz > buflen)
66277 tsz = buflen;
66278-
66279+
66280 while (buflen) {
66281 struct kcore_list *m;
66282
66283@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66284 kfree(elf_buf);
66285 } else {
66286 if (kern_addr_valid(start)) {
66287- unsigned long n;
66288+ char *elf_buf;
66289+ mm_segment_t oldfs;
66290
66291- n = copy_to_user(buffer, (char *)start, tsz);
66292- /*
66293- * We cannot distinguish between fault on source
66294- * and fault on destination. When this happens
66295- * we clear too and hope it will trigger the
66296- * EFAULT again.
66297- */
66298- if (n) {
66299- if (clear_user(buffer + tsz - n,
66300- n))
66301+ elf_buf = kmalloc(tsz, GFP_KERNEL);
66302+ if (!elf_buf)
66303+ return -ENOMEM;
66304+ oldfs = get_fs();
66305+ set_fs(KERNEL_DS);
66306+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
66307+ set_fs(oldfs);
66308+ if (copy_to_user(buffer, elf_buf, tsz)) {
66309+ kfree(elf_buf);
66310 return -EFAULT;
66311+ }
66312 }
66313+ set_fs(oldfs);
66314+ kfree(elf_buf);
66315 } else {
66316 if (clear_user(buffer, tsz))
66317 return -EFAULT;
66318@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66319
66320 static int open_kcore(struct inode *inode, struct file *filp)
66321 {
66322+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66323+ return -EPERM;
66324+#endif
66325 if (!capable(CAP_SYS_RAWIO))
66326 return -EPERM;
66327 if (kcore_need_update)
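
The read_kcore() rewrite above replaces a direct copy_to_user() from an arbitrary kernel virtual address with a bounce buffer filled under KERNEL_DS, which keeps source-side faults detectable instead of papering over them with clear_user(). The pattern, condensed into a hypothetical helper (sketch only; names are not from the patch):

    #include <linux/slab.h>
    #include <linux/uaccess.h>

    static int copy_kernel_range_to_user(char __user *dst,
                                         unsigned long src, size_t len)
    {
        mm_segment_t oldfs;
        char *bounce;
        int ret = 0;

        bounce = kmalloc(len, GFP_KERNEL);
        if (!bounce)
            return -ENOMEM;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        /* under KERNEL_DS the "user" accessor accepts kernel addresses,
         * so a fault on the source shows up as a nonzero return */
        if (__copy_from_user(bounce, (const void __user *)src, len))
            ret = -EFAULT;
        set_fs(oldfs);

        if (!ret && copy_to_user(dst, bounce, len))
            ret = -EFAULT;

        kfree(bounce);
        return ret;
    }
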
66328diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
66329index d3ebf2e..6ad42d1 100644
66330--- a/fs/proc/meminfo.c
66331+++ b/fs/proc/meminfo.c
66332@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
66333 vmi.used >> 10,
66334 vmi.largest_chunk >> 10
66335 #ifdef CONFIG_MEMORY_FAILURE
66336- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66337+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66338 #endif
66339 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
66340 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
66341diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
66342index d4a3574..b421ce9 100644
66343--- a/fs/proc/nommu.c
66344+++ b/fs/proc/nommu.c
66345@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
66346
66347 if (file) {
66348 seq_pad(m, ' ');
66349- seq_path(m, &file->f_path, "");
66350+ seq_path(m, &file->f_path, "\n\\");
66351 }
66352
66353 seq_putc(m, '\n');
66354diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
66355index 1bde894..22ac7eb 100644
66356--- a/fs/proc/proc_net.c
66357+++ b/fs/proc/proc_net.c
66358@@ -23,9 +23,27 @@
66359 #include <linux/nsproxy.h>
66360 #include <net/net_namespace.h>
66361 #include <linux/seq_file.h>
66362+#include <linux/grsecurity.h>
66363
66364 #include "internal.h"
66365
66366+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66367+static struct seq_operations *ipv6_seq_ops_addr;
66368+
66369+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
66370+{
66371+ ipv6_seq_ops_addr = addr;
66372+}
66373+
66374+void unregister_ipv6_seq_ops_addr(void)
66375+{
66376+ ipv6_seq_ops_addr = NULL;
66377+}
66378+
66379+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
66380+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
66381+#endif
66382+
66383 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
66384 {
66385 return pde->parent->data;
66386@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
66387 return maybe_get_net(PDE_NET(PDE(inode)));
66388 }
66389
66390+extern const struct seq_operations dev_seq_ops;
66391+
66392 int seq_open_net(struct inode *ino, struct file *f,
66393 const struct seq_operations *ops, int size)
66394 {
66395@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
66396
66397 BUG_ON(size < sizeof(*p));
66398
66399+ /* only permit access to /proc/net/dev */
66400+ if (
66401+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66402+ ops != ipv6_seq_ops_addr &&
66403+#endif
66404+ ops != &dev_seq_ops && gr_proc_is_restricted())
66405+ return -EACCES;
66406+
66407 net = get_proc_net(ino);
66408 if (net == NULL)
66409 return -ENXIO;
66410@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
66411 int err;
66412 struct net *net;
66413
66414+ if (gr_proc_is_restricted())
66415+ return -EACCES;
66416+
66417 err = -ENXIO;
66418 net = get_proc_net(inode);
66419 if (net == NULL)
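
With gr_proc_is_restricted() in effect, the proc_net.c hunks above leave /proc/net/dev (and the registered IPv6 address seq_ops) readable while every other /proc/net entry is refused at open time. A userspace sketch for verifying the behavior on such a kernel:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    static void try_open(const char *path)
    {
        int fd = open(path, O_RDONLY);

        if (fd >= 0) {
            printf("%-16s open ok\n", path);
            close(fd);
        } else {
            printf("%-16s %s\n", path, strerror(errno));
        }
    }

    int main(void)
    {
        try_open("/proc/net/dev");   /* always permitted */
        try_open("/proc/net/tcp");   /* EACCES when restricted */
        return 0;
    }
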
66420diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
66421index f92d5dd..26398ac 100644
66422--- a/fs/proc/proc_sysctl.c
66423+++ b/fs/proc/proc_sysctl.c
66424@@ -11,13 +11,21 @@
66425 #include <linux/namei.h>
66426 #include <linux/mm.h>
66427 #include <linux/module.h>
66428+#include <linux/nsproxy.h>
66429+#ifdef CONFIG_GRKERNSEC
66430+#include <net/net_namespace.h>
66431+#endif
66432 #include "internal.h"
66433
66434+extern int gr_handle_chroot_sysctl(const int op);
66435+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66436+ const int op);
66437+
66438 static const struct dentry_operations proc_sys_dentry_operations;
66439 static const struct file_operations proc_sys_file_operations;
66440-static const struct inode_operations proc_sys_inode_operations;
66441+const struct inode_operations proc_sys_inode_operations;
66442 static const struct file_operations proc_sys_dir_file_operations;
66443-static const struct inode_operations proc_sys_dir_operations;
66444+const struct inode_operations proc_sys_dir_operations;
66445
66446 void proc_sys_poll_notify(struct ctl_table_poll *poll)
66447 {
66448@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
66449
66450 err = NULL;
66451 d_set_d_op(dentry, &proc_sys_dentry_operations);
66452+
66453+ gr_handle_proc_create(dentry, inode);
66454+
66455 d_add(dentry, inode);
66456
66457 out:
66458@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66459 struct inode *inode = file_inode(filp);
66460 struct ctl_table_header *head = grab_header(inode);
66461 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
66462+ int op = write ? MAY_WRITE : MAY_READ;
66463 ssize_t error;
66464 size_t res;
66465
66466@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66467 * and won't be until we finish.
66468 */
66469 error = -EPERM;
66470- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
66471+ if (sysctl_perm(head, table, op))
66472 goto out;
66473
66474 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
66475@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66476 if (!table->proc_handler)
66477 goto out;
66478
66479+#ifdef CONFIG_GRKERNSEC
66480+ error = -EPERM;
66481+ if (gr_handle_chroot_sysctl(op))
66482+ goto out;
66483+ dget(filp->f_path.dentry);
66484+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
66485+ dput(filp->f_path.dentry);
66486+ goto out;
66487+ }
66488+ dput(filp->f_path.dentry);
66489+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
66490+ goto out;
66491+ if (write) {
66492+ if (current->nsproxy->net_ns != table->extra2) {
66493+ if (!capable(CAP_SYS_ADMIN))
66494+ goto out;
66495+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
66496+ goto out;
66497+ }
66498+#endif
66499+
66500 /* careful: calling conventions are nasty here */
66501 res = count;
66502 error = table->proc_handler(table, write, buf, &res, ppos);
66503@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
66504 return false;
66505 } else {
66506 d_set_d_op(child, &proc_sys_dentry_operations);
66507+
66508+ gr_handle_proc_create(child, inode);
66509+
66510 d_add(child, inode);
66511 }
66512 } else {
66513@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
66514 if ((*pos)++ < ctx->pos)
66515 return true;
66516
66517+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
66518+ return 0;
66519+
66520 if (unlikely(S_ISLNK(table->mode)))
66521 res = proc_sys_link_fill_cache(file, ctx, head, table);
66522 else
66523@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
66524 if (IS_ERR(head))
66525 return PTR_ERR(head);
66526
66527+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
66528+ return -ENOENT;
66529+
66530 generic_fillattr(inode, stat);
66531 if (table)
66532 stat->mode = (stat->mode & S_IFMT) | table->mode;
66533@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
66534 .llseek = generic_file_llseek,
66535 };
66536
66537-static const struct inode_operations proc_sys_inode_operations = {
66538+const struct inode_operations proc_sys_inode_operations = {
66539 .permission = proc_sys_permission,
66540 .setattr = proc_sys_setattr,
66541 .getattr = proc_sys_getattr,
66542 };
66543
66544-static const struct inode_operations proc_sys_dir_operations = {
66545+const struct inode_operations proc_sys_dir_operations = {
66546 .lookup = proc_sys_lookup,
66547 .permission = proc_sys_permission,
66548 .setattr = proc_sys_setattr,
66549@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
66550 static struct ctl_dir *new_dir(struct ctl_table_set *set,
66551 const char *name, int namelen)
66552 {
66553- struct ctl_table *table;
66554+ ctl_table_no_const *table;
66555 struct ctl_dir *new;
66556 struct ctl_node *node;
66557 char *new_name;
66558@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
66559 return NULL;
66560
66561 node = (struct ctl_node *)(new + 1);
66562- table = (struct ctl_table *)(node + 1);
66563+ table = (ctl_table_no_const *)(node + 1);
66564 new_name = (char *)(table + 2);
66565 memcpy(new_name, name, namelen);
66566 new_name[namelen] = '\0';
66567@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
66568 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
66569 struct ctl_table_root *link_root)
66570 {
66571- struct ctl_table *link_table, *entry, *link;
66572+ ctl_table_no_const *link_table, *link;
66573+ struct ctl_table *entry;
66574 struct ctl_table_header *links;
66575 struct ctl_node *node;
66576 char *link_name;
66577@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
66578 return NULL;
66579
66580 node = (struct ctl_node *)(links + 1);
66581- link_table = (struct ctl_table *)(node + nr_entries);
66582+ link_table = (ctl_table_no_const *)(node + nr_entries);
66583 link_name = (char *)&link_table[nr_entries + 1];
66584
66585 for (link = link_table, entry = table; entry->procname; link++, entry++) {
66586@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66587 struct ctl_table_header ***subheader, struct ctl_table_set *set,
66588 struct ctl_table *table)
66589 {
66590- struct ctl_table *ctl_table_arg = NULL;
66591- struct ctl_table *entry, *files;
66592+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
66593+ struct ctl_table *entry;
66594 int nr_files = 0;
66595 int nr_dirs = 0;
66596 int err = -ENOMEM;
66597@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66598 nr_files++;
66599 }
66600
66601- files = table;
66602 /* If there are mixed files and directories we need a new table */
66603 if (nr_dirs && nr_files) {
66604- struct ctl_table *new;
66605+ ctl_table_no_const *new;
66606 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
66607 GFP_KERNEL);
66608 if (!files)
66609@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66610 /* Register everything except a directory full of subdirectories */
66611 if (nr_files || !nr_dirs) {
66612 struct ctl_table_header *header;
66613- header = __register_sysctl_table(set, path, files);
66614+ header = __register_sysctl_table(set, path, files ? files : table);
66615 if (!header) {
66616 kfree(ctl_table_arg);
66617 goto out;
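
Two things happen in the proc_sysctl.c hunks: the write path gains namespace-aware capability checks (table->extra2 is used by this patch to record the owning net namespace), and register_leaf_sysctl_tables() now falls back to the original table when no mixed files/dirs copy was allocated, since files starts out NULL. The capability gate, condensed into an illustrative helper (not the patch's code):

    #include <linux/capability.h>
    #include <linux/nsproxy.h>
    #include <linux/sched.h>
    #include <linux/sysctl.h>
    #include <net/net_namespace.h>

    static int sysctl_write_allowed_sketch(struct ctl_table *table)
    {
        struct net *net = current->nsproxy->net_ns;

        /* writes from a foreign net namespace need CAP_SYS_ADMIN ... */
        if (net != table->extra2)
            return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

        /* ... writes inside the owning namespace need CAP_NET_ADMIN
         * in that namespace's user namespace */
        return ns_capable(net->user_ns, CAP_NET_ADMIN) ? 0 : -EPERM;
    }
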
66618diff --git a/fs/proc/root.c b/fs/proc/root.c
66619index e74ac9f..35e89f4 100644
66620--- a/fs/proc/root.c
66621+++ b/fs/proc/root.c
66622@@ -188,7 +188,15 @@ void __init proc_root_init(void)
66623 proc_mkdir("openprom", NULL);
66624 #endif
66625 proc_tty_init();
66626+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66627+#ifdef CONFIG_GRKERNSEC_PROC_USER
66628+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
66629+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66630+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
66631+#endif
66632+#else
66633 proc_mkdir("bus", NULL);
66634+#endif
66635 proc_sys_init();
66636 }
66637
66638diff --git a/fs/proc/stat.c b/fs/proc/stat.c
66639index 510413eb..34d9a8c 100644
66640--- a/fs/proc/stat.c
66641+++ b/fs/proc/stat.c
66642@@ -11,6 +11,7 @@
66643 #include <linux/irqnr.h>
66644 #include <linux/cputime.h>
66645 #include <linux/tick.h>
66646+#include <linux/grsecurity.h>
66647
66648 #ifndef arch_irq_stat_cpu
66649 #define arch_irq_stat_cpu(cpu) 0
66650@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
66651 u64 sum_softirq = 0;
66652 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
66653 struct timespec boottime;
66654+ int unrestricted = 1;
66655+
66656+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66657+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66658+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
66659+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66660+ && !in_group_p(grsec_proc_gid)
66661+#endif
66662+ )
66663+ unrestricted = 0;
66664+#endif
66665+#endif
66666
66667 user = nice = system = idle = iowait =
66668 irq = softirq = steal = 0;
66669@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
66670 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66671 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66672 idle += get_idle_time(i);
66673- iowait += get_iowait_time(i);
66674- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66675- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66676- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66677- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66678- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66679- sum += kstat_cpu_irqs_sum(i);
66680- sum += arch_irq_stat_cpu(i);
66681+ if (unrestricted) {
66682+ iowait += get_iowait_time(i);
66683+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66684+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66685+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66686+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66687+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66688+ sum += kstat_cpu_irqs_sum(i);
66689+ sum += arch_irq_stat_cpu(i);
66690+ for (j = 0; j < NR_SOFTIRQS; j++) {
66691+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66692
66693- for (j = 0; j < NR_SOFTIRQS; j++) {
66694- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66695-
66696- per_softirq_sums[j] += softirq_stat;
66697- sum_softirq += softirq_stat;
66698+ per_softirq_sums[j] += softirq_stat;
66699+ sum_softirq += softirq_stat;
66700+ }
66701 }
66702 }
66703- sum += arch_irq_stat();
66704+ if (unrestricted)
66705+ sum += arch_irq_stat();
66706
66707 seq_puts(p, "cpu ");
66708 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66709@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
66710 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66711 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66712 idle = get_idle_time(i);
66713- iowait = get_iowait_time(i);
66714- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66715- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66716- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66717- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66718- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66719+ if (unrestricted) {
66720+ iowait = get_iowait_time(i);
66721+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66722+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66723+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66724+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66725+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66726+ }
66727 seq_printf(p, "cpu%d", i);
66728 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66729 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
66730@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
66731
66732 /* sum again ? it could be updated? */
66733 for_each_irq_nr(j)
66734- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
66735+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
66736
66737 seq_printf(p,
66738 "\nctxt %llu\n"
66739@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
66740 "processes %lu\n"
66741 "procs_running %lu\n"
66742 "procs_blocked %lu\n",
66743- nr_context_switches(),
66744+ unrestricted ? nr_context_switches() : 0ULL,
66745 (unsigned long)jif,
66746- total_forks,
66747- nr_running(),
66748- nr_iowait());
66749+ unrestricted ? total_forks : 0UL,
66750+ unrestricted ? nr_running() : 0UL,
66751+ unrestricted ? nr_iowait() : 0UL);
66752
66753 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
66754
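
The show_stat() changes gate everything beyond per-cpu user/nice/system/idle behind an "unrestricted" flag, so unprivileged readers see zeros for interrupt counts, context switches, and fork totals, removing one side channel for keystroke and activity inference. The predicate itself, pulled out as a sketch (the patch open-codes it under CONFIG_GRKERNSEC_PROC_ADD):

    #include <linux/cred.h>
    #include <linux/uidgid.h>

    static int proc_stat_unrestricted_sketch(void)
    {
    #if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
        if (uid_eq(current_uid(), GLOBAL_ROOT_UID))
            return 1;
    #ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
        if (in_group_p(grsec_proc_gid))  /* grsec_proc_gid: from this patch */
            return 1;
    #endif
        return 0;
    #else
        return 1;
    #endif
    }
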
66755diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
66756index 88f9b83..314064c 100644
66757--- a/fs/proc/task_mmu.c
66758+++ b/fs/proc/task_mmu.c
66759@@ -13,12 +13,19 @@
66760 #include <linux/swap.h>
66761 #include <linux/swapops.h>
66762 #include <linux/mmu_notifier.h>
66763+#include <linux/grsecurity.h>
66764
66765 #include <asm/elf.h>
66766 #include <asm/uaccess.h>
66767 #include <asm/tlbflush.h>
66768 #include "internal.h"
66769
66770+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66771+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66772+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66773+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66774+#endif
66775+
66776 void task_mem(struct seq_file *m, struct mm_struct *mm)
66777 {
66778 unsigned long data, text, lib, swap;
66779@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66780 "VmExe:\t%8lu kB\n"
66781 "VmLib:\t%8lu kB\n"
66782 "VmPTE:\t%8lu kB\n"
66783- "VmSwap:\t%8lu kB\n",
66784- hiwater_vm << (PAGE_SHIFT-10),
66785+ "VmSwap:\t%8lu kB\n"
66786+
66787+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66788+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
66789+#endif
66790+
66791+ ,hiwater_vm << (PAGE_SHIFT-10),
66792 total_vm << (PAGE_SHIFT-10),
66793 mm->locked_vm << (PAGE_SHIFT-10),
66794 mm->pinned_vm << (PAGE_SHIFT-10),
66795@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66796 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
66797 (PTRS_PER_PTE * sizeof(pte_t) *
66798 atomic_long_read(&mm->nr_ptes)) >> 10,
66799- swap << (PAGE_SHIFT-10));
66800+ swap << (PAGE_SHIFT-10)
66801+
66802+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66803+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66804+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
66805+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
66806+#else
66807+ , mm->context.user_cs_base
66808+ , mm->context.user_cs_limit
66809+#endif
66810+#endif
66811+
66812+ );
66813 }
66814
66815 unsigned long task_vsize(struct mm_struct *mm)
66816@@ -282,13 +306,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66817 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
66818 }
66819
66820- /* We don't show the stack guard page in /proc/maps */
66821+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66822+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
66823+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
66824+#else
66825 start = vma->vm_start;
66826- if (stack_guard_page_start(vma, start))
66827- start += PAGE_SIZE;
66828 end = vma->vm_end;
66829- if (stack_guard_page_end(vma, end))
66830- end -= PAGE_SIZE;
66831+#endif
66832
66833 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
66834 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
66835@@ -298,7 +322,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66836 flags & VM_WRITE ? 'w' : '-',
66837 flags & VM_EXEC ? 'x' : '-',
66838 flags & VM_MAYSHARE ? 's' : 'p',
66839+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66840+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
66841+#else
66842 pgoff,
66843+#endif
66844 MAJOR(dev), MINOR(dev), ino);
66845
66846 /*
66847@@ -307,7 +335,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66848 */
66849 if (file) {
66850 seq_pad(m, ' ');
66851- seq_path(m, &file->f_path, "\n");
66852+ seq_path(m, &file->f_path, "\n\\");
66853 goto done;
66854 }
66855
66856@@ -338,8 +366,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66857 * Thread stack in /proc/PID/task/TID/maps or
66858 * the main process stack.
66859 */
66860- if (!is_pid || (vma->vm_start <= mm->start_stack &&
66861- vma->vm_end >= mm->start_stack)) {
66862+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
66863+ (vma->vm_start <= mm->start_stack &&
66864+ vma->vm_end >= mm->start_stack)) {
66865 name = "[stack]";
66866 } else {
66867 /* Thread stack in /proc/PID/maps */
66868@@ -359,6 +388,12 @@ done:
66869
66870 static int show_map(struct seq_file *m, void *v, int is_pid)
66871 {
66872+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66873+ if (current->exec_id != m->exec_id) {
66874+ gr_log_badprocpid("maps");
66875+ return 0;
66876+ }
66877+#endif
66878 show_map_vma(m, v, is_pid);
66879 m_cache_vma(m, v);
66880 return 0;
66881@@ -629,12 +664,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
66882 .private = &mss,
66883 };
66884
66885+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66886+ if (current->exec_id != m->exec_id) {
66887+ gr_log_badprocpid("smaps");
66888+ return 0;
66889+ }
66890+#endif
66891 memset(&mss, 0, sizeof mss);
66892- mss.vma = vma;
66893- /* mmap_sem is held in m_start */
66894- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
66895- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
66896-
66897+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66898+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
66899+#endif
66900+ mss.vma = vma;
66901+ /* mmap_sem is held in m_start */
66902+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
66903+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
66904+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66905+ }
66906+#endif
66907 show_map_vma(m, vma, is_pid);
66908
66909 seq_printf(m,
66910@@ -652,7 +698,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
66911 "KernelPageSize: %8lu kB\n"
66912 "MMUPageSize: %8lu kB\n"
66913 "Locked: %8lu kB\n",
66914+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66915+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
66916+#else
66917 (vma->vm_end - vma->vm_start) >> 10,
66918+#endif
66919 mss.resident >> 10,
66920 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
66921 mss.shared_clean >> 10,
66922@@ -1486,6 +1536,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
66923 char buffer[64];
66924 int nid;
66925
66926+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66927+ if (current->exec_id != m->exec_id) {
66928+ gr_log_badprocpid("numa_maps");
66929+ return 0;
66930+ }
66931+#endif
66932+
66933 if (!mm)
66934 return 0;
66935
66936@@ -1507,11 +1564,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
66937 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
66938 }
66939
66940+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66941+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
66942+#else
66943 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
66944+#endif
66945
66946 if (file) {
66947 seq_puts(m, " file=");
66948- seq_path(m, &file->f_path, "\n\t= ");
66949+ seq_path(m, &file->f_path, "\n\t\\= ");
66950 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
66951 seq_puts(m, " heap");
66952 } else {
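
Under GRKERNSEC_PROC_MEMMAP the task_mmu.c hunks zero out start/end addresses and offsets for any mm other than the reader's own whenever PAX_RAND_FLAGS() reports a randomized layout, and the exec_id stamp catches descriptors smuggled across an execve(). A userspace sketch to compare the two views (run once bare for /proc/self/maps, once with another task's pid):

    #include <stdio.h>

    int main(int argc, char **argv)
    {
        char path[64], line[512];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%s/maps",
                 argc > 1 ? argv[1] : "self");
        f = fopen(path, "r");
        if (!f) {
            perror(path);
            return 1;
        }
        /* own maps: real addresses; foreign randomized mm: zeroed fields */
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }
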
66953diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
66954index 599ec2e..f1413ae 100644
66955--- a/fs/proc/task_nommu.c
66956+++ b/fs/proc/task_nommu.c
66957@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66958 else
66959 bytes += kobjsize(mm);
66960
66961- if (current->fs && current->fs->users > 1)
66962+ if (current->fs && atomic_read(&current->fs->users) > 1)
66963 sbytes += kobjsize(current->fs);
66964 else
66965 bytes += kobjsize(current->fs);
66966@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
66967
66968 if (file) {
66969 seq_pad(m, ' ');
66970- seq_path(m, &file->f_path, "");
66971+ seq_path(m, &file->f_path, "\n\\");
66972 } else if (mm) {
66973 pid_t tid = pid_of_stack(priv, vma, is_pid);
66974
66975diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
66976index a90d6d35..d08047c 100644
66977--- a/fs/proc/vmcore.c
66978+++ b/fs/proc/vmcore.c
66979@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
66980 nr_bytes = count;
66981
66982 /* If pfn is not ram, return zeros for sparse dump files */
66983- if (pfn_is_ram(pfn) == 0)
66984- memset(buf, 0, nr_bytes);
66985- else {
66986+ if (pfn_is_ram(pfn) == 0) {
66987+ if (userbuf) {
66988+ if (clear_user((char __force_user *)buf, nr_bytes))
66989+ return -EFAULT;
66990+ } else
66991+ memset(buf, 0, nr_bytes);
66992+ } else {
66993 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
66994 offset, userbuf);
66995 if (tmp < 0)
66996@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
66997 static int copy_to(void *target, void *src, size_t size, int userbuf)
66998 {
66999 if (userbuf) {
67000- if (copy_to_user((char __user *) target, src, size))
67001+ if (copy_to_user((char __force_user *) target, src, size))
67002 return -EFAULT;
67003 } else {
67004 memcpy(target, src, size);
67005@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67006 if (*fpos < m->offset + m->size) {
67007 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
67008 start = m->paddr + *fpos - m->offset;
67009- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
67010+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
67011 if (tmp < 0)
67012 return tmp;
67013 buflen -= tsz;
67014@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67015 static ssize_t read_vmcore(struct file *file, char __user *buffer,
67016 size_t buflen, loff_t *fpos)
67017 {
67018- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
67019+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
67020 }
67021
67022 /*
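
The vmcore.c fix above matters because read_from_oldmem() can be handed either a kernel buffer or, via the __force annotations, a user pointer: zero-filling a user destination has to go through clear_user() so an unmapped page surfaces as -EFAULT rather than a memset() oops. Reduced to a hypothetical helper:

    #include <linux/string.h>
    #include <linux/uaccess.h>

    static int zero_fill_sketch(void *kbuf, void __user *ubuf,
                                size_t n, int userbuf)
    {
        if (userbuf) {
            if (clear_user(ubuf, n))    /* may fault: report it */
                return -EFAULT;
        } else {
            memset(kbuf, 0, n);         /* kernel memory: cannot fault */
        }
        return 0;
    }
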
67023diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
67024index d3fb2b6..43a8140 100644
67025--- a/fs/qnx6/qnx6.h
67026+++ b/fs/qnx6/qnx6.h
67027@@ -74,7 +74,7 @@ enum {
67028 BYTESEX_BE,
67029 };
67030
67031-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67032+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67033 {
67034 if (sbi->s_bytesex == BYTESEX_LE)
67035 return le64_to_cpu((__force __le64)n);
67036@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
67037 return (__force __fs64)cpu_to_be64(n);
67038 }
67039
67040-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67041+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67042 {
67043 if (sbi->s_bytesex == BYTESEX_LE)
67044 return le32_to_cpu((__force __le32)n);
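
__intentional_overflow(-1) above is an annotation consumed by the PaX size_overflow gcc plugin shipped with this patch: it exempts functions whose arithmetic may legitimately wrap, here endianness conversion of untrusted on-disk values, from overflow instrumentation. Placement mirrors the hunk (hypothetical function name):

    /* sketch: a wrap-tolerant conversion helper opted out of the
     * size_overflow plugin's checks */
    static inline __u32 __intentional_overflow(-1) disk32_to_cpu(__le32 v)
    {
        return le32_to_cpu(v);
    }
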
67045diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
67046index bb2869f..d34ada8 100644
67047--- a/fs/quota/netlink.c
67048+++ b/fs/quota/netlink.c
67049@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
67050 void quota_send_warning(struct kqid qid, dev_t dev,
67051 const char warntype)
67052 {
67053- static atomic_t seq;
67054+ static atomic_unchecked_t seq;
67055 struct sk_buff *skb;
67056 void *msg_head;
67057 int ret;
67058@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
67059 "VFS: Not enough memory to send quota warning.\n");
67060 return;
67061 }
67062- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
67063+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
67064 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
67065 if (!msg_head) {
67066 printk(KERN_ERR
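
atomic_unchecked_t and atomic_add_return_unchecked() belong to this patch's PaX REFCOUNT infrastructure: checked atomics trap on overflow to catch reference-count leaks, so counters that are allowed to wrap, like this netlink sequence number, must be switched to the _unchecked variants. The convention in miniature:

    #include <linux/atomic.h>
    #include <linux/types.h>

    /* a statistics/sequence counter where wrap-around is harmless,
     * using the _unchecked types this patch introduces */
    static atomic_unchecked_t example_seq;

    static u32 next_seq(void)
    {
        /* wrapping is expected here, hence no overflow trap */
        return (u32)atomic_add_return_unchecked(1, &example_seq);
    }
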
67067diff --git a/fs/read_write.c b/fs/read_write.c
67068index c0805c93..d39f2eb 100644
67069--- a/fs/read_write.c
67070+++ b/fs/read_write.c
67071@@ -507,7 +507,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
67072
67073 old_fs = get_fs();
67074 set_fs(get_ds());
67075- p = (__force const char __user *)buf;
67076+ p = (const char __force_user *)buf;
67077 if (count > MAX_RW_COUNT)
67078 count = MAX_RW_COUNT;
67079 if (file->f_op->write)
67080diff --git a/fs/readdir.c b/fs/readdir.c
67081index ced6791..936687b 100644
67082--- a/fs/readdir.c
67083+++ b/fs/readdir.c
67084@@ -18,6 +18,7 @@
67085 #include <linux/security.h>
67086 #include <linux/syscalls.h>
67087 #include <linux/unistd.h>
67088+#include <linux/namei.h>
67089
67090 #include <asm/uaccess.h>
67091
67092@@ -71,6 +72,7 @@ struct old_linux_dirent {
67093 struct readdir_callback {
67094 struct dir_context ctx;
67095 struct old_linux_dirent __user * dirent;
67096+ struct file * file;
67097 int result;
67098 };
67099
67100@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
67101 buf->result = -EOVERFLOW;
67102 return -EOVERFLOW;
67103 }
67104+
67105+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67106+ return 0;
67107+
67108 buf->result++;
67109 dirent = buf->dirent;
67110 if (!access_ok(VERIFY_WRITE, dirent,
67111@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
67112 if (!f.file)
67113 return -EBADF;
67114
67115+ buf.file = f.file;
67116 error = iterate_dir(f.file, &buf.ctx);
67117 if (buf.result)
67118 error = buf.result;
67119@@ -145,6 +152,7 @@ struct getdents_callback {
67120 struct dir_context ctx;
67121 struct linux_dirent __user * current_dir;
67122 struct linux_dirent __user * previous;
67123+ struct file * file;
67124 int count;
67125 int error;
67126 };
67127@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
67128 buf->error = -EOVERFLOW;
67129 return -EOVERFLOW;
67130 }
67131+
67132+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67133+ return 0;
67134+
67135 dirent = buf->previous;
67136 if (dirent) {
67137 if (__put_user(offset, &dirent->d_off))
67138@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
67139 if (!f.file)
67140 return -EBADF;
67141
67142+ buf.file = f.file;
67143 error = iterate_dir(f.file, &buf.ctx);
67144 if (error >= 0)
67145 error = buf.error;
67146@@ -230,6 +243,7 @@ struct getdents_callback64 {
67147 struct dir_context ctx;
67148 struct linux_dirent64 __user * current_dir;
67149 struct linux_dirent64 __user * previous;
67150+ struct file *file;
67151 int count;
67152 int error;
67153 };
67154@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
67155 buf->error = -EINVAL; /* only used if we fail.. */
67156 if (reclen > buf->count)
67157 return -EINVAL;
67158+
67159+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67160+ return 0;
67161+
67162 dirent = buf->previous;
67163 if (dirent) {
67164 if (__put_user(offset, &dirent->d_off))
67165@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
67166 if (!f.file)
67167 return -EBADF;
67168
67169+ buf.file = f.file;
67170 error = iterate_dir(f.file, &buf.ctx);
67171 if (error >= 0)
67172 error = buf.error;
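
All three readdir flavors get the same treatment above: the struct file is stashed in the callback buffer before iterate_dir() so gr_acl_handle_filldir() can silently drop entries the policy hides (returning 0 keeps iteration going without emitting a dirent). The shape of the pattern, as a sketch:

    #include <linux/fs.h>
    #include <linux/kernel.h>

    struct filtered_readdir {
        struct dir_context ctx;
        struct file *file;   /* stashed by the caller before iterate_dir() */
    };

    static int filtered_actor(struct dir_context *ctx, const char *name,
                              int namlen, loff_t offset, u64 ino,
                              unsigned int d_type)
    {
        struct filtered_readdir *buf =
            container_of(ctx, struct filtered_readdir, ctx);

        if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
            return 0;   /* hidden entry: skip it, keep iterating */

        /* ...normal dirent emission would follow here... */
        return 0;
    }
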
67173diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
67174index 9c02d96..6562c10 100644
67175--- a/fs/reiserfs/do_balan.c
67176+++ b/fs/reiserfs/do_balan.c
67177@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
67178 return;
67179 }
67180
67181- atomic_inc(&fs_generation(tb->tb_sb));
67182+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
67183 do_balance_starts(tb);
67184
67185 /*
67186diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
67187index aca73dd..e3c558d 100644
67188--- a/fs/reiserfs/item_ops.c
67189+++ b/fs/reiserfs/item_ops.c
67190@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
67191 }
67192
67193 static struct item_operations errcatch_ops = {
67194- errcatch_bytes_number,
67195- errcatch_decrement_key,
67196- errcatch_is_left_mergeable,
67197- errcatch_print_item,
67198- errcatch_check_item,
67199+ .bytes_number = errcatch_bytes_number,
67200+ .decrement_key = errcatch_decrement_key,
67201+ .is_left_mergeable = errcatch_is_left_mergeable,
67202+ .print_item = errcatch_print_item,
67203+ .check_item = errcatch_check_item,
67204
67205- errcatch_create_vi,
67206- errcatch_check_left,
67207- errcatch_check_right,
67208- errcatch_part_size,
67209- errcatch_unit_num,
67210- errcatch_print_vi
67211+ .create_vi = errcatch_create_vi,
67212+ .check_left = errcatch_check_left,
67213+ .check_right = errcatch_check_right,
67214+ .part_size = errcatch_part_size,
67215+ .unit_num = errcatch_unit_num,
67216+ .print_vi = errcatch_print_vi
67217 };
67218
67219 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
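
The item_ops.c conversion to C99 designated initializers is required by the constify/randstruct plugin work elsewhere in this patch, and it is also safer on its own: positional initializers silently break if fields are ever reordered. Minimal standalone illustration:

    struct ops_example {
        int (*start)(void);
        int (*stop)(void);
    };

    static int do_start(void) { return 0; }
    static int do_stop(void)  { return 1; }

    /* survives any reordering of struct ops_example's members */
    static const struct ops_example example_ops = {
        .start = do_start,
        .stop  = do_stop,
    };
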
67220diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
67221index 621b9f3..af527fd 100644
67222--- a/fs/reiserfs/procfs.c
67223+++ b/fs/reiserfs/procfs.c
67224@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
67225 "SMALL_TAILS " : "NO_TAILS ",
67226 replay_only(sb) ? "REPLAY_ONLY " : "",
67227 convert_reiserfs(sb) ? "CONV " : "",
67228- atomic_read(&r->s_generation_counter),
67229+ atomic_read_unchecked(&r->s_generation_counter),
67230 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
67231 SF(s_do_balance), SF(s_unneeded_left_neighbor),
67232 SF(s_good_search_by_key_reada), SF(s_bmaps),
67233diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
67234index bb79cdd..fcf49ef 100644
67235--- a/fs/reiserfs/reiserfs.h
67236+++ b/fs/reiserfs/reiserfs.h
67237@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
67238 /* Comment? -Hans */
67239 wait_queue_head_t s_wait;
67240 /* increased by one every time the tree gets re-balanced */
67241- atomic_t s_generation_counter;
67242+ atomic_unchecked_t s_generation_counter;
67243
67244 /* File system properties. Currently holds on-disk FS format */
67245 unsigned long s_properties;
67246@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67247 #define REISERFS_USER_MEM 1 /* user memory mode */
67248
67249 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67250-#define get_generation(s) atomic_read (&fs_generation(s))
67251+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67252 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67253 #define __fs_changed(gen,s) (gen != get_generation (s))
67254 #define fs_changed(gen,s) \
67255diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
67256index 71fbbe3..eff29ba 100644
67257--- a/fs/reiserfs/super.c
67258+++ b/fs/reiserfs/super.c
67259@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
67260 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
67261 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
67262 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
67263+#ifdef CONFIG_REISERFS_FS_XATTR
67264+ /* turn on user xattrs by default */
67265+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
67266+#endif
67267 /* no preallocation minimum, be smart in reiserfs_file_write instead */
67268 sbi->s_alloc_options.preallocmin = 0;
67269 /* Preallocate by 16 blocks (17-1) at once */
67270diff --git a/fs/select.c b/fs/select.c
67271index 467bb1c..cf9d65a 100644
67272--- a/fs/select.c
67273+++ b/fs/select.c
67274@@ -20,6 +20,7 @@
67275 #include <linux/export.h>
67276 #include <linux/slab.h>
67277 #include <linux/poll.h>
67278+#include <linux/security.h>
67279 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
67280 #include <linux/file.h>
67281 #include <linux/fdtable.h>
67282@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
67283 struct poll_list *walk = head;
67284 unsigned long todo = nfds;
67285
67286+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
67287 if (nfds > rlimit(RLIMIT_NOFILE))
67288 return -EINVAL;
67289
67290diff --git a/fs/seq_file.c b/fs/seq_file.c
67291index dbf3a59..daf023f 100644
67292--- a/fs/seq_file.c
67293+++ b/fs/seq_file.c
67294@@ -12,6 +12,8 @@
67295 #include <linux/slab.h>
67296 #include <linux/cred.h>
67297 #include <linux/mm.h>
67298+#include <linux/sched.h>
67299+#include <linux/grsecurity.h>
67300
67301 #include <asm/uaccess.h>
67302 #include <asm/page.h>
67303@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
67304
67305 static void *seq_buf_alloc(unsigned long size)
67306 {
67307- void *buf;
67308-
67309- /*
67310- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
67311- * it's better to fall back to vmalloc() than to kill things.
67312- */
67313- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
67314- if (!buf && size > PAGE_SIZE)
67315- buf = vmalloc(size);
67316- return buf;
67317+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
67318 }
67319
67320 /**
67321@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
67322 #ifdef CONFIG_USER_NS
67323 p->user_ns = file->f_cred->user_ns;
67324 #endif
67325+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67326+ p->exec_id = current->exec_id;
67327+#endif
67328
67329 /*
67330 * Wrappers around seq_open(e.g. swaps_open) need to be
67331@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
67332 }
67333 EXPORT_SYMBOL(seq_open);
67334
67335+
67336+int seq_open_restrict(struct file *file, const struct seq_operations *op)
67337+{
67338+ if (gr_proc_is_restricted())
67339+ return -EACCES;
67340+
67341+ return seq_open(file, op);
67342+}
67343+EXPORT_SYMBOL(seq_open_restrict);
67344+
67345 static int traverse(struct seq_file *m, loff_t offset)
67346 {
67347 loff_t pos = 0, index;
67348@@ -158,7 +164,7 @@ Eoverflow:
67349 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
67350 {
67351 struct seq_file *m = file->private_data;
67352- size_t copied = 0;
67353+ ssize_t copied = 0;
67354 loff_t pos;
67355 size_t n;
67356 void *p;
67357@@ -589,7 +595,7 @@ static void single_stop(struct seq_file *p, void *v)
67358 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
67359 void *data)
67360 {
67361- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
67362+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
67363 int res = -ENOMEM;
67364
67365 if (op) {
67366@@ -625,6 +631,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
67367 }
67368 EXPORT_SYMBOL(single_open_size);
67369
67370+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
67371+ void *data)
67372+{
67373+ if (gr_proc_is_restricted())
67374+ return -EACCES;
67375+
67376+ return single_open(file, show, data);
67377+}
67378+EXPORT_SYMBOL(single_open_restrict);
67379+
67380+
67381 int single_release(struct inode *inode, struct file *file)
67382 {
67383 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
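
Besides switching seq_buf_alloc() to a plain GFP_USERCOPY kmalloc, seq_file.c stamps each seq_file with the opener's exec_id under GRKERNSEC_PROC_MEMMAP; the per-show checks added in task_mmu.c compare against it so a descriptor opened before an execve() cannot read the new image's data. The guard as used by those callers, sketched (exec_id and gr_log_badprocpid() come from this patch):

    #include <linux/sched.h>
    #include <linux/seq_file.h>

    static int guarded_show_sketch(struct seq_file *m, void *v)
    {
    #ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
        if (current->exec_id != m->exec_id) {
            gr_log_badprocpid("example");
            return 0;   /* pretend the file is empty */
        }
    #endif
        /* ...normal seq_printf() output... */
        return 0;
    }
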
67384diff --git a/fs/splice.c b/fs/splice.c
67385index 75c6058..770d40c 100644
67386--- a/fs/splice.c
67387+++ b/fs/splice.c
67388@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67389 pipe_lock(pipe);
67390
67391 for (;;) {
67392- if (!pipe->readers) {
67393+ if (!atomic_read(&pipe->readers)) {
67394 send_sig(SIGPIPE, current, 0);
67395 if (!ret)
67396 ret = -EPIPE;
67397@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67398 page_nr++;
67399 ret += buf->len;
67400
67401- if (pipe->files)
67402+ if (atomic_read(&pipe->files))
67403 do_wakeup = 1;
67404
67405 if (!--spd->nr_pages)
67406@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67407 do_wakeup = 0;
67408 }
67409
67410- pipe->waiting_writers++;
67411+ atomic_inc(&pipe->waiting_writers);
67412 pipe_wait(pipe);
67413- pipe->waiting_writers--;
67414+ atomic_dec(&pipe->waiting_writers);
67415 }
67416
67417 pipe_unlock(pipe);
67418@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
67419 old_fs = get_fs();
67420 set_fs(get_ds());
67421 /* The cast to a user pointer is valid due to the set_fs() */
67422- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
67423+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
67424 set_fs(old_fs);
67425
67426 return res;
67427@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
67428 old_fs = get_fs();
67429 set_fs(get_ds());
67430 /* The cast to a user pointer is valid due to the set_fs() */
67431- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
67432+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
67433 set_fs(old_fs);
67434
67435 return res;
67436@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
67437 goto err;
67438
67439 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
67440- vec[i].iov_base = (void __user *) page_address(page);
67441+ vec[i].iov_base = (void __force_user *) page_address(page);
67442 vec[i].iov_len = this_len;
67443 spd.pages[i] = page;
67444 spd.nr_pages++;
67445@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67446 ops->release(pipe, buf);
67447 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67448 pipe->nrbufs--;
67449- if (pipe->files)
67450+ if (atomic_read(&pipe->files))
67451 sd->need_wakeup = true;
67452 }
67453
67454@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67455 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
67456 {
67457 while (!pipe->nrbufs) {
67458- if (!pipe->writers)
67459+ if (!atomic_read(&pipe->writers))
67460 return 0;
67461
67462- if (!pipe->waiting_writers && sd->num_spliced)
67463+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
67464 return 0;
67465
67466 if (sd->flags & SPLICE_F_NONBLOCK)
67467@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
67468 ops->release(pipe, buf);
67469 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67470 pipe->nrbufs--;
67471- if (pipe->files)
67472+ if (atomic_read(&pipe->files))
67473 sd.need_wakeup = true;
67474 } else {
67475 buf->offset += ret;
67476@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
67477 * out of the pipe right after the splice_to_pipe(). So set
67478 * PIPE_READERS appropriately.
67479 */
67480- pipe->readers = 1;
67481+ atomic_set(&pipe->readers, 1);
67482
67483 current->splice_pipe = pipe;
67484 }
67485@@ -1497,6 +1497,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
67486
67487 partial[buffers].offset = off;
67488 partial[buffers].len = plen;
67489+ partial[buffers].private = 0;
67490
67491 off = 0;
67492 len -= plen;
67493@@ -1733,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67494 ret = -ERESTARTSYS;
67495 break;
67496 }
67497- if (!pipe->writers)
67498+ if (!atomic_read(&pipe->writers))
67499 break;
67500- if (!pipe->waiting_writers) {
67501+ if (!atomic_read(&pipe->waiting_writers)) {
67502 if (flags & SPLICE_F_NONBLOCK) {
67503 ret = -EAGAIN;
67504 break;
67505@@ -1767,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67506 pipe_lock(pipe);
67507
67508 while (pipe->nrbufs >= pipe->buffers) {
67509- if (!pipe->readers) {
67510+ if (!atomic_read(&pipe->readers)) {
67511 send_sig(SIGPIPE, current, 0);
67512 ret = -EPIPE;
67513 break;
67514@@ -1780,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67515 ret = -ERESTARTSYS;
67516 break;
67517 }
67518- pipe->waiting_writers++;
67519+ atomic_inc(&pipe->waiting_writers);
67520 pipe_wait(pipe);
67521- pipe->waiting_writers--;
67522+ atomic_dec(&pipe->waiting_writers);
67523 }
67524
67525 pipe_unlock(pipe);
67526@@ -1818,14 +1819,14 @@ retry:
67527 pipe_double_lock(ipipe, opipe);
67528
67529 do {
67530- if (!opipe->readers) {
67531+ if (!atomic_read(&opipe->readers)) {
67532 send_sig(SIGPIPE, current, 0);
67533 if (!ret)
67534 ret = -EPIPE;
67535 break;
67536 }
67537
67538- if (!ipipe->nrbufs && !ipipe->writers)
67539+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
67540 break;
67541
67542 /*
67543@@ -1922,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67544 pipe_double_lock(ipipe, opipe);
67545
67546 do {
67547- if (!opipe->readers) {
67548+ if (!atomic_read(&opipe->readers)) {
67549 send_sig(SIGPIPE, current, 0);
67550 if (!ret)
67551 ret = -EPIPE;
67552@@ -1967,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67553 * return EAGAIN if we have the potential of some data in the
67554 * future, otherwise just return 0
67555 */
67556- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
67557+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
67558 ret = -EAGAIN;
67559
67560 pipe_unlock(ipipe);
67561diff --git a/fs/stat.c b/fs/stat.c
67562index ae0c3ce..9ee641c 100644
67563--- a/fs/stat.c
67564+++ b/fs/stat.c
67565@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
67566 stat->gid = inode->i_gid;
67567 stat->rdev = inode->i_rdev;
67568 stat->size = i_size_read(inode);
67569- stat->atime = inode->i_atime;
67570- stat->mtime = inode->i_mtime;
67571+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67572+ stat->atime = inode->i_ctime;
67573+ stat->mtime = inode->i_ctime;
67574+ } else {
67575+ stat->atime = inode->i_atime;
67576+ stat->mtime = inode->i_mtime;
67577+ }
67578 stat->ctime = inode->i_ctime;
67579 stat->blksize = (1 << inode->i_blkbits);
67580 stat->blocks = inode->i_blocks;
67581@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
67582 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
67583 {
67584 struct inode *inode = path->dentry->d_inode;
67585+ int retval;
67586
67587- if (inode->i_op->getattr)
67588- return inode->i_op->getattr(path->mnt, path->dentry, stat);
67589+ if (inode->i_op->getattr) {
67590+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
67591+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67592+ stat->atime = stat->ctime;
67593+ stat->mtime = stat->ctime;
67594+ }
67595+ return retval;
67596+ }
67597
67598 generic_fillattr(inode, stat);
67599 return 0;
67600diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
67601index 0b45ff4..847de5b 100644
67602--- a/fs/sysfs/dir.c
67603+++ b/fs/sysfs/dir.c
67604@@ -41,9 +41,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
67605 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67606 {
67607 struct kernfs_node *parent, *kn;
67608+ const char *name;
67609+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
67610+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67611+ const char *parent_name;
67612+#endif
67613
67614 BUG_ON(!kobj);
67615
67616+ name = kobject_name(kobj);
67617+
67618 if (kobj->parent)
67619 parent = kobj->parent->sd;
67620 else
67621@@ -52,11 +59,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67622 if (!parent)
67623 return -ENOENT;
67624
67625- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
67626- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
67627+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67628+ parent_name = parent->name;
67629+ mode = S_IRWXU;
67630+
67631+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
67632+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
67633+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
67634+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
67635+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
67636+#endif
67637+
67638+ kn = kernfs_create_dir_ns(parent, name,
67639+ mode, kobj, ns);
67640 if (IS_ERR(kn)) {
67641 if (PTR_ERR(kn) == -EEXIST)
67642- sysfs_warn_dup(parent, kobject_name(kobj));
67643+ sysfs_warn_dup(parent, name);
67644 return PTR_ERR(kn);
67645 }
67646
67647diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
67648index 69d4889..a810bd4 100644
67649--- a/fs/sysv/sysv.h
67650+++ b/fs/sysv/sysv.h
67651@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
67652 #endif
67653 }
67654
67655-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67656+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67657 {
67658 if (sbi->s_bytesex == BYTESEX_PDP)
67659 return PDP_swab((__force __u32)n);
67660diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
67661index fb08b0c..65fcc7e 100644
67662--- a/fs/ubifs/io.c
67663+++ b/fs/ubifs/io.c
67664@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
67665 return err;
67666 }
67667
67668-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67669+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67670 {
67671 int err;
67672
67673diff --git a/fs/udf/misc.c b/fs/udf/misc.c
67674index c175b4d..8f36a16 100644
67675--- a/fs/udf/misc.c
67676+++ b/fs/udf/misc.c
67677@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
67678
67679 u8 udf_tag_checksum(const struct tag *t)
67680 {
67681- u8 *data = (u8 *)t;
67682+ const u8 *data = (const u8 *)t;
67683 u8 checksum = 0;
67684 int i;
67685 for (i = 0; i < sizeof(struct tag); ++i)
67686diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
67687index 8d974c4..b82f6ec 100644
67688--- a/fs/ufs/swab.h
67689+++ b/fs/ufs/swab.h
67690@@ -22,7 +22,7 @@ enum {
67691 BYTESEX_BE
67692 };
67693
67694-static inline u64
67695+static inline u64 __intentional_overflow(-1)
67696 fs64_to_cpu(struct super_block *sbp, __fs64 n)
67697 {
67698 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
67699@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
67700 return (__force __fs64)cpu_to_be64(n);
67701 }
67702
67703-static inline u32
67704+static inline u32 __intentional_overflow(-1)
67705 fs32_to_cpu(struct super_block *sbp, __fs32 n)
67706 {
67707 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
67708diff --git a/fs/utimes.c b/fs/utimes.c
67709index aa138d6..5f3a811 100644
67710--- a/fs/utimes.c
67711+++ b/fs/utimes.c
67712@@ -1,6 +1,7 @@
67713 #include <linux/compiler.h>
67714 #include <linux/file.h>
67715 #include <linux/fs.h>
67716+#include <linux/security.h>
67717 #include <linux/linkage.h>
67718 #include <linux/mount.h>
67719 #include <linux/namei.h>
67720@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
67721 }
67722 }
67723 retry_deleg:
67724+
67725+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
67726+ error = -EACCES;
67727+ goto mnt_drop_write_and_out;
67728+ }
67729+
67730 mutex_lock(&inode->i_mutex);
67731 error = notify_change(path->dentry, &newattrs, &delegated_inode);
67732 mutex_unlock(&inode->i_mutex);
67733diff --git a/fs/xattr.c b/fs/xattr.c
67734index 4ef6985..a6cd6567 100644
67735--- a/fs/xattr.c
67736+++ b/fs/xattr.c
67737@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
67738 return rc;
67739 }
67740
67741+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
67742+ssize_t
67743+pax_getxattr(struct dentry *dentry, void *value, size_t size)
67744+{
67745+ struct inode *inode = dentry->d_inode;
67746+ ssize_t error;
67747+
67748+ error = inode_permission(inode, MAY_EXEC);
67749+ if (error)
67750+ return error;
67751+
67752+ if (inode->i_op->getxattr)
67753+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
67754+ else
67755+ error = -EOPNOTSUPP;
67756+
67757+ return error;
67758+}
67759+EXPORT_SYMBOL(pax_getxattr);
67760+#endif
67761+
67762 ssize_t
67763 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
67764 {
67765@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
67766 * Extended attribute SET operations
67767 */
67768 static long
67769-setxattr(struct dentry *d, const char __user *name, const void __user *value,
67770+setxattr(struct path *path, const char __user *name, const void __user *value,
67771 size_t size, int flags)
67772 {
67773 int error;
67774@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
67775 posix_acl_fix_xattr_from_user(kvalue, size);
67776 }
67777
67778- error = vfs_setxattr(d, kname, kvalue, size, flags);
67779+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
67780+ error = -EACCES;
67781+ goto out;
67782+ }
67783+
67784+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
67785 out:
67786 if (vvalue)
67787 vfree(vvalue);
67788@@ -376,7 +402,7 @@ retry:
67789 return error;
67790 error = mnt_want_write(path.mnt);
67791 if (!error) {
67792- error = setxattr(path.dentry, name, value, size, flags);
67793+ error = setxattr(&path, name, value, size, flags);
67794 mnt_drop_write(path.mnt);
67795 }
67796 path_put(&path);
67797@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
67798 audit_file(f.file);
67799 error = mnt_want_write_file(f.file);
67800 if (!error) {
67801- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
67802+ error = setxattr(&f.file->f_path, name, value, size, flags);
67803 mnt_drop_write_file(f.file);
67804 }
67805 fdput(f);
67806@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
67807 * Extended attribute REMOVE operations
67808 */
67809 static long
67810-removexattr(struct dentry *d, const char __user *name)
67811+removexattr(struct path *path, const char __user *name)
67812 {
67813 int error;
67814 char kname[XATTR_NAME_MAX + 1];
67815@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
67816 if (error < 0)
67817 return error;
67818
67819- return vfs_removexattr(d, kname);
67820+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
67821+ return -EACCES;
67822+
67823+ return vfs_removexattr(path->dentry, kname);
67824 }
67825
67826 static int path_removexattr(const char __user *pathname,
67827@@ -623,7 +652,7 @@ retry:
67828 return error;
67829 error = mnt_want_write(path.mnt);
67830 if (!error) {
67831- error = removexattr(path.dentry, name);
67832+ error = removexattr(&path, name);
67833 mnt_drop_write(path.mnt);
67834 }
67835 path_put(&path);
67836@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
67837 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
67838 {
67839 struct fd f = fdget(fd);
67840+ struct path *path;
67841 int error = -EBADF;
67842
67843 if (!f.file)
67844 return error;
67845+ path = &f.file->f_path;
67846 audit_file(f.file);
67847 error = mnt_want_write_file(f.file);
67848 if (!error) {
67849- error = removexattr(f.file->f_path.dentry, name);
67850+ error = removexattr(path, name);
67851 mnt_drop_write_file(f.file);
67852 }
67853 fdput(f);
67854diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
67855index 4e20fe7..6d1a55a 100644
67856--- a/fs/xfs/libxfs/xfs_bmap.c
67857+++ b/fs/xfs/libxfs/xfs_bmap.c
67858@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
67859
67860 #else
67861 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
67862-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
67863+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
67864 #endif /* DEBUG */
67865
67866 /*
67867diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
67868index 098cd78..724d3f8 100644
67869--- a/fs/xfs/xfs_dir2_readdir.c
67870+++ b/fs/xfs/xfs_dir2_readdir.c
67871@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
67872 ino = dp->d_ops->sf_get_ino(sfp, sfep);
67873 filetype = dp->d_ops->sf_get_ftype(sfep);
67874 ctx->pos = off & 0x7fffffff;
67875- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
67876+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
67877+ char name[sfep->namelen];
67878+ memcpy(name, sfep->name, sfep->namelen);
67879+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
67880+ return 0;
67881+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
67882 xfs_dir3_get_dtype(dp->i_mount, filetype)))
67883 return 0;
67884 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
67885diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
67886index a183198..6b52f52 100644
67887--- a/fs/xfs/xfs_ioctl.c
67888+++ b/fs/xfs/xfs_ioctl.c
67889@@ -119,7 +119,7 @@ xfs_find_handle(
67890 }
67891
67892 error = -EFAULT;
67893- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
67894+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
67895 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
67896 goto out_put;
67897
67898diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
67899index c31d2c2..6ec8f62 100644
67900--- a/fs/xfs/xfs_linux.h
67901+++ b/fs/xfs/xfs_linux.h
67902@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
67903 * of the compiler which do not like us using do_div in the middle
67904 * of large functions.
67905 */
67906-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
67907+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
67908 {
67909 __u32 mod;
67910
67911@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
67912 return 0;
67913 }
67914 #else
67915-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
67916+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
67917 {
67918 __u32 mod;
67919
67920diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
67921new file mode 100644
67922index 0000000..31f8fe4
67923--- /dev/null
67924+++ b/grsecurity/Kconfig
67925@@ -0,0 +1,1182 @@
67926+#
67927+# grsecurity configuration
67928+#
67929+menu "Memory Protections"
67930+depends on GRKERNSEC
67931+
67932+config GRKERNSEC_KMEM
67933+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
67934+ default y if GRKERNSEC_CONFIG_AUTO
67935+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
67936+ help
67937+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
67938+ be written to or read from to modify or leak the contents of the running
67939+ kernel. /dev/port will also not be allowed to be opened, writing to
67940+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
67941+ If you have module support disabled, enabling this will close up several
67942+ ways that are currently used to insert malicious code into the running
67943+ kernel.
67944+
67945+ Even with this feature enabled, we still highly recommend that
67946+ you use the RBAC system, as it is still possible for an attacker to
67947+ modify the running kernel through other more obscure methods.
67948+
67949+ It is highly recommended that you say Y here if you meet all the
67950+ conditions above.
67951+
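As a rough userspace illustration (editorial, not part of the patch) of the effect described above: with the option enabled, the probe below is expected to fail on all three device nodes, even for root; the exact errno is kernel-dependent.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        const char *devs[] = { "/dev/mem", "/dev/kmem", "/dev/port" };
        int i;

        for (i = 0; i < 3; i++) {
                int fd = open(devs[i], O_RDONLY);
                if (fd < 0)
                        printf("%-9s open failed: %s\n", devs[i], strerror(errno));
                else {
                        printf("%-9s opened read-only (fd %d)\n", devs[i], fd);
                        close(fd);
                }
        }
        return 0;
}
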
67952+config GRKERNSEC_VM86
67953+ bool "Restrict VM86 mode"
67954+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
67955+ depends on X86_32
67956+
67957+ help
67958+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
67959+ make use of a special execution mode on 32bit x86 processors called
67960+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
67961+ video cards and will still work with this option enabled. The purpose
67962+ of the option is to prevent exploitation of emulation errors in
67963+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
67964+ Nearly all users should be able to enable this option.
67965+
67966+config GRKERNSEC_IO
67967+ bool "Disable privileged I/O"
67968+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
67969+ depends on X86
67970+ select RTC_CLASS
67971+ select RTC_INTF_DEV
67972+ select RTC_DRV_CMOS
67973+
67974+ help
67975+ If you say Y here, all ioperm and iopl calls will return an error.
67976+ Ioperm and iopl can be used to modify the running kernel.
67977+ Unfortunately, some programs need this access to operate properly,
67978+ the most notable of which are XFree86 and hwclock. The hwclock problem
67979+ can be remedied by having RTC support in the kernel, so real-time
67980+ clock support is enabled whenever this option is enabled, to ensure
67981+ that hwclock operates correctly. If hwclock still does not work,
67982+ either update udev or symlink /dev/rtc to /dev/rtc0.
67983+
67984+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
67985+ you may not be able to boot into a graphical environment with this
67986+ option enabled. In this case, you should use the RBAC system instead.
67987+
67988+config GRKERNSEC_BPF_HARDEN
67989+ bool "Harden BPF interpreter"
67990+ default y if GRKERNSEC_CONFIG_AUTO
67991+ help
67992+ Unlike previous versions of grsecurity that hardened both the BPF
67993+ interpreted code against corruption at rest as well as the JIT code
67994+ against JIT-spray attacks and attacker-controlled immediate values
67995+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
67996+ and will ensure the interpreted code is read-only at rest. This feature
67997+ may be removed at a later time when eBPF stabilizes to entirely revert
67998+ back to the more secure pre-3.16 BPF interpreter/JIT.
67999+
68000+ If you're using KERNEXEC, it's recommended that you enable this option
68001+ to supplement the hardening of the kernel.
68002+
68003+config GRKERNSEC_PERF_HARDEN
68004+ bool "Disable unprivileged PERF_EVENTS usage by default"
68005+ default y if GRKERNSEC_CONFIG_AUTO
68006+ depends on PERF_EVENTS
68007+ help
68008+ If you say Y here, the range of acceptable values for the
68009+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow a
68010+ new value, 3, which becomes the default. When the sysctl is set to 3, no
68011+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
68012+
68013+ Though PERF_EVENTS can be used legitimately for performance monitoring
68014+ and low-level application profiling, it is forced on regardless of
68015+ configuration, has been at fault for several vulnerabilities, and
68016+ creates new opportunities for side channels and other information leaks.
68017+
68018+ This feature puts PERF_EVENTS into a secure default state and permits
68019+ the administrator to change out of it temporarily if unprivileged
68020+ application profiling is needed.
68021+
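A small userspace sketch (editorial, not from the patch) of what paranoid level 3 means for a caller, using the standard perf_event_open(2) pattern; without privileges the syscall is expected to fail once the sysctl is at 3.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
        struct perf_event_attr attr;
        long fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_HARDWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        /* Count our own instructions on any CPU. */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                printf("perf_event_open: %s\n", strerror(errno));
        else
                printf("perf_event_open succeeded (fd %ld)\n", fd);
        return 0;
}
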
68022+config GRKERNSEC_RAND_THREADSTACK
68023+ bool "Insert random gaps between thread stacks"
68024+ default y if GRKERNSEC_CONFIG_AUTO
68025+ depends on PAX_RANDMMAP && !PPC
68026+ help
68027+ If you say Y here, a random-sized gap will be enforced between allocated
68028+ thread stacks. Glibc's NPTL and other threading libraries that
68029+ pass MAP_STACK to the kernel for thread stack allocation are supported.
68030+ The implementation currently provides 8 bits of entropy for the gap.
68031+
68032+ Many distributions do not compile threaded remote services with the
68033+ -fstack-check argument to GCC, causing the variable-sized stack-based
68034+ allocator, alloca(), to not probe the stack on allocation. This
68035+ permits an unbounded alloca() to skip over any guard page and potentially
68036+ modify another thread's stack reliably. An enforced random gap
68037+ reduces the reliability of such an attack and increases the chance
68038+ that such a read/write to another thread's stack instead lands in
68039+ an unmapped area, causing a crash and triggering grsecurity's
68040+ anti-bruteforcing logic.
68041+
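For reference, this is the allocation pattern the option keys on: threading libraries request stacks with the MAP_STACK hint, as in this minimal sketch (illustrative, not from the patch).

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 8 * 1024 * 1024;   /* a typical default thread stack size */
        void *stack = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

        if (stack == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        /* With the option enabled, successive MAP_STACK mappings land with
         * random gaps between them rather than back to back. */
        printf("thread stack candidate at %p\n", stack);
        munmap(stack, len);
        return 0;
}
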
68042+config GRKERNSEC_PROC_MEMMAP
68043+ bool "Harden ASLR against information leaks and entropy reduction"
68044+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
68045+ depends on PAX_NOEXEC || PAX_ASLR
68046+ help
68047+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
68048+ give no information about the addresses of the task's mappings if
68049+ PaX features that rely on random addresses are enabled on the task.
68050+ In addition to sanitizing this information and disabling other
68051+ dangerous sources of information, this option denies reads of sensitive
68052+ /proc/<pid> entries where the file descriptor was opened in a different
68053+ task than the one performing the read. Such attempts are logged.
68054+ This option also limits argv/env strings for suid/sgid binaries
68055+ to 512KB to prevent a complete exhaustion of the stack entropy provided
68056+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
68057+ binaries to prevent alternative mmap layouts from being abused.
68058+
68059+ If you use PaX it is essential that you say Y here as it closes up
68060+ several holes that make full ASLR useless locally.
68061+
68062+
68063+config GRKERNSEC_KSTACKOVERFLOW
68064+ bool "Prevent kernel stack overflows"
68065+ default y if GRKERNSEC_CONFIG_AUTO
68066+ depends on !IA64 && 64BIT
68067+ help
68068+ If you say Y here, the kernel's process stacks will be allocated
68069+ with vmalloc instead of the kernel's default allocator. This
68070+ introduces guard pages that in combination with the alloca checking
68071+ of the STACKLEAK feature prevents all forms of kernel process stack
68072+ overflow abuse. Note that this is different from kernel stack
68073+ buffer overflows.
68074+
68075+config GRKERNSEC_BRUTE
68076+ bool "Deter exploit bruteforcing"
68077+ default y if GRKERNSEC_CONFIG_AUTO
68078+ help
68079+ If you say Y here, attempts to bruteforce exploits against forking
68080+ daemons such as apache or sshd, as well as against suid/sgid binaries
68081+ will be deterred. When a child of a forking daemon is killed by PaX
68082+ or crashes due to an illegal instruction or other suspicious signal,
68083+ the parent process will be delayed 30 seconds upon every subsequent
68084+ fork until the administrator is able to assess the situation and
68085+ restart the daemon.
68086+ In the suid/sgid case, the attempt is logged, the user has all their
68087+ existing instances of the suid/sgid binary terminated and will
68088+ be unable to execute any suid/sgid binaries for 15 minutes.
68089+
68090+ It is recommended that you also enable signal logging in the auditing
68091+ section so that logs are generated when a process triggers a suspicious
68092+ signal.
68093+ If the sysctl option is enabled, a sysctl option with name
68094+ "deter_bruteforce" is created.
68095+
68096+config GRKERNSEC_MODHARDEN
68097+ bool "Harden module auto-loading"
68098+ default y if GRKERNSEC_CONFIG_AUTO
68099+ depends on MODULES
68100+ help
68101+ If you say Y here, module auto-loading in response to use of some
68102+ feature implemented by an unloaded module will be restricted to
68103+ root users. Enabling this option helps defend against attacks
68104+ by unprivileged users who abuse the auto-loading behavior to
68105+ cause a vulnerable module to load that is then exploited.
68106+
68107+ If this option prevents a legitimate use of auto-loading for a
68108+ non-root user, the administrator can execute modprobe manually
68109+ with the exact name of the module mentioned in the alert log.
68110+ Alternatively, the administrator can add the module to the list
68111+ of modules loaded at boot by modifying init scripts.
68112+
68113+ Modification of init scripts will most likely be needed on
68114+ Ubuntu servers with encrypted home directory support enabled,
68115+ as the first non-root user logging in will cause the ecb(aes),
68116+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
68117+
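One common auto-load trigger, sketched for illustration (not part of the patch): a socket() call for a protocol family that isn't compiled in makes the kernel request the matching "net-pf-<N>" module. Under this option that auto-load is assumed to be denied for unprivileged callers, who then simply see EAFNOSUPPORT.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
        /* AF_IPX support is usually modular, so this exercises auto-loading. */
        int s = socket(AF_IPX, SOCK_DGRAM, 0);

        if (s < 0)
                printf("socket(AF_IPX): %s\n", strerror(errno));
        else {
                printf("socket created; the ipx module was present or auto-loaded\n");
                close(s);
        }
        return 0;
}
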
68118+config GRKERNSEC_HIDESYM
68119+ bool "Hide kernel symbols"
68120+ default y if GRKERNSEC_CONFIG_AUTO
68121+ select PAX_USERCOPY_SLABS
68122+ help
68123+ If you say Y here, getting information on loaded modules, and
68124+ displaying all kernel symbols through a syscall will be restricted
68125+ to users with CAP_SYS_MODULE. For software compatibility reasons,
68126+ /proc/kallsyms will be restricted to the root user. The RBAC
68127+ system can hide that entry even from root.
68128+
68129+ This option also prevents leaking of kernel addresses through
68130+ several /proc entries.
68131+
68132+ Note that this option is only effective provided the following
68133+ conditions are met:
68134+ 1) The kernel using grsecurity is not precompiled by some distribution
68135+ 2) You have also enabled GRKERNSEC_DMESG
68136+ 3) You are using the RBAC system and hiding other files such as your
68137+ kernel image and System.map. Alternatively, enabling this option
68138+ causes the permissions on /boot, /lib/modules, and the kernel
68139+ source directory to change at compile time to prevent
68140+ reading by non-root users.
68141+ If the above conditions are met, this option will aid in providing a
68142+ useful protection against local kernel exploitation of overflows
68143+ and arbitrary read/write vulnerabilities.
68144+
68145+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
68146+ in addition to this feature.
68147+
68148+config GRKERNSEC_RANDSTRUCT
68149+ bool "Randomize layout of sensitive kernel structures"
68150+ default y if GRKERNSEC_CONFIG_AUTO
68151+ select GRKERNSEC_HIDESYM
68152+ select MODVERSIONS if MODULES
68153+ help
68154+ If you say Y here, the layouts of a number of sensitive kernel
68155+ structures (task, fs, cred, etc) and all structures composed entirely
68156+ of function pointers (aka "ops" structs) will be randomized at compile-time.
68157+ This can introduce the requirement of an additional infoleak
68158+ vulnerability for exploits targeting these structure types.
68159+
68160+ Enabling this feature will introduce some performance impact, slightly
68161+ increase memory usage, and prevent the use of forensic tools like
68162+ Volatility against the system (unless the kernel source tree, which
68163+ holds the randomization seed, is left in place after kernel installation).
68164+
68165+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
68166+ It remains after a make clean to allow for external modules to be compiled
68167+ with the existing seed and will be removed by a make mrproper or
68168+ make distclean.
68169+
68170+ Note that the implementation requires gcc 4.6.4 or newer. You may need
68171+ to install the supporting headers explicitly in addition to the normal
68172+ gcc package.
68173+
68174+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
68175+ bool "Use cacheline-aware structure randomization"
68176+ depends on GRKERNSEC_RANDSTRUCT
68177+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
68178+ help
68179+ If you say Y here, the RANDSTRUCT randomization will make a best effort
68180+ at restricting randomization to cacheline-sized groups of elements. It
68181+ will further not randomize bitfields in structures. This reduces the
68182+ performance hit of RANDSTRUCT at the cost of weakened randomization.
68183+
68184+config GRKERNSEC_KERN_LOCKOUT
68185+ bool "Active kernel exploit response"
68186+ default y if GRKERNSEC_CONFIG_AUTO
68187+ depends on X86 || ARM || PPC || SPARC
68188+ help
68189+ If you say Y here, when a PaX alert is triggered due to suspicious
68190+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
68191+ or an OOPS occurs due to bad memory accesses, instead of just
68192+ terminating the offending process (and potentially allowing
68193+ a subsequent exploit from the same user), we will take one of two
68194+ actions:
68195+ If the user was root, we will panic the system
68196+ If the user was non-root, we will log the attempt, terminate
68197+ all processes owned by the user, then prevent them from creating
68198+ any new processes until the system is restarted
68199+ This deters repeated kernel exploitation/bruteforcing attempts
68200+ and is useful for later forensics.
68201+
68202+config GRKERNSEC_OLD_ARM_USERLAND
68203+ bool "Old ARM userland compatibility"
68204+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
68205+ help
68206+ If you say Y here, stubs of executable code to perform such operations
68207+ as "compare-exchange" will be placed at fixed locations in the ARM vector
68208+ table. This is unfortunately needed for old ARM userland meant to run
68209+ across a wide range of processors. Without this option enabled,
68210+ the get_tls and data memory barrier stubs will be emulated by the kernel,
68211+ which is enough for Linaro userlands or other userlands designed for v6
68212+ and newer ARM CPUs. It's recommended that you try without this option enabled
68213+ first, and only enable it if your userland does not boot (it will likely fail
68214+ at init time).
68215+
68216+endmenu
68217+menu "Role Based Access Control Options"
68218+depends on GRKERNSEC
68219+
68220+config GRKERNSEC_RBAC_DEBUG
68221+ bool
68222+
68223+config GRKERNSEC_NO_RBAC
68224+ bool "Disable RBAC system"
68225+ help
68226+ If you say Y here, the /dev/grsec device will be removed from the kernel,
68227+ preventing the RBAC system from being enabled. You should only say Y
68228+ here if you have no intention of using the RBAC system, so as to prevent
68229+ an attacker with root access from misusing the RBAC system to hide files
68230+ and processes when loadable module support and /dev/[k]mem have been
68231+ locked down.
68232+
68233+config GRKERNSEC_ACL_HIDEKERN
68234+ bool "Hide kernel processes"
68235+ help
68236+ If you say Y here, all kernel threads will be hidden to all
68237+ processes but those whose subject has the "view hidden processes"
68238+ flag.
68239+
68240+config GRKERNSEC_ACL_MAXTRIES
68241+ int "Maximum tries before password lockout"
68242+ default 3
68243+ help
68244+ This option enforces the maximum number of times a user can attempt
68245+ to authorize themselves with the grsecurity RBAC system before being
68246+ denied the ability to attempt authorization again for a specified time.
68247+ The lower the number, the harder it will be to brute-force a password.
68248+
68249+config GRKERNSEC_ACL_TIMEOUT
68250+ int "Time to wait after max password tries, in seconds"
68251+ default 30
68252+ help
68253+ This option specifies the time the user must wait after attempting to
68254+ authorize to the RBAC system with the maximum number of invalid
68255+ passwords. The higher the number, the harder it will be to brute-force
68256+ a password.
68257+
68258+endmenu
68259+menu "Filesystem Protections"
68260+depends on GRKERNSEC
68261+
68262+config GRKERNSEC_PROC
68263+ bool "Proc restrictions"
68264+ default y if GRKERNSEC_CONFIG_AUTO
68265+ help
68266+ If you say Y here, the permissions of the /proc filesystem
68267+ will be altered to enhance system security and privacy. You MUST
68268+ choose either a user only restriction or a user and group restriction.
68269+ Depending upon the option you choose, you can either restrict users to
68270+ see only the processes they themselves run ("restrict to user only"),
68271+ or choose a group that can view all processes and files normally
68272+ restricted to root. NOTE: If you're running identd or
68273+ ntpd as a non-root user, you will have to run it as the group you
68274+ specify here.
68275+
68276+config GRKERNSEC_PROC_USER
68277+ bool "Restrict /proc to user only"
68278+ depends on GRKERNSEC_PROC
68279+ help
68280+ If you say Y here, non-root users will only be able to view their own
68281+ processes, and will be restricted from viewing network-related
68282+ information and kernel symbol and module information.
68283+
68284+config GRKERNSEC_PROC_USERGROUP
68285+ bool "Allow special group"
68286+ default y if GRKERNSEC_CONFIG_AUTO
68287+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
68288+ help
68289+ If you say Y here, you will be able to select a group that will be
68290+ able to view all processes and network-related information. If you've
68291+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
68292+ remain hidden. This option is useful if you want to run identd as
68293+ a non-root user. The group you select may also be chosen at boot time
68294+ via "grsec_proc_gid=" on the kernel commandline.
68295+
68296+config GRKERNSEC_PROC_GID
68297+ int "GID for special group"
68298+ depends on GRKERNSEC_PROC_USERGROUP
68299+ default 1001
68300+
68301+config GRKERNSEC_PROC_ADD
68302+ bool "Additional restrictions"
68303+ default y if GRKERNSEC_CONFIG_AUTO
68304+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
68305+ help
68306+ If you say Y here, additional restrictions will be placed on
68307+ /proc that keep normal users from viewing device information and
68308+ slabinfo information that could be useful for exploits.
68309+
68310+config GRKERNSEC_LINK
68311+ bool "Linking restrictions"
68312+ default y if GRKERNSEC_CONFIG_AUTO
68313+ help
68314+ If you say Y here, /tmp race exploits will be prevented, since users
68315+ will no longer be able to follow symlinks owned by other users in
68316+ world-writable +t directories (e.g. /tmp), unless the owner of the
68317+ symlink is the owner of the directory. Users will also not be
68318+ able to hardlink to files they do not own. If the sysctl option is
68319+ enabled, a sysctl option with name "linking_restrictions" is created.
68320+
68321+config GRKERNSEC_SYMLINKOWN
68322+ bool "Kernel-enforced SymlinksIfOwnerMatch"
68323+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68324+ help
68325+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
68326+ that prevents it from being used as a security feature. As Apache
68327+ verifies the symlink by performing a stat() against the target of
68328+ the symlink before it is followed, an attacker can set up a symlink
68329+ to point to a same-owned file, then replace the symlink with one
68330+ that targets another user's file just after Apache "validates" the
68331+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
68332+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
68333+ will be in place for the group you specify. If the sysctl option
68334+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
68335+ created.
68336+
68337+config GRKERNSEC_SYMLINKOWN_GID
68338+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
68339+ depends on GRKERNSEC_SYMLINKOWN
68340+ default 1006
68341+ help
68342+ Setting this GID determines what group kernel-enforced
68343+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
68344+ is enabled, a sysctl option with name "symlinkown_gid" is created.
68345+
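The attacker's half of the race described above fits in a few lines (illustrative only; the file names are made up): flip a symlink between a same-owned file and a victim-owned target so that a stat()-then-open() checker occasionally validates one and opens the other.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        for (;;) {
                /* Window 1: the checker's stat() sees a same-owned target. */
                unlink("page.html");
                if (symlink("mine.html", "page.html") < 0)
                        perror("symlink");

                /* Window 2: the subsequent open() follows the swapped link. */
                unlink("page.html");
                if (symlink("/home/victim/secret.html", "page.html") < 0)
                        perror("symlink");
        }
        return 0;
}
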
68346+config GRKERNSEC_FIFO
68347+ bool "FIFO restrictions"
68348+ default y if GRKERNSEC_CONFIG_AUTO
68349+ help
68350+ If you say Y here, users will not be able to write to FIFOs they don't
68351+ own in world-writable +t directories (e.g. /tmp), unless the owner of
68352+ the FIFO is the same as the owner of the directory it's held in. If
68353+ the sysctl option is enabled, a sysctl option with name
68354+ "fifo_restrictions" is created.
68355+
68356+config GRKERNSEC_SYSFS_RESTRICT
68357+ bool "Sysfs/debugfs restriction"
68358+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68359+ depends on SYSFS
68360+ help
68361+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
68362+ any filesystem normally mounted under it (e.g. debugfs) will be
68363+ mostly accessible only by root. These filesystems generally provide access
68364+ to hardware and debug information that isn't appropriate for unprivileged
68365+ users of the system. Sysfs and debugfs have also become a large source
68366+ of new vulnerabilities, ranging from infoleaks to local compromise.
68367+ There has been very little oversight with an eye toward security involved
68368+ in adding new exporters of information to these filesystems, so their
68369+ use is discouraged.
68370+ For reasons of compatibility, a few directories have been whitelisted
68371+ for access by non-root users:
68372+ /sys/fs/selinux
68373+ /sys/fs/fuse
68374+ /sys/devices/system/cpu
68375+
68376+config GRKERNSEC_ROFS
68377+ bool "Runtime read-only mount protection"
68378+ depends on SYSCTL
68379+ help
68380+ If you say Y here, a sysctl option with name "romount_protect" will
68381+ be created. By setting this option to 1 at runtime, filesystems
68382+ will be protected in the following ways:
68383+ * No new writable mounts will be allowed
68384+ * Existing read-only mounts won't be able to be remounted read/write
68385+ * Write operations will be denied on all block devices
68386+ This option acts independently of grsec_lock: once it is set to 1,
68387+ it cannot be turned off. Therefore, please be mindful of the resulting
68388+ behavior if this option is enabled in an init script on a read-only
68389+ filesystem.
68390+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
68391+ and GRKERNSEC_IO should be enabled and module loading disabled via
68392+ config or at runtime.
68393+ This feature is mainly intended for secure embedded systems.
68394+
68395+
68396+config GRKERNSEC_DEVICE_SIDECHANNEL
68397+ bool "Eliminate stat/notify-based device sidechannels"
68398+ default y if GRKERNSEC_CONFIG_AUTO
68399+ help
68400+ If you say Y here, timing analyses on block or character
68401+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
68402+ will be thwarted for unprivileged users. If a process without
68403+ CAP_MKNOD stats such a device, the last access and last modify times
68404+ will match the device's create time. No access or modify events
68405+ will be triggered through inotify/dnotify/fanotify for such devices.
68406+ This feature will prevent attacks that may at a minimum
68407+ allow an attacker to determine the administrator's password length.
68408+
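What the unprivileged observer sees can be checked with a plain stat() (an editorial sketch, not from the patch): per the fs/stat.c hunk earlier in this patch, a caller without CAP_MKNOD should find atime and mtime pinned to ctime on such a device.

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        struct stat st;

        if (stat("/dev/ptmx", &st) != 0) {
                perror("stat /dev/ptmx");
                return 1;
        }
        /* With the option enabled, all three fields match for unprivileged
         * callers, so polling them no longer reveals terminal activity. */
        printf("atime %ld  mtime %ld  ctime %ld\n",
               (long)st.st_atime, (long)st.st_mtime, (long)st.st_ctime);
        return 0;
}
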
68409+config GRKERNSEC_CHROOT
68410+ bool "Chroot jail restrictions"
68411+ default y if GRKERNSEC_CONFIG_AUTO
68412+ help
68413+ If you say Y here, you will be able to choose several options that will
68414+ make breaking out of a chrooted jail much more difficult. If you
68415+ encounter no software incompatibilities with the following options, it
68416+ is recommended that you enable each one.
68417+
68418+ Note that the chroot restrictions are not intended to apply to "chroots"
68419+ to directories that are simple bind mounts of the global root filesystem.
68420+ For several other reasons, a user shouldn't expect any significant
68421+ security by performing such a chroot.
68422+
68423+config GRKERNSEC_CHROOT_MOUNT
68424+ bool "Deny mounts"
68425+ default y if GRKERNSEC_CONFIG_AUTO
68426+ depends on GRKERNSEC_CHROOT
68427+ help
68428+ If you say Y here, processes inside a chroot will not be able to
68429+ mount or remount filesystems. If the sysctl option is enabled, a
68430+ sysctl option with name "chroot_deny_mount" is created.
68431+
68432+config GRKERNSEC_CHROOT_DOUBLE
68433+ bool "Deny double-chroots"
68434+ default y if GRKERNSEC_CONFIG_AUTO
68435+ depends on GRKERNSEC_CHROOT
68436+ help
68437+ If you say Y here, processes inside a chroot will not be able to chroot
68438+ again outside the chroot. This is a widely used method of breaking
68439+ out of a chroot jail and should not be allowed. If the sysctl
68440+ option is enabled, a sysctl option with name
68441+ "chroot_deny_chroot" is created.
68442+
68443+config GRKERNSEC_CHROOT_PIVOT
68444+ bool "Deny pivot_root in chroot"
68445+ default y if GRKERNSEC_CONFIG_AUTO
68446+ depends on GRKERNSEC_CHROOT
68447+ help
68448+ If you say Y here, processes inside a chroot will not be able to use
68449+ a function called pivot_root() that was introduced in Linux 2.3.41. It
68450+ works similar to chroot in that it changes the root filesystem. This
68451+ function could be misused in a chrooted process to attempt to break out
68452+ of the chroot, and therefore should not be allowed. If the sysctl
68453+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
68454+ created.
68455+
68456+config GRKERNSEC_CHROOT_CHDIR
68457+ bool "Enforce chdir(\"/\") on all chroots"
68458+ default y if GRKERNSEC_CONFIG_AUTO
68459+ depends on GRKERNSEC_CHROOT
68460+ help
68461+ If you say Y here, the current working directory of all newly-chrooted
68462+ applications will be set to the root directory of the chroot.
68463+ The man page on chroot(2) states:
68464+ Note that this call does not change the current working
68465+ directory, so that `.' can be outside the tree rooted at
68466+ `/'. In particular, the super-user can escape from a
68467+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
68468+
68469+ It is recommended that you say Y here, since it's not known to break
68470+ any software. If the sysctl option is enabled, a sysctl option with
68471+ name "chroot_enforce_chdir" is created.
68472+
68473+config GRKERNSEC_CHROOT_CHMOD
68474+ bool "Deny (f)chmod +s"
68475+ default y if GRKERNSEC_CONFIG_AUTO
68476+ depends on GRKERNSEC_CHROOT
68477+ help
68478+ If you say Y here, processes inside a chroot will not be able to chmod
68479+ or fchmod files to make them have suid or sgid bits. This protects
68480+ against another published method of breaking a chroot. If the sysctl
68481+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
68482+ created.
68483+
68484+config GRKERNSEC_CHROOT_FCHDIR
68485+ bool "Deny fchdir and fhandle out of chroot"
68486+ default y if GRKERNSEC_CONFIG_AUTO
68487+ depends on GRKERNSEC_CHROOT
68488+ help
68489+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
68490+ to a file descriptor of the chrooting process that points to a directory
68491+ outside the filesystem will be stopped. Additionally, this option prevents
68492+ use of the recently-created syscall for opening files by a guessable "file
68493+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
68494+ with name "chroot_deny_fchdir" is created.
68495+
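The well-known breakout mentioned above is short enough to sketch (illustrative; it requires root inside the jail, and chroot_deny_fchdir is meant to stop the fchdir() step):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

int main(void)
{
        int i, fd;

        fd = open(".", O_RDONLY);       /* directory fd outside the new root */
        mkdir("jail", 0700);
        if (fd < 0 || chroot("jail") < 0) {
                perror("setup");
                return 1;
        }
        fchdir(fd);                     /* cwd now escapes the chroot */
        for (i = 0; i < 256; i++)
                chdir("..");            /* walk up to the real root */
        chroot(".");                    /* re-root at the true "/" */
        execl("/bin/sh", "sh", (char *)NULL);
        perror("execl");
        return 1;
}
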
68496+config GRKERNSEC_CHROOT_MKNOD
68497+ bool "Deny mknod"
68498+ default y if GRKERNSEC_CONFIG_AUTO
68499+ depends on GRKERNSEC_CHROOT
68500+ help
68501+ If you say Y here, processes inside a chroot will not be allowed to
68502+ mknod. The problem with using mknod inside a chroot is that it
68503+ would allow an attacker to create a device entry that is the same
68504+ as one on the physical root of your system, which could be anything
68505+ from the console device to a device for your hard drive (which
68506+ they could then use to wipe the drive or steal data). It is recommended
68507+ that you say Y here, unless you run into software incompatibilities.
68508+ If the sysctl option is enabled, a sysctl option with name
68509+ "chroot_deny_mknod" is created.
68510+
68511+config GRKERNSEC_CHROOT_SHMAT
68512+ bool "Deny shmat() out of chroot"
68513+ default y if GRKERNSEC_CONFIG_AUTO
68514+ depends on GRKERNSEC_CHROOT
68515+ help
68516+ If you say Y here, processes inside a chroot will not be able to attach
68517+ to shared memory segments that were created outside of the chroot jail.
68518+ It is recommended that you say Y here. If the sysctl option is enabled,
68519+ a sysctl option with name "chroot_deny_shmat" is created.
68520+
68521+config GRKERNSEC_CHROOT_UNIX
68522+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
68523+ default y if GRKERNSEC_CONFIG_AUTO
68524+ depends on GRKERNSEC_CHROOT
68525+ help
68526+ If you say Y here, processes inside a chroot will not be able to
68527+ connect to abstract (meaning not belonging to a filesystem) Unix
68528+ domain sockets that were bound outside of a chroot. It is recommended
68529+ that you say Y here. If the sysctl option is enabled, a sysctl option
68530+ with name "chroot_deny_unix" is created.
68531+
68532+config GRKERNSEC_CHROOT_FINDTASK
68533+ bool "Protect outside processes"
68534+ default y if GRKERNSEC_CONFIG_AUTO
68535+ depends on GRKERNSEC_CHROOT
68536+ help
68537+ If you say Y here, processes inside a chroot will not be able to
68538+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
68539+ getsid, or view any process outside of the chroot. If the sysctl
68540+ option is enabled, a sysctl option with name "chroot_findtask" is
68541+ created.
68542+
68543+config GRKERNSEC_CHROOT_NICE
68544+ bool "Restrict priority changes"
68545+ default y if GRKERNSEC_CONFIG_AUTO
68546+ depends on GRKERNSEC_CHROOT
68547+ help
68548+ If you say Y here, processes inside a chroot will not be able to raise
68549+ the priority of processes in the chroot, or alter the priority of
68550+ processes outside the chroot. This provides more security than simply
68551+ removing CAP_SYS_NICE from the process' capability set. If the
68552+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
68553+ is created.
68554+
68555+config GRKERNSEC_CHROOT_SYSCTL
68556+ bool "Deny sysctl writes"
68557+ default y if GRKERNSEC_CONFIG_AUTO
68558+ depends on GRKERNSEC_CHROOT
68559+ help
68560+ If you say Y here, an attacker in a chroot will not be able to
68561+ write to sysctl entries, either by sysctl(2) or through a /proc
68562+ interface. It is strongly recommended that you say Y here. If the
68563+ sysctl option is enabled, a sysctl option with name
68564+ "chroot_deny_sysctl" is created.
68565+
68566+config GRKERNSEC_CHROOT_RENAME
68567+ bool "Deny bad renames"
68568+ default y if GRKERNSEC_CONFIG_AUTO
68569+ depends on GRKERNSEC_CHROOT
68570+ help
68571+ If you say Y here, an attacker in a chroot will not be able to
68572+ abuse the ability to create double chroots to break out of the
68573+ chroot by exploiting a race condition between a rename of a directory
68574+ within a chroot and an open of a symlink with relative path
68575+ components. This feature will likewise prevent an accomplice outside
68576+ a chroot from enabling a user inside the chroot to break out and make
68577+ use of their credentials on the global filesystem. Enabling this
68578+ feature is essential to prevent root users from breaking out of a
68579+ chroot. If the sysctl option is enabled, a sysctl option with name
68580+ "chroot_deny_bad_rename" is created.
68581+
68582+config GRKERNSEC_CHROOT_CAPS
68583+ bool "Capability restrictions"
68584+ default y if GRKERNSEC_CONFIG_AUTO
68585+ depends on GRKERNSEC_CHROOT
68586+ help
68587+ If you say Y here, the capabilities on all processes within a
68588+ chroot jail will be lowered to stop module insertion, raw i/o,
68589+ system and net admin tasks, rebooting the system, modifying immutable
68590+ files, modifying IPC owned by another, and changing the system time.
68591+ This is left an option because it can break some apps. Disable this
68592+ if your chrooted apps are having problems performing those kinds of
68593+ tasks. If the sysctl option is enabled, a sysctl option with
68594+ name "chroot_caps" is created.
68595+
68596+config GRKERNSEC_CHROOT_INITRD
68597+ bool "Exempt initrd tasks from restrictions"
68598+ default y if GRKERNSEC_CONFIG_AUTO
68599+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
68600+ help
68601+ If you say Y here, tasks started prior to init will be exempted from
68602+ grsecurity's chroot restrictions. This option is mainly meant to
68603+ resolve Plymouth's performing privileged operations unnecessarily
68604+ in a chroot.
68605+
68606+endmenu
68607+menu "Kernel Auditing"
68608+depends on GRKERNSEC
68609+
68610+config GRKERNSEC_AUDIT_GROUP
68611+ bool "Single group for auditing"
68612+ help
68613+ If you say Y here, the exec and chdir logging features will only operate
68614+ on a group you specify. This option is recommended if you only want to
68615+ watch certain users instead of having a large amount of logs from the
68616+ entire system. If the sysctl option is enabled, a sysctl option with
68617+ name "audit_group" is created.
68618+
68619+config GRKERNSEC_AUDIT_GID
68620+ int "GID for auditing"
68621+ depends on GRKERNSEC_AUDIT_GROUP
68622+ default 1007
68623+
68624+config GRKERNSEC_EXECLOG
68625+ bool "Exec logging"
68626+ help
68627+ If you say Y here, all execve() calls will be logged (since the
68628+ other exec*() calls are frontends to execve(), all execution
68629+ will be logged). Useful for shell-servers that like to keep track
68630+ of their users. If the sysctl option is enabled, a sysctl option with
68631+ name "exec_logging" is created.
68632+ WARNING: This option when enabled will produce a LOT of logs, especially
68633+ on an active system.
68634+
68635+config GRKERNSEC_RESLOG
68636+ bool "Resource logging"
68637+ default y if GRKERNSEC_CONFIG_AUTO
68638+ help
68639+ If you say Y here, all attempts to overstep resource limits will
68640+ be logged with the resource name, the requested size, and the current
68641+ limit. It is highly recommended that you say Y here. If the sysctl
68642+ option is enabled, a sysctl option with name "resource_logging" is
68643+ created. If the RBAC system is enabled, the sysctl value is ignored.
68644+
68645+config GRKERNSEC_CHROOT_EXECLOG
68646+ bool "Log execs within chroot"
68647+ help
68648+ If you say Y here, all executions inside a chroot jail will be logged
68649+ to syslog. This can cause a large amount of logs if certain
68650+ applications (eg. djb's daemontools) are installed on the system, and
68651+ is therefore left as an option. If the sysctl option is enabled, a
68652+ sysctl option with name "chroot_execlog" is created.
68653+
68654+config GRKERNSEC_AUDIT_PTRACE
68655+ bool "Ptrace logging"
68656+ help
68657+ If you say Y here, all attempts to attach to a process via ptrace
68658+ will be logged. If the sysctl option is enabled, a sysctl option
68659+ with name "audit_ptrace" is created.
68660+
68661+config GRKERNSEC_AUDIT_CHDIR
68662+ bool "Chdir logging"
68663+ help
68664+ If you say Y here, all chdir() calls will be logged. If the sysctl
68665+ option is enabled, a sysctl option with name "audit_chdir" is created.
68666+
68667+config GRKERNSEC_AUDIT_MOUNT
68668+ bool "(Un)Mount logging"
68669+ help
68670+ If you say Y here, all mounts and unmounts will be logged. If the
68671+ sysctl option is enabled, a sysctl option with name "audit_mount" is
68672+ created.
68673+
68674+config GRKERNSEC_SIGNAL
68675+ bool "Signal logging"
68676+ default y if GRKERNSEC_CONFIG_AUTO
68677+ help
68678+ If you say Y here, certain important signals will be logged, such as
68679+ SIGSEGV, which will as a result inform you when an error in a program
68680+ has occurred, which in some cases could indicate an exploit attempt.
68681+ If the sysctl option is enabled, a sysctl option with name
68682+ "signal_logging" is created.
68683+
68684+config GRKERNSEC_FORKFAIL
68685+ bool "Fork failure logging"
68686+ help
68687+ If you say Y here, all failed fork() attempts will be logged.
68688+ This could suggest a fork bomb, or someone attempting to overstep
68689+ their process limit. If the sysctl option is enabled, a sysctl option
68690+ with name "forkfail_logging" is created.
68691+
68692+config GRKERNSEC_TIME
68693+ bool "Time change logging"
68694+ default y if GRKERNSEC_CONFIG_AUTO
68695+ help
68696+ If you say Y here, any changes of the system clock will be logged.
68697+ If the sysctl option is enabled, a sysctl option with name
68698+ "timechange_logging" is created.
68699+
68700+config GRKERNSEC_PROC_IPADDR
68701+ bool "/proc/<pid>/ipaddr support"
68702+ default y if GRKERNSEC_CONFIG_AUTO
68703+ help
68704+ If you say Y here, a new entry will be added to each /proc/<pid>
68705+ directory that contains the IP address of the person using the task.
68706+ The IP is carried across local TCP and AF_UNIX stream sockets.
68707+ This information can be useful for IDS/IPSes to perform remote response
68708+ to a local attack. The entry is readable by only the owner of the
68709+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
68710+ the RBAC system), and thus does not create privacy concerns.
68711+
68712+config GRKERNSEC_RWXMAP_LOG
68713+ bool 'Denied RWX mmap/mprotect logging'
68714+ default y if GRKERNSEC_CONFIG_AUTO
68715+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
68716+ help
68717+ If you say Y here, calls to mmap() and mprotect() with explicit
68718+ usage of PROT_WRITE and PROT_EXEC together will be logged when
68719+ denied by the PAX_MPROTECT feature. This feature will also
68720+ log other problematic scenarios that can occur when PAX_MPROTECT
68721+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
68722+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
68723+ is created.
68724+
68725+endmenu
68726+
68727+menu "Executable Protections"
68728+depends on GRKERNSEC
68729+
68730+config GRKERNSEC_DMESG
68731+ bool "Dmesg(8) restriction"
68732+ default y if GRKERNSEC_CONFIG_AUTO
68733+ help
68734+ If you say Y here, non-root users will not be able to use dmesg(8)
68735+ to view the contents of the kernel's circular log buffer.
68736+ The kernel's log buffer often contains kernel addresses and other
68737+ identifying information useful to an attacker in fingerprinting a
68738+ system for a targeted exploit.
68739+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
68740+ created.
68741+
68742+config GRKERNSEC_HARDEN_PTRACE
68743+ bool "Deter ptrace-based process snooping"
68744+ default y if GRKERNSEC_CONFIG_AUTO
68745+ help
68746+ If you say Y here, TTY sniffers and other malicious monitoring
68747+ programs implemented through ptrace will be defeated. If you
68748+ have been using the RBAC system, this option has already been
68749+ enabled for several years for all users, with the ability to make
68750+ fine-grained exceptions.
68751+
68752+ This option only affects the ability of non-root users to ptrace
68753+	  processes that are not a descendant of the ptracing process.
68754+ This means that strace ./binary and gdb ./binary will still work,
68755+ but attaching to arbitrary processes will not. If the sysctl
68756+ option is enabled, a sysctl option with name "harden_ptrace" is
68757+ created.
68758+
68759+config GRKERNSEC_PTRACE_READEXEC
68760+ bool "Require read access to ptrace sensitive binaries"
68761+ default y if GRKERNSEC_CONFIG_AUTO
68762+ help
68763+	  If you say Y here, unprivileged users will not be able to ptrace
68764+	  unreadable binaries.  This option is useful in environments that
68765+	  remove the read bits (e.g. file mode 4711) from suid binaries to
68766+	  prevent information leaks of their contents.  This option adds
68767+	  consistency to the use of that file mode, as the contents could
68768+	  otherwise be read out by running the binary under ptrace.
68769+
68770+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
68771+ is created.
68772+
68773+config GRKERNSEC_SETXID
68774+ bool "Enforce consistent multithreaded privileges"
68775+ default y if GRKERNSEC_CONFIG_AUTO
68776+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
68777+ help
68778+ If you say Y here, a change from a root uid to a non-root uid
68779+ in a multithreaded application will cause the resulting uids,
68780+ gids, supplementary groups, and capabilities in that thread
68781+ to be propagated to the other threads of the process. In most
68782+ cases this is unnecessary, as glibc will emulate this behavior
68783+ on behalf of the application. Other libcs do not act in the
68784+ same way, allowing the other threads of the process to continue
68785+ running with root privileges. If the sysctl option is enabled,
68786+ a sysctl option with name "consistent_setxid" is created.
68787+
68788+config GRKERNSEC_HARDEN_IPC
68789+ bool "Disallow access to overly-permissive IPC objects"
68790+ default y if GRKERNSEC_CONFIG_AUTO
68791+ depends on SYSVIPC
68792+ help
68793+ If you say Y here, access to overly-permissive IPC objects (shared
68794+	  memory, message queues, and semaphores) will be denied to processes
68795+	  that meet either of the following criteria, beyond normal permission checks:
68796+	  1) If the IPC object is world-accessible and the euid doesn't match
68797+	     that of the creator or current uid for the IPC object
68798+	  2) If the IPC object is group-accessible and the egid doesn't
68799+	     match that of the creator or current gid for the IPC object
68800+	     (see the sketch below)
68801+	  It's a common error to grant too much permission to these objects,
68802+	  with impact ranging from denial of service and information leaking to
68803+	  privilege escalation.  This feature was developed in response to
68804+	  research by Tim Brown, who found hundreds of such insecure usages:
68805+	  http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
68806+ CAP_IPC_OWNER are still permitted to access these IPC objects.
68807+ If the sysctl option is enabled, a sysctl option with name
68808+ "harden_ipc" is created.
68809+
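	  As a rough illustration of the two criteria above, the following
	  userspace-style sketch mirrors the checks; the struct and helper
	  names are hypothetical, and the real check (grsec_ipc.c in this
	  patch) operates on kernel credential and ipc_perm types.

	/* Sketch of the deny conditions; CAP_IPC_OWNER is exempt. */
	#include <stdbool.h>
	#include <sys/types.h>

	struct ipc_perm_info {
		uid_t cuid, uid;	/* creator and current owner uids */
		gid_t cgid, gid;	/* creator and current owner gids */
		unsigned short mode;	/* permission bits */
	};

	static bool grsec_would_deny(const struct ipc_perm_info *p,
				     uid_t euid, gid_t egid)
	{
		/* 1) world-accessible, euid matches neither creator nor owner */
		if ((p->mode & 0007) && euid != p->cuid && euid != p->uid)
			return true;
		/* 2) group-accessible, egid matches neither creator nor owner */
		if ((p->mode & 0070) && egid != p->cgid && egid != p->gid)
			return true;
		return false;
	}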
68810+config GRKERNSEC_TPE
68811+ bool "Trusted Path Execution (TPE)"
68812+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68813+ help
68814+	  If you say Y here, you will be able to choose a GID to add to the
68815+ supplementary groups of users you want to mark as "untrusted."
68816+ These users will not be able to execute any files that are not in
68817+ root-owned directories writable only by root. If the sysctl option
68818+ is enabled, a sysctl option with name "tpe" is created.
68819+
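	  The base rule can be pictured with a short sketch; the helper below
	  is hypothetical and checks only the directory property named above
	  (root-owned and writable only by root), not the full grsec_tpe.c
	  logic.

	/* Sketch: may an "untrusted" user execute from this directory? */
	#include <stdbool.h>
	#include <sys/stat.h>

	static bool tpe_dir_ok(const struct stat *dir)
	{
		return dir->st_uid == 0 &&			/* owned by root */
		       !(dir->st_mode & (S_IWGRP | S_IWOTH));	/* not group/world-writable */
	}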
68820+config GRKERNSEC_TPE_ALL
68821+ bool "Partially restrict all non-root users"
68822+ depends on GRKERNSEC_TPE
68823+ help
68824+ If you say Y here, all non-root users will be covered under
68825+ a weaker TPE restriction. This is separate from, and in addition to,
68826+ the main TPE options that you have selected elsewhere. Thus, if a
68827+ "trusted" GID is chosen, this restriction applies to even that GID.
68828+ Under this restriction, all non-root users will only be allowed to
68829+ execute files in directories they own that are not group or
68830+ world-writable, or in directories owned by root and writable only by
68831+ root. If the sysctl option is enabled, a sysctl option with name
68832+ "tpe_restrict_all" is created.
68833+
68834+config GRKERNSEC_TPE_INVERT
68835+ bool "Invert GID option"
68836+ depends on GRKERNSEC_TPE
68837+ help
68838+ If you say Y here, the group you specify in the TPE configuration will
68839+ decide what group TPE restrictions will be *disabled* for. This
68840+ option is useful if you want TPE restrictions to be applied to most
68841+ users on the system. If the sysctl option is enabled, a sysctl option
68842+ with name "tpe_invert" is created. Unlike other sysctl options, this
68843+	  entry will default to on for backward compatibility.
68844+
68845+config GRKERNSEC_TPE_GID
68846+ int
68847+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
68848+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
68849+
68850+config GRKERNSEC_TPE_UNTRUSTED_GID
68851+ int "GID for TPE-untrusted users"
68852+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
68853+ default 1005
68854+ help
68855+ Setting this GID determines what group TPE restrictions will be
68856+ *enabled* for. If the sysctl option is enabled, a sysctl option
68857+ with name "tpe_gid" is created.
68858+
68859+config GRKERNSEC_TPE_TRUSTED_GID
68860+ int "GID for TPE-trusted users"
68861+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
68862+ default 1005
68863+ help
68864+ Setting this GID determines what group TPE restrictions will be
68865+ *disabled* for. If the sysctl option is enabled, a sysctl option
68866+ with name "tpe_gid" is created.
68867+
68868+endmenu
68869+menu "Network Protections"
68870+depends on GRKERNSEC
68871+
68872+config GRKERNSEC_BLACKHOLE
68873+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
68874+ default y if GRKERNSEC_CONFIG_AUTO
68875+ depends on NET
68876+ help
68877+ If you say Y here, neither TCP resets nor ICMP
68878+ destination-unreachable packets will be sent in response to packets
68879+ sent to ports for which no associated listening process exists.
68880+ It will also prevent the sending of ICMP protocol unreachable packets
68881+ in response to packets with unknown protocols.
68882+	  This feature supports both IPv4 and IPv6 and exempts the
68883+ loopback interface from blackholing. Enabling this feature
68884+ makes a host more resilient to DoS attacks and reduces network
68885+ visibility against scanners.
68886+
68887+	  The blackhole feature as implemented is equivalent to the FreeBSD
68888+ blackhole feature, as it prevents RST responses to all packets, not
68889+ just SYNs. Under most application behavior this causes no
68890+ problems, but applications (like haproxy) may not close certain
68891+ connections in a way that cleanly terminates them on the remote
68892+ end, leaving the remote host in LAST_ACK state. Because of this
68893+ side-effect and to prevent intentional LAST_ACK DoSes, this
68894+ feature also adds automatic mitigation against such attacks.
68895+ The mitigation drastically reduces the amount of time a socket
68896+ can spend in LAST_ACK state. If you're using haproxy and not
68897+ all servers it connects to have this option enabled, consider
68898+ disabling this feature on the haproxy host.
68899+
68900+ If the sysctl option is enabled, two sysctl options with names
68901+ "ip_blackhole" and "lastack_retries" will be created.
68902+ While "ip_blackhole" takes the standard zero/non-zero on/off
68903+ toggle, "lastack_retries" uses the same kinds of values as
68904+ "tcp_retries1" and "tcp_retries2". The default value of 4
68905+ prevents a socket from lasting more than 45 seconds in LAST_ACK
68906+ state.
68907+
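	  For reference, toggling these at runtime could look like the sketch
	  below; the /proc/sys/kernel/grsecurity path follows the "Sysctl
	  Support" help text later in this file, and error handling is
	  minimal.

	/* Sketch: enable blackholing and keep the default LAST_ACK retries. */
	#include <stdio.h>

	static int write_grsec_sysctl(const char *name, const char *val)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	int main(void)
	{
		write_grsec_sysctl("ip_blackhole", "1");
		write_grsec_sysctl("lastack_retries", "4");
		return 0;
	}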
68908+config GRKERNSEC_NO_SIMULT_CONNECT
68909+ bool "Disable TCP Simultaneous Connect"
68910+ default y if GRKERNSEC_CONFIG_AUTO
68911+ depends on NET
68912+ help
68913+ If you say Y here, a feature by Willy Tarreau will be enabled that
68914+ removes a weakness in Linux's strict implementation of TCP that
68915+ allows two clients to connect to each other without either entering
68916+ a listening state. The weakness allows an attacker to easily prevent
68917+ a client from connecting to a known server provided the source port
68918+ for the connection is guessed correctly.
68919+
68920+ As the weakness could be used to prevent an antivirus or IPS from
68921+ fetching updates, or prevent an SSL gateway from fetching a CRL,
68922+	  it should be eliminated by enabling this option.  Though Linux is
68923+	  one of the few operating systems supporting simultaneous connect, the
68924+	  feature has no legitimate use in practice and is rarely supported by firewalls.
68925+
68926+config GRKERNSEC_SOCKET
68927+ bool "Socket restrictions"
68928+ depends on NET
68929+ help
68930+ If you say Y here, you will be able to choose from several options.
68931+ If you assign a GID on your system and add it to the supplementary
68932+ groups of users you want to restrict socket access to, this patch
68933+	  will enforce up to three kinds of restriction, based on the option(s) you choose.
68934+
68935+config GRKERNSEC_SOCKET_ALL
68936+ bool "Deny any sockets to group"
68937+ depends on GRKERNSEC_SOCKET
68938+ help
68939+	  If you say Y here, you will be able to choose a GID whose users will
68940+ be unable to connect to other hosts from your machine or run server
68941+ applications from your machine. If the sysctl option is enabled, a
68942+ sysctl option with name "socket_all" is created.
68943+
68944+config GRKERNSEC_SOCKET_ALL_GID
68945+ int "GID to deny all sockets for"
68946+ depends on GRKERNSEC_SOCKET_ALL
68947+ default 1004
68948+ help
68949+ Here you can choose the GID to disable socket access for. Remember to
68950+ add the users you want socket access disabled for to the GID
68951+ specified here. If the sysctl option is enabled, a sysctl option
68952+ with name "socket_all_gid" is created.
68953+
68954+config GRKERNSEC_SOCKET_CLIENT
68955+ bool "Deny client sockets to group"
68956+ depends on GRKERNSEC_SOCKET
68957+ help
68958+	  If you say Y here, you will be able to choose a GID whose users will
68959+ be unable to connect to other hosts from your machine, but will be
68960+ able to run servers. If this option is enabled, all users in the group
68961+ you specify will have to use passive mode when initiating ftp transfers
68962+ from the shell on your machine. If the sysctl option is enabled, a
68963+ sysctl option with name "socket_client" is created.
68964+
68965+config GRKERNSEC_SOCKET_CLIENT_GID
68966+ int "GID to deny client sockets for"
68967+ depends on GRKERNSEC_SOCKET_CLIENT
68968+ default 1003
68969+ help
68970+ Here you can choose the GID to disable client socket access for.
68971+ Remember to add the users you want client socket access disabled for to
68972+ the GID specified here. If the sysctl option is enabled, a sysctl
68973+ option with name "socket_client_gid" is created.
68974+
68975+config GRKERNSEC_SOCKET_SERVER
68976+ bool "Deny server sockets to group"
68977+ depends on GRKERNSEC_SOCKET
68978+ help
68979+	  If you say Y here, you will be able to choose a GID whose users will
68980+ be unable to run server applications from your machine. If the sysctl
68981+ option is enabled, a sysctl option with name "socket_server" is created.
68982+
68983+config GRKERNSEC_SOCKET_SERVER_GID
68984+ int "GID to deny server sockets for"
68985+ depends on GRKERNSEC_SOCKET_SERVER
68986+ default 1002
68987+ help
68988+ Here you can choose the GID to disable server socket access for.
68989+ Remember to add the users you want server socket access disabled for to
68990+ the GID specified here. If the sysctl option is enabled, a sysctl
68991+ option with name "socket_server_gid" is created.
68992+
68993+endmenu
68994+
68995+menu "Physical Protections"
68996+depends on GRKERNSEC
68997+
68998+config GRKERNSEC_DENYUSB
68999+ bool "Deny new USB connections after toggle"
69000+ default y if GRKERNSEC_CONFIG_AUTO
69001+ depends on SYSCTL && USB_SUPPORT
69002+ help
69003+ If you say Y here, a new sysctl option with name "deny_new_usb"
69004+ will be created. Setting its value to 1 will prevent any new
69005+ USB devices from being recognized by the OS. Any attempted USB
69006+ device insertion will be logged. This option is intended to be
69007+ used against custom USB devices designed to exploit vulnerabilities
69008+ in various USB device drivers.
69009+
69010+	  For greatest effectiveness, this sysctl should be set after any
69011+	  relevant init scripts have run.  This option is safe to enable in distros
69012+ as each user can choose whether or not to toggle the sysctl.
69013+
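	  A sketch of the intended usage, assuming the sysctl path described
	  in the "Sysctl Support" help text later in this file:

	/* Sketch: deny new USB devices once init scripts have run. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/grsecurity/deny_new_usb", "w");

		if (!f)
			return 1;
		fputs("1", f);
		return fclose(f) ? 1 : 0;
	}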
69014+config GRKERNSEC_DENYUSB_FORCE
69015+ bool "Reject all USB devices not connected at boot"
69016+ select USB
69017+ depends on GRKERNSEC_DENYUSB
69018+ help
69019+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
69020+ that doesn't involve a sysctl entry. This option should only be
69021+ enabled if you're sure you want to deny all new USB connections
69022+ at runtime and don't want to modify init scripts. This should not
69023+ be enabled by distros. It forces the core USB code to be built
69024+ into the kernel image so that all devices connected at boot time
69025+ can be recognized and new USB device connections can be prevented
69026+ prior to init running.
69027+
69028+endmenu
69029+
69030+menu "Sysctl Support"
69031+depends on GRKERNSEC && SYSCTL
69032+
69033+config GRKERNSEC_SYSCTL
69034+ bool "Sysctl support"
69035+ default y if GRKERNSEC_CONFIG_AUTO
69036+ help
69037+ If you say Y here, you will be able to change the options that
69038+ grsecurity runs with at bootup, without having to recompile your
69039+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
69040+ to enable (1) or disable (0) various features. All the sysctl entries
69041+ are mutable until the "grsec_lock" entry is set to a non-zero value.
69042+ All features enabled in the kernel configuration are disabled at boot
69043+ if you do not say Y to the "Turn on features by default" option.
69044+ All options should be set at startup, and the grsec_lock entry should
69045+ be set to a non-zero value after all the options are set.
69046+ *THIS IS EXTREMELY IMPORTANT*
69047+
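	  The sequence the help text calls for can be sketched as below; the
	  two feature toggles are examples taken from earlier help texts, and
	  error handling is omitted for brevity.

	/* Sketch: set options first, then lock them with grsec_lock. */
	#include <stdio.h>

	static void set_grsec(const char *name, const char *val)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
		f = fopen(path, "w");
		if (f) {
			fputs(val, f);
			fclose(f);
		}
	}

	int main(void)
	{
		set_grsec("audit_mount", "1");
		set_grsec("harden_ptrace", "1");
		set_grsec("grsec_lock", "1");	/* must come last */
		return 0;
	}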
69048+config GRKERNSEC_SYSCTL_DISTRO
69049+ bool "Extra sysctl support for distro makers (READ HELP)"
69050+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
69051+ help
69052+ If you say Y here, additional sysctl options will be created
69053+ for features that affect processes running as root. Therefore,
69054+ it is critical when using this option that the grsec_lock entry be
69055+	  enabled after boot.  Only distros that ship prebuilt kernel packages
69056+	  with this option enabled and that can ensure grsec_lock is set
69057+	  after boot should use this option.
69058+ *Failure to set grsec_lock after boot makes all grsec features
69059+ this option covers useless*
69060+
69061+ Currently this option creates the following sysctl entries:
69062+ "Disable Privileged I/O": "disable_priv_io"
69063+
69064+config GRKERNSEC_SYSCTL_ON
69065+ bool "Turn on features by default"
69066+ default y if GRKERNSEC_CONFIG_AUTO
69067+ depends on GRKERNSEC_SYSCTL
69068+ help
69069+ If you say Y here, instead of having all features enabled in the
69070+ kernel configuration disabled at boot time, the features will be
69071+ enabled at boot time. It is recommended you say Y here unless
69072+ there is some reason you would want all sysctl-tunable features to
69073+ be disabled by default. As mentioned elsewhere, it is important
69074+ to enable the grsec_lock entry once you have finished modifying
69075+ the sysctl entries.
69076+
69077+endmenu
69078+menu "Logging Options"
69079+depends on GRKERNSEC
69080+
69081+config GRKERNSEC_FLOODTIME
69082+ int "Seconds in between log messages (minimum)"
69083+ default 10
69084+ help
69085+	  This option allows you to enforce a minimum number of seconds between
69086+	  grsecurity log messages.  The default should be suitable for most
69087+	  people; however, if you choose to change it, choose a value small enough
69088+ to allow informative logs to be produced, but large enough to
69089+ prevent flooding.
69090+
69091+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
69092+ any rate limiting on grsecurity log messages.
69093+
69094+config GRKERNSEC_FLOODBURST
69095+ int "Number of messages in a burst (maximum)"
69096+ default 6
69097+ help
69098+ This option allows you to choose the maximum number of messages allowed
69099+ within the flood time interval you chose in a separate option. The
69100+	  default should be suitable for most people; however, if you find that
69101+ many of your logs are being interpreted as flooding, you may want to
69102+ raise this value.
69103+
69104+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
69105+ any rate limiting on grsecurity log messages.
69106+
69107+endmenu
69108diff --git a/grsecurity/Makefile b/grsecurity/Makefile
69109new file mode 100644
69110index 0000000..30ababb
69111--- /dev/null
69112+++ b/grsecurity/Makefile
69113@@ -0,0 +1,54 @@
69114+# grsecurity – access control and security hardening for Linux
69115+# All code in this directory and various hooks located throughout the Linux kernel are
69116+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
69117+# http://www.grsecurity.net spender@grsecurity.net
69118+#
69119+# This program is free software; you can redistribute it and/or
69120+# modify it under the terms of the GNU General Public License version 2
69121+# as published by the Free Software Foundation.
69122+#
69123+# This program is distributed in the hope that it will be useful,
69124+# but WITHOUT ANY WARRANTY; without even the implied warranty of
69125+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69126+# GNU General Public License for more details.
69127+#
69128+# You should have received a copy of the GNU General Public License
69129+# along with this program; if not, write to the Free Software
69130+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
69131+
69132+KBUILD_CFLAGS += -Werror
69133+
69134+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
69135+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
69136+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
69137+ grsec_usb.o grsec_ipc.o grsec_proc.o
69138+
69139+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
69140+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
69141+ gracl_learn.o grsec_log.o gracl_policy.o
69142+ifdef CONFIG_COMPAT
69143+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
69144+endif
69145+
69146+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
69147+
69148+ifdef CONFIG_NET
69149+obj-y += grsec_sock.o
69150+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
69151+endif
69152+
69153+ifndef CONFIG_GRKERNSEC
69154+obj-y += grsec_disabled.o
69155+endif
69156+
69157+ifdef CONFIG_GRKERNSEC_HIDESYM
69158+extra-y := grsec_hidesym.o
69159+$(obj)/grsec_hidesym.o:
69160+ @-chmod -f 500 /boot
69161+ @-chmod -f 500 /lib/modules
69162+ @-chmod -f 500 /lib64/modules
69163+ @-chmod -f 500 /lib32/modules
69164+ @-chmod -f 700 .
69165+ @-chmod -f 700 $(objtree)
69166+ @echo ' grsec: protected kernel image paths'
69167+endif
69168diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
69169new file mode 100644
69170index 0000000..6c1e154
69171--- /dev/null
69172+++ b/grsecurity/gracl.c
69173@@ -0,0 +1,2749 @@
69174+#include <linux/kernel.h>
69175+#include <linux/module.h>
69176+#include <linux/sched.h>
69177+#include <linux/mm.h>
69178+#include <linux/file.h>
69179+#include <linux/fs.h>
69180+#include <linux/namei.h>
69181+#include <linux/mount.h>
69182+#include <linux/tty.h>
69183+#include <linux/proc_fs.h>
69184+#include <linux/lglock.h>
69185+#include <linux/slab.h>
69186+#include <linux/vmalloc.h>
69187+#include <linux/types.h>
69188+#include <linux/sysctl.h>
69189+#include <linux/netdevice.h>
69190+#include <linux/ptrace.h>
69191+#include <linux/gracl.h>
69192+#include <linux/gralloc.h>
69193+#include <linux/security.h>
69194+#include <linux/grinternal.h>
69195+#include <linux/pid_namespace.h>
69196+#include <linux/stop_machine.h>
69197+#include <linux/fdtable.h>
69198+#include <linux/percpu.h>
69199+#include <linux/lglock.h>
69200+#include <linux/hugetlb.h>
69201+#include <linux/posix-timers.h>
69202+#include <linux/prefetch.h>
69203+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69204+#include <linux/magic.h>
69205+#include <linux/pagemap.h>
69206+#include "../fs/btrfs/async-thread.h"
69207+#include "../fs/btrfs/ctree.h"
69208+#include "../fs/btrfs/btrfs_inode.h"
69209+#endif
69210+#include "../fs/mount.h"
69211+
69212+#include <asm/uaccess.h>
69213+#include <asm/errno.h>
69214+#include <asm/mman.h>
69215+
69216+#define FOR_EACH_ROLE_START(role) \
69217+ role = running_polstate.role_list; \
69218+ while (role) {
69219+
69220+#define FOR_EACH_ROLE_END(role) \
69221+ role = role->prev; \
69222+ }
69223+
69224+extern struct path gr_real_root;
69225+
69226+static struct gr_policy_state running_polstate;
69227+struct gr_policy_state *polstate = &running_polstate;
69228+extern struct gr_alloc_state *current_alloc_state;
69229+
69230+extern char *gr_shared_page[4];
69231+DEFINE_RWLOCK(gr_inode_lock);
69232+
69233+static unsigned int gr_status __read_only = GR_STATUS_INIT;
69234+
69235+#ifdef CONFIG_NET
69236+extern struct vfsmount *sock_mnt;
69237+#endif
69238+
69239+extern struct vfsmount *pipe_mnt;
69240+extern struct vfsmount *shm_mnt;
69241+
69242+#ifdef CONFIG_HUGETLBFS
69243+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
69244+#endif
69245+
69246+extern u16 acl_sp_role_value;
69247+extern struct acl_object_label *fakefs_obj_rw;
69248+extern struct acl_object_label *fakefs_obj_rwx;
69249+
69250+int gr_acl_is_enabled(void)
69251+{
69252+ return (gr_status & GR_READY);
69253+}
69254+
69255+void gr_enable_rbac_system(void)
69256+{
69257+ pax_open_kernel();
69258+ gr_status |= GR_READY;
69259+ pax_close_kernel();
69260+}
69261+
69262+int gr_rbac_disable(void *unused)
69263+{
69264+ pax_open_kernel();
69265+ gr_status &= ~GR_READY;
69266+ pax_close_kernel();
69267+
69268+ return 0;
69269+}
69270+
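/*
 * btrfs reports a single s_dev for every subvolume and may reuse inode
 * numbers across subvolumes, so btrfs files are keyed here by the
 * subvolume's anonymous device and btrfs_ino() rather than the generic
 * superblock fields.
 */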
69271+static inline dev_t __get_dev(const struct dentry *dentry)
69272+{
69273+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69274+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69275+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
69276+ else
69277+#endif
69278+ return dentry->d_sb->s_dev;
69279+}
69280+
69281+static inline u64 __get_ino(const struct dentry *dentry)
69282+{
69283+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69284+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69285+ return btrfs_ino(dentry->d_inode);
69286+ else
69287+#endif
69288+ return dentry->d_inode->i_ino;
69289+}
69290+
69291+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69292+{
69293+ return __get_dev(dentry);
69294+}
69295+
69296+u64 gr_get_ino_from_dentry(struct dentry *dentry)
69297+{
69298+ return __get_ino(dentry);
69299+}
69300+
69301+static char gr_task_roletype_to_char(struct task_struct *task)
69302+{
69303+ switch (task->role->roletype &
69304+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
69305+ GR_ROLE_SPECIAL)) {
69306+ case GR_ROLE_DEFAULT:
69307+ return 'D';
69308+ case GR_ROLE_USER:
69309+ return 'U';
69310+ case GR_ROLE_GROUP:
69311+ return 'G';
69312+ case GR_ROLE_SPECIAL:
69313+ return 'S';
69314+ }
69315+
69316+ return 'X';
69317+}
69318+
69319+char gr_roletype_to_char(void)
69320+{
69321+ return gr_task_roletype_to_char(current);
69322+}
69323+
69324+__inline__ int
69325+gr_acl_tpe_check(void)
69326+{
69327+ if (unlikely(!(gr_status & GR_READY)))
69328+ return 0;
69329+ if (current->role->roletype & GR_ROLE_TPE)
69330+ return 1;
69331+ else
69332+ return 0;
69333+}
69334+
69335+int
69336+gr_handle_rawio(const struct inode *inode)
69337+{
69338+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69339+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
69340+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
69341+ !capable(CAP_SYS_RAWIO))
69342+ return 1;
69343+#endif
69344+ return 0;
69345+}
69346+
69347+int
69348+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
69349+{
69350+ if (likely(lena != lenb))
69351+ return 0;
69352+
69353+ return !memcmp(a, b, lena);
69354+}
69355+
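/*
 * Path assembly helpers: the output buffer is filled right to left,
 * with *buffer walking backward from the end as each component is
 * prepended, mirroring the d_path() machinery in fs/dcache.c.
 */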
69356+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
69357+{
69358+ *buflen -= namelen;
69359+ if (*buflen < 0)
69360+ return -ENAMETOOLONG;
69361+ *buffer -= namelen;
69362+ memcpy(*buffer, str, namelen);
69363+ return 0;
69364+}
69365+
69366+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
69367+{
69368+ return prepend(buffer, buflen, name->name, name->len);
69369+}
69370+
69371+static int prepend_path(const struct path *path, struct path *root,
69372+ char **buffer, int *buflen)
69373+{
69374+ struct dentry *dentry = path->dentry;
69375+ struct vfsmount *vfsmnt = path->mnt;
69376+ struct mount *mnt = real_mount(vfsmnt);
69377+ bool slash = false;
69378+ int error = 0;
69379+
69380+ while (dentry != root->dentry || vfsmnt != root->mnt) {
69381+ struct dentry * parent;
69382+
69383+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
69384+ /* Global root? */
69385+ if (!mnt_has_parent(mnt)) {
69386+ goto out;
69387+ }
69388+ dentry = mnt->mnt_mountpoint;
69389+ mnt = mnt->mnt_parent;
69390+ vfsmnt = &mnt->mnt;
69391+ continue;
69392+ }
69393+ parent = dentry->d_parent;
69394+ prefetch(parent);
69395+ spin_lock(&dentry->d_lock);
69396+ error = prepend_name(buffer, buflen, &dentry->d_name);
69397+ spin_unlock(&dentry->d_lock);
69398+ if (!error)
69399+ error = prepend(buffer, buflen, "/", 1);
69400+ if (error)
69401+ break;
69402+
69403+ slash = true;
69404+ dentry = parent;
69405+ }
69406+
69407+out:
69408+ if (!error && !slash)
69409+ error = prepend(buffer, buflen, "/", 1);
69410+
69411+ return error;
69412+}
69413+
69414+/* this must be called with mount_lock and rename_lock held */
69415+
69416+static char *__our_d_path(const struct path *path, struct path *root,
69417+ char *buf, int buflen)
69418+{
69419+ char *res = buf + buflen;
69420+ int error;
69421+
69422+ prepend(&res, &buflen, "\0", 1);
69423+ error = prepend_path(path, root, &res, &buflen);
69424+ if (error)
69425+ return ERR_PTR(error);
69426+
69427+ return res;
69428+}
69429+
69430+static char *
69431+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
69432+{
69433+ char *retval;
69434+
69435+ retval = __our_d_path(path, root, buf, buflen);
69436+ if (unlikely(IS_ERR(retval)))
69437+ retval = strcpy(buf, "<path too long>");
69438+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
69439+ retval[1] = '\0';
69440+
69441+ return retval;
69442+}
69443+
69444+static char *
69445+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69446+ char *buf, int buflen)
69447+{
69448+ struct path path;
69449+ char *res;
69450+
69451+ path.dentry = (struct dentry *)dentry;
69452+ path.mnt = (struct vfsmount *)vfsmnt;
69453+
69454+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
69455+ by the RBAC system */
69456+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
69457+
69458+ return res;
69459+}
69460+
69461+static char *
69462+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69463+ char *buf, int buflen)
69464+{
69465+ char *res;
69466+ struct path path;
69467+ struct path root;
69468+ struct task_struct *reaper = init_pid_ns.child_reaper;
69469+
69470+ path.dentry = (struct dentry *)dentry;
69471+ path.mnt = (struct vfsmount *)vfsmnt;
69472+
69473+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
69474+ get_fs_root(reaper->fs, &root);
69475+
69476+ read_seqlock_excl(&mount_lock);
69477+ write_seqlock(&rename_lock);
69478+ res = gen_full_path(&path, &root, buf, buflen);
69479+ write_sequnlock(&rename_lock);
69480+ read_sequnlock_excl(&mount_lock);
69481+
69482+ path_put(&root);
69483+ return res;
69484+}
69485+
69486+char *
69487+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69488+{
69489+ char *ret;
69490+ read_seqlock_excl(&mount_lock);
69491+ write_seqlock(&rename_lock);
69492+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69493+ PAGE_SIZE);
69494+ write_sequnlock(&rename_lock);
69495+ read_sequnlock_excl(&mount_lock);
69496+ return ret;
69497+}
69498+
69499+static char *
69500+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69501+{
69502+ char *ret;
69503+ char *buf;
69504+ int buflen;
69505+
69506+ read_seqlock_excl(&mount_lock);
69507+ write_seqlock(&rename_lock);
69508+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
69509+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
69510+ buflen = (int)(ret - buf);
69511+ if (buflen >= 5)
69512+ prepend(&ret, &buflen, "/proc", 5);
69513+ else
69514+ ret = strcpy(buf, "<path too long>");
69515+ write_sequnlock(&rename_lock);
69516+ read_sequnlock_excl(&mount_lock);
69517+ return ret;
69518+}
69519+
69520+char *
69521+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
69522+{
69523+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69524+ PAGE_SIZE);
69525+}
69526+
69527+char *
69528+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
69529+{
69530+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
69531+ PAGE_SIZE);
69532+}
69533+
69534+char *
69535+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
69536+{
69537+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
69538+ PAGE_SIZE);
69539+}
69540+
69541+char *
69542+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
69543+{
69544+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
69545+ PAGE_SIZE);
69546+}
69547+
69548+char *
69549+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
69550+{
69551+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
69552+ PAGE_SIZE);
69553+}
69554+
69555+__inline__ __u32
69556+to_gr_audit(const __u32 reqmode)
69557+{
69558+ /* masks off auditable permission flags, then shifts them to create
69559+ auditing flags, and adds the special case of append auditing if
69560+ we're requesting write */
69561+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
69562+}
69563+
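/*
 * Role lookup for a (uid, gid) pair: user and user-domain roles are
 * tried first, then group and group-domain roles, then the default
 * role.  A role carrying an allowed_ips list only matches when the
 * task's saved source IP falls inside one of the listed subnets;
 * otherwise the lookup falls through to the next class of role.
 */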
69564+struct acl_role_label *
69565+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
69566+ const gid_t gid)
69567+{
69568+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
69569+ struct acl_role_label *match;
69570+ struct role_allowed_ip *ipp;
69571+ unsigned int x;
69572+ u32 curr_ip = task->signal->saved_ip;
69573+
69574+ match = state->acl_role_set.r_hash[index];
69575+
69576+ while (match) {
69577+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
69578+ for (x = 0; x < match->domain_child_num; x++) {
69579+ if (match->domain_children[x] == uid)
69580+ goto found;
69581+ }
69582+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
69583+ break;
69584+ match = match->next;
69585+ }
69586+found:
69587+ if (match == NULL) {
69588+ try_group:
69589+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
69590+ match = state->acl_role_set.r_hash[index];
69591+
69592+ while (match) {
69593+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
69594+ for (x = 0; x < match->domain_child_num; x++) {
69595+ if (match->domain_children[x] == gid)
69596+ goto found2;
69597+ }
69598+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
69599+ break;
69600+ match = match->next;
69601+ }
69602+found2:
69603+ if (match == NULL)
69604+ match = state->default_role;
69605+ if (match->allowed_ips == NULL)
69606+ return match;
69607+ else {
69608+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69609+ if (likely
69610+ ((ntohl(curr_ip) & ipp->netmask) ==
69611+ (ntohl(ipp->addr) & ipp->netmask)))
69612+ return match;
69613+ }
69614+ match = state->default_role;
69615+ }
69616+ } else if (match->allowed_ips == NULL) {
69617+ return match;
69618+ } else {
69619+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69620+ if (likely
69621+ ((ntohl(curr_ip) & ipp->netmask) ==
69622+ (ntohl(ipp->addr) & ipp->netmask)))
69623+ return match;
69624+ }
69625+ goto try_group;
69626+ }
69627+
69628+ return match;
69629+}
69630+
69631+static struct acl_role_label *
69632+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
69633+ const gid_t gid)
69634+{
69635+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
69636+}
69637+
69638+struct acl_subject_label *
69639+lookup_acl_subj_label(const u64 ino, const dev_t dev,
69640+ const struct acl_role_label *role)
69641+{
69642+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69643+ struct acl_subject_label *match;
69644+
69645+ match = role->subj_hash[index];
69646+
69647+ while (match && (match->inode != ino || match->device != dev ||
69648+ (match->mode & GR_DELETED))) {
69649+ match = match->next;
69650+ }
69651+
69652+ if (match && !(match->mode & GR_DELETED))
69653+ return match;
69654+ else
69655+ return NULL;
69656+}
69657+
69658+struct acl_subject_label *
69659+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
69660+ const struct acl_role_label *role)
69661+{
69662+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69663+ struct acl_subject_label *match;
69664+
69665+ match = role->subj_hash[index];
69666+
69667+ while (match && (match->inode != ino || match->device != dev ||
69668+ !(match->mode & GR_DELETED))) {
69669+ match = match->next;
69670+ }
69671+
69672+ if (match && (match->mode & GR_DELETED))
69673+ return match;
69674+ else
69675+ return NULL;
69676+}
69677+
69678+static struct acl_object_label *
69679+lookup_acl_obj_label(const u64 ino, const dev_t dev,
69680+ const struct acl_subject_label *subj)
69681+{
69682+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69683+ struct acl_object_label *match;
69684+
69685+ match = subj->obj_hash[index];
69686+
69687+ while (match && (match->inode != ino || match->device != dev ||
69688+ (match->mode & GR_DELETED))) {
69689+ match = match->next;
69690+ }
69691+
69692+ if (match && !(match->mode & GR_DELETED))
69693+ return match;
69694+ else
69695+ return NULL;
69696+}
69697+
69698+static struct acl_object_label *
69699+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
69700+ const struct acl_subject_label *subj)
69701+{
69702+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69703+ struct acl_object_label *match;
69704+
69705+ match = subj->obj_hash[index];
69706+
69707+ while (match && (match->inode != ino || match->device != dev ||
69708+ !(match->mode & GR_DELETED))) {
69709+ match = match->next;
69710+ }
69711+
69712+ if (match && (match->mode & GR_DELETED))
69713+ return match;
69714+
69715+ match = subj->obj_hash[index];
69716+
69717+ while (match && (match->inode != ino || match->device != dev ||
69718+ (match->mode & GR_DELETED))) {
69719+ match = match->next;
69720+ }
69721+
69722+ if (match && !(match->mode & GR_DELETED))
69723+ return match;
69724+ else
69725+ return NULL;
69726+}
69727+
69728+struct name_entry *
69729+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
69730+{
69731+ unsigned int len = strlen(name);
69732+ unsigned int key = full_name_hash(name, len);
69733+ unsigned int index = key % state->name_set.n_size;
69734+ struct name_entry *match;
69735+
69736+ match = state->name_set.n_hash[index];
69737+
69738+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
69739+ match = match->next;
69740+
69741+ return match;
69742+}
69743+
69744+static struct name_entry *
69745+lookup_name_entry(const char *name)
69746+{
69747+ return __lookup_name_entry(&running_polstate, name);
69748+}
69749+
69750+static struct name_entry *
69751+lookup_name_entry_create(const char *name)
69752+{
69753+ unsigned int len = strlen(name);
69754+ unsigned int key = full_name_hash(name, len);
69755+ unsigned int index = key % running_polstate.name_set.n_size;
69756+ struct name_entry *match;
69757+
69758+ match = running_polstate.name_set.n_hash[index];
69759+
69760+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
69761+ !match->deleted))
69762+ match = match->next;
69763+
69764+ if (match && match->deleted)
69765+ return match;
69766+
69767+ match = running_polstate.name_set.n_hash[index];
69768+
69769+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
69770+ match->deleted))
69771+ match = match->next;
69772+
69773+ if (match && !match->deleted)
69774+ return match;
69775+ else
69776+ return NULL;
69777+}
69778+
69779+static struct inodev_entry *
69780+lookup_inodev_entry(const u64 ino, const dev_t dev)
69781+{
69782+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
69783+ struct inodev_entry *match;
69784+
69785+ match = running_polstate.inodev_set.i_hash[index];
69786+
69787+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
69788+ match = match->next;
69789+
69790+ return match;
69791+}
69792+
69793+void
69794+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
69795+{
69796+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
69797+ state->inodev_set.i_size);
69798+ struct inodev_entry **curr;
69799+
69800+ entry->prev = NULL;
69801+
69802+ curr = &state->inodev_set.i_hash[index];
69803+ if (*curr != NULL)
69804+ (*curr)->prev = entry;
69805+
69806+ entry->next = *curr;
69807+ *curr = entry;
69808+
69809+ return;
69810+}
69811+
69812+static void
69813+insert_inodev_entry(struct inodev_entry *entry)
69814+{
69815+ __insert_inodev_entry(&running_polstate, entry);
69816+}
69817+
69818+void
69819+insert_acl_obj_label(struct acl_object_label *obj,
69820+ struct acl_subject_label *subj)
69821+{
69822+ unsigned int index =
69823+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
69824+ struct acl_object_label **curr;
69825+
69826+ obj->prev = NULL;
69827+
69828+ curr = &subj->obj_hash[index];
69829+ if (*curr != NULL)
69830+ (*curr)->prev = obj;
69831+
69832+ obj->next = *curr;
69833+ *curr = obj;
69834+
69835+ return;
69836+}
69837+
69838+void
69839+insert_acl_subj_label(struct acl_subject_label *obj,
69840+ struct acl_role_label *role)
69841+{
69842+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
69843+ struct acl_subject_label **curr;
69844+
69845+ obj->prev = NULL;
69846+
69847+ curr = &role->subj_hash[index];
69848+ if (*curr != NULL)
69849+ (*curr)->prev = obj;
69850+
69851+ obj->next = *curr;
69852+ *curr = obj;
69853+
69854+ return;
69855+}
69856+
69857+/* derived from glibc fnmatch() 0: match, 1: no match*/
69858+
69859+static int
69860+glob_match(const char *p, const char *n)
69861+{
69862+ char c;
69863+
69864+ while ((c = *p++) != '\0') {
69865+ switch (c) {
69866+ case '?':
69867+ if (*n == '\0')
69868+ return 1;
69869+ else if (*n == '/')
69870+ return 1;
69871+ break;
69872+ case '\\':
69873+ if (*n != c)
69874+ return 1;
69875+ break;
69876+ case '*':
69877+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
69878+ if (*n == '/')
69879+ return 1;
69880+ else if (c == '?') {
69881+ if (*n == '\0')
69882+ return 1;
69883+ else
69884+ ++n;
69885+ }
69886+ }
69887+ if (c == '\0') {
69888+ return 0;
69889+ } else {
69890+ const char *endp;
69891+
69892+ if ((endp = strchr(n, '/')) == NULL)
69893+ endp = n + strlen(n);
69894+
69895+ if (c == '[') {
69896+ for (--p; n < endp; ++n)
69897+ if (!glob_match(p, n))
69898+ return 0;
69899+ } else if (c == '/') {
69900+ while (*n != '\0' && *n != '/')
69901+ ++n;
69902+ if (*n == '/' && !glob_match(p, n + 1))
69903+ return 0;
69904+ } else {
69905+ for (--p; n < endp; ++n)
69906+ if (*n == c && !glob_match(p, n))
69907+ return 0;
69908+ }
69909+
69910+ return 1;
69911+ }
69912+ case '[':
69913+ {
69914+ int not;
69915+ char cold;
69916+
69917+ if (*n == '\0' || *n == '/')
69918+ return 1;
69919+
69920+ not = (*p == '!' || *p == '^');
69921+ if (not)
69922+ ++p;
69923+
69924+ c = *p++;
69925+ for (;;) {
69926+ unsigned char fn = (unsigned char)*n;
69927+
69928+ if (c == '\0')
69929+ return 1;
69930+ else {
69931+ if (c == fn)
69932+ goto matched;
69933+ cold = c;
69934+ c = *p++;
69935+
69936+ if (c == '-' && *p != ']') {
69937+ unsigned char cend = *p++;
69938+
69939+ if (cend == '\0')
69940+ return 1;
69941+
69942+ if (cold <= fn && fn <= cend)
69943+ goto matched;
69944+
69945+ c = *p++;
69946+ }
69947+ }
69948+
69949+ if (c == ']')
69950+ break;
69951+ }
69952+ if (!not)
69953+ return 1;
69954+ break;
69955+ matched:
69956+ while (c != ']') {
69957+ if (c == '\0')
69958+ return 1;
69959+
69960+ c = *p++;
69961+ }
69962+ if (not)
69963+ return 1;
69964+ }
69965+ break;
69966+ default:
69967+ if (c != *n)
69968+ return 1;
69969+ }
69970+
69971+ ++n;
69972+ }
69973+
69974+ if (*n == '\0')
69975+ return 0;
69976+
69977+ if (*n == '/')
69978+ return 0;
69979+
69980+ return 1;
69981+}
69982+
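/*
 * Example behavior, derived by tracing the implementation above
 * (0 = match, 1 = no match):
 *   glob_match("*.txt", "notes.txt")     -> 0  ('*' matches within a component)
 *   glob_match("*.txt", "dir/notes.txt") -> 1  ('*' does not cross '/')
 *   glob_match("file?", "file1")         -> 0  ('?' matches one non-'/' char)
 */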
69983+static struct acl_object_label *
69984+chk_glob_label(struct acl_object_label *globbed,
69985+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
69986+{
69987+ struct acl_object_label *tmp;
69988+
69989+ if (*path == NULL)
69990+ *path = gr_to_filename_nolock(dentry, mnt);
69991+
69992+ tmp = globbed;
69993+
69994+ while (tmp) {
69995+ if (!glob_match(tmp->filename, *path))
69996+ return tmp;
69997+ tmp = tmp->next;
69998+ }
69999+
70000+ return NULL;
70001+}
70002+
70003+static struct acl_object_label *
70004+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70005+ const u64 curr_ino, const dev_t curr_dev,
70006+ const struct acl_subject_label *subj, char **path, const int checkglob)
70007+{
70008+ struct acl_subject_label *tmpsubj;
70009+ struct acl_object_label *retval;
70010+ struct acl_object_label *retval2;
70011+
70012+ tmpsubj = (struct acl_subject_label *) subj;
70013+ read_lock(&gr_inode_lock);
70014+ do {
70015+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
70016+ if (retval) {
70017+ if (checkglob && retval->globbed) {
70018+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
70019+ if (retval2)
70020+ retval = retval2;
70021+ }
70022+ break;
70023+ }
70024+ } while ((tmpsubj = tmpsubj->parent_subject));
70025+ read_unlock(&gr_inode_lock);
70026+
70027+ return retval;
70028+}
70029+
70030+static __inline__ struct acl_object_label *
70031+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70032+ struct dentry *curr_dentry,
70033+ const struct acl_subject_label *subj, char **path, const int checkglob)
70034+{
70035+ int newglob = checkglob;
70036+ u64 inode;
70037+ dev_t device;
70038+
70039+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
70040+ as we don't want a / * rule to match instead of the / object
70041+ don't do this for create lookups that call this function though, since they're looking up
70042+ on the parent and thus need globbing checks on all paths
70043+ */
70044+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
70045+ newglob = GR_NO_GLOB;
70046+
70047+ spin_lock(&curr_dentry->d_lock);
70048+ inode = __get_ino(curr_dentry);
70049+ device = __get_dev(curr_dentry);
70050+ spin_unlock(&curr_dentry->d_lock);
70051+
70052+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
70053+}
70054+
70055+#ifdef CONFIG_HUGETLBFS
70056+static inline bool
70057+is_hugetlbfs_mnt(const struct vfsmount *mnt)
70058+{
70059+ int i;
70060+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
70061+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
70062+ return true;
70063+ }
70064+
70065+ return false;
70066+}
70067+#endif
70068+
70069+static struct acl_object_label *
70070+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70071+ const struct acl_subject_label *subj, char *path, const int checkglob)
70072+{
70073+ struct dentry *dentry = (struct dentry *) l_dentry;
70074+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70075+ struct mount *real_mnt = real_mount(mnt);
70076+ struct acl_object_label *retval;
70077+ struct dentry *parent;
70078+
70079+ read_seqlock_excl(&mount_lock);
70080+ write_seqlock(&rename_lock);
70081+
70082+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
70083+#ifdef CONFIG_NET
70084+ mnt == sock_mnt ||
70085+#endif
70086+#ifdef CONFIG_HUGETLBFS
70087+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
70088+#endif
70089+ /* ignore Eric Biederman */
70090+ IS_PRIVATE(l_dentry->d_inode))) {
70091+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
70092+ goto out;
70093+ }
70094+
70095+ for (;;) {
70096+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70097+ break;
70098+
70099+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70100+ if (!mnt_has_parent(real_mnt))
70101+ break;
70102+
70103+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70104+ if (retval != NULL)
70105+ goto out;
70106+
70107+ dentry = real_mnt->mnt_mountpoint;
70108+ real_mnt = real_mnt->mnt_parent;
70109+ mnt = &real_mnt->mnt;
70110+ continue;
70111+ }
70112+
70113+ parent = dentry->d_parent;
70114+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70115+ if (retval != NULL)
70116+ goto out;
70117+
70118+ dentry = parent;
70119+ }
70120+
70121+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70122+
70123+ /* gr_real_root is pinned so we don't have to hold a reference */
70124+ if (retval == NULL)
70125+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
70126+out:
70127+ write_sequnlock(&rename_lock);
70128+ read_sequnlock_excl(&mount_lock);
70129+
70130+ BUG_ON(retval == NULL);
70131+
70132+ return retval;
70133+}
70134+
70135+static __inline__ struct acl_object_label *
70136+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70137+ const struct acl_subject_label *subj)
70138+{
70139+ char *path = NULL;
70140+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
70141+}
70142+
70143+static __inline__ struct acl_object_label *
70144+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70145+ const struct acl_subject_label *subj)
70146+{
70147+ char *path = NULL;
70148+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
70149+}
70150+
70151+static __inline__ struct acl_object_label *
70152+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70153+ const struct acl_subject_label *subj, char *path)
70154+{
70155+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
70156+}
70157+
70158+struct acl_subject_label *
70159+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70160+ const struct acl_role_label *role)
70161+{
70162+ struct dentry *dentry = (struct dentry *) l_dentry;
70163+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70164+ struct mount *real_mnt = real_mount(mnt);
70165+ struct acl_subject_label *retval;
70166+ struct dentry *parent;
70167+
70168+ read_seqlock_excl(&mount_lock);
70169+ write_seqlock(&rename_lock);
70170+
70171+ for (;;) {
70172+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70173+ break;
70174+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70175+ if (!mnt_has_parent(real_mnt))
70176+ break;
70177+
70178+ spin_lock(&dentry->d_lock);
70179+ read_lock(&gr_inode_lock);
70180+ retval =
70181+ lookup_acl_subj_label(__get_ino(dentry),
70182+ __get_dev(dentry), role);
70183+ read_unlock(&gr_inode_lock);
70184+ spin_unlock(&dentry->d_lock);
70185+ if (retval != NULL)
70186+ goto out;
70187+
70188+ dentry = real_mnt->mnt_mountpoint;
70189+ real_mnt = real_mnt->mnt_parent;
70190+ mnt = &real_mnt->mnt;
70191+ continue;
70192+ }
70193+
70194+ spin_lock(&dentry->d_lock);
70195+ read_lock(&gr_inode_lock);
70196+ retval = lookup_acl_subj_label(__get_ino(dentry),
70197+ __get_dev(dentry), role);
70198+ read_unlock(&gr_inode_lock);
70199+ parent = dentry->d_parent;
70200+ spin_unlock(&dentry->d_lock);
70201+
70202+ if (retval != NULL)
70203+ goto out;
70204+
70205+ dentry = parent;
70206+ }
70207+
70208+ spin_lock(&dentry->d_lock);
70209+ read_lock(&gr_inode_lock);
70210+ retval = lookup_acl_subj_label(__get_ino(dentry),
70211+ __get_dev(dentry), role);
70212+ read_unlock(&gr_inode_lock);
70213+ spin_unlock(&dentry->d_lock);
70214+
70215+ if (unlikely(retval == NULL)) {
70216+ /* gr_real_root is pinned, we don't need to hold a reference */
70217+ read_lock(&gr_inode_lock);
70218+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
70219+ __get_dev(gr_real_root.dentry), role);
70220+ read_unlock(&gr_inode_lock);
70221+ }
70222+out:
70223+ write_sequnlock(&rename_lock);
70224+ read_sequnlock_excl(&mount_lock);
70225+
70226+ BUG_ON(retval == NULL);
70227+
70228+ return retval;
70229+}
70230+
70231+void
70232+assign_special_role(const char *rolename)
70233+{
70234+ struct acl_object_label *obj;
70235+ struct acl_role_label *r;
70236+ struct acl_role_label *assigned = NULL;
70237+ struct task_struct *tsk;
70238+ struct file *filp;
70239+
70240+ FOR_EACH_ROLE_START(r)
70241+ if (!strcmp(rolename, r->rolename) &&
70242+ (r->roletype & GR_ROLE_SPECIAL)) {
70243+ assigned = r;
70244+ break;
70245+ }
70246+ FOR_EACH_ROLE_END(r)
70247+
70248+ if (!assigned)
70249+ return;
70250+
70251+ read_lock(&tasklist_lock);
70252+ read_lock(&grsec_exec_file_lock);
70253+
70254+ tsk = current->real_parent;
70255+ if (tsk == NULL)
70256+ goto out_unlock;
70257+
70258+ filp = tsk->exec_file;
70259+ if (filp == NULL)
70260+ goto out_unlock;
70261+
70262+ tsk->is_writable = 0;
70263+ tsk->inherited = 0;
70264+
70265+ tsk->acl_sp_role = 1;
70266+ tsk->acl_role_id = ++acl_sp_role_value;
70267+ tsk->role = assigned;
70268+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
70269+
70270+ /* ignore additional mmap checks for processes that are writable
70271+ by the default ACL */
70272+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70273+ if (unlikely(obj->mode & GR_WRITE))
70274+ tsk->is_writable = 1;
70275+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
70276+ if (unlikely(obj->mode & GR_WRITE))
70277+ tsk->is_writable = 1;
70278+
70279+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70280+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
70281+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
70282+#endif
70283+
70284+out_unlock:
70285+ read_unlock(&grsec_exec_file_lock);
70286+ read_unlock(&tasklist_lock);
70287+ return;
70288+}
70289+
70290+
70291+static void
70292+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
70293+{
70294+ struct task_struct *task = current;
70295+ const struct cred *cred = current_cred();
70296+
70297+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
70298+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70299+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70300+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
70301+
70302+ return;
70303+}
70304+
70305+static void
70306+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
70307+{
70308+ struct task_struct *task = current;
70309+ const struct cred *cred = current_cred();
70310+
70311+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70312+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70313+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70314+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
70315+
70316+ return;
70317+}
70318+
70319+static void
70320+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
70321+{
70322+ struct task_struct *task = current;
70323+ const struct cred *cred = current_cred();
70324+
70325+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70326+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70327+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70328+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
70329+
70330+ return;
70331+}
70332+
70333+static void
70334+gr_set_proc_res(struct task_struct *task)
70335+{
70336+ struct acl_subject_label *proc;
70337+ unsigned short i;
70338+
70339+ proc = task->acl;
70340+
70341+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
70342+ return;
70343+
70344+ for (i = 0; i < RLIM_NLIMITS; i++) {
70345+ unsigned long rlim_cur, rlim_max;
70346+
70347+ if (!(proc->resmask & (1U << i)))
70348+ continue;
70349+
70350+ rlim_cur = proc->res[i].rlim_cur;
70351+ rlim_max = proc->res[i].rlim_max;
70352+
70353+ if (i == RLIMIT_NOFILE) {
70354+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
70355+ if (rlim_cur > saved_sysctl_nr_open)
70356+ rlim_cur = saved_sysctl_nr_open;
70357+ if (rlim_max > saved_sysctl_nr_open)
70358+ rlim_max = saved_sysctl_nr_open;
70359+ }
70360+
70361+ task->signal->rlim[i].rlim_cur = rlim_cur;
70362+ task->signal->rlim[i].rlim_max = rlim_max;
70363+
70364+ if (i == RLIMIT_CPU)
70365+ update_rlimit_cpu(task, rlim_cur);
70366+ }
70367+
70368+ return;
70369+}
70370+
70371+/* both of the below must be called with
70372+ rcu_read_lock();
70373+ read_lock(&tasklist_lock);
70374+ read_lock(&grsec_exec_file_lock);
70375+ except in the case of gr_set_role_label() (for __gr_get_subject_for_task)
70376+*/
70377+
70378+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
70379+{
70380+ char *tmpname;
70381+ struct acl_subject_label *tmpsubj;
70382+ struct file *filp;
70383+ struct name_entry *nmatch;
70384+
70385+ filp = task->exec_file;
70386+ if (filp == NULL)
70387+ return NULL;
70388+
70389+ /* the following is to apply the correct subject
70390+ on binaries running when the RBAC system
70391+ is enabled, when the binaries have been
70392+ replaced or deleted since their execution
70393+ -----
70394+ when the RBAC system starts, the inode/dev
70395+ from exec_file will be one the RBAC system
70396+ is unaware of. It only knows the inode/dev
70397+ of the present file on disk, or the absence
70398+ of it.
70399+ */
70400+
70401+ if (filename)
70402+ nmatch = __lookup_name_entry(state, filename);
70403+ else {
70404+ preempt_disable();
70405+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
70406+
70407+ nmatch = __lookup_name_entry(state, tmpname);
70408+ preempt_enable();
70409+ }
70410+ tmpsubj = NULL;
70411+ if (nmatch) {
70412+ if (nmatch->deleted)
70413+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
70414+ else
70415+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
70416+ }
70417+ /* this also works for the reload case -- if we don't match a potentially inherited subject
70418+ then we fall back to a normal lookup based on the binary's ino/dev
70419+ */
70420+ if (tmpsubj == NULL && fallback)
70421+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
70422+
70423+ return tmpsubj;
70424+}
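
/*
 * Illustrative sketch, not part of the patch: the lookup order used by
 * __gr_get_subject_for_task() above.  A pathname-based name entry is
 * consulted first (choosing the deleted-file table when the entry is
 * flagged deleted), and only if that fails does the code fall back to an
 * inode/device lookup on the file currently on disk.  All of the types
 * and names here are simplified stand-ins for the real structures.
 */
struct demo_subj { const char *label; };
struct demo_nentry { int deleted; struct demo_subj *deleted_subj, *live_subj; };

static struct demo_subj *demo_get_subject(const struct demo_nentry *nmatch,
					  struct demo_subj *inode_fallback,
					  int use_fallback)
{
	struct demo_subj *subj = 0;

	if (nmatch)	/* name entry found: pick the table matching its state */
		subj = nmatch->deleted ? nmatch->deleted_subj : nmatch->live_subj;
	if (!subj && use_fallback)	/* no name match: on-disk inode/dev lookup */
		subj = inode_fallback;
	return subj;
}
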
70425+
70426+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
70427+{
70428+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
70429+}
70430+
70431+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
70432+{
70433+ struct acl_object_label *obj;
70434+ struct file *filp;
70435+
70436+ filp = task->exec_file;
70437+
70438+ task->acl = subj;
70439+ task->is_writable = 0;
70440+ /* ignore additional mmap checks for processes that are writable
70441+ by the default ACL */
70442+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
70443+ if (unlikely(obj->mode & GR_WRITE))
70444+ task->is_writable = 1;
70445+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
70446+ if (unlikely(obj->mode & GR_WRITE))
70447+ task->is_writable = 1;
70448+
70449+ gr_set_proc_res(task);
70450+
70451+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70452+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
70453+#endif
70454+}
70455+
70456+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
70457+{
70458+ __gr_apply_subject_to_task(&running_polstate, task, subj);
70459+}
70460+
70461+__u32
70462+gr_search_file(const struct dentry * dentry, const __u32 mode,
70463+ const struct vfsmount * mnt)
70464+{
70465+ __u32 retval = mode;
70466+ struct acl_subject_label *curracl;
70467+ struct acl_object_label *currobj;
70468+
70469+ if (unlikely(!(gr_status & GR_READY)))
70470+ return (mode & ~GR_AUDITS);
70471+
70472+ curracl = current->acl;
70473+
70474+ currobj = chk_obj_label(dentry, mnt, curracl);
70475+ retval = currobj->mode & mode;
70476+
70477+ /* if we're opening a specified transfer file for writing
70478+ (e.g. /dev/initctl), then transfer our role to init
70479+ */
70480+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
70481+ current->role->roletype & GR_ROLE_PERSIST)) {
70482+ struct task_struct *task = init_pid_ns.child_reaper;
70483+
70484+ if (task->role != current->role) {
70485+ struct acl_subject_label *subj;
70486+
70487+ task->acl_sp_role = 0;
70488+ task->acl_role_id = current->acl_role_id;
70489+ task->role = current->role;
70490+ rcu_read_lock();
70491+ read_lock(&grsec_exec_file_lock);
70492+ subj = gr_get_subject_for_task(task, NULL, 1);
70493+ gr_apply_subject_to_task(task, subj);
70494+ read_unlock(&grsec_exec_file_lock);
70495+ rcu_read_unlock();
70496+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
70497+ }
70498+ }
70499+
70500+ if (unlikely
70501+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
70502+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
70503+ __u32 new_mode = mode;
70504+
70505+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70506+
70507+ retval = new_mode;
70508+
70509+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
70510+ new_mode |= GR_INHERIT;
70511+
70512+ if (!(mode & GR_NOLEARN))
70513+ gr_log_learn(dentry, mnt, new_mode);
70514+ }
70515+
70516+ return retval;
70517+}
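
/*
 * Illustrative sketch, not part of the patch: the mode arithmetic in
 * gr_search_file() above.  The granted mode is the intersection of the
 * object's mode and the requested mode; a request counts as satisfied
 * when everything except the audit/suppress bits was granted, and in
 * learning mode an unsatisfied request is granted (minus those bits)
 * and logged instead of denied.  The DEMO_* flags are stand-in values,
 * not the real GR_* constants.
 */
#define DEMO_READ     0x1U
#define DEMO_WRITE    0x2U
#define DEMO_AUDITS   0x4U
#define DEMO_SUPPRESS 0x8U

static unsigned int demo_search(unsigned int objmode, unsigned int reqmode,
				int learning)
{
	unsigned int granted = objmode & reqmode;

	if (learning && granted != (reqmode & ~(DEMO_AUDITS | DEMO_SUPPRESS)))
		granted = reqmode & ~(DEMO_AUDITS | DEMO_SUPPRESS); /* grant and log */

	return granted;
}
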
70518+
70519+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
70520+ const struct dentry *parent,
70521+ const struct vfsmount *mnt)
70522+{
70523+ struct name_entry *match;
70524+ struct acl_object_label *matchpo;
70525+ struct acl_subject_label *curracl;
70526+ char *path;
70527+
70528+ if (unlikely(!(gr_status & GR_READY)))
70529+ return NULL;
70530+
70531+ preempt_disable();
70532+ path = gr_to_filename_rbac(new_dentry, mnt);
70533+ match = lookup_name_entry_create(path);
70534+
70535+ curracl = current->acl;
70536+
70537+ if (match) {
70538+ read_lock(&gr_inode_lock);
70539+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
70540+ read_unlock(&gr_inode_lock);
70541+
70542+ if (matchpo) {
70543+ preempt_enable();
70544+ return matchpo;
70545+ }
70546+ }
70547+
70548+ // lookup parent
70549+
70550+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
70551+
70552+ preempt_enable();
70553+ return matchpo;
70554+}
70555+
70556+__u32
70557+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
70558+ const struct vfsmount * mnt, const __u32 mode)
70559+{
70560+ struct acl_object_label *matchpo;
70561+ __u32 retval;
70562+
70563+ if (unlikely(!(gr_status & GR_READY)))
70564+ return (mode & ~GR_AUDITS);
70565+
70566+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
70567+
70568+ retval = matchpo->mode & mode;
70569+
70570+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
70571+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
70572+ __u32 new_mode = mode;
70573+
70574+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70575+
70576+ gr_log_learn(new_dentry, mnt, new_mode);
70577+ return new_mode;
70578+ }
70579+
70580+ return retval;
70581+}
70582+
70583+__u32
70584+gr_check_link(const struct dentry * new_dentry,
70585+ const struct dentry * parent_dentry,
70586+ const struct vfsmount * parent_mnt,
70587+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
70588+{
70589+ struct acl_object_label *obj;
70590+ __u32 oldmode, newmode;
70591+ __u32 needmode;
70592+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
70593+ GR_DELETE | GR_INHERIT;
70594+
70595+ if (unlikely(!(gr_status & GR_READY)))
70596+ return (GR_CREATE | GR_LINK);
70597+
70598+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
70599+ oldmode = obj->mode;
70600+
70601+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
70602+ newmode = obj->mode;
70603+
70604+ needmode = newmode & checkmodes;
70605+
70606+ // old name for hardlink must have at least the permissions of the new name
70607+ if ((oldmode & needmode) != needmode)
70608+ goto bad;
70609+
70610+ // if old name had restrictions/auditing, make sure the new name does as well
70611+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
70612+
70613+ // don't allow hardlinking of suid/sgid/fcapped files without permission
70614+ if (is_privileged_binary(old_dentry))
70615+ needmode |= GR_SETID;
70616+
70617+ if ((newmode & needmode) != needmode)
70618+ goto bad;
70619+
70620+ // enforce minimum permissions
70621+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
70622+ return newmode;
70623+bad:
70624+ needmode = oldmode;
70625+ if (is_privileged_binary(old_dentry))
70626+ needmode |= GR_SETID;
70627+
70628+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
70629+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
70630+ return (GR_CREATE | GR_LINK);
70631+ } else if (newmode & GR_SUPPRESS)
70632+ return GR_SUPPRESS;
70633+ else
70634+ return 0;
70635+}
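
/*
 * Illustrative sketch, not part of the patch: the hardlink rule enforced
 * by gr_check_link() above, reduced to its three checks.  The old name
 * must already carry every checked permission the new name grants, any
 * restriction/audit bit on the old name (plus setid for privileged
 * binaries) must be present on the new name too, and the new name must
 * grant the minimum create+link permissions.  The LNK_* flags are
 * stand-ins, not the real GR_* constants.
 */
#define LNK_READ   0x01U
#define LNK_WRITE  0x02U
#define LNK_AUDIT  0x04U
#define LNK_SETID  0x08U
#define LNK_CREATE 0x10U
#define LNK_LINK   0x20U

static int demo_link_allowed(unsigned int oldmode, unsigned int newmode,
			     int privileged)
{
	unsigned int checkmodes = LNK_READ | LNK_WRITE;
	unsigned int needmode = newmode & checkmodes;

	if ((oldmode & needmode) != needmode)	/* old name lacks a granted perm */
		return 0;

	needmode = oldmode & LNK_AUDIT;		/* restrictions must carry over */
	if (privileged)
		needmode |= LNK_SETID;
	if ((newmode & needmode) != needmode)
		return 0;

	/* enforce minimum permissions on the new name */
	return (newmode & (LNK_CREATE | LNK_LINK)) == (LNK_CREATE | LNK_LINK);
}
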
70636+
70637+int
70638+gr_check_hidden_task(const struct task_struct *task)
70639+{
70640+ if (unlikely(!(gr_status & GR_READY)))
70641+ return 0;
70642+
70643+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
70644+ return 1;
70645+
70646+ return 0;
70647+}
70648+
70649+int
70650+gr_check_protected_task(const struct task_struct *task)
70651+{
70652+ if (unlikely(!(gr_status & GR_READY) || !task))
70653+ return 0;
70654+
70655+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70656+ task->acl != current->acl)
70657+ return 1;
70658+
70659+ return 0;
70660+}
70661+
70662+int
70663+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
70664+{
70665+ struct task_struct *p;
70666+ int ret = 0;
70667+
70668+ if (unlikely(!(gr_status & GR_READY) || !pid))
70669+ return ret;
70670+
70671+ read_lock(&tasklist_lock);
70672+ do_each_pid_task(pid, type, p) {
70673+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70674+ p->acl != current->acl) {
70675+ ret = 1;
70676+ goto out;
70677+ }
70678+ } while_each_pid_task(pid, type, p);
70679+out:
70680+ read_unlock(&tasklist_lock);
70681+
70682+ return ret;
70683+}
70684+
70685+void
70686+gr_copy_label(struct task_struct *tsk)
70687+{
70688+ struct task_struct *p = current;
70689+
70690+ tsk->inherited = p->inherited;
70691+ tsk->acl_sp_role = 0;
70692+ tsk->acl_role_id = p->acl_role_id;
70693+ tsk->acl = p->acl;
70694+ tsk->role = p->role;
70695+ tsk->signal->used_accept = 0;
70696+ tsk->signal->curr_ip = p->signal->curr_ip;
70697+ tsk->signal->saved_ip = p->signal->saved_ip;
70698+ if (p->exec_file)
70699+ get_file(p->exec_file);
70700+ tsk->exec_file = p->exec_file;
70701+ tsk->is_writable = p->is_writable;
70702+ if (unlikely(p->signal->used_accept)) {
70703+ p->signal->curr_ip = 0;
70704+ p->signal->saved_ip = 0;
70705+ }
70706+
70707+ return;
70708+}
70709+
70710+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
70711+
70712+int
70713+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
70714+{
70715+ unsigned int i;
70716+ __u16 num;
70717+ uid_t *uidlist;
70718+ uid_t curuid;
70719+ int realok = 0;
70720+ int effectiveok = 0;
70721+ int fsok = 0;
70722+ uid_t globalreal, globaleffective, globalfs;
70723+
70724+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
70725+ struct user_struct *user;
70726+
70727+ if (!uid_valid(real))
70728+ goto skipit;
70729+
70730+ /* find user based on global namespace */
70731+
70732+ globalreal = GR_GLOBAL_UID(real);
70733+
70734+ user = find_user(make_kuid(&init_user_ns, globalreal));
70735+ if (user == NULL)
70736+ goto skipit;
70737+
70738+ if (gr_process_kernel_setuid_ban(user)) {
70739+ /* for find_user */
70740+ free_uid(user);
70741+ return 1;
70742+ }
70743+
70744+ /* for find_user */
70745+ free_uid(user);
70746+
70747+skipit:
70748+#endif
70749+
70750+ if (unlikely(!(gr_status & GR_READY)))
70751+ return 0;
70752+
70753+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
70754+ gr_log_learn_uid_change(real, effective, fs);
70755+
70756+ num = current->acl->user_trans_num;
70757+ uidlist = current->acl->user_transitions;
70758+
70759+ if (uidlist == NULL)
70760+ return 0;
70761+
70762+ if (!uid_valid(real)) {
70763+ realok = 1;
70764+ globalreal = (uid_t)-1;
70765+ } else {
70766+ globalreal = GR_GLOBAL_UID(real);
70767+ }
70768+ if (!uid_valid(effective)) {
70769+ effectiveok = 1;
70770+ globaleffective = (uid_t)-1;
70771+ } else {
70772+ globaleffective = GR_GLOBAL_UID(effective);
70773+ }
70774+ if (!uid_valid(fs)) {
70775+ fsok = 1;
70776+ globalfs = (uid_t)-1;
70777+ } else {
70778+ globalfs = GR_GLOBAL_UID(fs);
70779+ }
70780+
70781+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
70782+ for (i = 0; i < num; i++) {
70783+ curuid = uidlist[i];
70784+ if (globalreal == curuid)
70785+ realok = 1;
70786+ if (globaleffective == curuid)
70787+ effectiveok = 1;
70788+ if (globalfs == curuid)
70789+ fsok = 1;
70790+ }
70791+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
70792+ for (i = 0; i < num; i++) {
70793+ curuid = uidlist[i];
70794+ if (globalreal == curuid)
70795+ break;
70796+ if (globaleffective == curuid)
70797+ break;
70798+ if (globalfs == curuid)
70799+ break;
70800+ }
70801+ /* not in deny list */
70802+ if (i == num) {
70803+ realok = 1;
70804+ effectiveok = 1;
70805+ fsok = 1;
70806+ }
70807+ }
70808+
70809+ if (realok && effectiveok && fsok)
70810+ return 0;
70811+ else {
70812+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
70813+ return 1;
70814+ }
70815+}
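
/*
 * Illustrative sketch, not part of the patch: the transition-list check
 * used by gr_check_user_change() above (and gr_check_group_change()
 * below), reduced to a single id.  With a GR_ID_ALLOW list the id must
 * appear in the list to pass; with a GR_ID_DENY list it passes only if
 * it does not appear.  The helper name is hypothetical.
 */
static int demo_id_permitted(unsigned int id, const unsigned int *list,
			     unsigned int n, int is_allow_list)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (list[i] == id)
			return is_allow_list;	/* listed: ok on allow, denied on deny */

	return !is_allow_list;			/* unlisted: denied on allow, ok on deny */
}
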
70816+
70817+int
70818+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
70819+{
70820+ unsigned int i;
70821+ __u16 num;
70822+ gid_t *gidlist;
70823+ gid_t curgid;
70824+ int realok = 0;
70825+ int effectiveok = 0;
70826+ int fsok = 0;
70827+ gid_t globalreal, globaleffective, globalfs;
70828+
70829+ if (unlikely(!(gr_status & GR_READY)))
70830+ return 0;
70831+
70832+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
70833+ gr_log_learn_gid_change(real, effective, fs);
70834+
70835+ num = current->acl->group_trans_num;
70836+ gidlist = current->acl->group_transitions;
70837+
70838+ if (gidlist == NULL)
70839+ return 0;
70840+
70841+ if (!gid_valid(real)) {
70842+ realok = 1;
70843+ globalreal = (gid_t)-1;
70844+ } else {
70845+ globalreal = GR_GLOBAL_GID(real);
70846+ }
70847+ if (!gid_valid(effective)) {
70848+ effectiveok = 1;
70849+ globaleffective = (gid_t)-1;
70850+ } else {
70851+ globaleffective = GR_GLOBAL_GID(effective);
70852+ }
70853+ if (!gid_valid(fs)) {
70854+ fsok = 1;
70855+ globalfs = (gid_t)-1;
70856+ } else {
70857+ globalfs = GR_GLOBAL_GID(fs);
70858+ }
70859+
70860+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
70861+ for (i = 0; i < num; i++) {
70862+ curgid = gidlist[i];
70863+ if (globalreal == curgid)
70864+ realok = 1;
70865+ if (globaleffective == curgid)
70866+ effectiveok = 1;
70867+ if (globalfs == curgid)
70868+ fsok = 1;
70869+ }
70870+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
70871+ for (i = 0; i < num; i++) {
70872+ curgid = gidlist[i];
70873+ if (globalreal == curgid)
70874+ break;
70875+ if (globaleffective == curgid)
70876+ break;
70877+ if (globalfs == curgid)
70878+ break;
70879+ }
70880+ /* not in deny list */
70881+ if (i == num) {
70882+ realok = 1;
70883+ effectiveok = 1;
70884+ fsok = 1;
70885+ }
70886+ }
70887+
70888+ if (realok && effectiveok && fsok)
70889+ return 0;
70890+ else {
70891+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
70892+ return 1;
70893+ }
70894+}
70895+
70896+extern int gr_acl_is_capable(const int cap);
70897+
70898+void
70899+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
70900+{
70901+ struct acl_role_label *role = task->role;
70902+ struct acl_role_label *origrole = role;
70903+ struct acl_subject_label *subj = NULL;
70904+ struct acl_object_label *obj;
70905+ struct file *filp;
70906+ uid_t uid;
70907+ gid_t gid;
70908+
70909+ if (unlikely(!(gr_status & GR_READY)))
70910+ return;
70911+
70912+ uid = GR_GLOBAL_UID(kuid);
70913+ gid = GR_GLOBAL_GID(kgid);
70914+
70915+ filp = task->exec_file;
70916+
70917+	/* kernel process; give it the kernel role */
70918+ if (unlikely(!filp)) {
70919+ task->role = running_polstate.kernel_role;
70920+ task->acl = running_polstate.kernel_role->root_label;
70921+ return;
70922+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
70923+ /* save the current ip at time of role lookup so that the proper
70924+ IP will be learned for role_allowed_ip */
70925+ task->signal->saved_ip = task->signal->curr_ip;
70926+ role = lookup_acl_role_label(task, uid, gid);
70927+ }
70928+
70929+ /* don't change the role if we're not a privileged process */
70930+ if (role && task->role != role &&
70931+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
70932+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
70933+ return;
70934+
70935+ task->role = role;
70936+
70937+ if (task->inherited) {
70938+ /* if we reached our subject through inheritance, then first see
70939+ if there's a subject of the same name in the new role that has
70940+ an object that would result in the same inherited subject
70941+ */
70942+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
70943+ if (subj) {
70944+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
70945+ if (!(obj->mode & GR_INHERIT))
70946+ subj = NULL;
70947+ }
70948+
70949+ }
70950+ if (subj == NULL) {
70951+	/* otherwise:
70952+	   perform the subject lookup in the (possibly new) role;
70953+	   we can reuse this result below in the case where role == task->role
70954+	   */
70955+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
70956+ }
70957+
70958+	/* if we changed uid/gid but ended up in the same role
70959+	   and are using inheritance, don't lose the inherited subject:
70960+	   if the current subject differs from what a normal lookup
70961+	   would produce, we arrived at it via inheritance, so keep
70962+	   that subject
70963+	   */
70964+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
70965+ (subj == task->acl)))
70966+ task->acl = subj;
70967+
70968+ /* leave task->inherited unaffected */
70969+
70970+ task->is_writable = 0;
70971+
70972+ /* ignore additional mmap checks for processes that are writable
70973+ by the default ACL */
70974+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70975+ if (unlikely(obj->mode & GR_WRITE))
70976+ task->is_writable = 1;
70977+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
70978+ if (unlikely(obj->mode & GR_WRITE))
70979+ task->is_writable = 1;
70980+
70981+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70982+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
70983+#endif
70984+
70985+ gr_set_proc_res(task);
70986+
70987+ return;
70988+}
70989+
70990+int
70991+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
70992+ const int unsafe_flags)
70993+{
70994+ struct task_struct *task = current;
70995+ struct acl_subject_label *newacl;
70996+ struct acl_object_label *obj;
70997+ __u32 retmode;
70998+
70999+ if (unlikely(!(gr_status & GR_READY)))
71000+ return 0;
71001+
71002+ newacl = chk_subj_label(dentry, mnt, task->role);
71003+
71004+	/* special handling for the case where we did an strace -f -p <pid> from an admin role,
71005+	   and pid then did an exec
71006+	*/
71007+ rcu_read_lock();
71008+ read_lock(&tasklist_lock);
71009+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
71010+ (task->parent->acl->mode & GR_POVERRIDE))) {
71011+ read_unlock(&tasklist_lock);
71012+ rcu_read_unlock();
71013+ goto skip_check;
71014+ }
71015+ read_unlock(&tasklist_lock);
71016+ rcu_read_unlock();
71017+
71018+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
71019+ !(task->role->roletype & GR_ROLE_GOD) &&
71020+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
71021+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71022+ if (unsafe_flags & LSM_UNSAFE_SHARE)
71023+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
71024+ else
71025+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
71026+ return -EACCES;
71027+ }
71028+
71029+skip_check:
71030+
71031+ obj = chk_obj_label(dentry, mnt, task->acl);
71032+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
71033+
71034+ if (!(task->acl->mode & GR_INHERITLEARN) &&
71035+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
71036+ if (obj->nested)
71037+ task->acl = obj->nested;
71038+ else
71039+ task->acl = newacl;
71040+ task->inherited = 0;
71041+ } else {
71042+ task->inherited = 1;
71043+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
71044+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
71045+ }
71046+
71047+ task->is_writable = 0;
71048+
71049+ /* ignore additional mmap checks for processes that are writable
71050+ by the default ACL */
71051+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
71052+ if (unlikely(obj->mode & GR_WRITE))
71053+ task->is_writable = 1;
71054+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
71055+ if (unlikely(obj->mode & GR_WRITE))
71056+ task->is_writable = 1;
71057+
71058+ gr_set_proc_res(task);
71059+
71060+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71061+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71062+#endif
71063+ return 0;
71064+}
71065+
71066+/* always called with valid inodev ptr */
71067+static void
71068+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
71069+{
71070+ struct acl_object_label *matchpo;
71071+ struct acl_subject_label *matchps;
71072+ struct acl_subject_label *subj;
71073+ struct acl_role_label *role;
71074+ unsigned int x;
71075+
71076+ FOR_EACH_ROLE_START(role)
71077+ FOR_EACH_SUBJECT_START(role, subj, x)
71078+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71079+ matchpo->mode |= GR_DELETED;
71080+ FOR_EACH_SUBJECT_END(subj,x)
71081+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71082+ /* nested subjects aren't in the role's subj_hash table */
71083+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71084+ matchpo->mode |= GR_DELETED;
71085+ FOR_EACH_NESTED_SUBJECT_END(subj)
71086+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
71087+ matchps->mode |= GR_DELETED;
71088+ FOR_EACH_ROLE_END(role)
71089+
71090+ inodev->nentry->deleted = 1;
71091+
71092+ return;
71093+}
71094+
71095+void
71096+gr_handle_delete(const u64 ino, const dev_t dev)
71097+{
71098+ struct inodev_entry *inodev;
71099+
71100+ if (unlikely(!(gr_status & GR_READY)))
71101+ return;
71102+
71103+ write_lock(&gr_inode_lock);
71104+ inodev = lookup_inodev_entry(ino, dev);
71105+ if (inodev != NULL)
71106+ do_handle_delete(inodev, ino, dev);
71107+ write_unlock(&gr_inode_lock);
71108+
71109+ return;
71110+}
71111+
71112+static void
71113+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
71114+ const u64 newinode, const dev_t newdevice,
71115+ struct acl_subject_label *subj)
71116+{
71117+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
71118+ struct acl_object_label *match;
71119+
71120+ match = subj->obj_hash[index];
71121+
71122+ while (match && (match->inode != oldinode ||
71123+ match->device != olddevice ||
71124+ !(match->mode & GR_DELETED)))
71125+ match = match->next;
71126+
71127+ if (match && (match->inode == oldinode)
71128+ && (match->device == olddevice)
71129+ && (match->mode & GR_DELETED)) {
71130+ if (match->prev == NULL) {
71131+ subj->obj_hash[index] = match->next;
71132+ if (match->next != NULL)
71133+ match->next->prev = NULL;
71134+ } else {
71135+ match->prev->next = match->next;
71136+ if (match->next != NULL)
71137+ match->next->prev = match->prev;
71138+ }
71139+ match->prev = NULL;
71140+ match->next = NULL;
71141+ match->inode = newinode;
71142+ match->device = newdevice;
71143+ match->mode &= ~GR_DELETED;
71144+
71145+ insert_acl_obj_label(match, subj);
71146+ }
71147+
71148+ return;
71149+}
71150+
71151+static void
71152+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
71153+ const u64 newinode, const dev_t newdevice,
71154+ struct acl_role_label *role)
71155+{
71156+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
71157+ struct acl_subject_label *match;
71158+
71159+ match = role->subj_hash[index];
71160+
71161+ while (match && (match->inode != oldinode ||
71162+ match->device != olddevice ||
71163+ !(match->mode & GR_DELETED)))
71164+ match = match->next;
71165+
71166+ if (match && (match->inode == oldinode)
71167+ && (match->device == olddevice)
71168+ && (match->mode & GR_DELETED)) {
71169+ if (match->prev == NULL) {
71170+ role->subj_hash[index] = match->next;
71171+ if (match->next != NULL)
71172+ match->next->prev = NULL;
71173+ } else {
71174+ match->prev->next = match->next;
71175+ if (match->next != NULL)
71176+ match->next->prev = match->prev;
71177+ }
71178+ match->prev = NULL;
71179+ match->next = NULL;
71180+ match->inode = newinode;
71181+ match->device = newdevice;
71182+ match->mode &= ~GR_DELETED;
71183+
71184+ insert_acl_subj_label(match, role);
71185+ }
71186+
71187+ return;
71188+}
71189+
71190+static void
71191+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
71192+ const u64 newinode, const dev_t newdevice)
71193+{
71194+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
71195+ struct inodev_entry *match;
71196+
71197+ match = running_polstate.inodev_set.i_hash[index];
71198+
71199+ while (match && (match->nentry->inode != oldinode ||
71200+ match->nentry->device != olddevice || !match->nentry->deleted))
71201+ match = match->next;
71202+
71203+ if (match && (match->nentry->inode == oldinode)
71204+ && (match->nentry->device == olddevice) &&
71205+ match->nentry->deleted) {
71206+ if (match->prev == NULL) {
71207+ running_polstate.inodev_set.i_hash[index] = match->next;
71208+ if (match->next != NULL)
71209+ match->next->prev = NULL;
71210+ } else {
71211+ match->prev->next = match->next;
71212+ if (match->next != NULL)
71213+ match->next->prev = match->prev;
71214+ }
71215+ match->prev = NULL;
71216+ match->next = NULL;
71217+ match->nentry->inode = newinode;
71218+ match->nentry->device = newdevice;
71219+ match->nentry->deleted = 0;
71220+
71221+ insert_inodev_entry(match);
71222+ }
71223+
71224+ return;
71225+}
71226+
71227+static void
71228+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
71229+{
71230+ struct acl_subject_label *subj;
71231+ struct acl_role_label *role;
71232+ unsigned int x;
71233+
71234+ FOR_EACH_ROLE_START(role)
71235+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
71236+
71237+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71238+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
71239+ subj->inode = ino;
71240+ subj->device = dev;
71241+ }
71242+ /* nested subjects aren't in the role's subj_hash table */
71243+ update_acl_obj_label(matchn->inode, matchn->device,
71244+ ino, dev, subj);
71245+ FOR_EACH_NESTED_SUBJECT_END(subj)
71246+ FOR_EACH_SUBJECT_START(role, subj, x)
71247+ update_acl_obj_label(matchn->inode, matchn->device,
71248+ ino, dev, subj);
71249+ FOR_EACH_SUBJECT_END(subj,x)
71250+ FOR_EACH_ROLE_END(role)
71251+
71252+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
71253+
71254+ return;
71255+}
71256+
71257+static void
71258+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
71259+ const struct vfsmount *mnt)
71260+{
71261+ u64 ino = __get_ino(dentry);
71262+ dev_t dev = __get_dev(dentry);
71263+
71264+ __do_handle_create(matchn, ino, dev);
71265+
71266+ return;
71267+}
71268+
71269+void
71270+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
71271+{
71272+ struct name_entry *matchn;
71273+
71274+ if (unlikely(!(gr_status & GR_READY)))
71275+ return;
71276+
71277+ preempt_disable();
71278+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
71279+
71280+ if (unlikely((unsigned long)matchn)) {
71281+ write_lock(&gr_inode_lock);
71282+ do_handle_create(matchn, dentry, mnt);
71283+ write_unlock(&gr_inode_lock);
71284+ }
71285+ preempt_enable();
71286+
71287+ return;
71288+}
71289+
71290+void
71291+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
71292+{
71293+ struct name_entry *matchn;
71294+
71295+ if (unlikely(!(gr_status & GR_READY)))
71296+ return;
71297+
71298+ preempt_disable();
71299+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
71300+
71301+ if (unlikely((unsigned long)matchn)) {
71302+ write_lock(&gr_inode_lock);
71303+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
71304+ write_unlock(&gr_inode_lock);
71305+ }
71306+ preempt_enable();
71307+
71308+ return;
71309+}
71310+
71311+void
71312+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
71313+ struct dentry *old_dentry,
71314+ struct dentry *new_dentry,
71315+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
71316+{
71317+ struct name_entry *matchn;
71318+ struct name_entry *matchn2 = NULL;
71319+ struct inodev_entry *inodev;
71320+ struct inode *inode = new_dentry->d_inode;
71321+ u64 old_ino = __get_ino(old_dentry);
71322+ dev_t old_dev = __get_dev(old_dentry);
71323+ unsigned int exchange = flags & RENAME_EXCHANGE;
71324+
71325+	/* vfs_rename swaps the name and parent link for old_dentry and
71326+	   new_dentry.
71327+	   at this point, old_dentry has the new name, parent link, and inode
71328+	   for the renamed file;
71329+	   if a file is being replaced by a rename, new_dentry has the inode
71330+	   and name of the replaced file
71331+	*/
71332+
71333+ if (unlikely(!(gr_status & GR_READY)))
71334+ return;
71335+
71336+ preempt_disable();
71337+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
71338+
71339+ /* exchange cases:
71340+ a filename exists for the source, but not dest
71341+ do a recreate on source
71342+ a filename exists for the dest, but not source
71343+ do a recreate on dest
71344+ a filename exists for both source and dest
71345+ delete source and dest, then create source and dest
71346+ a filename exists for neither source nor dest
71347+ no updates needed
71348+
71349+	   the name entry lookups get us the old inode/dev associated with
71350+	   each name, so do the deletes first (if possible) so that when
71351+	   we do the creates, we pick up on the right entries
71352+	   (this ordering is sketched after this function) */
71353+
71354+ if (exchange)
71355+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
71356+
71357+ /* we wouldn't have to check d_inode if it weren't for
71358+ NFS silly-renaming
71359+ */
71360+
71361+ write_lock(&gr_inode_lock);
71362+ if (unlikely((replace || exchange) && inode)) {
71363+ u64 new_ino = __get_ino(new_dentry);
71364+ dev_t new_dev = __get_dev(new_dentry);
71365+
71366+ inodev = lookup_inodev_entry(new_ino, new_dev);
71367+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
71368+ do_handle_delete(inodev, new_ino, new_dev);
71369+ }
71370+
71371+ inodev = lookup_inodev_entry(old_ino, old_dev);
71372+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
71373+ do_handle_delete(inodev, old_ino, old_dev);
71374+
71375+ if (unlikely(matchn != NULL))
71376+ do_handle_create(matchn, old_dentry, mnt);
71377+
71378+ if (unlikely(matchn2 != NULL))
71379+ do_handle_create(matchn2, new_dentry, mnt);
71380+
71381+ write_unlock(&gr_inode_lock);
71382+ preempt_enable();
71383+
71384+ return;
71385+}
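
/*
 * Illustrative sketch, not part of the patch: the delete-then-create
 * ordering described in the comment inside gr_handle_rename() above,
 * for the RENAME_EXCHANGE case.  Both deletes run before either create
 * so that the create lookups see already-updated name entries.  The
 * callbacks and flags are hypothetical stand-ins for the real helpers.
 */
static void demo_exchange_update(int src_has_entry, int dst_has_entry,
				 void (*del)(const char *),
				 void (*create)(const char *))
{
	if (src_has_entry)
		del("source");
	if (dst_has_entry)
		del("dest");
	if (src_has_entry)
		create("source");
	if (dst_has_entry)
		create("dest");
}
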
71386+
71387+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
71388+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
71389+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
71390+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
71391+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
71392+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
71393+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
71394+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
71395+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
71396+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
71397+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
71398+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
71399+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
71400+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
71401+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
71402+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
71403+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
71404+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
71405+};
71406+
71407+void
71408+gr_learn_resource(const struct task_struct *task,
71409+ const int res, const unsigned long wanted, const int gt)
71410+{
71411+ struct acl_subject_label *acl;
71412+ const struct cred *cred;
71413+
71414+ if (unlikely((gr_status & GR_READY) &&
71415+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
71416+ goto skip_reslog;
71417+
71418+ gr_log_resource(task, res, wanted, gt);
71419+skip_reslog:
71420+
71421+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
71422+ return;
71423+
71424+ acl = task->acl;
71425+
71426+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
71427+ !(acl->resmask & (1U << (unsigned short) res))))
71428+ return;
71429+
71430+ if (wanted >= acl->res[res].rlim_cur) {
71431+ unsigned long res_add;
71432+
71433+ res_add = wanted + res_learn_bumps[res];
71434+
71435+ acl->res[res].rlim_cur = res_add;
71436+
71437+ if (wanted > acl->res[res].rlim_max)
71438+ acl->res[res].rlim_max = res_add;
71439+
71440+ /* only log the subject filename, since resource logging is supported for
71441+ single-subject learning only */
71442+ rcu_read_lock();
71443+ cred = __task_cred(task);
71444+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
71445+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
71446+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
71447+ "", (unsigned long) res, &task->signal->saved_ip);
71448+ rcu_read_unlock();
71449+ }
71450+
71451+ return;
71452+}
71453+EXPORT_SYMBOL_GPL(gr_learn_resource);
71454+#endif
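
/*
 * Illustrative sketch, not part of the patch: the limit-raising step in
 * gr_learn_resource() above.  When a learning subject exceeds its soft
 * limit, the soft limit is raised to the wanted value plus a
 * per-resource slack (the GR_RLIM_*_BUMP table) so the generated policy
 * isn't an exact, brittle fit, and the hard limit follows when wanted
 * exceeds it.  The function name is hypothetical.
 */
static void demo_learn_bump(unsigned long *rlim_cur, unsigned long *rlim_max,
			    unsigned long wanted, unsigned long bump)
{
	if (wanted >= *rlim_cur) {
		unsigned long bumped = wanted + bump;

		*rlim_cur = bumped;
		if (wanted > *rlim_max)
			*rlim_max = bumped;
	}
}
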
71455+
71456+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
71457+void
71458+pax_set_initial_flags(struct linux_binprm *bprm)
71459+{
71460+ struct task_struct *task = current;
71461+ struct acl_subject_label *proc;
71462+ unsigned long flags;
71463+
71464+ if (unlikely(!(gr_status & GR_READY)))
71465+ return;
71466+
71467+ flags = pax_get_flags(task);
71468+
71469+ proc = task->acl;
71470+
71471+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
71472+ flags &= ~MF_PAX_PAGEEXEC;
71473+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
71474+ flags &= ~MF_PAX_SEGMEXEC;
71475+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
71476+ flags &= ~MF_PAX_RANDMMAP;
71477+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
71478+ flags &= ~MF_PAX_EMUTRAMP;
71479+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
71480+ flags &= ~MF_PAX_MPROTECT;
71481+
71482+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
71483+ flags |= MF_PAX_PAGEEXEC;
71484+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
71485+ flags |= MF_PAX_SEGMEXEC;
71486+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
71487+ flags |= MF_PAX_RANDMMAP;
71488+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
71489+ flags |= MF_PAX_EMUTRAMP;
71490+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
71491+ flags |= MF_PAX_MPROTECT;
71492+
71493+ pax_set_flags(task, flags);
71494+
71495+ return;
71496+}
71497+#endif
71498+
71499+int
71500+gr_handle_proc_ptrace(struct task_struct *task)
71501+{
71502+ struct file *filp;
71503+ struct task_struct *tmp = task;
71504+ struct task_struct *curtemp = current;
71505+ __u32 retmode;
71506+
71507+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71508+ if (unlikely(!(gr_status & GR_READY)))
71509+ return 0;
71510+#endif
71511+
71512+ read_lock(&tasklist_lock);
71513+ read_lock(&grsec_exec_file_lock);
71514+ filp = task->exec_file;
71515+
71516+ while (task_pid_nr(tmp) > 0) {
71517+ if (tmp == curtemp)
71518+ break;
71519+ tmp = tmp->real_parent;
71520+ }
71521+
71522+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71523+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
71524+ read_unlock(&grsec_exec_file_lock);
71525+ read_unlock(&tasklist_lock);
71526+ return 1;
71527+ }
71528+
71529+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71530+ if (!(gr_status & GR_READY)) {
71531+ read_unlock(&grsec_exec_file_lock);
71532+ read_unlock(&tasklist_lock);
71533+ return 0;
71534+ }
71535+#endif
71536+
71537+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
71538+ read_unlock(&grsec_exec_file_lock);
71539+ read_unlock(&tasklist_lock);
71540+
71541+ if (retmode & GR_NOPTRACE)
71542+ return 1;
71543+
71544+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
71545+ && (current->acl != task->acl || (current->acl != current->role->root_label
71546+ && task_pid_nr(current) != task_pid_nr(task))))
71547+ return 1;
71548+
71549+ return 0;
71550+}
71551+
71552+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
71553+{
71554+ if (unlikely(!(gr_status & GR_READY)))
71555+ return;
71556+
71557+ if (!(current->role->roletype & GR_ROLE_GOD))
71558+ return;
71559+
71560+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
71561+ p->role->rolename, gr_task_roletype_to_char(p),
71562+ p->acl->filename);
71563+}
71564+
71565+int
71566+gr_handle_ptrace(struct task_struct *task, const long request)
71567+{
71568+ struct task_struct *tmp = task;
71569+ struct task_struct *curtemp = current;
71570+ __u32 retmode;
71571+
71572+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71573+ if (unlikely(!(gr_status & GR_READY)))
71574+ return 0;
71575+#endif
71576+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71577+ read_lock(&tasklist_lock);
71578+ while (task_pid_nr(tmp) > 0) {
71579+ if (tmp == curtemp)
71580+ break;
71581+ tmp = tmp->real_parent;
71582+ }
71583+
71584+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71585+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
71586+ read_unlock(&tasklist_lock);
71587+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71588+ return 1;
71589+ }
71590+ read_unlock(&tasklist_lock);
71591+ }
71592+
71593+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71594+ if (!(gr_status & GR_READY))
71595+ return 0;
71596+#endif
71597+
71598+ read_lock(&grsec_exec_file_lock);
71599+ if (unlikely(!task->exec_file)) {
71600+ read_unlock(&grsec_exec_file_lock);
71601+ return 0;
71602+ }
71603+
71604+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
71605+ read_unlock(&grsec_exec_file_lock);
71606+
71607+ if (retmode & GR_NOPTRACE) {
71608+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71609+ return 1;
71610+ }
71611+
71612+ if (retmode & GR_PTRACERD) {
71613+ switch (request) {
71614+ case PTRACE_SEIZE:
71615+ case PTRACE_POKETEXT:
71616+ case PTRACE_POKEDATA:
71617+ case PTRACE_POKEUSR:
71618+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
71619+ case PTRACE_SETREGS:
71620+ case PTRACE_SETFPREGS:
71621+#endif
71622+#ifdef CONFIG_X86
71623+ case PTRACE_SETFPXREGS:
71624+#endif
71625+#ifdef CONFIG_ALTIVEC
71626+ case PTRACE_SETVRREGS:
71627+#endif
71628+ return 1;
71629+ default:
71630+ return 0;
71631+ }
71632+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
71633+ !(current->role->roletype & GR_ROLE_GOD) &&
71634+ (current->acl != task->acl)) {
71635+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71636+ return 1;
71637+ }
71638+
71639+ return 0;
71640+}
71641+
71642+static int is_writable_mmap(const struct file *filp)
71643+{
71644+ struct task_struct *task = current;
71645+ struct acl_object_label *obj, *obj2;
71646+
71647+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
71648+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
71649+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71650+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
71651+ task->role->root_label);
71652+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
71653+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
71654+ return 1;
71655+ }
71656+ }
71657+ return 0;
71658+}
71659+
71660+int
71661+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
71662+{
71663+ __u32 mode;
71664+
71665+ if (unlikely(!file || !(prot & PROT_EXEC)))
71666+ return 1;
71667+
71668+ if (is_writable_mmap(file))
71669+ return 0;
71670+
71671+ mode =
71672+ gr_search_file(file->f_path.dentry,
71673+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71674+ file->f_path.mnt);
71675+
71676+ if (!gr_tpe_allow(file))
71677+ return 0;
71678+
71679+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71680+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71681+ return 0;
71682+ } else if (unlikely(!(mode & GR_EXEC))) {
71683+ return 0;
71684+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71685+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71686+ return 1;
71687+ }
71688+
71689+ return 1;
71690+}
71691+
71692+int
71693+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
71694+{
71695+ __u32 mode;
71696+
71697+ if (unlikely(!file || !(prot & PROT_EXEC)))
71698+ return 1;
71699+
71700+ if (is_writable_mmap(file))
71701+ return 0;
71702+
71703+ mode =
71704+ gr_search_file(file->f_path.dentry,
71705+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71706+ file->f_path.mnt);
71707+
71708+ if (!gr_tpe_allow(file))
71709+ return 0;
71710+
71711+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71712+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71713+ return 0;
71714+ } else if (unlikely(!(mode & GR_EXEC))) {
71715+ return 0;
71716+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71717+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71718+ return 1;
71719+ }
71720+
71721+ return 1;
71722+}
71723+
71724+void
71725+gr_acl_handle_psacct(struct task_struct *task, const long code)
71726+{
71727+ unsigned long runtime, cputime;
71728+ cputime_t utime, stime;
71729+ unsigned int wday, cday;
71730+ __u8 whr, chr;
71731+ __u8 wmin, cmin;
71732+ __u8 wsec, csec;
71733+ struct timespec curtime, starttime;
71734+
71735+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
71736+ !(task->acl->mode & GR_PROCACCT)))
71737+ return;
71738+
71739+ curtime = ns_to_timespec(ktime_get_ns());
71740+ starttime = ns_to_timespec(task->start_time);
71741+ runtime = curtime.tv_sec - starttime.tv_sec;
71742+ wday = runtime / (60 * 60 * 24);
71743+ runtime -= wday * (60 * 60 * 24);
71744+ whr = runtime / (60 * 60);
71745+ runtime -= whr * (60 * 60);
71746+ wmin = runtime / 60;
71747+ runtime -= wmin * 60;
71748+ wsec = runtime;
71749+
71750+ task_cputime(task, &utime, &stime);
71751+ cputime = cputime_to_secs(utime + stime);
71752+ cday = cputime / (60 * 60 * 24);
71753+ cputime -= cday * (60 * 60 * 24);
71754+ chr = cputime / (60 * 60);
71755+ cputime -= chr * (60 * 60);
71756+ cmin = cputime / 60;
71757+ cputime -= cmin * 60;
71758+ csec = cputime;
71759+
71760+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
71761+
71762+ return;
71763+}
71764+
71765+#ifdef CONFIG_TASKSTATS
71766+int gr_is_taskstats_denied(int pid)
71767+{
71768+ struct task_struct *task;
71769+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71770+ const struct cred *cred;
71771+#endif
71772+ int ret = 0;
71773+
71774+	/* restrict taskstats viewing to un-chrooted root users and, if the
71775+	   RBAC system is enabled, additionally require the 'view' subject flag
71776+	*/
71777+
71778+ rcu_read_lock();
71779+ read_lock(&tasklist_lock);
71780+ task = find_task_by_vpid(pid);
71781+ if (task) {
71782+#ifdef CONFIG_GRKERNSEC_CHROOT
71783+ if (proc_is_chrooted(task))
71784+ ret = -EACCES;
71785+#endif
71786+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71787+ cred = __task_cred(task);
71788+#ifdef CONFIG_GRKERNSEC_PROC_USER
71789+ if (gr_is_global_nonroot(cred->uid))
71790+ ret = -EACCES;
71791+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71792+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
71793+ ret = -EACCES;
71794+#endif
71795+#endif
71796+ if (gr_status & GR_READY) {
71797+ if (!(task->acl->mode & GR_VIEW))
71798+ ret = -EACCES;
71799+ }
71800+ } else
71801+ ret = -ENOENT;
71802+
71803+ read_unlock(&tasklist_lock);
71804+ rcu_read_unlock();
71805+
71806+ return ret;
71807+}
71808+#endif
71809+
71810+/* AUXV entries are filled via a descendant of search_binary_handler
71811+ after we've already applied the subject for the target
71812+*/
71813+int gr_acl_enable_at_secure(void)
71814+{
71815+ if (unlikely(!(gr_status & GR_READY)))
71816+ return 0;
71817+
71818+ if (current->acl->mode & GR_ATSECURE)
71819+ return 1;
71820+
71821+ return 0;
71822+}
71823+
71824+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
71825+{
71826+ struct task_struct *task = current;
71827+ struct dentry *dentry = file->f_path.dentry;
71828+ struct vfsmount *mnt = file->f_path.mnt;
71829+ struct acl_object_label *obj, *tmp;
71830+ struct acl_subject_label *subj;
71831+ unsigned int bufsize;
71832+ int is_not_root;
71833+ char *path;
71834+ dev_t dev = __get_dev(dentry);
71835+
71836+ if (unlikely(!(gr_status & GR_READY)))
71837+ return 1;
71838+
71839+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71840+ return 1;
71841+
71842+ /* ignore Eric Biederman */
71843+ if (IS_PRIVATE(dentry->d_inode))
71844+ return 1;
71845+
71846+ subj = task->acl;
71847+ read_lock(&gr_inode_lock);
71848+ do {
71849+ obj = lookup_acl_obj_label(ino, dev, subj);
71850+ if (obj != NULL) {
71851+ read_unlock(&gr_inode_lock);
71852+ return (obj->mode & GR_FIND) ? 1 : 0;
71853+ }
71854+ } while ((subj = subj->parent_subject));
71855+ read_unlock(&gr_inode_lock);
71856+
71857+	/* this is purely an optimization, since we're looking up an object
71858+	   for the directory we're doing a readdir on;
71859+	   if it's possible for any globbed object to match the entry we're
71860+	   filling into the directory, then the object we find here will be
71861+	   an anchor point with attached globbed objects (sketched after this function)
71862+	*/
71863+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
71864+ if (obj->globbed == NULL)
71865+ return (obj->mode & GR_FIND) ? 1 : 0;
71866+
71867+ is_not_root = ((obj->filename[0] == '/') &&
71868+ (obj->filename[1] == '\0')) ? 0 : 1;
71869+ bufsize = PAGE_SIZE - namelen - is_not_root;
71870+
71871+	/* unsigned-wrap check: rejects bufsize == 0 as well as bufsize > PAGE_SIZE */
71872+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
71873+ return 1;
71874+
71875+ preempt_disable();
71876+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
71877+ bufsize);
71878+
71879+ bufsize = strlen(path);
71880+
71881+ /* if base is "/", don't append an additional slash */
71882+ if (is_not_root)
71883+ *(path + bufsize) = '/';
71884+ memcpy(path + bufsize + is_not_root, name, namelen);
71885+ *(path + bufsize + namelen + is_not_root) = '\0';
71886+
71887+ tmp = obj->globbed;
71888+ while (tmp) {
71889+ if (!glob_match(tmp->filename, path)) {
71890+ preempt_enable();
71891+ return (tmp->mode & GR_FIND) ? 1 : 0;
71892+ }
71893+ tmp = tmp->next;
71894+ }
71895+ preempt_enable();
71896+ return (obj->mode & GR_FIND) ? 1 : 0;
71897+}
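
/*
 * Illustrative sketch, not part of the patch: the path assembly step of
 * gr_acl_handle_filldir() above, using plain libc in place of the kernel
 * helpers.  A '/' separator is appended only when the anchor object's
 * path isn't "/" itself, then the directory entry name is appended and
 * the result can be handed to the glob matcher.  The caller is assumed
 * to supply a buffer large enough for base + '/' + name.
 */
#include <string.h>

static void demo_build_path(char *buf, const char *base, const char *name)
{
	size_t len = strlen(base);
	int is_not_root = !(base[0] == '/' && base[1] == '\0');

	memcpy(buf, base, len);
	if (is_not_root)
		buf[len++] = '/';	/* avoid "//name" under the root anchor */
	strcpy(buf + len, name);
}
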
71898+
71899+void gr_put_exec_file(struct task_struct *task)
71900+{
71901+ struct file *filp;
71902+
71903+ write_lock(&grsec_exec_file_lock);
71904+ filp = task->exec_file;
71905+ task->exec_file = NULL;
71906+ write_unlock(&grsec_exec_file_lock);
71907+
71908+ if (filp)
71909+ fput(filp);
71910+
71911+ return;
71912+}
71913+
71914+
71915+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
71916+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
71917+#endif
71918+#ifdef CONFIG_SECURITY
71919+EXPORT_SYMBOL_GPL(gr_check_user_change);
71920+EXPORT_SYMBOL_GPL(gr_check_group_change);
71921+#endif
71922+
71923diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
71924new file mode 100644
71925index 0000000..18ffbbd
71926--- /dev/null
71927+++ b/grsecurity/gracl_alloc.c
71928@@ -0,0 +1,105 @@
71929+#include <linux/kernel.h>
71930+#include <linux/mm.h>
71931+#include <linux/slab.h>
71932+#include <linux/vmalloc.h>
71933+#include <linux/gracl.h>
71934+#include <linux/grsecurity.h>
71935+
71936+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
71937+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
71938+
71939+static __inline__ int
71940+alloc_pop(void)
71941+{
71942+ if (current_alloc_state->alloc_stack_next == 1)
71943+ return 0;
71944+
71945+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
71946+
71947+ current_alloc_state->alloc_stack_next--;
71948+
71949+ return 1;
71950+}
71951+
71952+static __inline__ int
71953+alloc_push(void *buf)
71954+{
71955+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
71956+ return 1;
71957+
71958+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
71959+
71960+ current_alloc_state->alloc_stack_next++;
71961+
71962+ return 0;
71963+}
71964+
71965+void *
71966+acl_alloc(unsigned long len)
71967+{
71968+ void *ret = NULL;
71969+
71970+ if (!len || len > PAGE_SIZE)
71971+ goto out;
71972+
71973+ ret = kmalloc(len, GFP_KERNEL);
71974+
71975+ if (ret) {
71976+ if (alloc_push(ret)) {
71977+ kfree(ret);
71978+ ret = NULL;
71979+ }
71980+ }
71981+
71982+out:
71983+ return ret;
71984+}
71985+
71986+void *
71987+acl_alloc_num(unsigned long num, unsigned long len)
71988+{
71989+ if (!len || (num > (PAGE_SIZE / len)))
71990+ return NULL;
71991+
71992+ return acl_alloc(num * len);
71993+}
71994+
71995+void
71996+acl_free_all(void)
71997+{
71998+ if (!current_alloc_state->alloc_stack)
71999+ return;
72000+
72001+ while (alloc_pop()) ;
72002+
72003+ if (current_alloc_state->alloc_stack) {
72004+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
72005+ kfree(current_alloc_state->alloc_stack);
72006+ else
72007+ vfree(current_alloc_state->alloc_stack);
72008+ }
72009+
72010+ current_alloc_state->alloc_stack = NULL;
72011+ current_alloc_state->alloc_stack_size = 1;
72012+ current_alloc_state->alloc_stack_next = 1;
72013+
72014+ return;
72015+}
72016+
72017+int
72018+acl_alloc_stack_init(unsigned long size)
72019+{
72020+ if ((size * sizeof (void *)) <= PAGE_SIZE)
72021+ current_alloc_state->alloc_stack =
72022+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
72023+ else
72024+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
72025+
72026+ current_alloc_state->alloc_stack_size = size;
72027+ current_alloc_state->alloc_stack_next = 1;
72028+
72029+ if (!current_alloc_state->alloc_stack)
72030+ return 0;
72031+ else
72032+ return 1;
72033+}
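
/*
 * Illustrative sketch, not part of the patch: the intended lifecycle of
 * the tracked allocator above.  The stack is sized for the expected
 * number of allocations, each acl_alloc()/acl_alloc_num() result is
 * recorded on it, and acl_free_all() releases everything at once, e.g.
 * when policy loading fails partway through.  The wrapper function is
 * hypothetical.
 */
static int demo_alloc_lifecycle(void)
{
	void *a, *b;

	/* room for two tracked allocations (the index above is 1-based) */
	if (!acl_alloc_stack_init(3))
		return 0;			/* stack allocation failed */

	a = acl_alloc(64);			/* tracked; NULL if len == 0 or > PAGE_SIZE */
	b = acl_alloc_num(4, 16);		/* tracked; guards num * len overflow */
	(void)a;
	(void)b;

	acl_free_all();				/* frees a, b, and the stack itself */
	return 1;
}
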
72034diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
72035new file mode 100644
72036index 0000000..1a94c11
72037--- /dev/null
72038+++ b/grsecurity/gracl_cap.c
72039@@ -0,0 +1,127 @@
72040+#include <linux/kernel.h>
72041+#include <linux/module.h>
72042+#include <linux/sched.h>
72043+#include <linux/gracl.h>
72044+#include <linux/grsecurity.h>
72045+#include <linux/grinternal.h>
72046+
72047+extern const char *captab_log[];
72048+extern int captab_log_entries;
72049+
72050+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
72051+{
72052+ struct acl_subject_label *curracl;
72053+
72054+ if (!gr_acl_is_enabled())
72055+ return 1;
72056+
72057+ curracl = task->acl;
72058+
72059+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72060+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72061+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
72062+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
72063+ gr_to_filename(task->exec_file->f_path.dentry,
72064+ task->exec_file->f_path.mnt) : curracl->filename,
72065+ curracl->filename, 0UL,
72066+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
72067+ return 1;
72068+ }
72069+
72070+ return 0;
72071+}
72072+
72073+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72074+{
72075+ struct acl_subject_label *curracl;
72076+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72077+ kernel_cap_t cap_audit = __cap_empty_set;
72078+
72079+ if (!gr_acl_is_enabled())
72080+ return 1;
72081+
72082+ curracl = task->acl;
72083+
72084+ cap_drop = curracl->cap_lower;
72085+ cap_mask = curracl->cap_mask;
72086+ cap_audit = curracl->cap_invert_audit;
72087+
72088+ while ((curracl = curracl->parent_subject)) {
72089+		/* if the cap isn't yet covered by the computed mask but is specified by the
72090+		   current level subject, add it to the computed mask; if that level also
72091+		   lowers the cap, add it to the set of dropped capabilities -- the nearest
72092+		   level that specifies a cap decides it (sketched after this function)
72093+		   */
72094+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72095+ cap_raise(cap_mask, cap);
72096+ if (cap_raised(curracl->cap_lower, cap))
72097+ cap_raise(cap_drop, cap);
72098+ if (cap_raised(curracl->cap_invert_audit, cap))
72099+ cap_raise(cap_audit, cap);
72100+ }
72101+ }
72102+
72103+ if (!cap_raised(cap_drop, cap)) {
72104+ if (cap_raised(cap_audit, cap))
72105+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
72106+ return 1;
72107+ }
72108+
72109+	/* in the general case, only learn the capability use if the process
72110+	   actually has the capability; the two uses of gr_learn_cap in sys.c are
72111+	   an exception to this rule, ensuring that any role transition matches
72112+	   what the full-learned policy believes about a privileged process
72113+	*/
72114+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
72115+ return 1;
72116+
72117+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
72118+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
72119+
72120+ return 0;
72121+}
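
/*
 * Illustrative sketch, not part of the patch: the parent-subject walk in
 * gr_task_acl_is_capable() above, on a single capability bit with plain
 * unsigned ints standing in for kernel_cap_t.  The mask/lower pair is
 * seeded from the task's own subject, and the nearest level that
 * specifies a cap in its mask decides whether it is dropped; ancestors
 * cannot override a descendant's decision.
 */
struct demo_cap_subj {
	unsigned int cap_mask;		/* caps this level specifies */
	unsigned int cap_lower;		/* caps this level drops */
	const struct demo_cap_subj *parent;
};

static int demo_cap_allowed(const struct demo_cap_subj *s, unsigned int capbit)
{
	unsigned int mask = s->cap_mask;
	unsigned int dropped = s->cap_lower;

	while ((s = s->parent)) {
		if (!(mask & capbit) && (s->cap_mask & capbit)) {
			mask |= capbit;		/* nearest specifying ancestor wins */
			if (s->cap_lower & capbit)
				dropped |= capbit;
		}
	}

	return !(dropped & capbit);
}
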
72122+
72123+int
72124+gr_acl_is_capable(const int cap)
72125+{
72126+ return gr_task_acl_is_capable(current, current_cred(), cap);
72127+}
72128+
72129+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
72130+{
72131+ struct acl_subject_label *curracl;
72132+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72133+
72134+ if (!gr_acl_is_enabled())
72135+ return 1;
72136+
72137+ curracl = task->acl;
72138+
72139+ cap_drop = curracl->cap_lower;
72140+ cap_mask = curracl->cap_mask;
72141+
72142+ while ((curracl = curracl->parent_subject)) {
72143+		/* if the cap isn't yet covered by the computed mask but is specified by the
72144+		   current level subject, add it to the computed mask; if that level also
72145+		   lowers the cap, add it to the set of dropped capabilities -- the nearest
72146+		   level that specifies a cap decides it
72147+		   */
72148+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72149+ cap_raise(cap_mask, cap);
72150+ if (cap_raised(curracl->cap_lower, cap))
72151+ cap_raise(cap_drop, cap);
72152+ }
72153+ }
72154+
72155+ if (!cap_raised(cap_drop, cap))
72156+ return 1;
72157+
72158+ return 0;
72159+}
72160+
72161+int
72162+gr_acl_is_capable_nolog(const int cap)
72163+{
72164+ return gr_task_acl_is_capable_nolog(current, cap);
72165+}
72166+
72167diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
72168new file mode 100644
72169index 0000000..a43dd06
72170--- /dev/null
72171+++ b/grsecurity/gracl_compat.c
72172@@ -0,0 +1,269 @@
72173+#include <linux/kernel.h>
72174+#include <linux/gracl.h>
72175+#include <linux/compat.h>
72176+#include <linux/gracl_compat.h>
72177+
72178+#include <asm/uaccess.h>
72179+
72180+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
72181+{
72182+ struct gr_arg_wrapper_compat uwrapcompat;
72183+
72184+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
72185+ return -EFAULT;
72186+
72187+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
72188+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
72189+ return -EINVAL;
72190+
72191+ uwrap->arg = compat_ptr(uwrapcompat.arg);
72192+ uwrap->version = uwrapcompat.version;
72193+ uwrap->size = sizeof(struct gr_arg);
72194+
72195+ return 0;
72196+}
72197+
72198+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
72199+{
72200+ struct gr_arg_compat argcompat;
72201+
72202+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
72203+ return -EFAULT;
72204+
72205+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
72206+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
72207+ arg->role_db.num_roles = argcompat.role_db.num_roles;
72208+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
72209+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
72210+ arg->role_db.num_objects = argcompat.role_db.num_objects;
72211+
72212+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
72213+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
72214+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
72215+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
72216+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
72217+ arg->segv_device = argcompat.segv_device;
72218+ arg->segv_inode = argcompat.segv_inode;
72219+ arg->segv_uid = argcompat.segv_uid;
72220+ arg->num_sprole_pws = argcompat.num_sprole_pws;
72221+ arg->mode = argcompat.mode;
72222+
72223+ return 0;
72224+}
72225+
72226+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
72227+{
72228+ struct acl_object_label_compat objcompat;
72229+
72230+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
72231+ return -EFAULT;
72232+
72233+ obj->filename = compat_ptr(objcompat.filename);
72234+ obj->inode = objcompat.inode;
72235+ obj->device = objcompat.device;
72236+ obj->mode = objcompat.mode;
72237+
72238+ obj->nested = compat_ptr(objcompat.nested);
72239+ obj->globbed = compat_ptr(objcompat.globbed);
72240+
72241+ obj->prev = compat_ptr(objcompat.prev);
72242+ obj->next = compat_ptr(objcompat.next);
72243+
72244+ return 0;
72245+}
72246+
72247+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
72248+{
72249+ unsigned int i;
72250+ struct acl_subject_label_compat subjcompat;
72251+
72252+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
72253+ return -EFAULT;
72254+
72255+ subj->filename = compat_ptr(subjcompat.filename);
72256+ subj->inode = subjcompat.inode;
72257+ subj->device = subjcompat.device;
72258+ subj->mode = subjcompat.mode;
72259+ subj->cap_mask = subjcompat.cap_mask;
72260+ subj->cap_lower = subjcompat.cap_lower;
72261+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
72262+
72263+ for (i = 0; i < GR_NLIMITS; i++) {
72264+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
72265+ subj->res[i].rlim_cur = RLIM_INFINITY;
72266+ else
72267+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
72268+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
72269+ subj->res[i].rlim_max = RLIM_INFINITY;
72270+ else
72271+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
72272+ }
72273+ subj->resmask = subjcompat.resmask;
72274+
72275+ subj->user_trans_type = subjcompat.user_trans_type;
72276+ subj->group_trans_type = subjcompat.group_trans_type;
72277+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
72278+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
72279+ subj->user_trans_num = subjcompat.user_trans_num;
72280+ subj->group_trans_num = subjcompat.group_trans_num;
72281+
72282+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
72283+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
72284+ subj->ip_type = subjcompat.ip_type;
72285+ subj->ips = compat_ptr(subjcompat.ips);
72286+ subj->ip_num = subjcompat.ip_num;
72287+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
72288+
72289+ subj->crashes = subjcompat.crashes;
72290+ subj->expires = subjcompat.expires;
72291+
72292+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
72293+ subj->hash = compat_ptr(subjcompat.hash);
72294+ subj->prev = compat_ptr(subjcompat.prev);
72295+ subj->next = compat_ptr(subjcompat.next);
72296+
72297+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
72298+ subj->obj_hash_size = subjcompat.obj_hash_size;
72299+ subj->pax_flags = subjcompat.pax_flags;
72300+
72301+ return 0;
72302+}
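
The rlimit loop above cannot simply widen the 32-bit values: a 32-bit RLIM_INFINITY widened verbatim would turn into a small, very real 64-bit limit. A standalone sketch of the translation (the constants are modeled; the kernel's values are per-arch):

#include <stdint.h>
#include <stdio.h>

#define COMPAT_RLIM_INFINITY 0xFFFFFFFFu   /* modeled; per-arch in the kernel */
#define RLIM_INFINITY        (~0ULL)

static uint64_t widen_rlim(uint32_t v)
{
    /* translate the sentinel; widen every other value verbatim */
    return v == COMPAT_RLIM_INFINITY ? RLIM_INFINITY : (uint64_t)v;
}

int main(void)
{
    printf("%llx\n", (unsigned long long)widen_rlim(COMPAT_RLIM_INFINITY));
    printf("%llx\n", (unsigned long long)widen_rlim(8192));
    return 0;
}
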
72303+
72304+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
72305+{
72306+ struct acl_role_label_compat rolecompat;
72307+
72308+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
72309+ return -EFAULT;
72310+
72311+ role->rolename = compat_ptr(rolecompat.rolename);
72312+ role->uidgid = rolecompat.uidgid;
72313+ role->roletype = rolecompat.roletype;
72314+
72315+ role->auth_attempts = rolecompat.auth_attempts;
72316+ role->expires = rolecompat.expires;
72317+
72318+ role->root_label = compat_ptr(rolecompat.root_label);
72319+ role->hash = compat_ptr(rolecompat.hash);
72320+
72321+ role->prev = compat_ptr(rolecompat.prev);
72322+ role->next = compat_ptr(rolecompat.next);
72323+
72324+ role->transitions = compat_ptr(rolecompat.transitions);
72325+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
72326+ role->domain_children = compat_ptr(rolecompat.domain_children);
72327+ role->domain_child_num = rolecompat.domain_child_num;
72328+
72329+ role->umask = rolecompat.umask;
72330+
72331+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
72332+ role->subj_hash_size = rolecompat.subj_hash_size;
72333+
72334+ return 0;
72335+}
72336+
72337+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
72338+{
72339+ struct role_allowed_ip_compat roleip_compat;
72340+
72341+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
72342+ return -EFAULT;
72343+
72344+ roleip->addr = roleip_compat.addr;
72345+ roleip->netmask = roleip_compat.netmask;
72346+
72347+ roleip->prev = compat_ptr(roleip_compat.prev);
72348+ roleip->next = compat_ptr(roleip_compat.next);
72349+
72350+ return 0;
72351+}
72352+
72353+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
72354+{
72355+ struct role_transition_compat trans_compat;
72356+
72357+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
72358+ return -EFAULT;
72359+
72360+ trans->rolename = compat_ptr(trans_compat.rolename);
72361+
72362+ trans->prev = compat_ptr(trans_compat.prev);
72363+ trans->next = compat_ptr(trans_compat.next);
72364+
72365+ return 0;
72367+}
72368+
72369+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
72370+{
72371+ struct gr_hash_struct_compat hash_compat;
72372+
72373+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
72374+ return -EFAULT;
72375+
72376+ hash->table = compat_ptr(hash_compat.table);
72377+ hash->nametable = compat_ptr(hash_compat.nametable);
72378+ hash->first = compat_ptr(hash_compat.first);
72379+
72380+ hash->table_size = hash_compat.table_size;
72381+ hash->used_size = hash_compat.used_size;
72382+
72383+ hash->type = hash_compat.type;
72384+
72385+ return 0;
72386+}
72387+
72388+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
72389+{
72390+ compat_uptr_t ptrcompat;
72391+
72392+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
72393+ return -EFAULT;
72394+
72395+ *(void **)ptr = compat_ptr(ptrcompat);
72396+
72397+ return 0;
72398+}
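
copy_pointer_from_array_compat() strides the userspace array by sizeof(compat_uptr_t) -- four bytes -- rather than sizeof(void *), because 32-bit userspace packed its pointer tables that way. A userspace model of the fetch (the copy_from_user() step is replaced by a direct read):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr_t;

static void *fetch_compat_ptr(const void *user_array, unsigned long idx)
{
    /* stand-in for copy_from_user(&raw, userp + idx * sizeof(raw), sizeof(raw)) */
    compat_uptr_t raw = ((const compat_uptr_t *)user_array)[idx];
    return (void *)(uintptr_t)raw;   /* compat_ptr() widening */
}

int main(void)
{
    compat_uptr_t arr[3] = { 0x1000, 0x2000, 0x3000 };
    printf("%p\n", fetch_compat_ptr(arr, 2));  /* third 32-bit slot */
    return 0;
}
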
72399+
72400+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
72401+{
72402+ struct acl_ip_label_compat ip_compat;
72403+
72404+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
72405+ return -EFAULT;
72406+
72407+ ip->iface = compat_ptr(ip_compat.iface);
72408+ ip->addr = ip_compat.addr;
72409+ ip->netmask = ip_compat.netmask;
72410+ ip->low = ip_compat.low;
72411+ ip->high = ip_compat.high;
72412+ ip->mode = ip_compat.mode;
72413+ ip->type = ip_compat.type;
72414+
72415+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
72416+
72417+ ip->prev = compat_ptr(ip_compat.prev);
72418+ ip->next = compat_ptr(ip_compat.next);
72419+
72420+ return 0;
72421+}
72422+
72423+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
72424+{
72425+ struct sprole_pw_compat pw_compat;
72426+
72427+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
72428+ return -EFAULT;
72429+
72430+ pw->rolename = compat_ptr(pw_compat.rolename);
72431+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
72432+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
72433+
72434+ return 0;
72435+}
72436+
72437+size_t get_gr_arg_wrapper_size_compat(void)
72438+{
72439+ return sizeof(struct gr_arg_wrapper_compat);
72440+}
72441+
72442diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
72443new file mode 100644
72444index 0000000..8ee8e4f
72445--- /dev/null
72446+++ b/grsecurity/gracl_fs.c
72447@@ -0,0 +1,447 @@
72448+#include <linux/kernel.h>
72449+#include <linux/sched.h>
72450+#include <linux/types.h>
72451+#include <linux/fs.h>
72452+#include <linux/file.h>
72453+#include <linux/stat.h>
72454+#include <linux/grsecurity.h>
72455+#include <linux/grinternal.h>
72456+#include <linux/gracl.h>
72457+
72458+umode_t
72459+gr_acl_umask(void)
72460+{
72461+ if (unlikely(!gr_acl_is_enabled()))
72462+ return 0;
72463+
72464+ return current->role->umask;
72465+}
72466+
72467+__u32
72468+gr_acl_handle_hidden_file(const struct dentry * dentry,
72469+ const struct vfsmount * mnt)
72470+{
72471+ __u32 mode;
72472+
72473+ if (unlikely(d_is_negative(dentry)))
72474+ return GR_FIND;
72475+
72476+ mode =
72477+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
72478+
72479+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
72480+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72481+ return mode;
72482+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
72483+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72484+ return 0;
72485+ } else if (unlikely(!(mode & GR_FIND)))
72486+ return 0;
72487+
72488+ return GR_FIND;
72489+}
72490+
72491+__u32
72492+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
72493+ int acc_mode)
72494+{
72495+ __u32 reqmode = GR_FIND;
72496+ __u32 mode;
72497+
72498+ if (unlikely(d_is_negative(dentry)))
72499+ return reqmode;
72500+
72501+ if (acc_mode & MAY_APPEND)
72502+ reqmode |= GR_APPEND;
72503+ else if (acc_mode & MAY_WRITE)
72504+ reqmode |= GR_WRITE;
72505+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
72506+ reqmode |= GR_READ;
72507+
72508+ mode =
72509+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72510+ mnt);
72511+
72512+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72513+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72514+ reqmode & GR_READ ? " reading" : "",
72515+ reqmode & GR_WRITE ? " writing" : reqmode &
72516+ GR_APPEND ? " appending" : "");
72517+ return reqmode;
72518+ } else
72519+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72520+ {
72521+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72522+ reqmode & GR_READ ? " reading" : "",
72523+ reqmode & GR_WRITE ? " writing" : reqmode &
72524+ GR_APPEND ? " appending" : "");
72525+ return 0;
72526+ } else if (unlikely((mode & reqmode) != reqmode))
72527+ return 0;
72528+
72529+ return reqmode;
72530+}
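
The reqmode derivation at the top of gr_acl_handle_open() encodes two policies: MAY_APPEND takes precedence over MAY_WRITE (append-only access should not imply general write), and reads of directories are not charged as GR_READ. A compilable model, with illustrative flag values rather than the kernel's:

#include <stdio.h>

/* illustrative flag values, not the kernel's */
#define MAY_WRITE  0x2
#define MAY_READ   0x4
#define MAY_APPEND 0x8

#define GR_FIND   0x01
#define GR_READ   0x02
#define GR_WRITE  0x04
#define GR_APPEND 0x08

static unsigned open_reqmode(int acc_mode, int is_dir)
{
    unsigned reqmode = GR_FIND;

    if (acc_mode & MAY_APPEND)
        reqmode |= GR_APPEND;          /* append wins over general write */
    else if (acc_mode & MAY_WRITE)
        reqmode |= GR_WRITE;
    if ((acc_mode & MAY_READ) && !is_dir)
        reqmode |= GR_READ;            /* directory reads are not charged */
    return reqmode;
}

int main(void)
{
    printf("%#x\n", open_reqmode(MAY_READ | MAY_APPEND, 0)); /* FIND|READ|APPEND */
    printf("%#x\n", open_reqmode(MAY_READ, 1));              /* FIND only */
    return 0;
}
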
72531+
72532+__u32
72533+gr_acl_handle_creat(const struct dentry * dentry,
72534+ const struct dentry * p_dentry,
72535+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
72536+ const int imode)
72537+{
72538+ __u32 reqmode = GR_WRITE | GR_CREATE;
72539+ __u32 mode;
72540+
72541+ if (acc_mode & MAY_APPEND)
72542+ reqmode |= GR_APPEND;
72543+ // if a directory was required or the directory already exists, then
72544+ // don't count this open as a read
72545+ if ((acc_mode & MAY_READ) &&
72546+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
72547+ reqmode |= GR_READ;
72548+ if ((open_flags & O_CREAT) &&
72549+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72550+ reqmode |= GR_SETID;
72551+
72552+ mode =
72553+ gr_check_create(dentry, p_dentry, p_mnt,
72554+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72555+
72556+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72557+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72558+ reqmode & GR_READ ? " reading" : "",
72559+ reqmode & GR_WRITE ? " writing" : reqmode &
72560+ GR_APPEND ? " appending" : "");
72561+ return reqmode;
72562+ } else
72563+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72564+ {
72565+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72566+ reqmode & GR_READ ? " reading" : "",
72567+ reqmode & GR_WRITE ? " writing" : reqmode &
72568+ GR_APPEND ? " appending" : "");
72569+ return 0;
72570+ } else if (unlikely((mode & reqmode) != reqmode))
72571+ return 0;
72572+
72573+ return reqmode;
72574+}
72575+
72576+__u32
72577+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
72578+ const int fmode)
72579+{
72580+ __u32 mode, reqmode = GR_FIND;
72581+
72582+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
72583+ reqmode |= GR_EXEC;
72584+ if (fmode & S_IWOTH)
72585+ reqmode |= GR_WRITE;
72586+ if (fmode & S_IROTH)
72587+ reqmode |= GR_READ;
72588+
72589+ mode =
72590+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72591+ mnt);
72592+
72593+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72594+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72595+ reqmode & GR_READ ? " reading" : "",
72596+ reqmode & GR_WRITE ? " writing" : "",
72597+ reqmode & GR_EXEC ? " executing" : "");
72598+ return reqmode;
72599+ } else
72600+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72601+ {
72602+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72603+ reqmode & GR_READ ? " reading" : "",
72604+ reqmode & GR_WRITE ? " writing" : "",
72605+ reqmode & GR_EXEC ? " executing" : "");
72606+ return 0;
72607+ } else if (unlikely((mode & reqmode) != reqmode))
72608+ return 0;
72609+
72610+ return reqmode;
72611+}
72612+
72613+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
72614+{
72615+ __u32 mode;
72616+
72617+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
72618+
72619+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72620+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
72621+ return mode;
72622+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72623+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
72624+ return 0;
72625+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72626+ return 0;
72627+
72628+ return (reqmode);
72629+}
72630+
72631+__u32
72632+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
72633+{
72634+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
72635+}
72636+
72637+__u32
72638+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
72639+{
72640+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
72641+}
72642+
72643+__u32
72644+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
72645+{
72646+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
72647+}
72648+
72649+__u32
72650+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
72651+{
72652+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
72653+}
72654+
72655+__u32
72656+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
72657+ umode_t *modeptr)
72658+{
72659+ umode_t mode;
72660+
72661+ *modeptr &= ~gr_acl_umask();
72662+ mode = *modeptr;
72663+
72664+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
72665+ return 1;
72666+
72667+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
72668+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
72669+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
72670+ GR_CHMOD_ACL_MSG);
72671+ } else {
72672+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
72673+ }
72674+}
72675+
72676+__u32
72677+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
72678+{
72679+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
72680+}
72681+
72682+__u32
72683+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
72684+{
72685+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
72686+}
72687+
72688+__u32
72689+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
72690+{
72691+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
72692+}
72693+
72694+__u32
72695+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
72696+{
72697+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
72698+}
72699+
72700+__u32
72701+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
72702+{
72703+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
72704+ GR_UNIXCONNECT_ACL_MSG);
72705+}
72706+
72707+/* hardlinks require at minimum create and link permission;
72708+   any additional privilege required is based on the
72709+   privilege of the file being linked to
72710+*/
72711+__u32
72712+gr_acl_handle_link(const struct dentry * new_dentry,
72713+ const struct dentry * parent_dentry,
72714+ const struct vfsmount * parent_mnt,
72715+ const struct dentry * old_dentry,
72716+ const struct vfsmount * old_mnt, const struct filename *to)
72717+{
72718+ __u32 mode;
72719+ __u32 needmode = GR_CREATE | GR_LINK;
72720+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
72721+
72722+ mode =
72723+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
72724+ old_mnt);
72725+
72726+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
72727+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
72728+ return mode;
72729+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
72730+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
72731+ return 0;
72732+ } else if (unlikely((mode & needmode) != needmode))
72733+ return 0;
72734+
72735+ return 1;
72736+}
72737+
72738+__u32
72739+gr_acl_handle_symlink(const struct dentry * new_dentry,
72740+ const struct dentry * parent_dentry,
72741+ const struct vfsmount * parent_mnt, const struct filename *from)
72742+{
72743+ __u32 needmode = GR_WRITE | GR_CREATE;
72744+ __u32 mode;
72745+
72746+ mode =
72747+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
72748+ GR_CREATE | GR_AUDIT_CREATE |
72749+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
72750+
72751+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
72752+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
72753+ return mode;
72754+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
72755+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
72756+ return 0;
72757+ } else if (unlikely((mode & needmode) != needmode))
72758+ return 0;
72759+
72760+ return (GR_WRITE | GR_CREATE);
72761+}
72762+
72763+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
72764+{
72765+ __u32 mode;
72766+
72767+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72768+
72769+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72770+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
72771+ return mode;
72772+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72773+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
72774+ return 0;
72775+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72776+ return 0;
72777+
72778+ return (reqmode);
72779+}
72780+
72781+__u32
72782+gr_acl_handle_mknod(const struct dentry * new_dentry,
72783+ const struct dentry * parent_dentry,
72784+ const struct vfsmount * parent_mnt,
72785+ const int mode)
72786+{
72787+ __u32 reqmode = GR_WRITE | GR_CREATE;
72788+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72789+ reqmode |= GR_SETID;
72790+
72791+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
72792+ reqmode, GR_MKNOD_ACL_MSG);
72793+}
72794+
72795+__u32
72796+gr_acl_handle_mkdir(const struct dentry *new_dentry,
72797+ const struct dentry *parent_dentry,
72798+ const struct vfsmount *parent_mnt)
72799+{
72800+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
72801+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
72802+}
72803+
72804+#define RENAME_CHECK_SUCCESS(old, new) \
72805+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
72806+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
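
RENAME_CHECK_SUCCESS() requires both computed access modes to grant READ and WRITE; gr_acl_handle_rename() below layers auditing and suppression on top of that verdict. A minimal model of the macro's check (bit values illustrative):

#include <stdio.h>

#define GR_READ  0x02   /* illustrative bit values */
#define GR_WRITE 0x04

static int rename_check_success(unsigned comp1, unsigned comp2)
{
    const unsigned need = GR_READ | GR_WRITE;

    /* both the new-path and old-path computed modes must grant R and W */
    return (comp1 & need) == need && (comp2 & need) == need;
}

int main(void)
{
    printf("%d\n", rename_check_success(GR_READ | GR_WRITE, GR_READ | GR_WRITE)); /* 1 */
    printf("%d\n", rename_check_success(GR_READ, GR_READ | GR_WRITE));            /* 0 */
    return 0;
}
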
72807+
72808+int
72809+gr_acl_handle_rename(struct dentry *new_dentry,
72810+ struct dentry *parent_dentry,
72811+ const struct vfsmount *parent_mnt,
72812+ struct dentry *old_dentry,
72813+ struct inode *old_parent_inode,
72814+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
72815+{
72816+ __u32 comp1, comp2;
72817+ int error = 0;
72818+
72819+ if (unlikely(!gr_acl_is_enabled()))
72820+ return 0;
72821+
72822+ if (flags & RENAME_EXCHANGE) {
72823+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
72824+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72825+ GR_SUPPRESS, parent_mnt);
72826+ comp2 =
72827+ gr_search_file(old_dentry,
72828+ GR_READ | GR_WRITE | GR_AUDIT_READ |
72829+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
72830+ } else if (d_is_negative(new_dentry)) {
72831+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
72832+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
72833+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
72834+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
72835+ GR_DELETE | GR_AUDIT_DELETE |
72836+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72837+ GR_SUPPRESS, old_mnt);
72838+ } else {
72839+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
72840+ GR_CREATE | GR_DELETE |
72841+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
72842+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72843+ GR_SUPPRESS, parent_mnt);
72844+ comp2 =
72845+ gr_search_file(old_dentry,
72846+ GR_READ | GR_WRITE | GR_AUDIT_READ |
72847+ GR_DELETE | GR_AUDIT_DELETE |
72848+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
72849+ }
72850+
72851+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
72852+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
72853+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
72854+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
72855+ && !(comp2 & GR_SUPPRESS)) {
72856+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
72857+ error = -EACCES;
72858+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
72859+ error = -EACCES;
72860+
72861+ return error;
72862+}
72863+
72864+void
72865+gr_acl_handle_exit(void)
72866+{
72867+ u16 id;
72868+ char *rolename;
72869+
72870+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
72871+ !(current->role->roletype & GR_ROLE_PERSIST))) {
72872+ id = current->acl_role_id;
72873+ rolename = current->role->rolename;
72874+ gr_set_acls(1);
72875+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
72876+ }
72877+
72878+ gr_put_exec_file(current);
72879+ return;
72880+}
72881+
72882+int
72883+gr_acl_handle_procpidmem(const struct task_struct *task)
72884+{
72885+ if (unlikely(!gr_acl_is_enabled()))
72886+ return 0;
72887+
72888+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
72889+ !(current->acl->mode & GR_POVERRIDE) &&
72890+ !(current->role->roletype & GR_ROLE_GOD))
72891+ return -EACCES;
72892+
72893+ return 0;
72894+}
72895diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
72896new file mode 100644
72897index 0000000..f056b81
72898--- /dev/null
72899+++ b/grsecurity/gracl_ip.c
72900@@ -0,0 +1,386 @@
72901+#include <linux/kernel.h>
72902+#include <asm/uaccess.h>
72903+#include <asm/errno.h>
72904+#include <net/sock.h>
72905+#include <linux/file.h>
72906+#include <linux/fs.h>
72907+#include <linux/net.h>
72908+#include <linux/in.h>
72909+#include <linux/skbuff.h>
72910+#include <linux/ip.h>
72911+#include <linux/udp.h>
72912+#include <linux/types.h>
72913+#include <linux/sched.h>
72914+#include <linux/netdevice.h>
72915+#include <linux/inetdevice.h>
72916+#include <linux/gracl.h>
72917+#include <linux/grsecurity.h>
72918+#include <linux/grinternal.h>
72919+
72920+#define GR_BIND 0x01
72921+#define GR_CONNECT 0x02
72922+#define GR_INVERT 0x04
72923+#define GR_BINDOVERRIDE 0x08
72924+#define GR_CONNECTOVERRIDE 0x10
72925+#define GR_SOCK_FAMILY 0x20
72926+
72927+static const char * gr_protocols[IPPROTO_MAX] = {
72928+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
72929+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
72930+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
72931+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
72932+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
72933+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
72934+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
72935+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
72936+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
72937+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
72938+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
72939+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
72940+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
72941+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
72942+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
72943+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
72944+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
72945+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
72946+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
72947+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
72948+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
72949+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
72950+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
72951+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
72952+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
72953+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
72954+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
72955+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
72956+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
72957+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
72958+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
72959+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
72960+ };
72961+
72962+static const char * gr_socktypes[SOCK_MAX] = {
72963+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
72964+ "unknown:7", "unknown:8", "unknown:9", "packet"
72965+ };
72966+
72967+static const char * gr_sockfamilies[AF_MAX+1] = {
72968+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
72969+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
72970+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
72971+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
72972+ };
72973+
72974+const char *
72975+gr_proto_to_name(unsigned char proto)
72976+{
72977+ return gr_protocols[proto];
72978+}
72979+
72980+const char *
72981+gr_socktype_to_name(unsigned char type)
72982+{
72983+ return gr_socktypes[type];
72984+}
72985+
72986+const char *
72987+gr_sockfamily_to_name(unsigned char family)
72988+{
72989+ return gr_sockfamilies[family];
72990+}
72991+
72992+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
72993+
72994+int
72995+gr_search_socket(const int domain, const int type, const int protocol)
72996+{
72997+ struct acl_subject_label *curr;
72998+ const struct cred *cred = current_cred();
72999+
73000+ if (unlikely(!gr_acl_is_enabled()))
73001+ goto exit;
73002+
73003+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
73004+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
73005+ goto exit; // let the kernel handle it
73006+
73007+ curr = current->acl;
73008+
73009+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
73010+		/* the family is allowed; if this is PF_INET, allow it only if
73011+		   the extra sock type/protocol checks pass */
73012+ if (domain == PF_INET)
73013+ goto inet_check;
73014+ goto exit;
73015+ } else {
73016+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73017+ __u32 fakeip = 0;
73018+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73019+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73020+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73021+ gr_to_filename(current->exec_file->f_path.dentry,
73022+ current->exec_file->f_path.mnt) :
73023+ curr->filename, curr->filename,
73024+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
73025+ &current->signal->saved_ip);
73026+ goto exit;
73027+ }
73028+ goto exit_fail;
73029+ }
73030+
73031+inet_check:
73032+ /* the rest of this checking is for IPv4 only */
73033+ if (!curr->ips)
73034+ goto exit;
73035+
73036+ if ((curr->ip_type & (1U << type)) &&
73037+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
73038+ goto exit;
73039+
73040+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73041+		/* we don't place ACLs on raw sockets, and sometimes
73042+		   dgram/ip sockets are opened for ioctl and not
73043+		   bind/connect, so we'll fake a bind learn log */
73044+ if (type == SOCK_RAW || type == SOCK_PACKET) {
73045+ __u32 fakeip = 0;
73046+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73047+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73048+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73049+ gr_to_filename(current->exec_file->f_path.dentry,
73050+ current->exec_file->f_path.mnt) :
73051+ curr->filename, curr->filename,
73052+ &fakeip, 0, type,
73053+ protocol, GR_CONNECT, &current->signal->saved_ip);
73054+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
73055+ __u32 fakeip = 0;
73056+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73057+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73058+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73059+ gr_to_filename(current->exec_file->f_path.dentry,
73060+ current->exec_file->f_path.mnt) :
73061+ curr->filename, curr->filename,
73062+ &fakeip, 0, type,
73063+ protocol, GR_BIND, &current->signal->saved_ip);
73064+ }
73065+ /* we'll log when they use connect or bind */
73066+ goto exit;
73067+ }
73068+
73069+exit_fail:
73070+ if (domain == PF_INET)
73071+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
73072+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
73073+ else if (rcu_access_pointer(net_families[domain]) != NULL)
73074+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
73075+ gr_socktype_to_name(type), protocol);
73076+
73077+ return 0;
73078+exit:
73079+ return 1;
73080+}
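
The subject's sock_families, ip_type, and ip_proto tables consulted above are plain bitsets, indexed as word = n/32, bit = n%32, with out-of-range requests deferred to the kernel exactly as the code does. A self-contained model of the family check (the AF_MAX value is illustrative):

#include <stdio.h>

#define AF_MAX 46   /* illustrative; the real value depends on kernel version */

static int family_allowed(const unsigned sock_families[], int domain)
{
    if (domain < 0 || domain >= AF_MAX)
        return 1;   /* out of range: defer to the kernel, as the code does */
    return !!(sock_families[domain / 32] & (1U << (domain % 32)));
}

int main(void)
{
    unsigned families[(AF_MAX + 31) / 32] = { 0 };

    families[2 / 32] |= 1U << (2 % 32);                   /* allow PF_INET (2) */
    printf("inet:  %d\n", family_allowed(families, 2));   /* 1 */
    printf("inet6: %d\n", family_allowed(families, 10));  /* 0 */
    return 0;
}
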
73081+
73082+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
73083+{
73084+ if ((ip->mode & mode) &&
73085+ (ip_port >= ip->low) &&
73086+ (ip_port <= ip->high) &&
73087+ ((ntohl(ip_addr) & our_netmask) ==
73088+ (ntohl(our_addr) & our_netmask))
73089+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
73090+ && (ip->type & (1U << type))) {
73091+ if (ip->mode & GR_INVERT)
73092+ return 2; // specifically denied
73093+ else
73094+ return 1; // allowed
73095+ }
73096+
73097+ return 0; // not specifically allowed, may continue parsing
73098+}
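
check_ip_policy() is deliberately tri-state: 0 means the rule has no opinion and the callers keep scanning, 1 allows, and 2 (a match on a GR_INVERT rule) is an explicit deny that short-circuits the scan. A trimmed, compilable model of the decision (the protocol/type bitmap checks are omitted for brevity):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define GR_INVERT 0x04   /* same flag value as in the patch */

struct rule {
    unsigned mode;       /* GR_BIND / GR_CONNECT / GR_INVERT bits */
    uint16_t low, high;  /* allowed port range, host order */
};

/* 0 = no opinion (keep scanning), 1 = allow, 2 = explicit deny */
static int eval_rule(const struct rule *r, unsigned want_mode,
                     uint32_t addr, uint16_t port,
                     uint32_t rule_addr, uint32_t mask)
{
    if ((r->mode & want_mode) &&
        port >= r->low && port <= r->high &&
        (ntohl(addr) & mask) == (ntohl(rule_addr) & mask))
        return (r->mode & GR_INVERT) ? 2 : 1;
    return 0;
}

int main(void)
{
    struct rule r = { 0x01, 1024, 65535 };   /* 0x01: e.g. GR_BIND */
    uint32_t net = htonl(0xC0A80000);        /* 192.168.0.0/16 */
    uint32_t addr = htonl(0xC0A80101);       /* 192.168.1.1 */

    printf("%d\n", eval_rule(&r, 0x01, addr, 8080, net, 0xFFFF0000)); /* 1 */
    return 0;
}
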
73099+
73100+static int
73101+gr_search_connectbind(const int full_mode, struct sock *sk,
73102+ struct sockaddr_in *addr, const int type)
73103+{
73104+ char iface[IFNAMSIZ] = {0};
73105+ struct acl_subject_label *curr;
73106+ struct acl_ip_label *ip;
73107+ struct inet_sock *isk;
73108+ struct net_device *dev;
73109+ struct in_device *idev;
73110+ unsigned long i;
73111+ int ret;
73112+ int mode = full_mode & (GR_BIND | GR_CONNECT);
73113+ __u32 ip_addr = 0;
73114+ __u32 our_addr;
73115+ __u32 our_netmask;
73116+ char *p;
73117+ __u16 ip_port = 0;
73118+ const struct cred *cred = current_cred();
73119+
73120+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
73121+ return 0;
73122+
73123+ curr = current->acl;
73124+ isk = inet_sk(sk);
73125+
73126+	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
73127+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
73128+ addr->sin_addr.s_addr = curr->inaddr_any_override;
73129+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
73130+ struct sockaddr_in saddr;
73131+ int err;
73132+
73133+ saddr.sin_family = AF_INET;
73134+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
73135+ saddr.sin_port = isk->inet_sport;
73136+
73137+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73138+ if (err)
73139+ return err;
73140+
73141+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73142+ if (err)
73143+ return err;
73144+ }
73145+
73146+ if (!curr->ips)
73147+ return 0;
73148+
73149+ ip_addr = addr->sin_addr.s_addr;
73150+ ip_port = ntohs(addr->sin_port);
73151+
73152+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73153+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73154+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73155+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73156+ gr_to_filename(current->exec_file->f_path.dentry,
73157+ current->exec_file->f_path.mnt) :
73158+ curr->filename, curr->filename,
73159+ &ip_addr, ip_port, type,
73160+ sk->sk_protocol, mode, &current->signal->saved_ip);
73161+ return 0;
73162+ }
73163+
73164+ for (i = 0; i < curr->ip_num; i++) {
73165+ ip = *(curr->ips + i);
73166+ if (ip->iface != NULL) {
73167+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
73168+ p = strchr(iface, ':');
73169+ if (p != NULL)
73170+ *p = '\0';
73171+ dev = dev_get_by_name(sock_net(sk), iface);
73172+ if (dev == NULL)
73173+ continue;
73174+ idev = in_dev_get(dev);
73175+ if (idev == NULL) {
73176+ dev_put(dev);
73177+ continue;
73178+ }
73179+ rcu_read_lock();
73180+ for_ifa(idev) {
73181+ if (!strcmp(ip->iface, ifa->ifa_label)) {
73182+ our_addr = ifa->ifa_address;
73183+ our_netmask = 0xffffffff;
73184+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73185+ if (ret == 1) {
73186+ rcu_read_unlock();
73187+ in_dev_put(idev);
73188+ dev_put(dev);
73189+ return 0;
73190+ } else if (ret == 2) {
73191+ rcu_read_unlock();
73192+ in_dev_put(idev);
73193+ dev_put(dev);
73194+ goto denied;
73195+ }
73196+ }
73197+ } endfor_ifa(idev);
73198+ rcu_read_unlock();
73199+ in_dev_put(idev);
73200+ dev_put(dev);
73201+ } else {
73202+ our_addr = ip->addr;
73203+ our_netmask = ip->netmask;
73204+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73205+ if (ret == 1)
73206+ return 0;
73207+ else if (ret == 2)
73208+ goto denied;
73209+ }
73210+ }
73211+
73212+denied:
73213+ if (mode == GR_BIND)
73214+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73215+ else if (mode == GR_CONNECT)
73216+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73217+
73218+ return -EACCES;
73219+}
73220+
73221+int
73222+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
73223+{
73224+ /* always allow disconnection of dgram sockets with connect */
73225+ if (addr->sin_family == AF_UNSPEC)
73226+ return 0;
73227+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
73228+}
73229+
73230+int
73231+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
73232+{
73233+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
73234+}
73235+
73236+int gr_search_listen(struct socket *sock)
73237+{
73238+ struct sock *sk = sock->sk;
73239+ struct sockaddr_in addr;
73240+
73241+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73242+ addr.sin_port = inet_sk(sk)->inet_sport;
73243+
73244+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73245+}
73246+
73247+int gr_search_accept(struct socket *sock)
73248+{
73249+ struct sock *sk = sock->sk;
73250+ struct sockaddr_in addr;
73251+
73252+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73253+ addr.sin_port = inet_sk(sk)->inet_sport;
73254+
73255+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73256+}
73257+
73258+int
73259+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
73260+{
73261+ if (addr)
73262+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
73263+ else {
73264+ struct sockaddr_in sin;
73265+ const struct inet_sock *inet = inet_sk(sk);
73266+
73267+ sin.sin_addr.s_addr = inet->inet_daddr;
73268+ sin.sin_port = inet->inet_dport;
73269+
73270+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73271+ }
73272+}
73273+
73274+int
73275+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
73276+{
73277+ struct sockaddr_in sin;
73278+
73279+ if (unlikely(skb->len < sizeof (struct udphdr)))
73280+ return 0; // skip this packet
73281+
73282+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
73283+ sin.sin_port = udp_hdr(skb)->source;
73284+
73285+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73286+}
73287diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
73288new file mode 100644
73289index 0000000..25f54ef
73290--- /dev/null
73291+++ b/grsecurity/gracl_learn.c
73292@@ -0,0 +1,207 @@
73293+#include <linux/kernel.h>
73294+#include <linux/mm.h>
73295+#include <linux/sched.h>
73296+#include <linux/poll.h>
73297+#include <linux/string.h>
73298+#include <linux/file.h>
73299+#include <linux/types.h>
73300+#include <linux/vmalloc.h>
73301+#include <linux/grinternal.h>
73302+
73303+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
73304+ size_t count, loff_t *ppos);
73305+extern int gr_acl_is_enabled(void);
73306+
73307+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
73308+static int gr_learn_attached;
73309+
73310+/* use a 512k buffer */
73311+#define LEARN_BUFFER_SIZE (512 * 1024)
73312+
73313+static DEFINE_SPINLOCK(gr_learn_lock);
73314+static DEFINE_MUTEX(gr_learn_user_mutex);
73315+
73316+/* we need to maintain two buffers: the kernel context reading for grlearn
73317+ holds a mutex around the (sleepable) copy to userspace, while the other
73318+ kernel contexts take a spinlock when copying into the buffer, since they cannot sleep
73319+*/
73320+static char *learn_buffer;
73321+static char *learn_buffer_user;
73322+static int learn_buffer_len;
73323+static int learn_buffer_user_len;
73324+
73325+static ssize_t
73326+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
73327+{
73328+ DECLARE_WAITQUEUE(wait, current);
73329+ ssize_t retval = 0;
73330+
73331+ add_wait_queue(&learn_wait, &wait);
73332+ set_current_state(TASK_INTERRUPTIBLE);
73333+ do {
73334+ mutex_lock(&gr_learn_user_mutex);
73335+ spin_lock(&gr_learn_lock);
73336+ if (learn_buffer_len)
73337+ break;
73338+ spin_unlock(&gr_learn_lock);
73339+ mutex_unlock(&gr_learn_user_mutex);
73340+ if (file->f_flags & O_NONBLOCK) {
73341+ retval = -EAGAIN;
73342+ goto out;
73343+ }
73344+ if (signal_pending(current)) {
73345+ retval = -ERESTARTSYS;
73346+ goto out;
73347+ }
73348+
73349+ schedule();
73350+ } while (1);
73351+
73352+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
73353+ learn_buffer_user_len = learn_buffer_len;
73354+ retval = learn_buffer_len;
73355+ learn_buffer_len = 0;
73356+
73357+ spin_unlock(&gr_learn_lock);
73358+
73359+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
73360+ retval = -EFAULT;
73361+
73362+ mutex_unlock(&gr_learn_user_mutex);
73363+out:
73364+ set_current_state(TASK_RUNNING);
73365+ remove_wait_queue(&learn_wait, &wait);
73366+ return retval;
73367+}
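
read_learn() above snapshots the shared buffer into learn_buffer_user while holding the spinlock, then drops it before the sleepable copy_to_user(), so producers are never blocked on userspace. A userspace model of that handoff, with a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* "spinlock" */
static char buf[512 * 1024];        /* shared append buffer */
static size_t buf_len;
static char snap[sizeof(buf)];      /* reader's private copy */

static void producer_append(const char *s)
{
    size_t n = strlen(s) + 1;

    pthread_mutex_lock(&lock);      /* producers only ever take the lock */
    if (buf_len + n <= sizeof(buf)) {
        memcpy(buf + buf_len, s, n);
        buf_len += n;
    }
    pthread_mutex_unlock(&lock);
}

static size_t reader_snapshot(void)
{
    size_t n;

    pthread_mutex_lock(&lock);
    n = buf_len;
    memcpy(snap, buf, n);           /* fast copy under the lock */
    buf_len = 0;
    pthread_mutex_unlock(&lock);
    /* the slow consumer step (copy_to_user in the kernel) runs here,
       after the lock is dropped, without stalling producers */
    return n;
}

int main(void)
{
    producer_append("learn entry");
    printf("snapshot of %zu bytes\n", reader_snapshot());
    return 0;
}
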
73368+
73369+static unsigned int
73370+poll_learn(struct file * file, poll_table * wait)
73371+{
73372+ poll_wait(file, &learn_wait, wait);
73373+
73374+ if (learn_buffer_len)
73375+ return (POLLIN | POLLRDNORM);
73376+
73377+ return 0;
73378+}
73379+
73380+void
73381+gr_clear_learn_entries(void)
73382+{
73383+ char *tmp;
73384+
73385+ mutex_lock(&gr_learn_user_mutex);
73386+ spin_lock(&gr_learn_lock);
73387+ tmp = learn_buffer;
73388+ learn_buffer = NULL;
73389+ spin_unlock(&gr_learn_lock);
73390+ if (tmp)
73391+ vfree(tmp);
73392+ if (learn_buffer_user != NULL) {
73393+ vfree(learn_buffer_user);
73394+ learn_buffer_user = NULL;
73395+ }
73396+ learn_buffer_len = 0;
73397+ mutex_unlock(&gr_learn_user_mutex);
73398+
73399+ return;
73400+}
73401+
73402+void
73403+gr_add_learn_entry(const char *fmt, ...)
73404+{
73405+ va_list args;
73406+ unsigned int len;
73407+
73408+ if (!gr_learn_attached)
73409+ return;
73410+
73411+ spin_lock(&gr_learn_lock);
73412+
73413+ /* leave a gap at the end so we know when it's "full" but don't have to
73414+ compute the exact length of the string we're trying to append
73415+ */
73416+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
73417+ spin_unlock(&gr_learn_lock);
73418+ wake_up_interruptible(&learn_wait);
73419+ return;
73420+ }
73421+ if (learn_buffer == NULL) {
73422+ spin_unlock(&gr_learn_lock);
73423+ return;
73424+ }
73425+
73426+ va_start(args, fmt);
73427+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
73428+ va_end(args);
73429+
73430+ learn_buffer_len += len + 1;
73431+
73432+ spin_unlock(&gr_learn_lock);
73433+ wake_up_interruptible(&learn_wait);
73434+
73435+ return;
73436+}
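
gr_add_learn_entry() never measures the record it is about to format; it simply refuses to append once less than a 16 KiB gap remains, so a single vsnprintf() into the remainder cannot overrun (assuming records stay under the gap, as the comment implies). A standalone model of the same trick:

#include <stdarg.h>
#include <stdio.h>

#define BUF_SIZE (512 * 1024)
#define GAP      16384       /* worst-case record size, as in the patch */

static char buf[BUF_SIZE];
static size_t len;

static int append_entry(const char *fmt, ...)
{
    va_list ap;
    int n;

    if (len > BUF_SIZE - GAP)
        return 0;            /* "full": the kernel wakes the reader here */

    va_start(ap, fmt);
    n = vsnprintf(buf + len, BUF_SIZE - len, fmt, ap);
    va_end(ap);
    len += n + 1;            /* keep the NUL as a record separator */
    return 1;
}

int main(void)
{
    append_entry("role %s uid %d", "admin", 0);
    printf("buffered %zu bytes\n", len);
    return 0;
}
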
73437+
73438+static int
73439+open_learn(struct inode *inode, struct file *file)
73440+{
73441+ if (file->f_mode & FMODE_READ && gr_learn_attached)
73442+ return -EBUSY;
73443+ if (file->f_mode & FMODE_READ) {
73444+ int retval = 0;
73445+ mutex_lock(&gr_learn_user_mutex);
73446+ if (learn_buffer == NULL)
73447+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
73448+ if (learn_buffer_user == NULL)
73449+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
73450+ if (learn_buffer == NULL) {
73451+ retval = -ENOMEM;
73452+ goto out_error;
73453+ }
73454+ if (learn_buffer_user == NULL) {
73455+ retval = -ENOMEM;
73456+ goto out_error;
73457+ }
73458+ learn_buffer_len = 0;
73459+ learn_buffer_user_len = 0;
73460+ gr_learn_attached = 1;
73461+out_error:
73462+ mutex_unlock(&gr_learn_user_mutex);
73463+ return retval;
73464+ }
73465+ return 0;
73466+}
73467+
73468+static int
73469+close_learn(struct inode *inode, struct file *file)
73470+{
73471+ if (file->f_mode & FMODE_READ) {
73472+ char *tmp = NULL;
73473+ mutex_lock(&gr_learn_user_mutex);
73474+ spin_lock(&gr_learn_lock);
73475+ tmp = learn_buffer;
73476+ learn_buffer = NULL;
73477+ spin_unlock(&gr_learn_lock);
73478+ if (tmp)
73479+ vfree(tmp);
73480+ if (learn_buffer_user != NULL) {
73481+ vfree(learn_buffer_user);
73482+ learn_buffer_user = NULL;
73483+ }
73484+ learn_buffer_len = 0;
73485+ learn_buffer_user_len = 0;
73486+ gr_learn_attached = 0;
73487+ mutex_unlock(&gr_learn_user_mutex);
73488+ }
73489+
73490+ return 0;
73491+}
73492+
73493+const struct file_operations grsec_fops = {
73494+ .read = read_learn,
73495+ .write = write_grsec_handler,
73496+ .open = open_learn,
73497+ .release = close_learn,
73498+ .poll = poll_learn,
73499+};
73500diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
73501new file mode 100644
73502index 0000000..fd26052
73503--- /dev/null
73504+++ b/grsecurity/gracl_policy.c
73505@@ -0,0 +1,1781 @@
73506+#include <linux/kernel.h>
73507+#include <linux/module.h>
73508+#include <linux/sched.h>
73509+#include <linux/mm.h>
73510+#include <linux/file.h>
73511+#include <linux/fs.h>
73512+#include <linux/namei.h>
73513+#include <linux/mount.h>
73514+#include <linux/tty.h>
73515+#include <linux/proc_fs.h>
73516+#include <linux/lglock.h>
73517+#include <linux/slab.h>
73518+#include <linux/vmalloc.h>
73519+#include <linux/types.h>
73520+#include <linux/sysctl.h>
73521+#include <linux/netdevice.h>
73522+#include <linux/ptrace.h>
73523+#include <linux/gracl.h>
73524+#include <linux/gralloc.h>
73525+#include <linux/security.h>
73526+#include <linux/grinternal.h>
73527+#include <linux/pid_namespace.h>
73528+#include <linux/stop_machine.h>
73529+#include <linux/fdtable.h>
73530+#include <linux/percpu.h>
73532+#include <linux/hugetlb.h>
73533+#include <linux/posix-timers.h>
73534+#include "../fs/mount.h"
73535+
73536+#include <asm/uaccess.h>
73537+#include <asm/errno.h>
73538+#include <asm/mman.h>
73539+
73540+extern struct gr_policy_state *polstate;
73541+
73542+#define FOR_EACH_ROLE_START(role) \
73543+ role = polstate->role_list; \
73544+ while (role) {
73545+
73546+#define FOR_EACH_ROLE_END(role) \
73547+ role = role->prev; \
73548+ }
73549+
73550+struct path gr_real_root;
73551+
73552+extern struct gr_alloc_state *current_alloc_state;
73553+
73554+u16 acl_sp_role_value;
73555+
73556+static DEFINE_MUTEX(gr_dev_mutex);
73557+
73558+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
73559+extern void gr_clear_learn_entries(void);
73560+
73561+struct gr_arg *gr_usermode __read_only;
73562+unsigned char *gr_system_salt __read_only;
73563+unsigned char *gr_system_sum __read_only;
73564+
73565+static unsigned int gr_auth_attempts = 0;
73566+static unsigned long gr_auth_expires = 0UL;
73567+
73568+struct acl_object_label *fakefs_obj_rw;
73569+struct acl_object_label *fakefs_obj_rwx;
73570+
73571+extern int gr_init_uidset(void);
73572+extern void gr_free_uidset(void);
73573+extern void gr_remove_uid(uid_t uid);
73574+extern int gr_find_uid(uid_t uid);
73575+
73576+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
73577+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
73578+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
73579+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
73580+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
73581+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
73582+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
73583+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
73584+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
73585+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73586+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73587+extern void assign_special_role(const char *rolename);
73588+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
73589+extern int gr_rbac_disable(void *unused);
73590+extern void gr_enable_rbac_system(void);
73591+
73592+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
73593+{
73594+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
73595+ return -EFAULT;
73596+
73597+ return 0;
73598+}
73599+
73600+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73601+{
73602+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
73603+ return -EFAULT;
73604+
73605+ return 0;
73606+}
73607+
73608+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73609+{
73610+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
73611+ return -EFAULT;
73612+
73613+ return 0;
73614+}
73615+
73616+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
73617+{
73618+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
73619+ return -EFAULT;
73620+
73621+ return 0;
73622+}
73623+
73624+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73625+{
73626+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
73627+ return -EFAULT;
73628+
73629+ return 0;
73630+}
73631+
73632+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73633+{
73634+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
73635+ return -EFAULT;
73636+
73637+ return 0;
73638+}
73639+
73640+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73641+{
73642+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
73643+ return -EFAULT;
73644+
73645+ return 0;
73646+}
73647+
73648+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
73649+{
73650+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
73651+ return -EFAULT;
73652+
73653+ return 0;
73654+}
73655+
73656+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
73657+{
73658+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
73659+ return -EFAULT;
73660+
73661+ return 0;
73662+}
73663+
73664+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
73665+{
73666+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
73667+ return -EFAULT;
73668+
73669+ if ((uwrap->version != GRSECURITY_VERSION) ||
73670+ (uwrap->size != sizeof(struct gr_arg)))
73671+ return -EINVAL;
73672+
73673+ return 0;
73674+}
73675+
73676+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
73677+{
73678+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
73679+ return -EFAULT;
73680+
73681+ return 0;
73682+}
73683+
73684+static size_t get_gr_arg_wrapper_size_normal(void)
73685+{
73686+ return sizeof(struct gr_arg_wrapper);
73687+}
73688+
73689+#ifdef CONFIG_COMPAT
73690+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
73691+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
73692+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
73693+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
73694+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
73695+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
73696+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
73697+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
73698+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
73699+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
73700+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
73701+extern size_t get_gr_arg_wrapper_size_compat(void);
73702+
73703+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
73704+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
73705+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
73706+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
73707+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
73708+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
73709+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
73710+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
73711+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
73712+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
73713+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
73714+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
73715+
73716+#else
73717+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
73718+#define copy_gr_arg copy_gr_arg_normal
73719+#define copy_gr_hash_struct copy_gr_hash_struct_normal
73720+#define copy_acl_object_label copy_acl_object_label_normal
73721+#define copy_acl_subject_label copy_acl_subject_label_normal
73722+#define copy_acl_role_label copy_acl_role_label_normal
73723+#define copy_acl_ip_label copy_acl_ip_label_normal
73724+#define copy_pointer_from_array copy_pointer_from_array_normal
73725+#define copy_sprole_pw copy_sprole_pw_normal
73726+#define copy_role_transition copy_role_transition_normal
73727+#define copy_role_allowed_ip copy_role_allowed_ip_normal
73728+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
73729+#endif
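
Under CONFIG_COMPAT the copy helpers are reached through __read_only function pointers; without it, the macros alias the *_normal versions at compile time. The code that assigns those pointers is not part of this hunk, so the following is only a hypothetical sketch of the dispatch shape, with a stand-in is_compat_task():

#include <stdio.h>

typedef int (*copy_fn)(const void *src);

static int copy_native(const void *src) { (void)src; return 64; }
static int copy_compat(const void *src) { (void)src; return 32; }

/* stand-in for the kernel's is_compat_task(); always "32-bit" here */
static int is_compat_task_stub(void) { return 1; }

static copy_fn copy_sel;   /* would be __read_only in the patch */

int main(void)
{
    copy_sel = is_compat_task_stub() ? copy_compat : copy_native;
    printf("selected %d-bit copy helper\n", copy_sel(NULL));
    return 0;
}
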
73730+
73731+static struct acl_subject_label *
73732+lookup_subject_map(const struct acl_subject_label *userp)
73733+{
73734+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
73735+ struct subject_map *match;
73736+
73737+ match = polstate->subj_map_set.s_hash[index];
73738+
73739+ while (match && match->user != userp)
73740+ match = match->next;
73741+
73742+ if (match != NULL)
73743+ return match->kernel;
73744+ else
73745+ return NULL;
73746+}
73747+
73748+static void
73749+insert_subj_map_entry(struct subject_map *subjmap)
73750+{
73751+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
73752+ struct subject_map **curr;
73753+
73754+ subjmap->prev = NULL;
73755+
73756+ curr = &polstate->subj_map_set.s_hash[index];
73757+ if (*curr != NULL)
73758+ (*curr)->prev = subjmap;
73759+
73760+ subjmap->next = *curr;
73761+ *curr = subjmap;
73762+
73763+ return;
73764+}
73765+
73766+static void
73767+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
73768+{
73769+ unsigned int index =
73770+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
73771+ struct acl_role_label **curr;
73772+ struct acl_role_label *tmp, *tmp2;
73773+
73774+ curr = &polstate->acl_role_set.r_hash[index];
73775+
73776+ /* simple case, slot is empty, just set it to our role */
73777+ if (*curr == NULL) {
73778+ *curr = role;
73779+ } else {
73780+ /* example:
73781+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
73782+ 2 -> 3
73783+ */
73784+ /* first check to see if we can already be reached via this slot */
73785+ tmp = *curr;
73786+ while (tmp && tmp != role)
73787+ tmp = tmp->next;
73788+ if (tmp == role) {
73789+ /* we don't need to add ourselves to this slot's chain */
73790+ return;
73791+ }
73792+ /* we need to add ourselves to this chain, two cases */
73793+ if (role->next == NULL) {
73794+ /* simple case, append the current chain to our role */
73795+ role->next = *curr;
73796+ *curr = role;
73797+ } else {
73798+ /* 1 -> 2 -> 3 -> 4
73799+ 2 -> 3 -> 4
73800+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
73801+ */
73802+ /* trickier case: walk our role's chain until we find
73803+ the role for the start of the current slot's chain */
73804+ tmp = role;
73805+ tmp2 = *curr;
73806+ while (tmp->next && tmp->next != tmp2)
73807+ tmp = tmp->next;
73808+ if (tmp->next == tmp2) {
73809+ /* from example above, we found 3, so just
73810+ replace this slot's chain with ours */
73811+ *curr = role;
73812+ } else {
73813+ /* we didn't find a subset of our role's chain
73814+ in the current slot's chain, so append their
73815+ chain to ours, and set us as the first role in
73816+ the slot's chain
73817+
73818+ we could fold this case with the case above,
73819+ but making it explicit for clarity
73820+ */
73821+ tmp->next = tmp2;
73822+ *curr = role;
73823+ }
73824+ }
73825+ }
73826+
73827+ return;
73828+}
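
The slot chains built above deliberately share tails: a domain role hashed into several slots reuses one next-linked chain instead of duplicating nodes, which is why insertion must check reachability and splice chains. A compact standalone demo of the same insertion logic, with roles reduced to ints and an arbitrary two-slot table:

#include <stdio.h>

struct role {
	int id;
	struct role *next;
};

static void insert(struct role **slot, struct role *role)
{
	struct role *tmp, *tmp2;

	if (*slot == NULL) {		/* empty slot: trivial */
		*slot = role;
		return;
	}
	/* already reachable from this slot's chain? */
	for (tmp = *slot; tmp && tmp != role; tmp = tmp->next)
		;
	if (tmp == role)
		return;
	if (role->next == NULL) {	/* no chain of our own: prepend */
		role->next = *slot;
		*slot = role;
		return;
	}
	/* walk our chain looking for the head of the slot's chain */
	tmp2 = *slot;
	for (tmp = role; tmp->next && tmp->next != tmp2; tmp = tmp->next)
		;
	if (tmp->next != tmp2)
		tmp->next = tmp2;	/* no overlap: splice their chain on */
	*slot = role;			/* either way, we head the slot now */
}

int main(void)
{
	struct role r1 = { 1, NULL }, r2 = { 2, NULL }, r3 = { 3, NULL };
	struct role *slots[2] = { NULL, NULL };
	const struct role *r;

	insert(&slots[0], &r3);	/* slot0: 3 */
	insert(&slots[0], &r2);	/* slot0: 2 -> 3 */
	insert(&slots[1], &r3);	/* slot1: 3, same node as in slot0 */
	insert(&slots[1], &r2);	/* slot1 reuses the whole 2 -> 3 chain */

	for (r = slots[1]; r; r = r->next)
		printf("slot1: role %d\n", r->id);
	(void)r1;
	return 0;
}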
73829+
73830+static void
73831+insert_acl_role_label(struct acl_role_label *role)
73832+{
73833+ int i;
73834+
73835+ if (polstate->role_list == NULL) {
73836+ polstate->role_list = role;
73837+ role->prev = NULL;
73838+ } else {
73839+ role->prev = polstate->role_list;
73840+ polstate->role_list = role;
73841+ }
73842+
73843+ /* used for hash chains */
73844+ role->next = NULL;
73845+
73846+ if (role->roletype & GR_ROLE_DOMAIN) {
73847+ for (i = 0; i < role->domain_child_num; i++)
73848+ __insert_acl_role_label(role, role->domain_children[i]);
73849+ } else
73850+ __insert_acl_role_label(role, role->uidgid);
73851+}
73852+
73853+static int
73854+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
73855+{
73856+ struct name_entry **curr, *nentry;
73857+ struct inodev_entry *ientry;
73858+ unsigned int len = strlen(name);
73859+ unsigned int key = full_name_hash(name, len);
73860+ unsigned int index = key % polstate->name_set.n_size;
73861+
73862+ curr = &polstate->name_set.n_hash[index];
73863+
73864+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
73865+ curr = &((*curr)->next);
73866+
73867+ if (*curr != NULL)
73868+ return 1;
73869+
73870+ nentry = acl_alloc(sizeof (struct name_entry));
73871+ if (nentry == NULL)
73872+ return 0;
73873+ ientry = acl_alloc(sizeof (struct inodev_entry));
73874+ if (ientry == NULL)
73875+ return 0;
73876+ ientry->nentry = nentry;
73877+
73878+ nentry->key = key;
73879+ nentry->name = name;
73880+ nentry->inode = inode;
73881+ nentry->device = device;
73882+ nentry->len = len;
73883+ nentry->deleted = deleted;
73884+
73885+ nentry->prev = NULL;
73886+ curr = &polstate->name_set.n_hash[index];
73887+ if (*curr != NULL)
73888+ (*curr)->prev = nentry;
73889+ nentry->next = *curr;
73890+ *curr = nentry;
73891+
73892+ /* insert us into the table searchable by inode/dev */
73893+ __insert_inodev_entry(polstate, ientry);
73894+
73895+ return 1;
73896+}
73897+
73898+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
73899+
73900+static void *
73901+create_table(__u32 * len, int elementsize)
73902+{
73903+ unsigned int table_sizes[] = {
73904+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
73905+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
73906+ 4194301, 8388593, 16777213, 33554393, 67108859
73907+ };
73908+ void *newtable = NULL;
73909+ unsigned int pwr = 0;
73910+
73911+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
73912+ table_sizes[pwr] <= *len)
73913+ pwr++;
73914+
73915+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
73916+ return newtable;
73917+
73918+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
73919+ newtable =
73920+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
73921+ else
73922+ newtable = vmalloc(table_sizes[pwr] * elementsize);
73923+
73924+ *len = table_sizes[pwr];
73925+
73926+ return newtable;
73927+}
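
A standalone sketch of create_table()'s sizing rule: pick the smallest prime from a fixed ladder strictly greater than the requested element count, so chained buckets average about one entry. The kernel version additionally rejects sizes whose allocation would overflow (the ULONG_MAX / elementsize check), which this sketch omits:

#include <stdio.h>

static const unsigned int table_sizes[] = {
	7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381
};

static unsigned int pick_size(unsigned int wanted)
{
	const unsigned int n = sizeof(table_sizes) / sizeof(table_sizes[0]);
	unsigned int pwr = 0;

	while (pwr < n - 1 && table_sizes[pwr] <= wanted)
		pwr++;
	if (table_sizes[pwr] <= wanted)
		return 0;	/* ladder exhausted; create_table() returns NULL */
	return table_sizes[pwr];
}

int main(void)
{
	printf("%u -> %u buckets\n", 100u, pick_size(100));	/* 127 */
	printf("%u -> %u buckets\n", 1021u, pick_size(1021));	/* 2039 */
	return 0;
}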
73928+
73929+static int
73930+init_variables(const struct gr_arg *arg, bool reload)
73931+{
73932+ struct task_struct *reaper = init_pid_ns.child_reaper;
73933+ unsigned int stacksize;
73934+
73935+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
73936+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
73937+ polstate->name_set.n_size = arg->role_db.num_objects;
73938+ polstate->inodev_set.i_size = arg->role_db.num_objects;
73939+
73940+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
73941+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
73942+ return 1;
73943+
73944+ if (!reload) {
73945+ if (!gr_init_uidset())
73946+ return 1;
73947+ }
73948+
73949+ /* set up the stack that holds allocation info */
73950+
73951+ stacksize = arg->role_db.num_pointers + 5;
73952+
73953+ if (!acl_alloc_stack_init(stacksize))
73954+ return 1;
73955+
73956+ if (!reload) {
73957+ /* grab reference for the real root dentry and vfsmount */
73958+ get_fs_root(reaper->fs, &gr_real_root);
73959+
73960+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
73961+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
73962+#endif
73963+
73964+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
73965+ if (fakefs_obj_rw == NULL)
73966+ return 1;
73967+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
73968+
73969+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
73970+ if (fakefs_obj_rwx == NULL)
73971+ return 1;
73972+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
73973+ }
73974+
73975+ polstate->subj_map_set.s_hash =
73976+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
73977+ polstate->acl_role_set.r_hash =
73978+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
73979+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
73980+ polstate->inodev_set.i_hash =
73981+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
73982+
73983+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
73984+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
73985+ return 1;
73986+
73987+ memset(polstate->subj_map_set.s_hash, 0,
73988+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
73989+ memset(polstate->acl_role_set.r_hash, 0,
73990+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
73991+ memset(polstate->name_set.n_hash, 0,
73992+ sizeof (struct name_entry *) * polstate->name_set.n_size);
73993+ memset(polstate->inodev_set.i_hash, 0,
73994+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
73995+
73996+ return 0;
73997+}
73998+
73999+/* free information not needed after startup
74000+ currently contains user->kernel pointer mappings for subjects
74001+*/
74002+
74003+static void
74004+free_init_variables(void)
74005+{
74006+ __u32 i;
74007+
74008+ if (polstate->subj_map_set.s_hash) {
74009+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
74010+ if (polstate->subj_map_set.s_hash[i]) {
74011+ kfree(polstate->subj_map_set.s_hash[i]);
74012+ polstate->subj_map_set.s_hash[i] = NULL;
74013+ }
74014+ }
74015+
74016+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
74017+ PAGE_SIZE)
74018+ kfree(polstate->subj_map_set.s_hash);
74019+ else
74020+ vfree(polstate->subj_map_set.s_hash);
74021+ }
74022+
74023+ return;
74024+}
74025+
74026+static void
74027+free_variables(bool reload)
74028+{
74029+ struct acl_subject_label *s;
74030+ struct acl_role_label *r;
74031+ struct task_struct *task, *task2;
74032+ unsigned int x;
74033+
74034+ if (!reload) {
74035+ gr_clear_learn_entries();
74036+
74037+ read_lock(&tasklist_lock);
74038+ do_each_thread(task2, task) {
74039+ task->acl_sp_role = 0;
74040+ task->acl_role_id = 0;
74041+ task->inherited = 0;
74042+ task->acl = NULL;
74043+ task->role = NULL;
74044+ } while_each_thread(task2, task);
74045+ read_unlock(&tasklist_lock);
74046+
74047+ kfree(fakefs_obj_rw);
74048+ fakefs_obj_rw = NULL;
74049+ kfree(fakefs_obj_rwx);
74050+ fakefs_obj_rwx = NULL;
74051+
74052+ /* release the reference to the real root dentry and vfsmount */
74053+ path_put(&gr_real_root);
74054+ memset(&gr_real_root, 0, sizeof(gr_real_root));
74055+ }
74056+
74057+ /* free all object hash tables */
74058+
74059+ FOR_EACH_ROLE_START(r)
74060+ if (r->subj_hash == NULL)
74061+ goto next_role;
74062+ FOR_EACH_SUBJECT_START(r, s, x)
74063+ if (s->obj_hash == NULL)
74064+ break;
74065+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74066+ kfree(s->obj_hash);
74067+ else
74068+ vfree(s->obj_hash);
74069+ FOR_EACH_SUBJECT_END(s, x)
74070+ FOR_EACH_NESTED_SUBJECT_START(r, s)
74071+ if (s->obj_hash == NULL)
74072+ break;
74073+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74074+ kfree(s->obj_hash);
74075+ else
74076+ vfree(s->obj_hash);
74077+ FOR_EACH_NESTED_SUBJECT_END(s)
74078+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
74079+ kfree(r->subj_hash);
74080+ else
74081+ vfree(r->subj_hash);
74082+ r->subj_hash = NULL;
74083+next_role:
74084+ FOR_EACH_ROLE_END(r)
74085+
74086+ acl_free_all();
74087+
74088+ if (polstate->acl_role_set.r_hash) {
74089+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
74090+ PAGE_SIZE)
74091+ kfree(polstate->acl_role_set.r_hash);
74092+ else
74093+ vfree(polstate->acl_role_set.r_hash);
74094+ }
74095+ if (polstate->name_set.n_hash) {
74096+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
74097+ PAGE_SIZE)
74098+ kfree(polstate->name_set.n_hash);
74099+ else
74100+ vfree(polstate->name_set.n_hash);
74101+ }
74102+
74103+ if (polstate->inodev_set.i_hash) {
74104+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
74105+ PAGE_SIZE)
74106+ kfree(polstate->inodev_set.i_hash);
74107+ else
74108+ vfree(polstate->inodev_set.i_hash);
74109+ }
74110+
74111+ if (!reload)
74112+ gr_free_uidset();
74113+
74114+ memset(&polstate->name_set, 0, sizeof (struct name_db));
74115+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
74116+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
74117+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
74118+
74119+ polstate->default_role = NULL;
74120+ polstate->kernel_role = NULL;
74121+ polstate->role_list = NULL;
74122+
74123+ return;
74124+}
74125+
74126+static struct acl_subject_label *
74127+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
74128+
74129+static int alloc_and_copy_string(char **name, unsigned int maxlen)
74130+{
74131+ unsigned int len = strnlen_user(*name, maxlen);
74132+ char *tmp;
74133+
74134+ if (!len || len >= maxlen)
74135+ return -EINVAL;
74136+
74137+ if ((tmp = (char *) acl_alloc(len)) == NULL)
74138+ return -ENOMEM;
74139+
74140+ if (copy_from_user(tmp, *name, len))
74141+ return -EFAULT;
74142+
74143+ tmp[len-1] = '\0';
74144+ *name = tmp;
74145+
74146+ return 0;
74147+}
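
A userspace analogue of the bounded copy above, with strnlen() + 1 standing in for strnlen_user(), which counts the terminating NUL and returns 0 on fault (a case plain strnlen() cannot signal). The point of the pattern is to size from the untrusted source but never trust its terminator:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int copy_bounded_string(char **name, size_t maxlen)
{
	size_t len = strnlen(*name, maxlen) + 1; /* counts the NUL */
	char *tmp;

	if (len >= maxlen)	/* the kernel also rejects len == 0 (fault) */
		return -EINVAL;

	tmp = malloc(len);
	if (!tmp)
		return -ENOMEM;

	memcpy(tmp, *name, len);
	tmp[len - 1] = '\0';	/* force termination regardless of source */
	*name = tmp;
	return 0;
}

int main(void)
{
	char *s = "role-name";

	if (!copy_bounded_string(&s, 64))
		printf("copied: %s\n", s);
	return 0;
}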
74148+
74149+static int
74150+copy_user_glob(struct acl_object_label *obj)
74151+{
74152+ struct acl_object_label *g_tmp, **guser;
74153+ int error;
74154+
74155+ if (obj->globbed == NULL)
74156+ return 0;
74157+
74158+ guser = &obj->globbed;
74159+ while (*guser) {
74160+ g_tmp = (struct acl_object_label *)
74161+ acl_alloc(sizeof (struct acl_object_label));
74162+ if (g_tmp == NULL)
74163+ return -ENOMEM;
74164+
74165+ if (copy_acl_object_label(g_tmp, *guser))
74166+ return -EFAULT;
74167+
74168+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
74169+ if (error)
74170+ return error;
74171+
74172+ *guser = g_tmp;
74173+ guser = &(g_tmp->next);
74174+ }
74175+
74176+ return 0;
74177+}
74178+
74179+static int
74180+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
74181+ struct acl_role_label *role)
74182+{
74183+ struct acl_object_label *o_tmp;
74184+ int ret;
74185+
74186+ while (userp) {
74187+ if ((o_tmp = (struct acl_object_label *)
74188+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
74189+ return -ENOMEM;
74190+
74191+ if (copy_acl_object_label(o_tmp, userp))
74192+ return -EFAULT;
74193+
74194+ userp = o_tmp->prev;
74195+
74196+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
74197+ if (ret)
74198+ return ret;
74199+
74200+ insert_acl_obj_label(o_tmp, subj);
74201+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
74202+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
74203+ return -ENOMEM;
74204+
74205+ ret = copy_user_glob(o_tmp);
74206+ if (ret)
74207+ return ret;
74208+
74209+ if (o_tmp->nested) {
74210+ int already_copied;
74211+
74212+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
74213+ if (IS_ERR(o_tmp->nested))
74214+ return PTR_ERR(o_tmp->nested);
74215+
74216+ /* insert into nested subject list if we haven't copied this one yet
74217+ to prevent duplicate entries */
74218+ if (!already_copied) {
74219+ o_tmp->nested->next = role->hash->first;
74220+ role->hash->first = o_tmp->nested;
74221+ }
74222+ }
74223+ }
74224+
74225+ return 0;
74226+}
74227+
74228+static __u32
74229+count_user_subjs(struct acl_subject_label *userp)
74230+{
74231+ struct acl_subject_label s_tmp;
74232+ __u32 num = 0;
74233+
74234+ while (userp) {
74235+ if (copy_acl_subject_label(&s_tmp, userp))
74236+ break;
74237+
74238+ userp = s_tmp.prev;
74239+ }
74240+
74241+ return num;
74242+}
74243+
74244+static int
74245+copy_user_allowedips(struct acl_role_label *rolep)
74246+{
74247+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
74248+
74249+ ruserip = rolep->allowed_ips;
74250+
74251+ while (ruserip) {
74252+ rlast = rtmp;
74253+
74254+ if ((rtmp = (struct role_allowed_ip *)
74255+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
74256+ return -ENOMEM;
74257+
74258+ if (copy_role_allowed_ip(rtmp, ruserip))
74259+ return -EFAULT;
74260+
74261+ ruserip = rtmp->prev;
74262+
74263+ if (!rlast) {
74264+ rtmp->prev = NULL;
74265+ rolep->allowed_ips = rtmp;
74266+ } else {
74267+ rlast->next = rtmp;
74268+ rtmp->prev = rlast;
74269+ }
74270+
74271+ if (!ruserip)
74272+ rtmp->next = NULL;
74273+ }
74274+
74275+ return 0;
74276+}
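
copy_user_allowedips() above and copy_user_transitions() below share one pattern: walk the source list via ->prev and append each copy, yielding a kernel-side list in the same order with valid ->next/->prev links. A standalone version with malloc() standing in for acl_alloc() and ordinary memory standing in for userspace:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *prev, *next;
};

static struct node *copy_list(const struct node *user_head)
{
	struct node *head = NULL, *tmp = NULL, *last;

	while (user_head) {
		last = tmp;
		tmp = malloc(sizeof(*tmp));
		if (!tmp)
			return NULL;	/* kernel code returns -ENOMEM */
		tmp->val = user_head->val;

		user_head = user_head->prev;	/* walk source via prev */

		if (!last) {
			tmp->prev = NULL;
			head = tmp;	/* first copy becomes the head */
		} else {
			last->next = tmp;
			tmp->prev = last;
		}
		if (!user_head)
			tmp->next = NULL;	/* terminate on last copy */
	}
	return head;
}

int main(void)
{
	struct node c = { 3, NULL, NULL }, b = { 2, &c, NULL }, a = { 1, &b, NULL };
	const struct node *n;

	for (n = copy_list(&a); n; n = n->next)
		printf("%d ", n->val);	/* prints: 1 2 3 */
	printf("\n");
	return 0;
}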
74277+
74278+static int
74279+copy_user_transitions(struct acl_role_label *rolep)
74280+{
74281+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
74282+ int error;
74283+
74284+ rusertp = rolep->transitions;
74285+
74286+ while (rusertp) {
74287+ rlast = rtmp;
74288+
74289+ if ((rtmp = (struct role_transition *)
74290+ acl_alloc(sizeof (struct role_transition))) == NULL)
74291+ return -ENOMEM;
74292+
74293+ if (copy_role_transition(rtmp, rusertp))
74294+ return -EFAULT;
74295+
74296+ rusertp = rtmp->prev;
74297+
74298+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
74299+ if (error)
74300+ return error;
74301+
74302+ if (!rlast) {
74303+ rtmp->prev = NULL;
74304+ rolep->transitions = rtmp;
74305+ } else {
74306+ rlast->next = rtmp;
74307+ rtmp->prev = rlast;
74308+ }
74309+
74310+ if (!rusertp)
74311+ rtmp->next = NULL;
74312+ }
74313+
74314+ return 0;
74315+}
74316+
74317+static __u32 count_user_objs(const struct acl_object_label __user *userp)
74318+{
74319+ struct acl_object_label o_tmp;
74320+ __u32 num = 0;
74321+
74322+ while (userp) {
74323+ if (copy_acl_object_label(&o_tmp, userp))
74324+ break;
74325+
74326+ userp = o_tmp.prev;
74327+ num++;
74328+ }
74329+
74330+ return num;
74331+}
74332+
74333+static struct acl_subject_label *
74334+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
74335+{
74336+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
74337+ __u32 num_objs;
74338+ struct acl_ip_label **i_tmp, *i_utmp2;
74339+ struct gr_hash_struct ghash;
74340+ struct subject_map *subjmap;
74341+ unsigned int i_num;
74342+ int err;
74343+
74344+ if (already_copied != NULL)
74345+ *already_copied = 0;
74346+
74347+ s_tmp = lookup_subject_map(userp);
74348+
74349+ /* we've already copied this subject into the kernel, just return
74350+ the reference to it, and don't copy it over again
74351+ */
74352+ if (s_tmp) {
74353+ if (already_copied != NULL)
74354+ *already_copied = 1;
74355+		return s_tmp;
74356+ }
74357+
74358+ if ((s_tmp = (struct acl_subject_label *)
74359+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
74360+ return ERR_PTR(-ENOMEM);
74361+
74362+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
74363+ if (subjmap == NULL)
74364+ return ERR_PTR(-ENOMEM);
74365+
74366+ subjmap->user = userp;
74367+ subjmap->kernel = s_tmp;
74368+ insert_subj_map_entry(subjmap);
74369+
74370+ if (copy_acl_subject_label(s_tmp, userp))
74371+ return ERR_PTR(-EFAULT);
74372+
74373+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
74374+ if (err)
74375+ return ERR_PTR(err);
74376+
74377+ if (!strcmp(s_tmp->filename, "/"))
74378+ role->root_label = s_tmp;
74379+
74380+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
74381+ return ERR_PTR(-EFAULT);
74382+
74383+ /* copy user and group transition tables */
74384+
74385+ if (s_tmp->user_trans_num) {
74386+ uid_t *uidlist;
74387+
74388+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
74389+ if (uidlist == NULL)
74390+ return ERR_PTR(-ENOMEM);
74391+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
74392+ return ERR_PTR(-EFAULT);
74393+
74394+ s_tmp->user_transitions = uidlist;
74395+ }
74396+
74397+ if (s_tmp->group_trans_num) {
74398+ gid_t *gidlist;
74399+
74400+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
74401+ if (gidlist == NULL)
74402+ return ERR_PTR(-ENOMEM);
74403+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
74404+ return ERR_PTR(-EFAULT);
74405+
74406+ s_tmp->group_transitions = gidlist;
74407+ }
74408+
74409+ /* set up object hash table */
74410+ num_objs = count_user_objs(ghash.first);
74411+
74412+ s_tmp->obj_hash_size = num_objs;
74413+ s_tmp->obj_hash =
74414+ (struct acl_object_label **)
74415+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
74416+
74417+ if (!s_tmp->obj_hash)
74418+ return ERR_PTR(-ENOMEM);
74419+
74420+ memset(s_tmp->obj_hash, 0,
74421+ s_tmp->obj_hash_size *
74422+ sizeof (struct acl_object_label *));
74423+
74424+ /* add in objects */
74425+ err = copy_user_objs(ghash.first, s_tmp, role);
74426+
74427+ if (err)
74428+ return ERR_PTR(err);
74429+
74430+ /* set pointer for parent subject */
74431+ if (s_tmp->parent_subject) {
74432+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
74433+
74434+ if (IS_ERR(s_tmp2))
74435+ return s_tmp2;
74436+
74437+ s_tmp->parent_subject = s_tmp2;
74438+ }
74439+
74440+ /* add in ip acls */
74441+
74442+ if (!s_tmp->ip_num) {
74443+ s_tmp->ips = NULL;
74444+ goto insert;
74445+ }
74446+
74447+ i_tmp =
74448+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
74449+ sizeof (struct acl_ip_label *));
74450+
74451+ if (!i_tmp)
74452+ return ERR_PTR(-ENOMEM);
74453+
74454+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
74455+ *(i_tmp + i_num) =
74456+ (struct acl_ip_label *)
74457+ acl_alloc(sizeof (struct acl_ip_label));
74458+ if (!*(i_tmp + i_num))
74459+ return ERR_PTR(-ENOMEM);
74460+
74461+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
74462+ return ERR_PTR(-EFAULT);
74463+
74464+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
74465+ return ERR_PTR(-EFAULT);
74466+
74467+ if ((*(i_tmp + i_num))->iface == NULL)
74468+ continue;
74469+
74470+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
74471+ if (err)
74472+ return ERR_PTR(err);
74473+ }
74474+
74475+ s_tmp->ips = i_tmp;
74476+
74477+insert:
74478+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
74479+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
74480+ return ERR_PTR(-ENOMEM);
74481+
74482+ return s_tmp;
74483+}
74484+
74485+static int
74486+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
74487+{
74488+ struct acl_subject_label s_pre;
74489+ struct acl_subject_label * ret;
74490+ int err;
74491+
74492+ while (userp) {
74493+ if (copy_acl_subject_label(&s_pre, userp))
74494+ return -EFAULT;
74495+
74496+ ret = do_copy_user_subj(userp, role, NULL);
74497+
74498+ err = PTR_ERR(ret);
74499+ if (IS_ERR(ret))
74500+ return err;
74501+
74502+ insert_acl_subj_label(ret, role);
74503+
74504+ userp = s_pre.prev;
74505+ }
74506+
74507+ return 0;
74508+}
74509+
74510+static int
74511+copy_user_acl(struct gr_arg *arg)
74512+{
74513+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
74514+ struct acl_subject_label *subj_list;
74515+ struct sprole_pw *sptmp;
74516+ struct gr_hash_struct *ghash;
74517+ uid_t *domainlist;
74518+ unsigned int r_num;
74519+ int err = 0;
74520+ __u16 i;
74521+ __u32 num_subjs;
74522+
74523+ /* we need a default and kernel role */
74524+ if (arg->role_db.num_roles < 2)
74525+ return -EINVAL;
74526+
74527+ /* copy special role authentication info from userspace */
74528+
74529+ polstate->num_sprole_pws = arg->num_sprole_pws;
74530+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
74531+
74532+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
74533+ return -ENOMEM;
74534+
74535+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74536+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
74537+ if (!sptmp)
74538+ return -ENOMEM;
74539+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
74540+ return -EFAULT;
74541+
74542+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
74543+ if (err)
74544+ return err;
74545+
74546+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74547+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
74548+#endif
74549+
74550+ polstate->acl_special_roles[i] = sptmp;
74551+ }
74552+
74553+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
74554+
74555+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
74556+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
74557+
74558+ if (!r_tmp)
74559+ return -ENOMEM;
74560+
74561+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
74562+ return -EFAULT;
74563+
74564+ if (copy_acl_role_label(r_tmp, r_utmp2))
74565+ return -EFAULT;
74566+
74567+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
74568+ if (err)
74569+ return err;
74570+
74571+ if (!strcmp(r_tmp->rolename, "default")
74572+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
74573+ polstate->default_role = r_tmp;
74574+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
74575+ polstate->kernel_role = r_tmp;
74576+ }
74577+
74578+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
74579+ return -ENOMEM;
74580+
74581+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
74582+ return -EFAULT;
74583+
74584+ r_tmp->hash = ghash;
74585+
74586+ num_subjs = count_user_subjs(r_tmp->hash->first);
74587+
74588+ r_tmp->subj_hash_size = num_subjs;
74589+ r_tmp->subj_hash =
74590+ (struct acl_subject_label **)
74591+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
74592+
74593+ if (!r_tmp->subj_hash)
74594+ return -ENOMEM;
74595+
74596+ err = copy_user_allowedips(r_tmp);
74597+ if (err)
74598+ return err;
74599+
74600+ /* copy domain info */
74601+ if (r_tmp->domain_children != NULL) {
74602+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
74603+ if (domainlist == NULL)
74604+ return -ENOMEM;
74605+
74606+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
74607+ return -EFAULT;
74608+
74609+ r_tmp->domain_children = domainlist;
74610+ }
74611+
74612+ err = copy_user_transitions(r_tmp);
74613+ if (err)
74614+ return err;
74615+
74616+ memset(r_tmp->subj_hash, 0,
74617+ r_tmp->subj_hash_size *
74618+ sizeof (struct acl_subject_label *));
74619+
74620+ /* acquire the list of subjects, then NULL out
74621+ the list prior to parsing the subjects for this role,
74622+ as during this parsing the list is replaced with a list
74623+ of *nested* subjects for the role
74624+ */
74625+ subj_list = r_tmp->hash->first;
74626+
74627+ /* set nested subject list to null */
74628+ r_tmp->hash->first = NULL;
74629+
74630+ err = copy_user_subjs(subj_list, r_tmp);
74631+
74632+ if (err)
74633+ return err;
74634+
74635+ insert_acl_role_label(r_tmp);
74636+ }
74637+
74638+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
74639+ return -EINVAL;
74640+
74641+ return err;
74642+}
74643+
74644+static int gracl_reload_apply_policies(void *reload)
74645+{
74646+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
74647+ struct task_struct *task, *task2;
74648+ struct acl_role_label *role, *rtmp;
74649+ struct acl_subject_label *subj;
74650+ const struct cred *cred;
74651+ int role_applied;
74652+ int ret = 0;
74653+
74654+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
74655+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
74656+
74657+ /* first make sure we'll be able to apply the new policy cleanly */
74658+ do_each_thread(task2, task) {
74659+ if (task->exec_file == NULL)
74660+ continue;
74661+ role_applied = 0;
74662+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74663+ /* preserve special roles */
74664+ FOR_EACH_ROLE_START(role)
74665+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74666+ rtmp = task->role;
74667+ task->role = role;
74668+ role_applied = 1;
74669+ break;
74670+ }
74671+ FOR_EACH_ROLE_END(role)
74672+ }
74673+ if (!role_applied) {
74674+ cred = __task_cred(task);
74675+ rtmp = task->role;
74676+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74677+ }
74678+		/* this handles non-nested inherited subjects; nested subjects will still
74679+		   be dropped currently */
74680+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74681+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
74682+ /* change the role back so that we've made no modifications to the policy */
74683+ task->role = rtmp;
74684+
74685+ if (subj == NULL || task->tmpacl == NULL) {
74686+ ret = -EINVAL;
74687+ goto out;
74688+ }
74689+ } while_each_thread(task2, task);
74690+
74691+ /* now actually apply the policy */
74692+
74693+ do_each_thread(task2, task) {
74694+ if (task->exec_file) {
74695+ role_applied = 0;
74696+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74697+ /* preserve special roles */
74698+ FOR_EACH_ROLE_START(role)
74699+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74700+ task->role = role;
74701+ role_applied = 1;
74702+ break;
74703+ }
74704+ FOR_EACH_ROLE_END(role)
74705+ }
74706+ if (!role_applied) {
74707+ cred = __task_cred(task);
74708+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74709+ }
74710+			/* this handles non-nested inherited subjects; nested subjects will still
74711+			   be dropped currently */
74712+ if (!reload_state->oldmode && task->inherited)
74713+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74714+ else {
74715+ /* looked up and tagged to the task previously */
74716+ subj = task->tmpacl;
74717+ }
74718+ /* subj will be non-null */
74719+ __gr_apply_subject_to_task(polstate, task, subj);
74720+ if (reload_state->oldmode) {
74721+ task->acl_role_id = 0;
74722+ task->acl_sp_role = 0;
74723+ task->inherited = 0;
74724+ }
74725+ } else {
74726+ // it's a kernel process
74727+ task->role = polstate->kernel_role;
74728+ task->acl = polstate->kernel_role->root_label;
74729+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
74730+ task->acl->mode &= ~GR_PROCFIND;
74731+#endif
74732+ }
74733+ } while_each_thread(task2, task);
74734+
74735+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
74736+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
74737+
74738+out:
74739+
74740+ return ret;
74741+}
74742+
74743+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
74744+{
74745+ struct gr_reload_state new_reload_state = { };
74746+ int err;
74747+
74748+ new_reload_state.oldpolicy_ptr = polstate;
74749+ new_reload_state.oldalloc_ptr = current_alloc_state;
74750+ new_reload_state.oldmode = oldmode;
74751+
74752+ current_alloc_state = &new_reload_state.newalloc;
74753+ polstate = &new_reload_state.newpolicy;
74754+
74755+ /* everything relevant is now saved off, copy in the new policy */
74756+ if (init_variables(args, true)) {
74757+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
74758+ err = -ENOMEM;
74759+ goto error;
74760+ }
74761+
74762+ err = copy_user_acl(args);
74763+ free_init_variables();
74764+ if (err)
74765+ goto error;
74766+ /* the new policy is copied in, with the old policy available via saved_state
74767+ first go through applying roles, making sure to preserve special roles
74768+ then apply new subjects, making sure to preserve inherited and nested subjects,
74769+ though currently only inherited subjects will be preserved
74770+ */
74771+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
74772+ if (err)
74773+ goto error;
74774+
74775+ /* we've now applied the new policy, so restore the old policy state to free it */
74776+ polstate = &new_reload_state.oldpolicy;
74777+ current_alloc_state = &new_reload_state.oldalloc;
74778+ free_variables(true);
74779+
74780+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
74781+ to running_polstate/current_alloc_state inside stop_machine
74782+ */
74783+ err = 0;
74784+ goto out;
74785+error:
74786+ /* on error of loading the new policy, we'll just keep the previous
74787+ policy set around
74788+ */
74789+ free_variables(true);
74790+
74791+ /* doesn't affect runtime, but maintains consistent state */
74792+out:
74793+ polstate = new_reload_state.oldpolicy_ptr;
74794+ current_alloc_state = new_reload_state.oldalloc_ptr;
74795+
74796+ return err;
74797+}
74798+
74799+static int
74800+gracl_init(struct gr_arg *args)
74801+{
74802+ int error = 0;
74803+
74804+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
74805+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
74806+
74807+ if (init_variables(args, false)) {
74808+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
74809+ error = -ENOMEM;
74810+ goto out;
74811+ }
74812+
74813+ error = copy_user_acl(args);
74814+ free_init_variables();
74815+ if (error)
74816+ goto out;
74817+
74818+ error = gr_set_acls(0);
74819+ if (error)
74820+ goto out;
74821+
74822+ gr_enable_rbac_system();
74823+
74824+ return 0;
74825+
74826+out:
74827+ free_variables(false);
74828+ return error;
74829+}
74830+
74831+static int
74832+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
74833+ unsigned char **sum)
74834+{
74835+ struct acl_role_label *r;
74836+ struct role_allowed_ip *ipp;
74837+ struct role_transition *trans;
74838+ unsigned int i;
74839+ int found = 0;
74840+ u32 curr_ip = current->signal->curr_ip;
74841+
74842+ current->signal->saved_ip = curr_ip;
74843+
74844+ /* check transition table */
74845+
74846+ for (trans = current->role->transitions; trans; trans = trans->next) {
74847+ if (!strcmp(rolename, trans->rolename)) {
74848+ found = 1;
74849+ break;
74850+ }
74851+ }
74852+
74853+ if (!found)
74854+ return 0;
74855+
74856+ /* handle special roles that do not require authentication
74857+ and check ip */
74858+
74859+ FOR_EACH_ROLE_START(r)
74860+ if (!strcmp(rolename, r->rolename) &&
74861+ (r->roletype & GR_ROLE_SPECIAL)) {
74862+ found = 0;
74863+ if (r->allowed_ips != NULL) {
74864+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
74865+ if ((ntohl(curr_ip) & ipp->netmask) ==
74866+ (ntohl(ipp->addr) & ipp->netmask))
74867+ found = 1;
74868+ }
74869+ } else
74870+ found = 2;
74871+ if (!found)
74872+ return 0;
74873+
74874+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
74875+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
74876+ *salt = NULL;
74877+ *sum = NULL;
74878+ return 1;
74879+ }
74880+ }
74881+ FOR_EACH_ROLE_END(r)
74882+
74883+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74884+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
74885+ *salt = polstate->acl_special_roles[i]->salt;
74886+ *sum = polstate->acl_special_roles[i]->sum;
74887+ return 1;
74888+ }
74889+ }
74890+
74891+ return 0;
74892+}
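
The allowed-IP test above is masked-prefix equality: an address matches an entry when both agree on the bits the netmask keeps. A standalone demo on host-order values, as produced by the ntohl() calls in the kernel code:

#include <stdio.h>
#include <stdint.h>

static int ip_allowed(uint32_t addr, uint32_t entry, uint32_t netmask)
{
	return (addr & netmask) == (entry & netmask);
}

int main(void)
{
	uint32_t net = 0xC0A80100;	/* 192.168.1.0 */
	uint32_t mask = 0xFFFFFF00;	/* /24 */

	printf("%d\n", ip_allowed(0xC0A80142, net, mask));	/* 192.168.1.66 -> 1 */
	printf("%d\n", ip_allowed(0xC0A80242, net, mask));	/* 192.168.2.66 -> 0 */
	return 0;
}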
74893+
74894+int gr_check_secure_terminal(struct task_struct *task)
74895+{
74896+ struct task_struct *p, *p2, *p3;
74897+ struct files_struct *files;
74898+ struct fdtable *fdt;
74899+ struct file *our_file = NULL, *file;
74900+ int i;
74901+
74902+ if (task->signal->tty == NULL)
74903+ return 1;
74904+
74905+ files = get_files_struct(task);
74906+ if (files != NULL) {
74907+ rcu_read_lock();
74908+ fdt = files_fdtable(files);
74909+ for (i=0; i < fdt->max_fds; i++) {
74910+ file = fcheck_files(files, i);
74911+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
74912+ get_file(file);
74913+ our_file = file;
74914+ }
74915+ }
74916+ rcu_read_unlock();
74917+ put_files_struct(files);
74918+ }
74919+
74920+ if (our_file == NULL)
74921+ return 1;
74922+
74923+ read_lock(&tasklist_lock);
74924+ do_each_thread(p2, p) {
74925+ files = get_files_struct(p);
74926+ if (files == NULL ||
74927+ (p->signal && p->signal->tty == task->signal->tty)) {
74928+ if (files != NULL)
74929+ put_files_struct(files);
74930+ continue;
74931+ }
74932+ rcu_read_lock();
74933+ fdt = files_fdtable(files);
74934+ for (i=0; i < fdt->max_fds; i++) {
74935+ file = fcheck_files(files, i);
74936+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
74937+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
74938+ p3 = task;
74939+ while (task_pid_nr(p3) > 0) {
74940+ if (p3 == p)
74941+ break;
74942+ p3 = p3->real_parent;
74943+ }
74944+ if (p3 == p)
74945+ break;
74946+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
74947+ gr_handle_alertkill(p);
74948+ rcu_read_unlock();
74949+ put_files_struct(files);
74950+ read_unlock(&tasklist_lock);
74951+ fput(our_file);
74952+ return 0;
74953+ }
74954+ }
74955+ rcu_read_unlock();
74956+ put_files_struct(files);
74957+ } while_each_thread(p2, p);
74958+ read_unlock(&tasklist_lock);
74959+
74960+ fput(our_file);
74961+ return 1;
74962+}
74963+
74964+ssize_t
74965+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
74966+{
74967+ struct gr_arg_wrapper uwrap;
74968+ unsigned char *sprole_salt = NULL;
74969+ unsigned char *sprole_sum = NULL;
74970+ int error = 0;
74971+ int error2 = 0;
74972+ size_t req_count = 0;
74973+ unsigned char oldmode = 0;
74974+
74975+ mutex_lock(&gr_dev_mutex);
74976+
74977+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
74978+ error = -EPERM;
74979+ goto out;
74980+ }
74981+
74982+#ifdef CONFIG_COMPAT
74983+ pax_open_kernel();
74984+ if (is_compat_task()) {
74985+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
74986+ copy_gr_arg = &copy_gr_arg_compat;
74987+ copy_acl_object_label = &copy_acl_object_label_compat;
74988+ copy_acl_subject_label = &copy_acl_subject_label_compat;
74989+ copy_acl_role_label = &copy_acl_role_label_compat;
74990+ copy_acl_ip_label = &copy_acl_ip_label_compat;
74991+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
74992+ copy_role_transition = &copy_role_transition_compat;
74993+ copy_sprole_pw = &copy_sprole_pw_compat;
74994+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
74995+ copy_pointer_from_array = &copy_pointer_from_array_compat;
74996+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
74997+ } else {
74998+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
74999+ copy_gr_arg = &copy_gr_arg_normal;
75000+ copy_acl_object_label = &copy_acl_object_label_normal;
75001+ copy_acl_subject_label = &copy_acl_subject_label_normal;
75002+ copy_acl_role_label = &copy_acl_role_label_normal;
75003+ copy_acl_ip_label = &copy_acl_ip_label_normal;
75004+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
75005+ copy_role_transition = &copy_role_transition_normal;
75006+ copy_sprole_pw = &copy_sprole_pw_normal;
75007+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
75008+ copy_pointer_from_array = &copy_pointer_from_array_normal;
75009+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
75010+ }
75011+ pax_close_kernel();
75012+#endif
75013+
75014+ req_count = get_gr_arg_wrapper_size();
75015+
75016+ if (count != req_count) {
75017+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
75018+ error = -EINVAL;
75019+ goto out;
75020+ }
75021+
75023+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
75024+ gr_auth_expires = 0;
75025+ gr_auth_attempts = 0;
75026+ }
75027+
75028+ error = copy_gr_arg_wrapper(buf, &uwrap);
75029+ if (error)
75030+ goto out;
75031+
75032+ error = copy_gr_arg(uwrap.arg, gr_usermode);
75033+ if (error)
75034+ goto out;
75035+
75036+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75037+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75038+ time_after(gr_auth_expires, get_seconds())) {
75039+ error = -EBUSY;
75040+ goto out;
75041+ }
75042+
75043+	/* if a non-root user is trying to do anything other than use a
75044+	   special role, do not attempt authentication and do not count it
75045+	   towards authentication locking
75046+	 */
75047+
75048+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
75049+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75050+ gr_is_global_nonroot(current_uid())) {
75051+ error = -EPERM;
75052+ goto out;
75053+ }
75054+
75055+ /* ensure pw and special role name are null terminated */
75056+
75057+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
75058+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
75059+
75060+	/* Okay.
75061+	 * We now have enough of the argument structure (we have yet
75062+	 * to copy_from_user the tables themselves).  Copy the tables
75063+	 * only if we need them, i.e. for loading operations. */
75064+
75065+ switch (gr_usermode->mode) {
75066+ case GR_STATUS:
75067+ if (gr_acl_is_enabled()) {
75068+ error = 1;
75069+ if (!gr_check_secure_terminal(current))
75070+ error = 3;
75071+ } else
75072+ error = 2;
75073+ goto out;
75074+ case GR_SHUTDOWN:
75075+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75076+ stop_machine(gr_rbac_disable, NULL, NULL);
75077+ free_variables(false);
75078+ memset(gr_usermode, 0, sizeof(struct gr_arg));
75079+ memset(gr_system_salt, 0, GR_SALT_LEN);
75080+ memset(gr_system_sum, 0, GR_SHA_LEN);
75081+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
75082+ } else if (gr_acl_is_enabled()) {
75083+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
75084+ error = -EPERM;
75085+ } else {
75086+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
75087+ error = -EAGAIN;
75088+ }
75089+ break;
75090+ case GR_ENABLE:
75091+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
75092+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
75093+ else {
75094+ if (gr_acl_is_enabled())
75095+ error = -EAGAIN;
75096+ else
75097+ error = error2;
75098+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
75099+ }
75100+ break;
75101+ case GR_OLDRELOAD:
75102+ oldmode = 1;
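+		/* fall through to GR_RELOAD */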
75103+ case GR_RELOAD:
75104+ if (!gr_acl_is_enabled()) {
75105+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
75106+ error = -EAGAIN;
75107+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75108+ error2 = gracl_reload(gr_usermode, oldmode);
75109+ if (!error2)
75110+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
75111+ else {
75112+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75113+ error = error2;
75114+ }
75115+ } else {
75116+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75117+ error = -EPERM;
75118+ }
75119+ break;
75120+ case GR_SEGVMOD:
75121+ if (unlikely(!gr_acl_is_enabled())) {
75122+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
75123+ error = -EAGAIN;
75124+ break;
75125+ }
75126+
75127+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75128+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
75129+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
75130+ struct acl_subject_label *segvacl;
75131+ segvacl =
75132+ lookup_acl_subj_label(gr_usermode->segv_inode,
75133+ gr_usermode->segv_device,
75134+ current->role);
75135+ if (segvacl) {
75136+ segvacl->crashes = 0;
75137+ segvacl->expires = 0;
75138+ }
75139+		} else {
+			int loc = gr_find_uid(gr_usermode->segv_uid);
+
+			/* gr_remove_uid() takes a table index, not a uid */
+			if (loc >= 0)
+				gr_remove_uid(loc);
75141+		}
75142+ } else {
75143+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
75144+ error = -EPERM;
75145+ }
75146+ break;
75147+ case GR_SPROLE:
75148+ case GR_SPROLEPAM:
75149+ if (unlikely(!gr_acl_is_enabled())) {
75150+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
75151+ error = -EAGAIN;
75152+ break;
75153+ }
75154+
75155+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
75156+ current->role->expires = 0;
75157+ current->role->auth_attempts = 0;
75158+ }
75159+
75160+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75161+ time_after(current->role->expires, get_seconds())) {
75162+ error = -EBUSY;
75163+ goto out;
75164+ }
75165+
75166+ if (lookup_special_role_auth
75167+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
75168+ && ((!sprole_salt && !sprole_sum)
75169+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
75170+ char *p = "";
75171+ assign_special_role(gr_usermode->sp_role);
75172+ read_lock(&tasklist_lock);
75173+ if (current->real_parent)
75174+ p = current->real_parent->role->rolename;
75175+ read_unlock(&tasklist_lock);
75176+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
75177+ p, acl_sp_role_value);
75178+ } else {
75179+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
75180+ error = -EPERM;
75181+			if (!(current->role->auth_attempts++))
75182+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75183+
75184+ goto out;
75185+ }
75186+ break;
75187+ case GR_UNSPROLE:
75188+ if (unlikely(!gr_acl_is_enabled())) {
75189+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
75190+ error = -EAGAIN;
75191+ break;
75192+ }
75193+
75194+ if (current->role->roletype & GR_ROLE_SPECIAL) {
75195+ char *p = "";
75196+ int i = 0;
75197+
75198+ read_lock(&tasklist_lock);
75199+ if (current->real_parent) {
75200+ p = current->real_parent->role->rolename;
75201+ i = current->real_parent->acl_role_id;
75202+ }
75203+ read_unlock(&tasklist_lock);
75204+
75205+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
75206+ gr_set_acls(1);
75207+ } else {
75208+ error = -EPERM;
75209+ goto out;
75210+ }
75211+ break;
75212+ default:
75213+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
75214+ error = -EINVAL;
75215+ break;
75216+ }
75217+
75218+ if (error != -EPERM)
75219+ goto out;
75220+
75221+	if (!(gr_auth_attempts++))
75222+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75223+
75224+ out:
75225+ mutex_unlock(&gr_dev_mutex);
75226+
75227+ if (!error)
75228+ error = req_count;
75229+
75230+ return error;
75231+}
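
The handler applies the same lockout pattern twice: globally via gr_auth_attempts/gr_auth_expires and per role via auth_attempts/expires. A standalone sketch with the expiry reset and the refusal check folded into one helper; MAXTRIES and TIMEOUT stand in for the CONFIG_GRKERNSEC_ACL_MAXTRIES/TIMEOUT values:

#include <stdio.h>
#include <time.h>

#define MAXTRIES 3
#define TIMEOUT  30	/* seconds */

static unsigned int attempts;
static time_t expires;

static int auth_allowed(time_t now)
{
	if (expires && now >= expires) {	/* window over: reset state */
		expires = 0;
		attempts = 0;
	}
	return !(attempts >= MAXTRIES && now < expires);
}

static void auth_failed(time_t now)
{
	if (!attempts++)			/* first failure arms the timer */
		expires = now + TIMEOUT;
}

int main(void)
{
	time_t now = time(NULL);
	int i;

	for (i = 0; i < 5; i++) {
		/* tries 0-2 are allowed; 3 and 4 are locked out */
		printf("try %d: %s\n", i, auth_allowed(now) ? "allowed" : "locked");
		auth_failed(now);
	}
	return 0;
}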
75232+
75233+int
75234+gr_set_acls(const int type)
75235+{
75236+ struct task_struct *task, *task2;
75237+ struct acl_role_label *role = current->role;
75238+ struct acl_subject_label *subj;
75239+ __u16 acl_role_id = current->acl_role_id;
75240+ const struct cred *cred;
75241+ int ret;
75242+
75243+ rcu_read_lock();
75244+ read_lock(&tasklist_lock);
75245+ read_lock(&grsec_exec_file_lock);
75246+ do_each_thread(task2, task) {
75247+ /* check to see if we're called from the exit handler,
75248+ if so, only replace ACLs that have inherited the admin
75249+ ACL */
75250+
75251+ if (type && (task->role != role ||
75252+ task->acl_role_id != acl_role_id))
75253+ continue;
75254+
75255+ task->acl_role_id = 0;
75256+ task->acl_sp_role = 0;
75257+ task->inherited = 0;
75258+
75259+ if (task->exec_file) {
75260+ cred = __task_cred(task);
75261+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75262+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
75263+ if (subj == NULL) {
75264+ ret = -EINVAL;
75265+ read_unlock(&grsec_exec_file_lock);
75266+ read_unlock(&tasklist_lock);
75267+ rcu_read_unlock();
75268+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
75269+ return ret;
75270+ }
75271+ __gr_apply_subject_to_task(polstate, task, subj);
75272+ } else {
75273+ // it's a kernel process
75274+ task->role = polstate->kernel_role;
75275+ task->acl = polstate->kernel_role->root_label;
75276+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75277+ task->acl->mode &= ~GR_PROCFIND;
75278+#endif
75279+ }
75280+ } while_each_thread(task2, task);
75281+ read_unlock(&grsec_exec_file_lock);
75282+ read_unlock(&tasklist_lock);
75283+ rcu_read_unlock();
75284+
75285+ return 0;
75286+}
75287diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
75288new file mode 100644
75289index 0000000..39645c9
75290--- /dev/null
75291+++ b/grsecurity/gracl_res.c
75292@@ -0,0 +1,68 @@
75293+#include <linux/kernel.h>
75294+#include <linux/sched.h>
75295+#include <linux/gracl.h>
75296+#include <linux/grinternal.h>
75297+
75298+static const char *restab_log[] = {
75299+ [RLIMIT_CPU] = "RLIMIT_CPU",
75300+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
75301+ [RLIMIT_DATA] = "RLIMIT_DATA",
75302+ [RLIMIT_STACK] = "RLIMIT_STACK",
75303+ [RLIMIT_CORE] = "RLIMIT_CORE",
75304+ [RLIMIT_RSS] = "RLIMIT_RSS",
75305+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
75306+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
75307+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
75308+ [RLIMIT_AS] = "RLIMIT_AS",
75309+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
75310+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
75311+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
75312+ [RLIMIT_NICE] = "RLIMIT_NICE",
75313+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
75314+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
75315+ [GR_CRASH_RES] = "RLIMIT_CRASH"
75316+};
75317+
75318+void
75319+gr_log_resource(const struct task_struct *task,
75320+ const int res, const unsigned long wanted, const int gt)
75321+{
75322+ const struct cred *cred;
75323+ unsigned long rlim;
75324+
75325+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
75326+ return;
75327+
75328+ // not yet supported resource
75329+ if (unlikely(!restab_log[res]))
75330+ return;
75331+
75332+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
75333+ rlim = task_rlimit_max(task, res);
75334+ else
75335+ rlim = task_rlimit(task, res);
75336+
75337+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
75338+ return;
75339+
75340+ rcu_read_lock();
75341+ cred = __task_cred(task);
75342+
75343+ if (res == RLIMIT_NPROC &&
75344+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
75345+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
75346+ goto out_rcu_unlock;
75347+ else if (res == RLIMIT_MEMLOCK &&
75348+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
75349+ goto out_rcu_unlock;
75350+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
75351+ goto out_rcu_unlock;
75352+ rcu_read_unlock();
75353+
75354+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
75355+
75356+ return;
75357+out_rcu_unlock:
75358+ rcu_read_unlock();
75359+ return;
75360+}
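
The limit check in gr_log_resource() is written as the early-return negation; in boolean form it logs when the limit is finite and the request exceeds it, strictly when gt is set, or at-or-above otherwise. A standalone restatement of just that predicate:

#include <stdio.h>

#define RLIM_INF (~0UL)

static int over_limit(unsigned long wanted, unsigned long rlim, int gt)
{
	if (rlim == RLIM_INF)
		return 0;	/* unlimited resources never log */
	return gt ? wanted > rlim : wanted >= rlim;
}

int main(void)
{
	printf("%d %d %d\n",
	       over_limit(10, 10, 1),		/* 0: not strictly over */
	       over_limit(10, 10, 0),		/* 1: at the limit counts */
	       over_limit(99, RLIM_INF, 0));	/* 0: unlimited */
	return 0;
}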
75361diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
75362new file mode 100644
75363index 0000000..218b66b
75364--- /dev/null
75365+++ b/grsecurity/gracl_segv.c
75366@@ -0,0 +1,324 @@
75367+#include <linux/kernel.h>
75368+#include <linux/mm.h>
75369+#include <asm/uaccess.h>
75370+#include <asm/errno.h>
75371+#include <asm/mman.h>
75372+#include <net/sock.h>
75373+#include <linux/file.h>
75374+#include <linux/fs.h>
75375+#include <linux/net.h>
75376+#include <linux/in.h>
75377+#include <linux/slab.h>
75378+#include <linux/types.h>
75379+#include <linux/sched.h>
75380+#include <linux/timer.h>
75381+#include <linux/gracl.h>
75382+#include <linux/grsecurity.h>
75383+#include <linux/grinternal.h>
75384+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75385+#include <linux/magic.h>
75386+#include <linux/pagemap.h>
75387+#include "../fs/btrfs/async-thread.h"
75388+#include "../fs/btrfs/ctree.h"
75389+#include "../fs/btrfs/btrfs_inode.h"
75390+#endif
75391+
75392+static struct crash_uid *uid_set;
75393+static unsigned short uid_used;
75394+static DEFINE_SPINLOCK(gr_uid_lock);
75395+extern rwlock_t gr_inode_lock;
75396+extern struct acl_subject_label *
75397+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
75398+ struct acl_role_label *role);
75399+
75400+static inline dev_t __get_dev(const struct dentry *dentry)
75401+{
75402+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75403+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75404+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
75405+ else
75406+#endif
75407+ return dentry->d_sb->s_dev;
75408+}
75409+
75410+static inline u64 __get_ino(const struct dentry *dentry)
75411+{
75412+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75413+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75414+ return btrfs_ino(dentry->d_inode);
75415+ else
75416+#endif
75417+ return dentry->d_inode->i_ino;
75418+}
75419+
75420+int
75421+gr_init_uidset(void)
75422+{
75423+ uid_set =
75424+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
75425+ uid_used = 0;
75426+
75427+ return uid_set ? 1 : 0;
75428+}
75429+
75430+void
75431+gr_free_uidset(void)
75432+{
75433+ if (uid_set) {
75434+ struct crash_uid *tmpset;
75435+ spin_lock(&gr_uid_lock);
75436+ tmpset = uid_set;
75437+ uid_set = NULL;
75438+ uid_used = 0;
75439+ spin_unlock(&gr_uid_lock);
75440+		kfree(tmpset);
75442+ }
75443+
75444+ return;
75445+}
75446+
75447+int
75448+gr_find_uid(const uid_t uid)
75449+{
75450+ struct crash_uid *tmp = uid_set;
75451+ uid_t buid;
75452+ int low = 0, high = uid_used - 1, mid;
75453+
75454+ while (high >= low) {
75455+ mid = (low + high) >> 1;
75456+ buid = tmp[mid].uid;
75457+ if (buid == uid)
75458+ return mid;
75459+ if (buid > uid)
75460+ high = mid - 1;
75461+ if (buid < uid)
75462+ low = mid + 1;
75463+ }
75464+
75465+ return -1;
75466+}
75467+
75468+static __inline__ void
75469+gr_insertsort(void)
75470+{
75471+ unsigned short i, j;
75472+ struct crash_uid index;
75473+
75474+ for (i = 1; i < uid_used; i++) {
75475+ index = uid_set[i];
75476+ j = i;
75477+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
75478+ uid_set[j] = uid_set[j - 1];
75479+ j--;
75480+ }
75481+ uid_set[j] = index;
75482+ }
75483+
75484+ return;
75485+}
75486+
75487+static __inline__ void
75488+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
75489+{
75490+ int loc;
75491+ uid_t uid = GR_GLOBAL_UID(kuid);
75492+
75493+ if (uid_used == GR_UIDTABLE_MAX)
75494+ return;
75495+
75496+ loc = gr_find_uid(uid);
75497+
75498+ if (loc >= 0) {
75499+ uid_set[loc].expires = expires;
75500+ return;
75501+ }
75502+
75503+ uid_set[uid_used].uid = uid;
75504+ uid_set[uid_used].expires = expires;
75505+ uid_used++;
75506+
75507+ gr_insertsort();
75508+
75509+ return;
75510+}
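
gr_find_uid(), gr_insertsort() and gr_insert_uid() together keep a small array sorted after every append so lookups can binary-search. A minimal standalone version with a fixed capacity and plain unsigned ints in place of struct crash_uid:

#include <stdio.h>

#define MAX 16

static unsigned int set[MAX];
static int used;

static int find(unsigned int v)		/* mirrors gr_find_uid() */
{
	int low = 0, high = used - 1, mid;

	while (high >= low) {
		mid = (low + high) >> 1;
		if (set[mid] == v)
			return mid;
		if (set[mid] > v)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert(unsigned int v)	/* append, then restore order */
{
	int j = used++;

	set[j] = v;
	while (j > 0 && set[j - 1] > set[j]) {
		unsigned int t = set[j];

		set[j] = set[j - 1];
		set[j - 1] = t;
		j--;
	}
}

int main(void)
{
	insert(1000);
	insert(42);
	insert(500);
	/* sorted order is 42, 500, 1000; absent values return -1 */
	printf("idx(42)=%d idx(500)=%d idx(7)=%d\n", find(42), find(500), find(7));
	return 0;
}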
75511+
75512+void
75513+gr_remove_uid(const unsigned short loc)
75514+{
75515+ unsigned short i;
75516+
75517+ for (i = loc + 1; i < uid_used; i++)
75518+ uid_set[i - 1] = uid_set[i];
75519+
75520+ uid_used--;
75521+
75522+ return;
75523+}
75524+
75525+int
75526+gr_check_crash_uid(const kuid_t kuid)
75527+{
75528+ int loc;
75529+ int ret = 0;
75530+ uid_t uid;
75531+
75532+ if (unlikely(!gr_acl_is_enabled()))
75533+ return 0;
75534+
75535+ uid = GR_GLOBAL_UID(kuid);
75536+
75537+ spin_lock(&gr_uid_lock);
75538+ loc = gr_find_uid(uid);
75539+
75540+ if (loc < 0)
75541+ goto out_unlock;
75542+
75543+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
75544+ gr_remove_uid(loc);
75545+ else
75546+ ret = 1;
75547+
75548+out_unlock:
75549+ spin_unlock(&gr_uid_lock);
75550+ return ret;
75551+}
75552+
75553+static __inline__ int
75554+proc_is_setxid(const struct cred *cred)
75555+{
75556+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
75557+ !uid_eq(cred->uid, cred->fsuid))
75558+ return 1;
75559+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
75560+ !gid_eq(cred->gid, cred->fsgid))
75561+ return 1;
75562+
75563+ return 0;
75564+}
75565+
75566+extern int gr_fake_force_sig(int sig, struct task_struct *t);
75567+
75568+void
75569+gr_handle_crash(struct task_struct *task, const int sig)
75570+{
75571+ struct acl_subject_label *curr;
75572+ struct task_struct *tsk, *tsk2;
75573+ const struct cred *cred;
75574+ const struct cred *cred2;
75575+
75576+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
75577+ return;
75578+
75579+ if (unlikely(!gr_acl_is_enabled()))
75580+ return;
75581+
75582+ curr = task->acl;
75583+
75584+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
75585+ return;
75586+
75587+ if (time_before_eq(curr->expires, get_seconds())) {
75588+ curr->expires = 0;
75589+ curr->crashes = 0;
75590+ }
75591+
75592+ curr->crashes++;
75593+
75594+ if (!curr->expires)
75595+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
75596+
75597+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75598+ time_after(curr->expires, get_seconds())) {
75599+ rcu_read_lock();
75600+ cred = __task_cred(task);
75601+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
75602+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75603+ spin_lock(&gr_uid_lock);
75604+ gr_insert_uid(cred->uid, curr->expires);
75605+ spin_unlock(&gr_uid_lock);
75606+ curr->expires = 0;
75607+ curr->crashes = 0;
75608+ read_lock(&tasklist_lock);
75609+ do_each_thread(tsk2, tsk) {
75610+ cred2 = __task_cred(tsk);
75611+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
75612+ gr_fake_force_sig(SIGKILL, tsk);
75613+ } while_each_thread(tsk2, tsk);
75614+ read_unlock(&tasklist_lock);
75615+ } else {
75616+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75617+ read_lock(&tasklist_lock);
75618+ read_lock(&grsec_exec_file_lock);
75619+ do_each_thread(tsk2, tsk) {
75620+ if (likely(tsk != task)) {
75621+ // if this thread has the same subject as the one that triggered
75622+ // RES_CRASH and it's the same binary, kill it
75623+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
75624+ gr_fake_force_sig(SIGKILL, tsk);
75625+ }
75626+ } while_each_thread(tsk2, tsk);
75627+ read_unlock(&grsec_exec_file_lock);
75628+ read_unlock(&tasklist_lock);
75629+ }
75630+ rcu_read_unlock();
75631+ }
75632+
75633+ return;
75634+}
75635+
75636+int
75637+gr_check_crash_exec(const struct file *filp)
75638+{
75639+ struct acl_subject_label *curr;
75640+ struct dentry *dentry;
75641+
75642+ if (unlikely(!gr_acl_is_enabled()))
75643+ return 0;
75644+
75645+ read_lock(&gr_inode_lock);
75646+ dentry = filp->f_path.dentry;
75647+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
75648+ current->role);
75649+ read_unlock(&gr_inode_lock);
75650+
75651+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
75652+ (!curr->crashes && !curr->expires))
75653+ return 0;
75654+
75655+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75656+ time_after(curr->expires, get_seconds()))
75657+ return 1;
75658+ else if (time_before_eq(curr->expires, get_seconds())) {
75659+ curr->crashes = 0;
75660+ curr->expires = 0;
75661+ }
75662+
75663+ return 0;
75664+}
75665+
75666+void
75667+gr_handle_alertkill(struct task_struct *task)
75668+{
75669+ struct acl_subject_label *curracl;
75670+ __u32 curr_ip;
75671+ struct task_struct *p, *p2;
75672+
75673+ if (unlikely(!gr_acl_is_enabled()))
75674+ return;
75675+
75676+ curracl = task->acl;
75677+ curr_ip = task->signal->curr_ip;
75678+
75679+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
75680+ read_lock(&tasklist_lock);
75681+ do_each_thread(p2, p) {
75682+ if (p->signal->curr_ip == curr_ip)
75683+ gr_fake_force_sig(SIGKILL, p);
75684+ } while_each_thread(p2, p);
75685+ read_unlock(&tasklist_lock);
75686+ } else if (curracl->mode & GR_KILLPROC)
75687+ gr_fake_force_sig(SIGKILL, task);
75688+
75689+ return;
75690+}
75691diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
75692new file mode 100644
75693index 0000000..6b0c9cc
75694--- /dev/null
75695+++ b/grsecurity/gracl_shm.c
75696@@ -0,0 +1,40 @@
75697+#include <linux/kernel.h>
75698+#include <linux/mm.h>
75699+#include <linux/sched.h>
75700+#include <linux/file.h>
75701+#include <linux/ipc.h>
75702+#include <linux/gracl.h>
75703+#include <linux/grsecurity.h>
75704+#include <linux/grinternal.h>
75705+
75706+int
75707+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
75708+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
75709+{
75710+ struct task_struct *task;
75711+
75712+ if (!gr_acl_is_enabled())
75713+ return 1;
75714+
75715+ rcu_read_lock();
75716+ read_lock(&tasklist_lock);
75717+
75718+ task = find_task_by_vpid(shm_cprid);
75719+
75720+ if (unlikely(!task))
75721+ task = find_task_by_vpid(shm_lapid);
75722+
75723+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
75724+ (task_pid_nr(task) == shm_lapid)) &&
75725+ (task->acl->mode & GR_PROTSHM) &&
75726+ (task->acl != current->acl))) {
75727+ read_unlock(&tasklist_lock);
75728+ rcu_read_unlock();
75729+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
75730+ return 0;
75731+ }
75732+ read_unlock(&tasklist_lock);
75733+ rcu_read_unlock();
75734+
75735+ return 1;
75736+}
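[Editor's note] The hook above vetoes shmat() when the segment's creator (or last attacher) is still alive, predates the segment's creation time, runs under a GR_PROTSHM subject, and that subject differs from the attacher's. The call path it mediates is ordinary System V shared memory; a self-contained sketch of that path (same-process, so this particular run would not be denied):

/* shmat_demo.c -- the attach the hook mediates; under GR_PROTSHM the
 * shmat() below fails with EACCES when the segment's creator is still
 * alive under a different RBAC subject. Purely illustrative. */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	char *p;

	if (id < 0) {
		perror("shmget");
		return 1;
	}
	p = shmat(id, NULL, 0);		/* gr_handle_shmat() is consulted here */
	if (p == (void *)-1) {
		perror("shmat");	/* EACCES when the policy denies it */
		return 1;
	}
	strcpy(p, "hello");
	printf("%s\n", p);
	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}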
75737diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
75738new file mode 100644
75739index 0000000..bc0be01
75740--- /dev/null
75741+++ b/grsecurity/grsec_chdir.c
75742@@ -0,0 +1,19 @@
75743+#include <linux/kernel.h>
75744+#include <linux/sched.h>
75745+#include <linux/fs.h>
75746+#include <linux/file.h>
75747+#include <linux/grsecurity.h>
75748+#include <linux/grinternal.h>
75749+
75750+void
75751+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
75752+{
75753+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
75754+ if ((grsec_enable_chdir && grsec_enable_group &&
75755+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
75756+ !grsec_enable_group)) {
75757+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
75758+ }
75759+#endif
75760+ return;
75761+}
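[Editor's note] The two-clause condition above reduces to "auditing enabled, and either group filtering is off or the caller is in the audit group"; the same gating pattern recurs in gr_handle_exec_args() further down. A sketch of the factored predicate over all eight input combinations:

/* audit_gate.c -- the chdir-audit predicate, factored: equivalent to
 * (enable && group && member) || (enable && !group) from gr_log_chdir(). */
#include <stdio.h>

static int should_audit(int enable, int group_filter, int in_audit_group)
{
	return enable && (!group_filter || in_audit_group);
}

int main(void)
{
	int e, g, m;

	for (e = 0; e <= 1; e++)
		for (g = 0; g <= 1; g++)
			for (m = 0; m <= 1; m++)
				printf("enable=%d group=%d member=%d -> %d\n",
				       e, g, m, should_audit(e, g, m));
	return 0;
}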
75762diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
75763new file mode 100644
75764index 0000000..114ea4f
75765--- /dev/null
75766+++ b/grsecurity/grsec_chroot.c
75767@@ -0,0 +1,467 @@
75768+#include <linux/kernel.h>
75769+#include <linux/module.h>
75770+#include <linux/sched.h>
75771+#include <linux/file.h>
75772+#include <linux/fs.h>
75773+#include <linux/mount.h>
75774+#include <linux/types.h>
75775+#include "../fs/mount.h"
75776+#include <linux/grsecurity.h>
75777+#include <linux/grinternal.h>
75778+
75779+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75780+int gr_init_ran;
75781+#endif
75782+
75783+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
75784+{
75785+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75786+ struct dentry *tmpd = dentry;
75787+
75788+ read_seqlock_excl(&mount_lock);
75789+ write_seqlock(&rename_lock);
75790+
75791+ while (tmpd != mnt->mnt_root) {
75792+ atomic_inc(&tmpd->chroot_refcnt);
75793+ tmpd = tmpd->d_parent;
75794+ }
75795+ atomic_inc(&tmpd->chroot_refcnt);
75796+
75797+ write_sequnlock(&rename_lock);
75798+ read_sequnlock_excl(&mount_lock);
75799+#endif
75800+}
75801+
75802+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
75803+{
75804+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75805+ struct dentry *tmpd = dentry;
75806+
75807+ read_seqlock_excl(&mount_lock);
75808+ write_seqlock(&rename_lock);
75809+
75810+ while (tmpd != mnt->mnt_root) {
75811+ atomic_dec(&tmpd->chroot_refcnt);
75812+ tmpd = tmpd->d_parent;
75813+ }
75814+ atomic_dec(&tmpd->chroot_refcnt);
75815+
75816+ write_sequnlock(&rename_lock);
75817+ read_sequnlock_excl(&mount_lock);
75818+#endif
75819+}
75820+
75821+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75822+static struct dentry *get_closest_chroot(struct dentry *dentry)
75823+{
75824+ write_seqlock(&rename_lock);
75825+ do {
75826+ if (atomic_read(&dentry->chroot_refcnt)) {
75827+ write_sequnlock(&rename_lock);
75828+ return dentry;
75829+ }
75830+ dentry = dentry->d_parent;
75831+ } while (!IS_ROOT(dentry));
75832+ write_sequnlock(&rename_lock);
75833+ return NULL;
75834+}
75835+#endif
75836+
75837+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
75838+ struct dentry *newdentry, struct vfsmount *newmnt)
75839+{
75840+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75841+ struct dentry *chroot;
75842+
75843+ if (unlikely(!grsec_enable_chroot_rename))
75844+ return 0;
75845+
75846+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
75847+ return 0;
75848+
75849+ chroot = get_closest_chroot(olddentry);
75850+
75851+ if (chroot == NULL)
75852+ return 0;
75853+
75854+ if (is_subdir(newdentry, chroot))
75855+ return 0;
75856+
75857+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
75858+
75859+ return 1;
75860+#else
75861+ return 0;
75862+#endif
75863+}
75864+
75865+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
75866+{
75867+#ifdef CONFIG_GRKERNSEC
75868+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
75869+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
75870+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75871+ && gr_init_ran
75872+#endif
75873+ )
75874+ task->gr_is_chrooted = 1;
75875+ else {
75876+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75877+ if (task_pid_nr(task) == 1 && !gr_init_ran)
75878+ gr_init_ran = 1;
75879+#endif
75880+ task->gr_is_chrooted = 0;
75881+ }
75882+
75883+ task->gr_chroot_dentry = path->dentry;
75884+#endif
75885+ return;
75886+}
75887+
75888+void gr_clear_chroot_entries(struct task_struct *task)
75889+{
75890+#ifdef CONFIG_GRKERNSEC
75891+ task->gr_is_chrooted = 0;
75892+ task->gr_chroot_dentry = NULL;
75893+#endif
75894+ return;
75895+}
75896+
75897+int
75898+gr_handle_chroot_unix(const pid_t pid)
75899+{
75900+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
75901+ struct task_struct *p;
75902+
75903+ if (unlikely(!grsec_enable_chroot_unix))
75904+ return 1;
75905+
75906+ if (likely(!proc_is_chrooted(current)))
75907+ return 1;
75908+
75909+ rcu_read_lock();
75910+ read_lock(&tasklist_lock);
75911+ p = find_task_by_vpid_unrestricted(pid);
75912+ if (unlikely(p && !have_same_root(current, p))) {
75913+ read_unlock(&tasklist_lock);
75914+ rcu_read_unlock();
75915+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
75916+ return 0;
75917+ }
75918+ read_unlock(&tasklist_lock);
75919+ rcu_read_unlock();
75920+#endif
75921+ return 1;
75922+}
75923+
75924+int
75925+gr_handle_chroot_nice(void)
75926+{
75927+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
75928+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
75929+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
75930+ return -EPERM;
75931+ }
75932+#endif
75933+ return 0;
75934+}
75935+
75936+int
75937+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
75938+{
75939+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
75940+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
75941+ && proc_is_chrooted(current)) {
75942+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
75943+ return -EACCES;
75944+ }
75945+#endif
75946+ return 0;
75947+}
75948+
75949+int
75950+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
75951+{
75952+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75953+ struct task_struct *p;
75954+ int ret = 0;
75955+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
75956+ return ret;
75957+
75958+ read_lock(&tasklist_lock);
75959+ do_each_pid_task(pid, type, p) {
75960+ if (!have_same_root(current, p)) {
75961+ ret = 1;
75962+ goto out;
75963+ }
75964+ } while_each_pid_task(pid, type, p);
75965+out:
75966+ read_unlock(&tasklist_lock);
75967+ return ret;
75968+#endif
75969+ return 0;
75970+}
75971+
75972+int
75973+gr_pid_is_chrooted(struct task_struct *p)
75974+{
75975+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75976+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
75977+ return 0;
75978+
75979+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
75980+ !have_same_root(current, p)) {
75981+ return 1;
75982+ }
75983+#endif
75984+ return 0;
75985+}
75986+
75987+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
75988+
75989+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
75990+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
75991+{
75992+ struct path path, currentroot;
75993+ int ret = 0;
75994+
75995+ path.dentry = (struct dentry *)u_dentry;
75996+ path.mnt = (struct vfsmount *)u_mnt;
75997+ get_fs_root(current->fs, &currentroot);
75998+ if (path_is_under(&path, &currentroot))
75999+ ret = 1;
76000+ path_put(&currentroot);
76001+
76002+ return ret;
76003+}
76004+#endif
76005+
76006+int
76007+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
76008+{
76009+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76010+ if (!grsec_enable_chroot_fchdir)
76011+ return 1;
76012+
76013+ if (!proc_is_chrooted(current))
76014+ return 1;
76015+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
76016+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
76017+ return 0;
76018+ }
76019+#endif
76020+ return 1;
76021+}
76022+
76023+int
76024+gr_chroot_fhandle(void)
76025+{
76026+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76027+ if (!grsec_enable_chroot_fchdir)
76028+ return 1;
76029+
76030+ if (!proc_is_chrooted(current))
76031+ return 1;
76032+ else {
76033+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
76034+ return 0;
76035+ }
76036+#endif
76037+ return 1;
76038+}
76039+
76040+int
76041+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76042+ const u64 shm_createtime)
76043+{
76044+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76045+ struct task_struct *p;
76046+
76047+ if (unlikely(!grsec_enable_chroot_shmat))
76048+ return 1;
76049+
76050+ if (likely(!proc_is_chrooted(current)))
76051+ return 1;
76052+
76053+ rcu_read_lock();
76054+ read_lock(&tasklist_lock);
76055+
76056+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
76057+ if (time_before_eq64(p->start_time, shm_createtime)) {
76058+ if (have_same_root(current, p)) {
76059+ goto allow;
76060+ } else {
76061+ read_unlock(&tasklist_lock);
76062+ rcu_read_unlock();
76063+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76064+ return 0;
76065+ }
76066+ }
76067+ /* creator exited, pid reuse, fall through to next check */
76068+ }
76069+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
76070+ if (unlikely(!have_same_root(current, p))) {
76071+ read_unlock(&tasklist_lock);
76072+ rcu_read_unlock();
76073+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76074+ return 0;
76075+ }
76076+ }
76077+
76078+allow:
76079+ read_unlock(&tasklist_lock);
76080+ rcu_read_unlock();
76081+#endif
76082+ return 1;
76083+}
76084+
76085+void
76086+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
76087+{
76088+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
76089+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
76090+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
76091+#endif
76092+ return;
76093+}
76094+
76095+int
76096+gr_handle_chroot_mknod(const struct dentry *dentry,
76097+ const struct vfsmount *mnt, const int mode)
76098+{
76099+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
76100+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
76101+ proc_is_chrooted(current)) {
76102+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
76103+ return -EPERM;
76104+ }
76105+#endif
76106+ return 0;
76107+}
76108+
76109+int
76110+gr_handle_chroot_mount(const struct dentry *dentry,
76111+ const struct vfsmount *mnt, const char *dev_name)
76112+{
76113+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
76114+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
76115+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
76116+ return -EPERM;
76117+ }
76118+#endif
76119+ return 0;
76120+}
76121+
76122+int
76123+gr_handle_chroot_pivot(void)
76124+{
76125+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
76126+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
76127+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
76128+ return -EPERM;
76129+ }
76130+#endif
76131+ return 0;
76132+}
76133+
76134+int
76135+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
76136+{
76137+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
76138+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
76139+ !gr_is_outside_chroot(dentry, mnt)) {
76140+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
76141+ return -EPERM;
76142+ }
76143+#endif
76144+ return 0;
76145+}
76146+
76147+extern const char *captab_log[];
76148+extern int captab_log_entries;
76149+
76150+int
76151+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76152+{
76153+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76154+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76155+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76156+ if (cap_raised(chroot_caps, cap)) {
76157+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
76158+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
76159+ }
76160+ return 0;
76161+ }
76162+ }
76163+#endif
76164+ return 1;
76165+}
76166+
76167+int
76168+gr_chroot_is_capable(const int cap)
76169+{
76170+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76171+ return gr_task_chroot_is_capable(current, current_cred(), cap);
76172+#endif
76173+ return 1;
76174+}
76175+
76176+int
76177+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
76178+{
76179+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76180+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76181+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76182+ if (cap_raised(chroot_caps, cap)) {
76183+ return 0;
76184+ }
76185+ }
76186+#endif
76187+ return 1;
76188+}
76189+
76190+int
76191+gr_chroot_is_capable_nolog(const int cap)
76192+{
76193+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76194+ return gr_task_chroot_is_capable_nolog(current, cap);
76195+#endif
76196+ return 1;
76197+}
76198+
76199+int
76200+gr_handle_chroot_sysctl(const int op)
76201+{
76202+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
76203+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
76204+ proc_is_chrooted(current))
76205+ return -EACCES;
76206+#endif
76207+ return 0;
76208+}
76209+
76210+void
76211+gr_handle_chroot_chdir(const struct path *path)
76212+{
76213+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
76214+ if (grsec_enable_chroot_chdir)
76215+ set_fs_pwd(current->fs, path);
76216+#endif
76217+ return;
76218+}
76219+
76220+int
76221+gr_handle_chroot_chmod(const struct dentry *dentry,
76222+ const struct vfsmount *mnt, const int mode)
76223+{
76224+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
76225+ /* allow chmod +s on directories, but not files */
76226+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
76227+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
76228+ proc_is_chrooted(current)) {
76229+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
76230+ return -EPERM;
76231+ }
76232+#endif
76233+ return 0;
76234+}
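[Editor's note] Several of the hooks above (CHROOT_DOUBLE, CHROOT_FCHDIR) exist to close the textbook fd-based chroot escape: keep a directory fd from outside the new root, chroot() again, fchdir() back through the saved fd, then climb with "..". A sketch of that sequence, shown only to make the two hooks concrete; it assumes root inside a jail on a kernel without these protections.

/* breakout_sketch.c -- the escape CHROOT_DOUBLE/CHROOT_FCHDIR block. */
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>

int main(void)
{
	int fd = open(".", O_RDONLY);	/* fd pinned outside the new root */
	int i;

	if (fd < 0)
		return 1;
	mkdir("jail", 0700);		/* ignore EEXIST */
	if (chroot("jail") < 0)		/* gr_handle_chroot_chroot() fires here */
		return 1;
	if (fchdir(fd) < 0)		/* gr_chroot_fchdir() fires here */
		return 1;
	for (i = 0; i < 64; i++)	/* climb past the real root */
		chdir("..");
	if (chroot(".") < 0)
		return 1;
	close(fd);
	execl("/bin/sh", "sh", (char *)NULL);	/* a shell outside the jail */
	return 1;
}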
76235diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
76236new file mode 100644
76237index 0000000..946f750
76238--- /dev/null
76239+++ b/grsecurity/grsec_disabled.c
76240@@ -0,0 +1,445 @@
76241+#include <linux/kernel.h>
76242+#include <linux/module.h>
76243+#include <linux/sched.h>
76244+#include <linux/file.h>
76245+#include <linux/fs.h>
76246+#include <linux/kdev_t.h>
76247+#include <linux/net.h>
76248+#include <linux/in.h>
76249+#include <linux/ip.h>
76250+#include <linux/skbuff.h>
76251+#include <linux/sysctl.h>
76252+
76253+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76254+void
76255+pax_set_initial_flags(struct linux_binprm *bprm)
76256+{
76257+ return;
76258+}
76259+#endif
76260+
76261+#ifdef CONFIG_SYSCTL
76262+__u32
76263+gr_handle_sysctl(const struct ctl_table * table, const int op)
76264+{
76265+ return 0;
76266+}
76267+#endif
76268+
76269+#ifdef CONFIG_TASKSTATS
76270+int gr_is_taskstats_denied(int pid)
76271+{
76272+ return 0;
76273+}
76274+#endif
76275+
76276+int
76277+gr_acl_is_enabled(void)
76278+{
76279+ return 0;
76280+}
76281+
76282+int
76283+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
76284+{
76285+ return 0;
76286+}
76287+
76288+void
76289+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
76290+{
76291+ return;
76292+}
76293+
76294+int
76295+gr_handle_rawio(const struct inode *inode)
76296+{
76297+ return 0;
76298+}
76299+
76300+void
76301+gr_acl_handle_psacct(struct task_struct *task, const long code)
76302+{
76303+ return;
76304+}
76305+
76306+int
76307+gr_handle_ptrace(struct task_struct *task, const long request)
76308+{
76309+ return 0;
76310+}
76311+
76312+int
76313+gr_handle_proc_ptrace(struct task_struct *task)
76314+{
76315+ return 0;
76316+}
76317+
76318+int
76319+gr_set_acls(const int type)
76320+{
76321+ return 0;
76322+}
76323+
76324+int
76325+gr_check_hidden_task(const struct task_struct *tsk)
76326+{
76327+ return 0;
76328+}
76329+
76330+int
76331+gr_check_protected_task(const struct task_struct *task)
76332+{
76333+ return 0;
76334+}
76335+
76336+int
76337+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
76338+{
76339+ return 0;
76340+}
76341+
76342+void
76343+gr_copy_label(struct task_struct *tsk)
76344+{
76345+ return;
76346+}
76347+
76348+void
76349+gr_set_pax_flags(struct task_struct *task)
76350+{
76351+ return;
76352+}
76353+
76354+int
76355+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
76356+ const int unsafe_share)
76357+{
76358+ return 0;
76359+}
76360+
76361+void
76362+gr_handle_delete(const u64 ino, const dev_t dev)
76363+{
76364+ return;
76365+}
76366+
76367+void
76368+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
76369+{
76370+ return;
76371+}
76372+
76373+void
76374+gr_handle_crash(struct task_struct *task, const int sig)
76375+{
76376+ return;
76377+}
76378+
76379+int
76380+gr_check_crash_exec(const struct file *filp)
76381+{
76382+ return 0;
76383+}
76384+
76385+int
76386+gr_check_crash_uid(const kuid_t uid)
76387+{
76388+ return 0;
76389+}
76390+
76391+void
76392+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
76393+ struct dentry *old_dentry,
76394+ struct dentry *new_dentry,
76395+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
76396+{
76397+ return;
76398+}
76399+
76400+int
76401+gr_search_socket(const int family, const int type, const int protocol)
76402+{
76403+ return 1;
76404+}
76405+
76406+int
76407+gr_search_connectbind(const int mode, const struct socket *sock,
76408+ const struct sockaddr_in *addr)
76409+{
76410+ return 0;
76411+}
76412+
76413+void
76414+gr_handle_alertkill(struct task_struct *task)
76415+{
76416+ return;
76417+}
76418+
76419+__u32
76420+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
76421+{
76422+ return 1;
76423+}
76424+
76425+__u32
76426+gr_acl_handle_hidden_file(const struct dentry * dentry,
76427+ const struct vfsmount * mnt)
76428+{
76429+ return 1;
76430+}
76431+
76432+__u32
76433+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
76434+ int acc_mode)
76435+{
76436+ return 1;
76437+}
76438+
76439+__u32
76440+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
76441+{
76442+ return 1;
76443+}
76444+
76445+__u32
76446+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
76447+{
76448+ return 1;
76449+}
76450+
76451+int
76452+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
76453+ unsigned int *vm_flags)
76454+{
76455+ return 1;
76456+}
76457+
76458+__u32
76459+gr_acl_handle_truncate(const struct dentry * dentry,
76460+ const struct vfsmount * mnt)
76461+{
76462+ return 1;
76463+}
76464+
76465+__u32
76466+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
76467+{
76468+ return 1;
76469+}
76470+
76471+__u32
76472+gr_acl_handle_access(const struct dentry * dentry,
76473+ const struct vfsmount * mnt, const int fmode)
76474+{
76475+ return 1;
76476+}
76477+
76478+__u32
76479+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
76480+ umode_t *mode)
76481+{
76482+ return 1;
76483+}
76484+
76485+__u32
76486+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
76487+{
76488+ return 1;
76489+}
76490+
76491+__u32
76492+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
76493+{
76494+ return 1;
76495+}
76496+
76497+__u32
76498+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
76499+{
76500+ return 1;
76501+}
76502+
76503+void
76504+grsecurity_init(void)
76505+{
76506+ return;
76507+}
76508+
76509+umode_t gr_acl_umask(void)
76510+{
76511+ return 0;
76512+}
76513+
76514+__u32
76515+gr_acl_handle_mknod(const struct dentry * new_dentry,
76516+ const struct dentry * parent_dentry,
76517+ const struct vfsmount * parent_mnt,
76518+ const int mode)
76519+{
76520+ return 1;
76521+}
76522+
76523+__u32
76524+gr_acl_handle_mkdir(const struct dentry * new_dentry,
76525+ const struct dentry * parent_dentry,
76526+ const struct vfsmount * parent_mnt)
76527+{
76528+ return 1;
76529+}
76530+
76531+__u32
76532+gr_acl_handle_symlink(const struct dentry * new_dentry,
76533+ const struct dentry * parent_dentry,
76534+ const struct vfsmount * parent_mnt, const struct filename *from)
76535+{
76536+ return 1;
76537+}
76538+
76539+__u32
76540+gr_acl_handle_link(const struct dentry * new_dentry,
76541+ const struct dentry * parent_dentry,
76542+ const struct vfsmount * parent_mnt,
76543+ const struct dentry * old_dentry,
76544+ const struct vfsmount * old_mnt, const struct filename *to)
76545+{
76546+ return 1;
76547+}
76548+
76549+int
76550+gr_acl_handle_rename(const struct dentry *new_dentry,
76551+ const struct dentry *parent_dentry,
76552+ const struct vfsmount *parent_mnt,
76553+ const struct dentry *old_dentry,
76554+ const struct inode *old_parent_inode,
76555+ const struct vfsmount *old_mnt, const struct filename *newname,
76556+ unsigned int flags)
76557+{
76558+ return 0;
76559+}
76560+
76561+int
76562+gr_acl_handle_filldir(const struct file *file, const char *name,
76563+ const int namelen, const u64 ino)
76564+{
76565+ return 1;
76566+}
76567+
76568+int
76569+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76570+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76571+{
76572+ return 1;
76573+}
76574+
76575+int
76576+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
76577+{
76578+ return 0;
76579+}
76580+
76581+int
76582+gr_search_accept(const struct socket *sock)
76583+{
76584+ return 0;
76585+}
76586+
76587+int
76588+gr_search_listen(const struct socket *sock)
76589+{
76590+ return 0;
76591+}
76592+
76593+int
76594+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
76595+{
76596+ return 0;
76597+}
76598+
76599+__u32
76600+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
76601+{
76602+ return 1;
76603+}
76604+
76605+__u32
76606+gr_acl_handle_creat(const struct dentry * dentry,
76607+ const struct dentry * p_dentry,
76608+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
76609+ const int imode)
76610+{
76611+ return 1;
76612+}
76613+
76614+void
76615+gr_acl_handle_exit(void)
76616+{
76617+ return;
76618+}
76619+
76620+int
76621+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
76622+{
76623+ return 1;
76624+}
76625+
76626+void
76627+gr_set_role_label(const kuid_t uid, const kgid_t gid)
76628+{
76629+ return;
76630+}
76631+
76632+int
76633+gr_acl_handle_procpidmem(const struct task_struct *task)
76634+{
76635+ return 0;
76636+}
76637+
76638+int
76639+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
76640+{
76641+ return 0;
76642+}
76643+
76644+int
76645+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
76646+{
76647+ return 0;
76648+}
76649+
76650+int
76651+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
76652+{
76653+ return 0;
76654+}
76655+
76656+int
76657+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
76658+{
76659+ return 0;
76660+}
76661+
76662+int gr_acl_enable_at_secure(void)
76663+{
76664+ return 0;
76665+}
76666+
76667+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
76668+{
76669+ return dentry->d_sb->s_dev;
76670+}
76671+
76672+u64 gr_get_ino_from_dentry(struct dentry *dentry)
76673+{
76674+ return dentry->d_inode->i_ino;
76675+}
76676+
76677+void gr_put_exec_file(struct task_struct *task)
76678+{
76679+ return;
76680+}
76681+
76682+#ifdef CONFIG_SECURITY
76683+EXPORT_SYMBOL_GPL(gr_check_user_change);
76684+EXPORT_SYMBOL_GPL(gr_check_group_change);
76685+#endif
76686diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
76687new file mode 100644
76688index 0000000..14638ff
76689--- /dev/null
76690+++ b/grsecurity/grsec_exec.c
76691@@ -0,0 +1,188 @@
76692+#include <linux/kernel.h>
76693+#include <linux/sched.h>
76694+#include <linux/file.h>
76695+#include <linux/binfmts.h>
76696+#include <linux/fs.h>
76697+#include <linux/types.h>
76698+#include <linux/grdefs.h>
76699+#include <linux/grsecurity.h>
76700+#include <linux/grinternal.h>
76701+#include <linux/capability.h>
76702+#include <linux/module.h>
76703+#include <linux/compat.h>
76704+
76705+#include <asm/uaccess.h>
76706+
76707+#ifdef CONFIG_GRKERNSEC_EXECLOG
76708+static char gr_exec_arg_buf[132];
76709+static DEFINE_MUTEX(gr_exec_arg_mutex);
76710+#endif
76711+
76712+struct user_arg_ptr {
76713+#ifdef CONFIG_COMPAT
76714+ bool is_compat;
76715+#endif
76716+ union {
76717+ const char __user *const __user *native;
76718+#ifdef CONFIG_COMPAT
76719+ const compat_uptr_t __user *compat;
76720+#endif
76721+ } ptr;
76722+};
76723+
76724+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
76725+
76726+void
76727+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
76728+{
76729+#ifdef CONFIG_GRKERNSEC_EXECLOG
76730+ char *grarg = gr_exec_arg_buf;
76731+ unsigned int i, x, execlen = 0;
76732+ char c;
76733+
76734+ if (!((grsec_enable_execlog && grsec_enable_group &&
76735+ in_group_p(grsec_audit_gid))
76736+ || (grsec_enable_execlog && !grsec_enable_group)))
76737+ return;
76738+
76739+ mutex_lock(&gr_exec_arg_mutex);
76740+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
76741+
76742+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
76743+ const char __user *p;
76744+ unsigned int len;
76745+
76746+ p = get_user_arg_ptr(argv, i);
76747+ if (IS_ERR(p))
76748+ goto log;
76749+
76750+ len = strnlen_user(p, 128 - execlen);
76751+ if (len > 128 - execlen)
76752+ len = 128 - execlen;
76753+ else if (len > 0)
76754+ len--;
76755+ if (copy_from_user(grarg + execlen, p, len))
76756+ goto log;
76757+
76758+ /* rewrite unprintable characters */
76759+ for (x = 0; x < len; x++) {
76760+ c = *(grarg + execlen + x);
76761+ if (c < 32 || c > 126)
76762+ *(grarg + execlen + x) = ' ';
76763+ }
76764+
76765+ execlen += len;
76766+ *(grarg + execlen) = ' ';
76767+ *(grarg + execlen + 1) = '\0';
76768+ execlen++;
76769+ }
76770+
76771+ log:
76772+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
76773+ bprm->file->f_path.mnt, grarg);
76774+ mutex_unlock(&gr_exec_arg_mutex);
76775+#endif
76776+ return;
76777+}
76778+
76779+#ifdef CONFIG_GRKERNSEC
76780+extern int gr_acl_is_capable(const int cap);
76781+extern int gr_acl_is_capable_nolog(const int cap);
76782+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76783+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
76784+extern int gr_chroot_is_capable(const int cap);
76785+extern int gr_chroot_is_capable_nolog(const int cap);
76786+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76787+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
76788+#endif
76789+
76790+const char *captab_log[] = {
76791+ "CAP_CHOWN",
76792+ "CAP_DAC_OVERRIDE",
76793+ "CAP_DAC_READ_SEARCH",
76794+ "CAP_FOWNER",
76795+ "CAP_FSETID",
76796+ "CAP_KILL",
76797+ "CAP_SETGID",
76798+ "CAP_SETUID",
76799+ "CAP_SETPCAP",
76800+ "CAP_LINUX_IMMUTABLE",
76801+ "CAP_NET_BIND_SERVICE",
76802+ "CAP_NET_BROADCAST",
76803+ "CAP_NET_ADMIN",
76804+ "CAP_NET_RAW",
76805+ "CAP_IPC_LOCK",
76806+ "CAP_IPC_OWNER",
76807+ "CAP_SYS_MODULE",
76808+ "CAP_SYS_RAWIO",
76809+ "CAP_SYS_CHROOT",
76810+ "CAP_SYS_PTRACE",
76811+ "CAP_SYS_PACCT",
76812+ "CAP_SYS_ADMIN",
76813+ "CAP_SYS_BOOT",
76814+ "CAP_SYS_NICE",
76815+ "CAP_SYS_RESOURCE",
76816+ "CAP_SYS_TIME",
76817+ "CAP_SYS_TTY_CONFIG",
76818+ "CAP_MKNOD",
76819+ "CAP_LEASE",
76820+ "CAP_AUDIT_WRITE",
76821+ "CAP_AUDIT_CONTROL",
76822+ "CAP_SETFCAP",
76823+ "CAP_MAC_OVERRIDE",
76824+ "CAP_MAC_ADMIN",
76825+ "CAP_SYSLOG",
76826+ "CAP_WAKE_ALARM",
76827+ "CAP_BLOCK_SUSPEND"
76828+};
76829+
76830+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
76831+
76832+int gr_is_capable(const int cap)
76833+{
76834+#ifdef CONFIG_GRKERNSEC
76835+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
76836+ return 1;
76837+ return 0;
76838+#else
76839+ return 1;
76840+#endif
76841+}
76842+
76843+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76844+{
76845+#ifdef CONFIG_GRKERNSEC
76846+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
76847+ return 1;
76848+ return 0;
76849+#else
76850+ return 1;
76851+#endif
76852+}
76853+
76854+int gr_is_capable_nolog(const int cap)
76855+{
76856+#ifdef CONFIG_GRKERNSEC
76857+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
76858+ return 1;
76859+ return 0;
76860+#else
76861+ return 1;
76862+#endif
76863+}
76864+
76865+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
76866+{
76867+#ifdef CONFIG_GRKERNSEC
76868+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
76869+ return 1;
76870+ return 0;
76871+#else
76872+ return 1;
76873+#endif
76874+}
76875+
76876+EXPORT_SYMBOL_GPL(gr_is_capable);
76877+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
76878+EXPORT_SYMBOL_GPL(gr_task_is_capable);
76879+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
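[Editor's note] gr_handle_exec_args() above flattens argv into a fixed 132-byte buffer, capping the payload at 128 bytes and rewriting unprintable bytes to spaces before logging. A standalone userspace version of the same loop, with plain strings standing in for the get_user_arg_ptr()/copy_from_user() steps:

/* arglog_sketch.c -- argv flattening as done in gr_handle_exec_args(). */
#include <stdio.h>
#include <string.h>

static char buf[132];

static void flatten_args(int argc, char **argv)
{
	unsigned int i, x, execlen = 0;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < (unsigned int)argc && execlen < 128; i++) {
		unsigned int len = strlen(argv[i]);

		if (len > 128 - execlen)
			len = 128 - execlen;
		memcpy(buf + execlen, argv[i], len);
		for (x = 0; x < len; x++)	/* rewrite unprintable characters */
			if (buf[execlen + x] < 32 || buf[execlen + x] > 126)
				buf[execlen + x] = ' ';
		execlen += len;
		buf[execlen] = ' ';		/* argument separator */
		buf[execlen + 1] = '\0';
		execlen++;
	}
}

int main(int argc, char **argv)
{
	flatten_args(argc, argv);
	printf("exec log line: %s\n", buf);
	return 0;
}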
76880diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
76881new file mode 100644
76882index 0000000..06cc6ea
76883--- /dev/null
76884+++ b/grsecurity/grsec_fifo.c
76885@@ -0,0 +1,24 @@
76886+#include <linux/kernel.h>
76887+#include <linux/sched.h>
76888+#include <linux/fs.h>
76889+#include <linux/file.h>
76890+#include <linux/grinternal.h>
76891+
76892+int
76893+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
76894+ const struct dentry *dir, const int flag, const int acc_mode)
76895+{
76896+#ifdef CONFIG_GRKERNSEC_FIFO
76897+ const struct cred *cred = current_cred();
76898+
76899+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
76900+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
76901+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
76902+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
76903+ if (!inode_permission(dentry->d_inode, acc_mode))
76904+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
76905+ return -EACCES;
76906+ }
76907+#endif
76908+ return 0;
76909+}
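[Editor's note] The FIFO check above denies opening a FIFO in a sticky directory when the FIFO is owned by neither the directory's owner nor the opener (and O_EXCL is unset), blocking the classic /tmp FIFO data-theft trick. The same predicate restated over stat(2) results, as an illustrative sketch (getuid() approximates the kernel's fsuid):

/* fifo_gate.c -- the grsec_fifo deny condition over stat buffers. */
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

static int fifo_denied(const struct stat *fifo, const struct stat *dir,
		       uid_t fsuid, int flags)
{
	return S_ISFIFO(fifo->st_mode) &&
	       !(flags & O_EXCL) &&
	       (dir->st_mode & S_ISVTX) &&
	       fifo->st_uid != dir->st_uid &&
	       fsuid != fifo->st_uid;
}

int main(int argc, char **argv)
{
	struct stat fifo, dir;

	if (argc != 3 || stat(argv[1], &fifo) || stat(argv[2], &dir)) {
		fprintf(stderr, "usage: fifo_gate <fifo> <dir>\n");
		return 2;
	}
	printf("%s\n", fifo_denied(&fifo, &dir, getuid(), 0) ?
	       "would be denied (-EACCES)" : "would be allowed");
	return 0;
}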
76910diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
76911new file mode 100644
76912index 0000000..8ca18bf
76913--- /dev/null
76914+++ b/grsecurity/grsec_fork.c
76915@@ -0,0 +1,23 @@
76916+#include <linux/kernel.h>
76917+#include <linux/sched.h>
76918+#include <linux/grsecurity.h>
76919+#include <linux/grinternal.h>
76920+#include <linux/errno.h>
76921+
76922+void
76923+gr_log_forkfail(const int retval)
76924+{
76925+#ifdef CONFIG_GRKERNSEC_FORKFAIL
76926+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
76927+ switch (retval) {
76928+ case -EAGAIN:
76929+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
76930+ break;
76931+ case -ENOMEM:
76932+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
76933+ break;
76934+ }
76935+ }
76936+#endif
76937+ return;
76938+}
76939diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
76940new file mode 100644
76941index 0000000..4ed9e7d
76942--- /dev/null
76943+++ b/grsecurity/grsec_init.c
76944@@ -0,0 +1,290 @@
76945+#include <linux/kernel.h>
76946+#include <linux/sched.h>
76947+#include <linux/mm.h>
76948+#include <linux/gracl.h>
76949+#include <linux/slab.h>
76950+#include <linux/vmalloc.h>
76951+#include <linux/percpu.h>
76952+#include <linux/module.h>
76953+
76954+int grsec_enable_ptrace_readexec;
76955+int grsec_enable_setxid;
76956+int grsec_enable_symlinkown;
76957+kgid_t grsec_symlinkown_gid;
76958+int grsec_enable_brute;
76959+int grsec_enable_link;
76960+int grsec_enable_dmesg;
76961+int grsec_enable_harden_ptrace;
76962+int grsec_enable_harden_ipc;
76963+int grsec_enable_fifo;
76964+int grsec_enable_execlog;
76965+int grsec_enable_signal;
76966+int grsec_enable_forkfail;
76967+int grsec_enable_audit_ptrace;
76968+int grsec_enable_time;
76969+int grsec_enable_group;
76970+kgid_t grsec_audit_gid;
76971+int grsec_enable_chdir;
76972+int grsec_enable_mount;
76973+int grsec_enable_rofs;
76974+int grsec_deny_new_usb;
76975+int grsec_enable_chroot_findtask;
76976+int grsec_enable_chroot_mount;
76977+int grsec_enable_chroot_shmat;
76978+int grsec_enable_chroot_fchdir;
76979+int grsec_enable_chroot_double;
76980+int grsec_enable_chroot_pivot;
76981+int grsec_enable_chroot_chdir;
76982+int grsec_enable_chroot_chmod;
76983+int grsec_enable_chroot_mknod;
76984+int grsec_enable_chroot_nice;
76985+int grsec_enable_chroot_execlog;
76986+int grsec_enable_chroot_caps;
76987+int grsec_enable_chroot_rename;
76988+int grsec_enable_chroot_sysctl;
76989+int grsec_enable_chroot_unix;
76990+int grsec_enable_tpe;
76991+kgid_t grsec_tpe_gid;
76992+int grsec_enable_blackhole;
76993+#ifdef CONFIG_IPV6_MODULE
76994+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
76995+#endif
76996+int grsec_lastack_retries;
76997+int grsec_enable_tpe_all;
76998+int grsec_enable_tpe_invert;
76999+int grsec_enable_socket_all;
77000+kgid_t grsec_socket_all_gid;
77001+int grsec_enable_socket_client;
77002+kgid_t grsec_socket_client_gid;
77003+int grsec_enable_socket_server;
77004+kgid_t grsec_socket_server_gid;
77005+int grsec_resource_logging;
77006+int grsec_disable_privio;
77007+int grsec_enable_log_rwxmaps;
77008+int grsec_lock;
77009+
77010+DEFINE_SPINLOCK(grsec_alert_lock);
77011+unsigned long grsec_alert_wtime = 0;
77012+unsigned long grsec_alert_fyet = 0;
77013+
77014+DEFINE_SPINLOCK(grsec_audit_lock);
77015+
77016+DEFINE_RWLOCK(grsec_exec_file_lock);
77017+
77018+char *gr_shared_page[4];
77019+
77020+char *gr_alert_log_fmt;
77021+char *gr_audit_log_fmt;
77022+char *gr_alert_log_buf;
77023+char *gr_audit_log_buf;
77024+
77025+extern struct gr_arg *gr_usermode;
77026+extern unsigned char *gr_system_salt;
77027+extern unsigned char *gr_system_sum;
77028+
77029+void __init
77030+grsecurity_init(void)
77031+{
77032+ int j;
77033+ /* create the per-cpu shared pages */
77034+
77035+#ifdef CONFIG_X86
77036+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
77037+#endif
77038+
77039+ for (j = 0; j < 4; j++) {
77040+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
77041+ if (gr_shared_page[j] == NULL) {
77042+ panic("Unable to allocate grsecurity shared page");
77043+ return;
77044+ }
77045+ }
77046+
77047+ /* allocate log buffers */
77048+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
77049+ if (!gr_alert_log_fmt) {
77050+ panic("Unable to allocate grsecurity alert log format buffer");
77051+ return;
77052+ }
77053+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
77054+ if (!gr_audit_log_fmt) {
77055+ panic("Unable to allocate grsecurity audit log format buffer");
77056+ return;
77057+ }
77058+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77059+ if (!gr_alert_log_buf) {
77060+ panic("Unable to allocate grsecurity alert log buffer");
77061+ return;
77062+ }
77063+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77064+ if (!gr_audit_log_buf) {
77065+ panic("Unable to allocate grsecurity audit log buffer");
77066+ return;
77067+ }
77068+
77069+ /* allocate memory for authentication structure */
77070+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
77071+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
77072+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
77073+
77074+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
77075+ panic("Unable to allocate grsecurity authentication structure");
77076+ return;
77077+ }
77078+
77079+#ifdef CONFIG_GRKERNSEC_IO
77080+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
77081+ grsec_disable_privio = 1;
77082+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77083+ grsec_disable_privio = 1;
77084+#else
77085+ grsec_disable_privio = 0;
77086+#endif
77087+#endif
77088+
77089+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
77090+ /* for backward compatibility, tpe_invert always defaults to on if
77091+ enabled in the kernel
77092+ */
77093+ grsec_enable_tpe_invert = 1;
77094+#endif
77095+
77096+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77097+#ifndef CONFIG_GRKERNSEC_SYSCTL
77098+ grsec_lock = 1;
77099+#endif
77100+
77101+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77102+ grsec_enable_log_rwxmaps = 1;
77103+#endif
77104+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
77105+ grsec_enable_group = 1;
77106+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
77107+#endif
77108+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77109+ grsec_enable_ptrace_readexec = 1;
77110+#endif
77111+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77112+ grsec_enable_chdir = 1;
77113+#endif
77114+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
77115+ grsec_enable_harden_ptrace = 1;
77116+#endif
77117+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77118+ grsec_enable_harden_ipc = 1;
77119+#endif
77120+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77121+ grsec_enable_mount = 1;
77122+#endif
77123+#ifdef CONFIG_GRKERNSEC_LINK
77124+ grsec_enable_link = 1;
77125+#endif
77126+#ifdef CONFIG_GRKERNSEC_BRUTE
77127+ grsec_enable_brute = 1;
77128+#endif
77129+#ifdef CONFIG_GRKERNSEC_DMESG
77130+ grsec_enable_dmesg = 1;
77131+#endif
77132+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77133+ grsec_enable_blackhole = 1;
77134+ grsec_lastack_retries = 4;
77135+#endif
77136+#ifdef CONFIG_GRKERNSEC_FIFO
77137+ grsec_enable_fifo = 1;
77138+#endif
77139+#ifdef CONFIG_GRKERNSEC_EXECLOG
77140+ grsec_enable_execlog = 1;
77141+#endif
77142+#ifdef CONFIG_GRKERNSEC_SETXID
77143+ grsec_enable_setxid = 1;
77144+#endif
77145+#ifdef CONFIG_GRKERNSEC_SIGNAL
77146+ grsec_enable_signal = 1;
77147+#endif
77148+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77149+ grsec_enable_forkfail = 1;
77150+#endif
77151+#ifdef CONFIG_GRKERNSEC_TIME
77152+ grsec_enable_time = 1;
77153+#endif
77154+#ifdef CONFIG_GRKERNSEC_RESLOG
77155+ grsec_resource_logging = 1;
77156+#endif
77157+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77158+ grsec_enable_chroot_findtask = 1;
77159+#endif
77160+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77161+ grsec_enable_chroot_unix = 1;
77162+#endif
77163+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77164+ grsec_enable_chroot_mount = 1;
77165+#endif
77166+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77167+ grsec_enable_chroot_fchdir = 1;
77168+#endif
77169+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77170+ grsec_enable_chroot_shmat = 1;
77171+#endif
77172+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77173+ grsec_enable_audit_ptrace = 1;
77174+#endif
77175+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77176+ grsec_enable_chroot_double = 1;
77177+#endif
77178+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77179+ grsec_enable_chroot_pivot = 1;
77180+#endif
77181+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77182+ grsec_enable_chroot_chdir = 1;
77183+#endif
77184+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77185+ grsec_enable_chroot_chmod = 1;
77186+#endif
77187+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77188+ grsec_enable_chroot_mknod = 1;
77189+#endif
77190+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77191+ grsec_enable_chroot_nice = 1;
77192+#endif
77193+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77194+ grsec_enable_chroot_execlog = 1;
77195+#endif
77196+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77197+ grsec_enable_chroot_caps = 1;
77198+#endif
77199+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77200+ grsec_enable_chroot_rename = 1;
77201+#endif
77202+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77203+ grsec_enable_chroot_sysctl = 1;
77204+#endif
77205+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77206+ grsec_enable_symlinkown = 1;
77207+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
77208+#endif
77209+#ifdef CONFIG_GRKERNSEC_TPE
77210+ grsec_enable_tpe = 1;
77211+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
77212+#ifdef CONFIG_GRKERNSEC_TPE_ALL
77213+ grsec_enable_tpe_all = 1;
77214+#endif
77215+#endif
77216+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
77217+ grsec_enable_socket_all = 1;
77218+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
77219+#endif
77220+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
77221+ grsec_enable_socket_client = 1;
77222+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
77223+#endif
77224+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
77225+ grsec_enable_socket_server = 1;
77226+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
77227+#endif
77228+#endif
77229+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
77230+ grsec_deny_new_usb = 1;
77231+#endif
77232+
77233+ return;
77234+}
77235diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
77236new file mode 100644
77237index 0000000..1773300
77238--- /dev/null
77239+++ b/grsecurity/grsec_ipc.c
77240@@ -0,0 +1,48 @@
77241+#include <linux/kernel.h>
77242+#include <linux/mm.h>
77243+#include <linux/sched.h>
77244+#include <linux/file.h>
77245+#include <linux/ipc.h>
77246+#include <linux/ipc_namespace.h>
77247+#include <linux/grsecurity.h>
77248+#include <linux/grinternal.h>
77249+
77250+int
77251+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
77252+{
77253+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77254+ int write;
77255+ int orig_granted_mode;
77256+ kuid_t euid;
77257+ kgid_t egid;
77258+
77259+ if (!grsec_enable_harden_ipc)
77260+ return 1;
77261+
77262+ euid = current_euid();
77263+ egid = current_egid();
77264+
77265+ write = requested_mode & 00002;
77266+ orig_granted_mode = ipcp->mode;
77267+
77268+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
77269+ orig_granted_mode >>= 6;
77270+ else {
77271+ /* if likely wrong permissions, lock to user */
77272+ if (orig_granted_mode & 0007)
77273+ orig_granted_mode = 0;
77274+		/* otherwise do an egid-only check */
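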
77275+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
77276+ orig_granted_mode >>= 3;
77277+ /* otherwise, no access */
77278+ else
77279+ orig_granted_mode = 0;
77280+ }
77281+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
77282+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
77283+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
77284+ return 0;
77285+ }
77286+#endif
77287+ return 1;
77288+}
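[Editor's note] gr_ipc_permitted() above recomputes the mode an IPC object "should" have granted: owners keep the owner bits; group members get the group bits only when no world bits are set (world bits on an IPC object are treated as a likely misconfiguration and lock access to the owner); everyone else gets nothing. Denial then fires when the kernel's granted_mode allowed bits the recomputed mode would not, absent CAP_IPC_OWNER. A sketch of the recomputation:

/* ipc_mode.c -- the hardened-mode recomputation from gr_ipc_permitted(). */
#include <stdio.h>

static int hardened_mode(int mode, int is_owner, int in_group)
{
	if (is_owner)
		return (mode >> 6) & 07;
	if (mode & 0007)	/* likely-wrong world bits: lock to owner */
		return 0;
	if (in_group)
		return (mode >> 3) & 07;
	return 0;
}

int main(void)
{
	/* a 0666 segment: group access is refused because world bits exist */
	printf("owner on 0666: %o\n", hardened_mode(0666, 1, 0));
	printf("group on 0666: %o\n", hardened_mode(0666, 0, 1));
	printf("other on 0666: %o\n", hardened_mode(0666, 0, 0));
	/* a 0660 segment: group access survives */
	printf("group on 0660: %o\n", hardened_mode(0660, 0, 1));
	return 0;
}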
77289diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
77290new file mode 100644
77291index 0000000..5e05e20
77292--- /dev/null
77293+++ b/grsecurity/grsec_link.c
77294@@ -0,0 +1,58 @@
77295+#include <linux/kernel.h>
77296+#include <linux/sched.h>
77297+#include <linux/fs.h>
77298+#include <linux/file.h>
77299+#include <linux/grinternal.h>
77300+
77301+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
77302+{
77303+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77304+ const struct inode *link_inode = link->dentry->d_inode;
77305+
77306+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
77307+ /* ignore root-owned links, e.g. /proc/self */
77308+ gr_is_global_nonroot(link_inode->i_uid) && target &&
77309+ !uid_eq(link_inode->i_uid, target->i_uid)) {
77310+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
77311+ return 1;
77312+ }
77313+#endif
77314+ return 0;
77315+}
77316+
77317+int
77318+gr_handle_follow_link(const struct inode *parent,
77319+ const struct inode *inode,
77320+ const struct dentry *dentry, const struct vfsmount *mnt)
77321+{
77322+#ifdef CONFIG_GRKERNSEC_LINK
77323+ const struct cred *cred = current_cred();
77324+
77325+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
77326+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
77327+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
77328+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
77329+ return -EACCES;
77330+ }
77331+#endif
77332+ return 0;
77333+}
77334+
77335+int
77336+gr_handle_hardlink(const struct dentry *dentry,
77337+ const struct vfsmount *mnt,
77338+ struct inode *inode, const int mode, const struct filename *to)
77339+{
77340+#ifdef CONFIG_GRKERNSEC_LINK
77341+ const struct cred *cred = current_cred();
77342+
77343+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
77344+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
77345+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
77346+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
77347+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
77348+ return -EPERM;
77349+ }
77350+#endif
77351+ return 0;
77352+}
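[Editor's note] gr_handle_follow_link() above refuses to follow a symlink in a world-writable sticky directory when the follower does not own the link and the link's owner does not own the directory, defeating /tmp symlink races. The same test as a pure predicate over stat buffers, illustrative only:

/* follow_gate.c -- the symlink-following deny condition. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

static int follow_denied(const struct stat *dir, const struct stat *lnk,
			 uid_t fsuid)
{
	return S_ISLNK(lnk->st_mode) &&
	       (dir->st_mode & S_ISVTX) &&
	       dir->st_uid != lnk->st_uid &&
	       (dir->st_mode & S_IWOTH) &&
	       fsuid != lnk->st_uid;
}

int main(void)
{
	struct stat dir = { 0 }, lnk = { 0 };

	dir.st_mode = S_IFDIR | S_ISVTX | 0777;	/* /tmp-like directory */
	dir.st_uid = 0;
	lnk.st_mode = S_IFLNK | 0777;		/* attacker-planted link */
	lnk.st_uid = 1001;
	printf("victim (uid 1000): %s\n",
	       follow_denied(&dir, &lnk, 1000) ? "denied" : "followed");
	printf("owner (uid 1001): %s\n",
	       follow_denied(&dir, &lnk, 1001) ? "denied" : "followed");
	return 0;
}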
77353diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
77354new file mode 100644
77355index 0000000..dbe0a6b
77356--- /dev/null
77357+++ b/grsecurity/grsec_log.c
77358@@ -0,0 +1,341 @@
77359+#include <linux/kernel.h>
77360+#include <linux/sched.h>
77361+#include <linux/file.h>
77362+#include <linux/tty.h>
77363+#include <linux/fs.h>
77364+#include <linux/mm.h>
77365+#include <linux/grinternal.h>
77366+
77367+#ifdef CONFIG_TREE_PREEMPT_RCU
77368+#define DISABLE_PREEMPT() preempt_disable()
77369+#define ENABLE_PREEMPT() preempt_enable()
77370+#else
77371+#define DISABLE_PREEMPT()
77372+#define ENABLE_PREEMPT()
77373+#endif
77374+
77375+#define BEGIN_LOCKS(x) \
77376+ DISABLE_PREEMPT(); \
77377+ rcu_read_lock(); \
77378+ read_lock(&tasklist_lock); \
77379+ read_lock(&grsec_exec_file_lock); \
77380+ if (x != GR_DO_AUDIT) \
77381+ spin_lock(&grsec_alert_lock); \
77382+ else \
77383+ spin_lock(&grsec_audit_lock)
77384+
77385+#define END_LOCKS(x) \
77386+ if (x != GR_DO_AUDIT) \
77387+ spin_unlock(&grsec_alert_lock); \
77388+ else \
77389+ spin_unlock(&grsec_audit_lock); \
77390+ read_unlock(&grsec_exec_file_lock); \
77391+ read_unlock(&tasklist_lock); \
77392+ rcu_read_unlock(); \
77393+ ENABLE_PREEMPT(); \
77394+ if (x == GR_DONT_AUDIT) \
77395+ gr_handle_alertkill(current)
77396+
77397+enum {
77398+ FLOODING,
77399+ NO_FLOODING
77400+};
77401+
77402+extern char *gr_alert_log_fmt;
77403+extern char *gr_audit_log_fmt;
77404+extern char *gr_alert_log_buf;
77405+extern char *gr_audit_log_buf;
77406+
77407+static int gr_log_start(int audit)
77408+{
77409+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
77410+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
77411+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77412+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
77413+ unsigned long curr_secs = get_seconds();
77414+
77415+ if (audit == GR_DO_AUDIT)
77416+ goto set_fmt;
77417+
77418+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
77419+ grsec_alert_wtime = curr_secs;
77420+ grsec_alert_fyet = 0;
77421+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
77422+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
77423+ grsec_alert_fyet++;
77424+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
77425+ grsec_alert_wtime = curr_secs;
77426+ grsec_alert_fyet++;
77427+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
77428+ return FLOODING;
77429+ }
77430+ else return FLOODING;
77431+
77432+set_fmt:
77433+#endif
77434+ memset(buf, 0, PAGE_SIZE);
77435+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
77436+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
77437+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77438+ } else if (current->signal->curr_ip) {
77439+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
77440+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
77441+ } else if (gr_acl_is_enabled()) {
77442+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
77443+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77444+ } else {
77445+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
77446+ strcpy(buf, fmt);
77447+ }
77448+
77449+ return NO_FLOODING;
77450+}
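[Editor's note] gr_log_start() above rate-limits alerts (audit messages bypass the limiter): a burst of CONFIG_GRKERNSEC_FLOODBURST alerts is allowed per CONFIG_GRKERNSEC_FLOODTIME-second window, one "logging disabled" notice is printed when the burst is exhausted, and further alerts are dropped until a fresh window opens. A userspace sketch with illustrative constants:

/* flood_gate.c -- the alert flood limiter from gr_log_start(). */
#include <stdio.h>
#include <time.h>

#define FLOODTIME  10	/* illustrative stand-in for the Kconfig values */
#define FLOODBURST 6

static time_t wtime;		/* mirrors grsec_alert_wtime */
static unsigned long fyet;	/* mirrors grsec_alert_fyet */

static int flooding(void)
{
	time_t now = time(NULL);

	if (!wtime || now > wtime + FLOODTIME) {	/* fresh window */
		wtime = now;
		fyet = 0;
	} else if (now <= wtime + FLOODTIME && fyet < FLOODBURST) {
		fyet++;
	} else if (fyet == FLOODBURST) {		/* burst just exhausted */
		wtime = now;
		fyet++;
		printf("more alerts, logging disabled for %d seconds\n",
		       FLOODTIME);
		return 1;
	} else {
		return 1;				/* already silenced */
	}
	return 0;
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		printf("alert %d: %s\n", i, flooding() ? "dropped" : "logged");
	return 0;
}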
77451+
77452+static void gr_log_middle(int audit, const char *msg, va_list ap)
77453+ __attribute__ ((format (printf, 2, 0)));
77454+
77455+static void gr_log_middle(int audit, const char *msg, va_list ap)
77456+{
77457+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77458+ unsigned int len = strlen(buf);
77459+
77460+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77461+
77462+ return;
77463+}
77464+
77465+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77466+ __attribute__ ((format (printf, 2, 3)));
77467+
77468+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77469+{
77470+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77471+ unsigned int len = strlen(buf);
77472+ va_list ap;
77473+
77474+ va_start(ap, msg);
77475+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77476+ va_end(ap);
77477+
77478+ return;
77479+}
77480+
77481+static void gr_log_end(int audit, int append_default)
77482+{
77483+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77484+ if (append_default) {
77485+ struct task_struct *task = current;
77486+ struct task_struct *parent = task->real_parent;
77487+ const struct cred *cred = __task_cred(task);
77488+ const struct cred *pcred = __task_cred(parent);
77489+ unsigned int len = strlen(buf);
77490+
77491+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77492+ }
77493+
77494+ printk("%s\n", buf);
77495+
77496+ return;
77497+}
77498+
77499+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
77500+{
77501+ int logtype;
77502+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
77503+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
77504+ void *voidptr = NULL;
77505+ int num1 = 0, num2 = 0;
77506+ unsigned long ulong1 = 0, ulong2 = 0;
77507+ struct dentry *dentry = NULL;
77508+ struct vfsmount *mnt = NULL;
77509+ struct file *file = NULL;
77510+ struct task_struct *task = NULL;
77511+ struct vm_area_struct *vma = NULL;
77512+ const struct cred *cred, *pcred;
77513+ va_list ap;
77514+
77515+ BEGIN_LOCKS(audit);
77516+ logtype = gr_log_start(audit);
77517+ if (logtype == FLOODING) {
77518+ END_LOCKS(audit);
77519+ return;
77520+ }
77521+ va_start(ap, argtypes);
77522+ switch (argtypes) {
77523+ case GR_TTYSNIFF:
77524+ task = va_arg(ap, struct task_struct *);
77525+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
77526+ break;
77527+ case GR_SYSCTL_HIDDEN:
77528+ str1 = va_arg(ap, char *);
77529+ gr_log_middle_varargs(audit, msg, result, str1);
77530+ break;
77531+ case GR_RBAC:
77532+ dentry = va_arg(ap, struct dentry *);
77533+ mnt = va_arg(ap, struct vfsmount *);
77534+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
77535+ break;
77536+ case GR_RBAC_STR:
77537+ dentry = va_arg(ap, struct dentry *);
77538+ mnt = va_arg(ap, struct vfsmount *);
77539+ str1 = va_arg(ap, char *);
77540+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
77541+ break;
77542+ case GR_STR_RBAC:
77543+ str1 = va_arg(ap, char *);
77544+ dentry = va_arg(ap, struct dentry *);
77545+ mnt = va_arg(ap, struct vfsmount *);
77546+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
77547+ break;
77548+ case GR_RBAC_MODE2:
77549+ dentry = va_arg(ap, struct dentry *);
77550+ mnt = va_arg(ap, struct vfsmount *);
77551+ str1 = va_arg(ap, char *);
77552+ str2 = va_arg(ap, char *);
77553+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
77554+ break;
77555+ case GR_RBAC_MODE3:
77556+ dentry = va_arg(ap, struct dentry *);
77557+ mnt = va_arg(ap, struct vfsmount *);
77558+ str1 = va_arg(ap, char *);
77559+ str2 = va_arg(ap, char *);
77560+ str3 = va_arg(ap, char *);
77561+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
77562+ break;
77563+ case GR_FILENAME:
77564+ dentry = va_arg(ap, struct dentry *);
77565+ mnt = va_arg(ap, struct vfsmount *);
77566+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
77567+ break;
77568+ case GR_STR_FILENAME:
77569+ str1 = va_arg(ap, char *);
77570+ dentry = va_arg(ap, struct dentry *);
77571+ mnt = va_arg(ap, struct vfsmount *);
77572+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
77573+ break;
77574+ case GR_FILENAME_STR:
77575+ dentry = va_arg(ap, struct dentry *);
77576+ mnt = va_arg(ap, struct vfsmount *);
77577+ str1 = va_arg(ap, char *);
77578+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
77579+ break;
77580+ case GR_FILENAME_TWO_INT:
77581+ dentry = va_arg(ap, struct dentry *);
77582+ mnt = va_arg(ap, struct vfsmount *);
77583+ num1 = va_arg(ap, int);
77584+ num2 = va_arg(ap, int);
77585+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
77586+ break;
77587+ case GR_FILENAME_TWO_INT_STR:
77588+ dentry = va_arg(ap, struct dentry *);
77589+ mnt = va_arg(ap, struct vfsmount *);
77590+ num1 = va_arg(ap, int);
77591+ num2 = va_arg(ap, int);
77592+ str1 = va_arg(ap, char *);
77593+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
77594+ break;
77595+ case GR_TEXTREL:
77596+ file = va_arg(ap, struct file *);
77597+ ulong1 = va_arg(ap, unsigned long);
77598+ ulong2 = va_arg(ap, unsigned long);
77599+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
77600+ break;
77601+ case GR_PTRACE:
77602+ task = va_arg(ap, struct task_struct *);
77603+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
77604+ break;
77605+ case GR_RESOURCE:
77606+ task = va_arg(ap, struct task_struct *);
77607+ cred = __task_cred(task);
77608+ pcred = __task_cred(task->real_parent);
77609+ ulong1 = va_arg(ap, unsigned long);
77610+ str1 = va_arg(ap, char *);
77611+ ulong2 = va_arg(ap, unsigned long);
77612+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77613+ break;
77614+ case GR_CAP:
77615+ task = va_arg(ap, struct task_struct *);
77616+ cred = __task_cred(task);
77617+ pcred = __task_cred(task->real_parent);
77618+ str1 = va_arg(ap, char *);
77619+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77620+ break;
77621+ case GR_SIG:
77622+ str1 = va_arg(ap, char *);
77623+ voidptr = va_arg(ap, void *);
77624+ gr_log_middle_varargs(audit, msg, str1, voidptr);
77625+ break;
77626+ case GR_SIG2:
77627+ task = va_arg(ap, struct task_struct *);
77628+ cred = __task_cred(task);
77629+ pcred = __task_cred(task->real_parent);
77630+ num1 = va_arg(ap, int);
77631+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77632+ break;
77633+ case GR_CRASH1:
77634+ task = va_arg(ap, struct task_struct *);
77635+ cred = __task_cred(task);
77636+ pcred = __task_cred(task->real_parent);
77637+ ulong1 = va_arg(ap, unsigned long);
77638+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
77639+ break;
77640+ case GR_CRASH2:
77641+ task = va_arg(ap, struct task_struct *);
77642+ cred = __task_cred(task);
77643+ pcred = __task_cred(task->real_parent);
77644+ ulong1 = va_arg(ap, unsigned long);
77645+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
77646+ break;
77647+ case GR_RWXMAP:
77648+ file = va_arg(ap, struct file *);
77649+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
77650+ break;
77651+ case GR_RWXMAPVMA:
77652+ vma = va_arg(ap, struct vm_area_struct *);
77653+ if (vma->vm_file)
77654+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
77655+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
77656+ str1 = "<stack>";
77657+ else if (vma->vm_start <= current->mm->brk &&
77658+ vma->vm_end >= current->mm->start_brk)
77659+ str1 = "<heap>";
77660+ else
77661+ str1 = "<anonymous mapping>";
77662+ gr_log_middle_varargs(audit, msg, str1);
77663+ break;
77664+ case GR_PSACCT:
77665+ {
77666+ unsigned int wday, cday;
77667+ __u8 whr, chr;
77668+ __u8 wmin, cmin;
77669+ __u8 wsec, csec;
77670+ char cur_tty[64] = { 0 };
77671+ char parent_tty[64] = { 0 };
77672+
77673+ task = va_arg(ap, struct task_struct *);
77674+ wday = va_arg(ap, unsigned int);
77675+ cday = va_arg(ap, unsigned int);
77676+ whr = va_arg(ap, int);
77677+ chr = va_arg(ap, int);
77678+ wmin = va_arg(ap, int);
77679+ cmin = va_arg(ap, int);
77680+ wsec = va_arg(ap, int);
77681+ csec = va_arg(ap, int);
77682+ ulong1 = va_arg(ap, unsigned long);
77683+ cred = __task_cred(task);
77684+ pcred = __task_cred(task->real_parent);
77685+
77686+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77687+ }
77688+ break;
77689+ default:
77690+ gr_log_middle(audit, msg, ap);
77691+ }
77692+ va_end(ap);
77693+ // these don't need DEFAULTSECARGS printed on the end
77694+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
77695+ gr_log_end(audit, 0);
77696+ else
77697+ gr_log_end(audit, 1);
77698+ END_LOCKS(audit);
77699+}
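Editor's note: the switch above is a single variadic sink -- each GR_* argument class tells the logger how many and which types to pull with va_arg, and narrow values such as the __u8 fields in the GR_PSACCT case are fetched as int because of C's default argument promotions. A minimal standalone sketch of the same dispatch-on-argtype pattern (toy_log and its enum are illustrative names, not part of the patch):

#include <stdarg.h>
#include <stdio.h>

enum argtype { LOG_STR, LOG_TWO_INT };	/* stand-ins for the GR_* classes */

/* one variadic entry point; the argtype tag drives the va_arg pulls,
 * mirroring the switch in the logging code above */
static void toy_log(enum argtype t, ...)
{
	va_list ap;

	va_start(ap, t);
	switch (t) {
	case LOG_STR:
		printf("str: %s\n", va_arg(ap, char *));
		break;
	case LOG_TWO_INT: {
		int a = va_arg(ap, int);	/* fetched as int, like the __u8 fields */
		int b = va_arg(ap, int);
		printf("ints: %d %d\n", a, b);
		break;
	}
	}
	va_end(ap);
}

int main(void)
{
	toy_log(LOG_STR, "demo");
	toy_log(LOG_TWO_INT, 3, 7);
	return 0;
}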
77700diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
77701new file mode 100644
77702index 0000000..0e39d8c
77703--- /dev/null
77704+++ b/grsecurity/grsec_mem.c
77705@@ -0,0 +1,48 @@
77706+#include <linux/kernel.h>
77707+#include <linux/sched.h>
77708+#include <linux/mm.h>
77709+#include <linux/mman.h>
77710+#include <linux/module.h>
77711+#include <linux/grinternal.h>
77712+
77713+void gr_handle_msr_write(void)
77714+{
77715+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
77716+ return;
77717+}
77718+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
77719+
77720+void
77721+gr_handle_ioperm(void)
77722+{
77723+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
77724+ return;
77725+}
77726+
77727+void
77728+gr_handle_iopl(void)
77729+{
77730+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
77731+ return;
77732+}
77733+
77734+void
77735+gr_handle_mem_readwrite(u64 from, u64 to)
77736+{
77737+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
77738+ return;
77739+}
77740+
77741+void
77742+gr_handle_vm86(void)
77743+{
77744+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
77745+ return;
77746+}
77747+
77748+void
77749+gr_log_badprocpid(const char *entry)
77750+{
77751+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
77752+ return;
77753+}
77754diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
77755new file mode 100644
77756index 0000000..6f9eb73
77757--- /dev/null
77758+++ b/grsecurity/grsec_mount.c
77759@@ -0,0 +1,65 @@
77760+#include <linux/kernel.h>
77761+#include <linux/sched.h>
77762+#include <linux/mount.h>
77763+#include <linux/major.h>
77764+#include <linux/grsecurity.h>
77765+#include <linux/grinternal.h>
77766+
77767+void
77768+gr_log_remount(const char *devname, const int retval)
77769+{
77770+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77771+ if (grsec_enable_mount && (retval >= 0))
77772+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
77773+#endif
77774+ return;
77775+}
77776+
77777+void
77778+gr_log_unmount(const char *devname, const int retval)
77779+{
77780+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77781+ if (grsec_enable_mount && (retval >= 0))
77782+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
77783+#endif
77784+ return;
77785+}
77786+
77787+void
77788+gr_log_mount(const char *from, struct path *to, const int retval)
77789+{
77790+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77791+ if (grsec_enable_mount && (retval >= 0))
77792+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
77793+#endif
77794+ return;
77795+}
77796+
77797+int
77798+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
77799+{
77800+#ifdef CONFIG_GRKERNSEC_ROFS
77801+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
77802+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
77803+ return -EPERM;
77804+ } else
77805+ return 0;
77806+#endif
77807+ return 0;
77808+}
77809+
77810+int
77811+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
77812+{
77813+#ifdef CONFIG_GRKERNSEC_ROFS
77814+ struct inode *inode = dentry->d_inode;
77815+
77816+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
77817+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
77818+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
77819+ return -EPERM;
77820+ } else
77821+ return 0;
77822+#endif
77823+ return 0;
77824+}
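Editor's note: gr_handle_rofs_blockwrite() refuses writes to block devices, and to character devices whose major number is RAW_MAJOR, while the ROFS policy is active. A userspace sketch of the same device classification, where stat() and major() stand in for the in-kernel inode checks:

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path>\n", argv[0]);
		return 1;
	}
	if (stat(argv[1], &st) != 0) {
		perror("stat");
		return 1;
	}
	/* the kernel test: S_ISBLK, or S_ISCHR with a raw-device major */
	if (S_ISBLK(st.st_mode))
		printf("%s: block device, major %u\n", argv[1], major(st.st_rdev));
	else if (S_ISCHR(st.st_mode))
		printf("%s: char device, major %u\n", argv[1], major(st.st_rdev));
	else
		printf("%s: not a device node\n", argv[1]);
	return 0;
}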
77825diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
77826new file mode 100644
77827index 0000000..6ee9d50
77828--- /dev/null
77829+++ b/grsecurity/grsec_pax.c
77830@@ -0,0 +1,45 @@
77831+#include <linux/kernel.h>
77832+#include <linux/sched.h>
77833+#include <linux/mm.h>
77834+#include <linux/file.h>
77835+#include <linux/grinternal.h>
77836+#include <linux/grsecurity.h>
77837+
77838+void
77839+gr_log_textrel(struct vm_area_struct * vma)
77840+{
77841+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77842+ if (grsec_enable_log_rwxmaps)
77843+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
77844+#endif
77845+ return;
77846+}
77847+
77848+void gr_log_ptgnustack(struct file *file)
77849+{
77850+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77851+ if (grsec_enable_log_rwxmaps)
77852+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
77853+#endif
77854+ return;
77855+}
77856+
77857+void
77858+gr_log_rwxmmap(struct file *file)
77859+{
77860+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77861+ if (grsec_enable_log_rwxmaps)
77862+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
77863+#endif
77864+ return;
77865+}
77866+
77867+void
77868+gr_log_rwxmprotect(struct vm_area_struct *vma)
77869+{
77870+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77871+ if (grsec_enable_log_rwxmaps)
77872+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
77873+#endif
77874+ return;
77875+}
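Editor's note: the four hooks above only classify and log; the event class they describe is a mapping that is writable and executable at once. A small demo of the triggering condition in plain POSIX -- under a kernel enforcing an RWX-map policy this request would be the logged or denied case:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");	/* the denied case under an RWX-map policy */
		return 1;
	}
	printf("got rwx mapping at %p\n", p);
	munmap(p, 4096);
	return 0;
}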
77876diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
77877new file mode 100644
77878index 0000000..2005a3a
77879--- /dev/null
77880+++ b/grsecurity/grsec_proc.c
77881@@ -0,0 +1,20 @@
77882+#include <linux/kernel.h>
77883+#include <linux/sched.h>
77884+#include <linux/grsecurity.h>
77885+#include <linux/grinternal.h>
77886+
77887+int gr_proc_is_restricted(void)
77888+{
77889+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77890+ const struct cred *cred = current_cred();
77891+#endif
77892+
77893+#ifdef CONFIG_GRKERNSEC_PROC_USER
77894+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
77895+ return -EACCES;
77896+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77897+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
77898+ return -EACCES;
77899+#endif
77900+ return 0;
77901+}
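Editor's note: gr_proc_is_restricted() is a two-tier gate -- a root fsuid always passes, and with PROC_USERGROUP a supplementary-group match also passes. A userspace sketch of the same decision; geteuid() stands in for the fsuid, and the gid value 10 is only a placeholder for the kernel's grsec_proc_gid:

#include <stdio.h>
#include <unistd.h>

static int proc_is_restricted(gid_t trusted_gid)
{
	gid_t groups[64];
	int i, n;

	if (geteuid() == 0)
		return 0;		/* root: never restricted */
	n = getgroups(64, groups);
	for (i = 0; i < n; i++)
		if (groups[i] == trusted_gid)
			return 0;	/* member of the trusted group */
	return -1;			/* -EACCES analogue */
}

int main(void)
{
	printf("restricted: %s\n", proc_is_restricted(10) ? "yes" : "no");
	return 0;
}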
77902diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
77903new file mode 100644
77904index 0000000..f7f29aa
77905--- /dev/null
77906+++ b/grsecurity/grsec_ptrace.c
77907@@ -0,0 +1,30 @@
77908+#include <linux/kernel.h>
77909+#include <linux/sched.h>
77910+#include <linux/grinternal.h>
77911+#include <linux/security.h>
77912+
77913+void
77914+gr_audit_ptrace(struct task_struct *task)
77915+{
77916+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77917+ if (grsec_enable_audit_ptrace)
77918+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
77919+#endif
77920+ return;
77921+}
77922+
77923+int
77924+gr_ptrace_readexec(struct file *file, int unsafe_flags)
77925+{
77926+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77927+ const struct dentry *dentry = file->f_path.dentry;
77928+ const struct vfsmount *mnt = file->f_path.mnt;
77929+
77930+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
77931+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
77932+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
77933+ return -EACCES;
77934+ }
77935+#endif
77936+ return 0;
77937+}
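Editor's note: the readexec test runs at exec time -- if the new image is being traced (LSM_UNSAFE_PTRACE) and the tracer lacks read permission on the binary, the exec is refused, since ptrace would otherwise let the tracer dump the text of a binary it cannot read. A userspace analogue, with access(2) standing in for inode_permission(..., MAY_READ):

#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s <binary>\n", argv[0]);
		return 1;
	}
	/* the gate: no read permission on the binary, no traced exec */
	if (access(argv[1], R_OK) != 0) {
		fprintf(stderr, "attach denied: cannot read %s\n", argv[1]);
		return 1;
	}
	printf("traced exec would be permitted\n");
	return 0;
}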
77938diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
77939new file mode 100644
77940index 0000000..3860c7e
77941--- /dev/null
77942+++ b/grsecurity/grsec_sig.c
77943@@ -0,0 +1,236 @@
77944+#include <linux/kernel.h>
77945+#include <linux/sched.h>
77946+#include <linux/fs.h>
77947+#include <linux/delay.h>
77948+#include <linux/grsecurity.h>
77949+#include <linux/grinternal.h>
77950+#include <linux/hardirq.h>
77951+
77952+char *signames[] = {
77953+ [SIGSEGV] = "Segmentation fault",
77954+ [SIGILL] = "Illegal instruction",
77955+ [SIGABRT] = "Abort",
77956+ [SIGBUS] = "Invalid alignment/Bus error"
77957+};
77958+
77959+void
77960+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
77961+{
77962+#ifdef CONFIG_GRKERNSEC_SIGNAL
77963+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
77964+ (sig == SIGABRT) || (sig == SIGBUS))) {
77965+ if (task_pid_nr(t) == task_pid_nr(current)) {
77966+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
77967+ } else {
77968+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
77969+ }
77970+ }
77971+#endif
77972+ return;
77973+}
77974+
77975+int
77976+gr_handle_signal(const struct task_struct *p, const int sig)
77977+{
77978+#ifdef CONFIG_GRKERNSEC
77979+ /* ignore the 0 signal for protected task checks */
77980+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
77981+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
77982+ return -EPERM;
77983+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
77984+ return -EPERM;
77985+ }
77986+#endif
77987+ return 0;
77988+}
77989+
77990+#ifdef CONFIG_GRKERNSEC
77991+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
77992+
77993+int gr_fake_force_sig(int sig, struct task_struct *t)
77994+{
77995+ unsigned long int flags;
77996+ int ret, blocked, ignored;
77997+ struct k_sigaction *action;
77998+
77999+ spin_lock_irqsave(&t->sighand->siglock, flags);
78000+ action = &t->sighand->action[sig-1];
78001+ ignored = action->sa.sa_handler == SIG_IGN;
78002+ blocked = sigismember(&t->blocked, sig);
78003+ if (blocked || ignored) {
78004+ action->sa.sa_handler = SIG_DFL;
78005+ if (blocked) {
78006+ sigdelset(&t->blocked, sig);
78007+ recalc_sigpending_and_wake(t);
78008+ }
78009+ }
78010+ if (action->sa.sa_handler == SIG_DFL)
78011+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
78012+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
78013+
78014+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
78015+
78016+ return ret;
78017+}
78018+#endif
78019+
78020+#define GR_USER_BAN_TIME (15 * 60)
78021+#define GR_DAEMON_BRUTE_TIME (30 * 60)
78022+
78023+void gr_handle_brute_attach(int dumpable)
78024+{
78025+#ifdef CONFIG_GRKERNSEC_BRUTE
78026+ struct task_struct *p = current;
78027+ kuid_t uid = GLOBAL_ROOT_UID;
78028+ int daemon = 0;
78029+
78030+ if (!grsec_enable_brute)
78031+ return;
78032+
78033+ rcu_read_lock();
78034+ read_lock(&tasklist_lock);
78035+ read_lock(&grsec_exec_file_lock);
78036+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
78037+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
78038+ p->real_parent->brute = 1;
78039+ daemon = 1;
78040+ } else {
78041+ const struct cred *cred = __task_cred(p), *cred2;
78042+ struct task_struct *tsk, *tsk2;
78043+
78044+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
78045+ struct user_struct *user;
78046+
78047+ uid = cred->uid;
78048+
78049+			/* the reference find_user() takes here is dropped ("put") on the first exec after the ban expires -- see suid_ban_expired() */
78050+ user = find_user(uid);
78051+ if (user == NULL)
78052+ goto unlock;
78053+ user->suid_banned = 1;
78054+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
78055+ if (user->suid_ban_expires == ~0UL)
78056+ user->suid_ban_expires--;
78057+
78058+ /* only kill other threads of the same binary, from the same user */
78059+ do_each_thread(tsk2, tsk) {
78060+ cred2 = __task_cred(tsk);
78061+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
78062+ gr_fake_force_sig(SIGKILL, tsk);
78063+ } while_each_thread(tsk2, tsk);
78064+ }
78065+ }
78066+unlock:
78067+ read_unlock(&grsec_exec_file_lock);
78068+ read_unlock(&tasklist_lock);
78069+ rcu_read_unlock();
78070+
78071+ if (gr_is_global_nonroot(uid))
78072+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
78073+ else if (daemon)
78074+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
78075+
78076+#endif
78077+ return;
78078+}
78079+
78080+void gr_handle_brute_check(void)
78081+{
78082+#ifdef CONFIG_GRKERNSEC_BRUTE
78083+ struct task_struct *p = current;
78084+
78085+ if (unlikely(p->brute)) {
78086+ if (!grsec_enable_brute)
78087+ p->brute = 0;
78088+ else if (time_before(get_seconds(), p->brute_expires))
78089+ msleep(30 * 1000);
78090+ }
78091+#endif
78092+ return;
78093+}
78094+
78095+void gr_handle_kernel_exploit(void)
78096+{
78097+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78098+ const struct cred *cred;
78099+ struct task_struct *tsk, *tsk2;
78100+ struct user_struct *user;
78101+ kuid_t uid;
78102+
78103+ if (in_irq() || in_serving_softirq() || in_nmi())
78104+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
78105+
78106+ uid = current_uid();
78107+
78108+ if (gr_is_global_root(uid))
78109+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
78110+ else {
78111+		/* kill all the processes of this user, hold a reference
78112+		   to their user_struct, and prevent them from creating
78113+		   another process until system reset
78114+		*/
78115+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
78116+ GR_GLOBAL_UID(uid));
78117+ /* we intentionally leak this ref */
78118+ user = get_uid(current->cred->user);
78119+ if (user)
78120+ user->kernel_banned = 1;
78121+
78122+ /* kill all processes of this user */
78123+ read_lock(&tasklist_lock);
78124+ do_each_thread(tsk2, tsk) {
78125+ cred = __task_cred(tsk);
78126+ if (uid_eq(cred->uid, uid))
78127+ gr_fake_force_sig(SIGKILL, tsk);
78128+ } while_each_thread(tsk2, tsk);
78129+ read_unlock(&tasklist_lock);
78130+ }
78131+#endif
78132+}
78133+
78134+#ifdef CONFIG_GRKERNSEC_BRUTE
78135+static bool suid_ban_expired(struct user_struct *user)
78136+{
78137+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
78138+ user->suid_banned = 0;
78139+ user->suid_ban_expires = 0;
78140+ free_uid(user);
78141+ return true;
78142+ }
78143+
78144+ return false;
78145+}
78146+#endif
78147+
78148+int gr_process_kernel_exec_ban(void)
78149+{
78150+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78151+ if (unlikely(current->cred->user->kernel_banned))
78152+ return -EPERM;
78153+#endif
78154+ return 0;
78155+}
78156+
78157+int gr_process_kernel_setuid_ban(struct user_struct *user)
78158+{
78159+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78160+ if (unlikely(user->kernel_banned))
78161+ gr_fake_force_sig(SIGKILL, current);
78162+#endif
78163+ return 0;
78164+}
78165+
78166+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
78167+{
78168+#ifdef CONFIG_GRKERNSEC_BRUTE
78169+ struct user_struct *user = current->cred->user;
78170+ if (unlikely(user->suid_banned)) {
78171+ if (suid_ban_expired(user))
78172+ return 0;
78173+ /* disallow execution of suid binaries only */
78174+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
78175+ return -EPERM;
78176+ }
78177+#endif
78178+ return 0;
78179+}
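Editor's note: two timing details above are easy to miss. Bans are stamped in get_seconds() units, with ~0UL reserved as a never-expires sentinel (the decrement after setting suid_ban_expires keeps an ordinary ban from colliding with it), and expiry is tested with time_after_eq(), the wrap-safe comparison. A standalone sketch of the expiry logic, simplified to a plain >= instead of the wrap-safe macro and with the refcount drop omitted:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define USER_BAN_TIME (15 * 60)	/* mirrors GR_USER_BAN_TIME */

struct user_ban {
	bool banned;
	unsigned long ban_expires;	/* seconds; ~0UL means "never expires" */
};

/* a ban lapses once "now" reaches the stamp, unless it is the sentinel */
static bool ban_expired(struct user_ban *u, unsigned long now)
{
	if (u->ban_expires != ~0UL && now >= u->ban_expires) {
		u->banned = false;
		u->ban_expires = 0;
		return true;
	}
	return false;
}

int main(void)
{
	unsigned long now = (unsigned long)time(NULL);
	struct user_ban u = { .banned = true, .ban_expires = now + USER_BAN_TIME };

	printf("expired now? %d\n", ban_expired(&u, now));			/* 0 */
	printf("expired later? %d\n", ban_expired(&u, now + USER_BAN_TIME));	/* 1 */
	return 0;
}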
78180diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
78181new file mode 100644
78182index 0000000..e3650b6
78183--- /dev/null
78184+++ b/grsecurity/grsec_sock.c
78185@@ -0,0 +1,244 @@
78186+#include <linux/kernel.h>
78187+#include <linux/module.h>
78188+#include <linux/sched.h>
78189+#include <linux/file.h>
78190+#include <linux/net.h>
78191+#include <linux/in.h>
78192+#include <linux/ip.h>
78193+#include <net/sock.h>
78194+#include <net/inet_sock.h>
78195+#include <linux/grsecurity.h>
78196+#include <linux/grinternal.h>
78197+#include <linux/gracl.h>
78198+
78199+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
78200+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
78201+
78202+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
78203+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
78204+
78205+#ifdef CONFIG_UNIX_MODULE
78206+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
78207+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
78208+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
78209+EXPORT_SYMBOL_GPL(gr_handle_create);
78210+#endif
78211+
78212+#ifdef CONFIG_GRKERNSEC
78213+#define gr_conn_table_size 32749
78214+struct conn_table_entry {
78215+ struct conn_table_entry *next;
78216+ struct signal_struct *sig;
78217+};
78218+
78219+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
78220+DEFINE_SPINLOCK(gr_conn_table_lock);
78221+
78222+extern const char * gr_socktype_to_name(unsigned char type);
78223+extern const char * gr_proto_to_name(unsigned char proto);
78224+extern const char * gr_sockfamily_to_name(unsigned char family);
78225+
78226+static __inline__ int
78227+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
78228+{
78229+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
78230+}
78231+
78232+static __inline__ int
78233+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
78234+ __u16 sport, __u16 dport)
78235+{
78236+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
78237+ sig->gr_sport == sport && sig->gr_dport == dport))
78238+ return 1;
78239+ else
78240+ return 0;
78241+}
78242+
78243+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
78244+{
78245+ struct conn_table_entry **match;
78246+ unsigned int index;
78247+
78248+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78249+ sig->gr_sport, sig->gr_dport,
78250+ gr_conn_table_size);
78251+
78252+ newent->sig = sig;
78253+
78254+ match = &gr_conn_table[index];
78255+ newent->next = *match;
78256+ *match = newent;
78257+
78258+ return;
78259+}
78260+
78261+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
78262+{
78263+ struct conn_table_entry *match, *last = NULL;
78264+ unsigned int index;
78265+
78266+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78267+ sig->gr_sport, sig->gr_dport,
78268+ gr_conn_table_size);
78269+
78270+ match = gr_conn_table[index];
78271+ while (match && !conn_match(match->sig,
78272+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
78273+ sig->gr_dport)) {
78274+ last = match;
78275+ match = match->next;
78276+ }
78277+
78278+ if (match) {
78279+ if (last)
78280+ last->next = match->next;
78281+ else
78282+ gr_conn_table[index] = NULL;
78283+ kfree(match);
78284+ }
78285+
78286+ return;
78287+}
78288+
78289+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
78290+ __u16 sport, __u16 dport)
78291+{
78292+ struct conn_table_entry *match;
78293+ unsigned int index;
78294+
78295+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
78296+
78297+ match = gr_conn_table[index];
78298+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
78299+ match = match->next;
78300+
78301+ if (match)
78302+ return match->sig;
78303+ else
78304+ return NULL;
78305+}
78306+
78307+#endif
78308+
78309+void gr_update_task_in_ip_table(const struct inet_sock *inet)
78310+{
78311+#ifdef CONFIG_GRKERNSEC
78312+ struct signal_struct *sig = current->signal;
78313+ struct conn_table_entry *newent;
78314+
78315+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
78316+ if (newent == NULL)
78317+ return;
78318+ /* no bh lock needed since we are called with bh disabled */
78319+ spin_lock(&gr_conn_table_lock);
78320+ gr_del_task_from_ip_table_nolock(sig);
78321+ sig->gr_saddr = inet->inet_rcv_saddr;
78322+ sig->gr_daddr = inet->inet_daddr;
78323+ sig->gr_sport = inet->inet_sport;
78324+ sig->gr_dport = inet->inet_dport;
78325+ gr_add_to_task_ip_table_nolock(sig, newent);
78326+ spin_unlock(&gr_conn_table_lock);
78327+#endif
78328+ return;
78329+}
78330+
78331+void gr_del_task_from_ip_table(struct task_struct *task)
78332+{
78333+#ifdef CONFIG_GRKERNSEC
78334+ spin_lock_bh(&gr_conn_table_lock);
78335+ gr_del_task_from_ip_table_nolock(task->signal);
78336+ spin_unlock_bh(&gr_conn_table_lock);
78337+#endif
78338+ return;
78339+}
78340+
78341+void
78342+gr_attach_curr_ip(const struct sock *sk)
78343+{
78344+#ifdef CONFIG_GRKERNSEC
78345+ struct signal_struct *p, *set;
78346+ const struct inet_sock *inet = inet_sk(sk);
78347+
78348+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
78349+ return;
78350+
78351+ set = current->signal;
78352+
78353+ spin_lock_bh(&gr_conn_table_lock);
78354+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
78355+ inet->inet_dport, inet->inet_sport);
78356+ if (unlikely(p != NULL)) {
78357+ set->curr_ip = p->curr_ip;
78358+ set->used_accept = 1;
78359+ gr_del_task_from_ip_table_nolock(p);
78360+ spin_unlock_bh(&gr_conn_table_lock);
78361+ return;
78362+ }
78363+ spin_unlock_bh(&gr_conn_table_lock);
78364+
78365+ set->curr_ip = inet->inet_daddr;
78366+ set->used_accept = 1;
78367+#endif
78368+ return;
78369+}
78370+
78371+int
78372+gr_handle_sock_all(const int family, const int type, const int protocol)
78373+{
78374+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78375+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
78376+ (family != AF_UNIX)) {
78377+ if (family == AF_INET)
78378+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
78379+ else
78380+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
78381+ return -EACCES;
78382+ }
78383+#endif
78384+ return 0;
78385+}
78386+
78387+int
78388+gr_handle_sock_server(const struct sockaddr *sck)
78389+{
78390+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78391+ if (grsec_enable_socket_server &&
78392+ in_group_p(grsec_socket_server_gid) &&
78393+ sck && (sck->sa_family != AF_UNIX) &&
78394+ (sck->sa_family != AF_LOCAL)) {
78395+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78396+ return -EACCES;
78397+ }
78398+#endif
78399+ return 0;
78400+}
78401+
78402+int
78403+gr_handle_sock_server_other(const struct sock *sck)
78404+{
78405+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78406+ if (grsec_enable_socket_server &&
78407+ in_group_p(grsec_socket_server_gid) &&
78408+ sck && (sck->sk_family != AF_UNIX) &&
78409+ (sck->sk_family != AF_LOCAL)) {
78410+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78411+ return -EACCES;
78412+ }
78413+#endif
78414+ return 0;
78415+}
78416+
78417+int
78418+gr_handle_sock_client(const struct sockaddr *sck)
78419+{
78420+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78421+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
78422+ sck && (sck->sa_family != AF_UNIX) &&
78423+ (sck->sa_family != AF_LOCAL)) {
78424+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
78425+ return -EACCES;
78426+ }
78427+#endif
78428+ return 0;
78429+}
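Editor's note: the connection table is a fixed array of 32749 singly linked buckets keyed by hashing the (saddr, daddr, sport, dport) tuple; gr_attach_curr_ip() looks the entry up with the address and port pairs swapped, which is how the accepting side inherits curr_ip from the connecting side. A standalone copy of the hash for experimentation:

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 32749	/* same prime bucket count as gr_conn_table_size */

/* fold the 4-tuple into one word, then reduce modulo the prime size */
static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

int main(void)
{
	/* 10.0.0.1:40000 -> 10.0.0.2:443; byte order ignored for the demo */
	printf("bucket=%u\n", conn_hash(0x0a000001, 0x0a000002, 40000, 443));
	return 0;
}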
78430diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
78431new file mode 100644
78432index 0000000..cce889e
78433--- /dev/null
78434+++ b/grsecurity/grsec_sysctl.c
78435@@ -0,0 +1,488 @@
78436+#include <linux/kernel.h>
78437+#include <linux/sched.h>
78438+#include <linux/sysctl.h>
78439+#include <linux/grsecurity.h>
78440+#include <linux/grinternal.h>
78441+
78442+int
78443+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
78444+{
78445+#ifdef CONFIG_GRKERNSEC_SYSCTL
78446+ if (dirname == NULL || name == NULL)
78447+ return 0;
78448+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
78449+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
78450+ return -EACCES;
78451+ }
78452+#endif
78453+ return 0;
78454+}
78455+
78456+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
78457+static int __maybe_unused __read_only one = 1;
78458+#endif
78459+
78460+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
78461+ defined(CONFIG_GRKERNSEC_DENYUSB)
78462+struct ctl_table grsecurity_table[] = {
78463+#ifdef CONFIG_GRKERNSEC_SYSCTL
78464+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
78465+#ifdef CONFIG_GRKERNSEC_IO
78466+ {
78467+ .procname = "disable_priv_io",
78468+ .data = &grsec_disable_privio,
78469+ .maxlen = sizeof(int),
78470+ .mode = 0600,
78471+ .proc_handler = &proc_dointvec,
78472+ },
78473+#endif
78474+#endif
78475+#ifdef CONFIG_GRKERNSEC_LINK
78476+ {
78477+ .procname = "linking_restrictions",
78478+ .data = &grsec_enable_link,
78479+ .maxlen = sizeof(int),
78480+ .mode = 0600,
78481+ .proc_handler = &proc_dointvec,
78482+ },
78483+#endif
78484+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78485+ {
78486+ .procname = "enforce_symlinksifowner",
78487+ .data = &grsec_enable_symlinkown,
78488+ .maxlen = sizeof(int),
78489+ .mode = 0600,
78490+ .proc_handler = &proc_dointvec,
78491+ },
78492+ {
78493+ .procname = "symlinkown_gid",
78494+ .data = &grsec_symlinkown_gid,
78495+ .maxlen = sizeof(int),
78496+ .mode = 0600,
78497+ .proc_handler = &proc_dointvec,
78498+ },
78499+#endif
78500+#ifdef CONFIG_GRKERNSEC_BRUTE
78501+ {
78502+ .procname = "deter_bruteforce",
78503+ .data = &grsec_enable_brute,
78504+ .maxlen = sizeof(int),
78505+ .mode = 0600,
78506+ .proc_handler = &proc_dointvec,
78507+ },
78508+#endif
78509+#ifdef CONFIG_GRKERNSEC_FIFO
78510+ {
78511+ .procname = "fifo_restrictions",
78512+ .data = &grsec_enable_fifo,
78513+ .maxlen = sizeof(int),
78514+ .mode = 0600,
78515+ .proc_handler = &proc_dointvec,
78516+ },
78517+#endif
78518+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78519+ {
78520+ .procname = "ptrace_readexec",
78521+ .data = &grsec_enable_ptrace_readexec,
78522+ .maxlen = sizeof(int),
78523+ .mode = 0600,
78524+ .proc_handler = &proc_dointvec,
78525+ },
78526+#endif
78527+#ifdef CONFIG_GRKERNSEC_SETXID
78528+ {
78529+ .procname = "consistent_setxid",
78530+ .data = &grsec_enable_setxid,
78531+ .maxlen = sizeof(int),
78532+ .mode = 0600,
78533+ .proc_handler = &proc_dointvec,
78534+ },
78535+#endif
78536+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78537+ {
78538+ .procname = "ip_blackhole",
78539+ .data = &grsec_enable_blackhole,
78540+ .maxlen = sizeof(int),
78541+ .mode = 0600,
78542+ .proc_handler = &proc_dointvec,
78543+ },
78544+ {
78545+ .procname = "lastack_retries",
78546+ .data = &grsec_lastack_retries,
78547+ .maxlen = sizeof(int),
78548+ .mode = 0600,
78549+ .proc_handler = &proc_dointvec,
78550+ },
78551+#endif
78552+#ifdef CONFIG_GRKERNSEC_EXECLOG
78553+ {
78554+ .procname = "exec_logging",
78555+ .data = &grsec_enable_execlog,
78556+ .maxlen = sizeof(int),
78557+ .mode = 0600,
78558+ .proc_handler = &proc_dointvec,
78559+ },
78560+#endif
78561+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78562+ {
78563+ .procname = "rwxmap_logging",
78564+ .data = &grsec_enable_log_rwxmaps,
78565+ .maxlen = sizeof(int),
78566+ .mode = 0600,
78567+ .proc_handler = &proc_dointvec,
78568+ },
78569+#endif
78570+#ifdef CONFIG_GRKERNSEC_SIGNAL
78571+ {
78572+ .procname = "signal_logging",
78573+ .data = &grsec_enable_signal,
78574+ .maxlen = sizeof(int),
78575+ .mode = 0600,
78576+ .proc_handler = &proc_dointvec,
78577+ },
78578+#endif
78579+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78580+ {
78581+ .procname = "forkfail_logging",
78582+ .data = &grsec_enable_forkfail,
78583+ .maxlen = sizeof(int),
78584+ .mode = 0600,
78585+ .proc_handler = &proc_dointvec,
78586+ },
78587+#endif
78588+#ifdef CONFIG_GRKERNSEC_TIME
78589+ {
78590+ .procname = "timechange_logging",
78591+ .data = &grsec_enable_time,
78592+ .maxlen = sizeof(int),
78593+ .mode = 0600,
78594+ .proc_handler = &proc_dointvec,
78595+ },
78596+#endif
78597+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78598+ {
78599+ .procname = "chroot_deny_shmat",
78600+ .data = &grsec_enable_chroot_shmat,
78601+ .maxlen = sizeof(int),
78602+ .mode = 0600,
78603+ .proc_handler = &proc_dointvec,
78604+ },
78605+#endif
78606+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78607+ {
78608+ .procname = "chroot_deny_unix",
78609+ .data = &grsec_enable_chroot_unix,
78610+ .maxlen = sizeof(int),
78611+ .mode = 0600,
78612+ .proc_handler = &proc_dointvec,
78613+ },
78614+#endif
78615+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78616+ {
78617+ .procname = "chroot_deny_mount",
78618+ .data = &grsec_enable_chroot_mount,
78619+ .maxlen = sizeof(int),
78620+ .mode = 0600,
78621+ .proc_handler = &proc_dointvec,
78622+ },
78623+#endif
78624+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78625+ {
78626+ .procname = "chroot_deny_fchdir",
78627+ .data = &grsec_enable_chroot_fchdir,
78628+ .maxlen = sizeof(int),
78629+ .mode = 0600,
78630+ .proc_handler = &proc_dointvec,
78631+ },
78632+#endif
78633+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78634+ {
78635+ .procname = "chroot_deny_chroot",
78636+ .data = &grsec_enable_chroot_double,
78637+ .maxlen = sizeof(int),
78638+ .mode = 0600,
78639+ .proc_handler = &proc_dointvec,
78640+ },
78641+#endif
78642+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78643+ {
78644+ .procname = "chroot_deny_pivot",
78645+ .data = &grsec_enable_chroot_pivot,
78646+ .maxlen = sizeof(int),
78647+ .mode = 0600,
78648+ .proc_handler = &proc_dointvec,
78649+ },
78650+#endif
78651+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78652+ {
78653+ .procname = "chroot_enforce_chdir",
78654+ .data = &grsec_enable_chroot_chdir,
78655+ .maxlen = sizeof(int),
78656+ .mode = 0600,
78657+ .proc_handler = &proc_dointvec,
78658+ },
78659+#endif
78660+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78661+ {
78662+ .procname = "chroot_deny_chmod",
78663+ .data = &grsec_enable_chroot_chmod,
78664+ .maxlen = sizeof(int),
78665+ .mode = 0600,
78666+ .proc_handler = &proc_dointvec,
78667+ },
78668+#endif
78669+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78670+ {
78671+ .procname = "chroot_deny_mknod",
78672+ .data = &grsec_enable_chroot_mknod,
78673+ .maxlen = sizeof(int),
78674+ .mode = 0600,
78675+ .proc_handler = &proc_dointvec,
78676+ },
78677+#endif
78678+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78679+ {
78680+ .procname = "chroot_restrict_nice",
78681+ .data = &grsec_enable_chroot_nice,
78682+ .maxlen = sizeof(int),
78683+ .mode = 0600,
78684+ .proc_handler = &proc_dointvec,
78685+ },
78686+#endif
78687+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78688+ {
78689+ .procname = "chroot_execlog",
78690+ .data = &grsec_enable_chroot_execlog,
78691+ .maxlen = sizeof(int),
78692+ .mode = 0600,
78693+ .proc_handler = &proc_dointvec,
78694+ },
78695+#endif
78696+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78697+ {
78698+ .procname = "chroot_caps",
78699+ .data = &grsec_enable_chroot_caps,
78700+ .maxlen = sizeof(int),
78701+ .mode = 0600,
78702+ .proc_handler = &proc_dointvec,
78703+ },
78704+#endif
78705+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
78706+ {
78707+ .procname = "chroot_deny_bad_rename",
78708+ .data = &grsec_enable_chroot_rename,
78709+ .maxlen = sizeof(int),
78710+ .mode = 0600,
78711+ .proc_handler = &proc_dointvec,
78712+ },
78713+#endif
78714+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78715+ {
78716+ .procname = "chroot_deny_sysctl",
78717+ .data = &grsec_enable_chroot_sysctl,
78718+ .maxlen = sizeof(int),
78719+ .mode = 0600,
78720+ .proc_handler = &proc_dointvec,
78721+ },
78722+#endif
78723+#ifdef CONFIG_GRKERNSEC_TPE
78724+ {
78725+ .procname = "tpe",
78726+ .data = &grsec_enable_tpe,
78727+ .maxlen = sizeof(int),
78728+ .mode = 0600,
78729+ .proc_handler = &proc_dointvec,
78730+ },
78731+ {
78732+ .procname = "tpe_gid",
78733+ .data = &grsec_tpe_gid,
78734+ .maxlen = sizeof(int),
78735+ .mode = 0600,
78736+ .proc_handler = &proc_dointvec,
78737+ },
78738+#endif
78739+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78740+ {
78741+ .procname = "tpe_invert",
78742+ .data = &grsec_enable_tpe_invert,
78743+ .maxlen = sizeof(int),
78744+ .mode = 0600,
78745+ .proc_handler = &proc_dointvec,
78746+ },
78747+#endif
78748+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78749+ {
78750+ .procname = "tpe_restrict_all",
78751+ .data = &grsec_enable_tpe_all,
78752+ .maxlen = sizeof(int),
78753+ .mode = 0600,
78754+ .proc_handler = &proc_dointvec,
78755+ },
78756+#endif
78757+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78758+ {
78759+ .procname = "socket_all",
78760+ .data = &grsec_enable_socket_all,
78761+ .maxlen = sizeof(int),
78762+ .mode = 0600,
78763+ .proc_handler = &proc_dointvec,
78764+ },
78765+ {
78766+ .procname = "socket_all_gid",
78767+ .data = &grsec_socket_all_gid,
78768+ .maxlen = sizeof(int),
78769+ .mode = 0600,
78770+ .proc_handler = &proc_dointvec,
78771+ },
78772+#endif
78773+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78774+ {
78775+ .procname = "socket_client",
78776+ .data = &grsec_enable_socket_client,
78777+ .maxlen = sizeof(int),
78778+ .mode = 0600,
78779+ .proc_handler = &proc_dointvec,
78780+ },
78781+ {
78782+ .procname = "socket_client_gid",
78783+ .data = &grsec_socket_client_gid,
78784+ .maxlen = sizeof(int),
78785+ .mode = 0600,
78786+ .proc_handler = &proc_dointvec,
78787+ },
78788+#endif
78789+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78790+ {
78791+ .procname = "socket_server",
78792+ .data = &grsec_enable_socket_server,
78793+ .maxlen = sizeof(int),
78794+ .mode = 0600,
78795+ .proc_handler = &proc_dointvec,
78796+ },
78797+ {
78798+ .procname = "socket_server_gid",
78799+ .data = &grsec_socket_server_gid,
78800+ .maxlen = sizeof(int),
78801+ .mode = 0600,
78802+ .proc_handler = &proc_dointvec,
78803+ },
78804+#endif
78805+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78806+ {
78807+ .procname = "audit_group",
78808+ .data = &grsec_enable_group,
78809+ .maxlen = sizeof(int),
78810+ .mode = 0600,
78811+ .proc_handler = &proc_dointvec,
78812+ },
78813+ {
78814+ .procname = "audit_gid",
78815+ .data = &grsec_audit_gid,
78816+ .maxlen = sizeof(int),
78817+ .mode = 0600,
78818+ .proc_handler = &proc_dointvec,
78819+ },
78820+#endif
78821+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78822+ {
78823+ .procname = "audit_chdir",
78824+ .data = &grsec_enable_chdir,
78825+ .maxlen = sizeof(int),
78826+ .mode = 0600,
78827+ .proc_handler = &proc_dointvec,
78828+ },
78829+#endif
78830+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78831+ {
78832+ .procname = "audit_mount",
78833+ .data = &grsec_enable_mount,
78834+ .maxlen = sizeof(int),
78835+ .mode = 0600,
78836+ .proc_handler = &proc_dointvec,
78837+ },
78838+#endif
78839+#ifdef CONFIG_GRKERNSEC_DMESG
78840+ {
78841+ .procname = "dmesg",
78842+ .data = &grsec_enable_dmesg,
78843+ .maxlen = sizeof(int),
78844+ .mode = 0600,
78845+ .proc_handler = &proc_dointvec,
78846+ },
78847+#endif
78848+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78849+ {
78850+ .procname = "chroot_findtask",
78851+ .data = &grsec_enable_chroot_findtask,
78852+ .maxlen = sizeof(int),
78853+ .mode = 0600,
78854+ .proc_handler = &proc_dointvec,
78855+ },
78856+#endif
78857+#ifdef CONFIG_GRKERNSEC_RESLOG
78858+ {
78859+ .procname = "resource_logging",
78860+ .data = &grsec_resource_logging,
78861+ .maxlen = sizeof(int),
78862+ .mode = 0600,
78863+ .proc_handler = &proc_dointvec,
78864+ },
78865+#endif
78866+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78867+ {
78868+ .procname = "audit_ptrace",
78869+ .data = &grsec_enable_audit_ptrace,
78870+ .maxlen = sizeof(int),
78871+ .mode = 0600,
78872+ .proc_handler = &proc_dointvec,
78873+ },
78874+#endif
78875+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78876+ {
78877+ .procname = "harden_ptrace",
78878+ .data = &grsec_enable_harden_ptrace,
78879+ .maxlen = sizeof(int),
78880+ .mode = 0600,
78881+ .proc_handler = &proc_dointvec,
78882+ },
78883+#endif
78884+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78885+ {
78886+ .procname = "harden_ipc",
78887+ .data = &grsec_enable_harden_ipc,
78888+ .maxlen = sizeof(int),
78889+ .mode = 0600,
78890+ .proc_handler = &proc_dointvec,
78891+ },
78892+#endif
78893+ {
78894+ .procname = "grsec_lock",
78895+ .data = &grsec_lock,
78896+ .maxlen = sizeof(int),
78897+ .mode = 0600,
78898+ .proc_handler = &proc_dointvec,
78899+ },
78900+#endif
78901+#ifdef CONFIG_GRKERNSEC_ROFS
78902+ {
78903+ .procname = "romount_protect",
78904+ .data = &grsec_enable_rofs,
78905+ .maxlen = sizeof(int),
78906+ .mode = 0600,
78907+ .proc_handler = &proc_dointvec_minmax,
78908+ .extra1 = &one,
78909+ .extra2 = &one,
78910+ },
78911+#endif
78912+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
78913+ {
78914+ .procname = "deny_new_usb",
78915+ .data = &grsec_deny_new_usb,
78916+ .maxlen = sizeof(int),
78917+ .mode = 0600,
78918+ .proc_handler = &proc_dointvec,
78919+ },
78920+#endif
78921+ { }
78922+};
78923+#endif
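Editor's note: each entry above binds one file under /proc/sys/kernel/grsecurity to an int flag, the empty { } at the end is the sentinel that terminates the walk, and romount_protect uses proc_dointvec_minmax with extra1 and extra2 both pointing at 1, so it accepts only the value 1 -- a one-way switch once enabled. A toy userspace version of the same sentinel-terminated, data-driven table (struct toggle and the demo fields are invented for illustration):

#include <stdio.h>

struct toggle {
	const char *procname;	/* would become the /proc file name */
	int *data;		/* the flag the file reads and writes */
};

static int demo_a;
static int demo_b = 1;

static struct toggle table[] = {
	{ "demo_a", &demo_a },
	{ "demo_b", &demo_b },
	{ 0 }			/* sentinel, like the kernel's { } */
};

int main(void)
{
	const struct toggle *t;

	for (t = table; t->procname; t++)
		printf("%s = %d\n", t->procname, *t->data);
	return 0;
}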
78924diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
78925new file mode 100644
78926index 0000000..61b514e
78927--- /dev/null
78928+++ b/grsecurity/grsec_time.c
78929@@ -0,0 +1,16 @@
78930+#include <linux/kernel.h>
78931+#include <linux/sched.h>
78932+#include <linux/grinternal.h>
78933+#include <linux/module.h>
78934+
78935+void
78936+gr_log_timechange(void)
78937+{
78938+#ifdef CONFIG_GRKERNSEC_TIME
78939+ if (grsec_enable_time)
78940+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
78941+#endif
78942+ return;
78943+}
78944+
78945+EXPORT_SYMBOL_GPL(gr_log_timechange);
78946diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
78947new file mode 100644
78948index 0000000..d1953de
78949--- /dev/null
78950+++ b/grsecurity/grsec_tpe.c
78951@@ -0,0 +1,78 @@
78952+#include <linux/kernel.h>
78953+#include <linux/sched.h>
78954+#include <linux/file.h>
78955+#include <linux/fs.h>
78956+#include <linux/grinternal.h>
78957+
78958+extern int gr_acl_tpe_check(void);
78959+
78960+int
78961+gr_tpe_allow(const struct file *file)
78962+{
78963+#ifdef CONFIG_GRKERNSEC
78964+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
78965+ struct inode *file_inode = file->f_path.dentry->d_inode;
78966+ const struct cred *cred = current_cred();
78967+ char *msg = NULL;
78968+ char *msg2 = NULL;
78969+
78970+ // never restrict root
78971+ if (gr_is_global_root(cred->uid))
78972+ return 1;
78973+
78974+ if (grsec_enable_tpe) {
78975+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78976+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
78977+ msg = "not being in trusted group";
78978+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
78979+ msg = "being in untrusted group";
78980+#else
78981+ if (in_group_p(grsec_tpe_gid))
78982+ msg = "being in untrusted group";
78983+#endif
78984+ }
78985+ if (!msg && gr_acl_tpe_check())
78986+ msg = "being in untrusted role";
78987+
78988+ // not in any affected group/role
78989+ if (!msg)
78990+ goto next_check;
78991+
78992+ if (gr_is_global_nonroot(inode->i_uid))
78993+ msg2 = "file in non-root-owned directory";
78994+ else if (inode->i_mode & S_IWOTH)
78995+ msg2 = "file in world-writable directory";
78996+ else if (inode->i_mode & S_IWGRP)
78997+ msg2 = "file in group-writable directory";
78998+ else if (file_inode->i_mode & S_IWOTH)
78999+ msg2 = "file is world-writable";
79000+
79001+ if (msg && msg2) {
79002+ char fullmsg[70] = {0};
79003+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
79004+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
79005+ return 0;
79006+ }
79007+ msg = NULL;
79008+next_check:
79009+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79010+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
79011+ return 1;
79012+
79013+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
79014+ msg = "directory not owned by user";
79015+ else if (inode->i_mode & S_IWOTH)
79016+ msg = "file in world-writable directory";
79017+ else if (inode->i_mode & S_IWGRP)
79018+ msg = "file in group-writable directory";
79019+ else if (file_inode->i_mode & S_IWOTH)
79020+ msg = "file is world-writable";
79021+
79022+ if (msg) {
79023+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
79024+ return 0;
79025+ }
79026+#endif
79027+#endif
79028+ return 1;
79029+}
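Editor's note: the TPE decision is almost entirely about the parent directory (plus a world-writable check on the file itself): execution is refused when the directory is not root-owned or is group- or world-writable, and CONFIG_GRKERNSEC_TPE_ALL tightens "root-owned" to "owned by the executing user". A userspace sketch of just the directory tests, with stat() on the directory standing in for the d_parent inode:

#include <stdio.h>
#include <sys/stat.h>

static const char *tpe_dir_reason(const struct stat *dir)
{
	if (dir->st_uid != 0)
		return "file in non-root-owned directory";
	if (dir->st_mode & S_IWOTH)
		return "file in world-writable directory";
	if (dir->st_mode & S_IWGRP)
		return "file in group-writable directory";
	return NULL;
}

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2 || stat(argv[1], &st) != 0) {
		fprintf(stderr, "usage: %s <directory>\n", argv[0]);
		return 1;
	}
	const char *why = tpe_dir_reason(&st);
	printf("%s: %s\n", argv[1], why ? why : "directory looks trusted");
	return 0;
}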
79030diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
79031new file mode 100644
79032index 0000000..ae02d8e
79033--- /dev/null
79034+++ b/grsecurity/grsec_usb.c
79035@@ -0,0 +1,15 @@
79036+#include <linux/kernel.h>
79037+#include <linux/grinternal.h>
79038+#include <linux/module.h>
79039+
79040+int gr_handle_new_usb(void)
79041+{
79042+#ifdef CONFIG_GRKERNSEC_DENYUSB
79043+ if (grsec_deny_new_usb) {
79044+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
79045+ return 1;
79046+ }
79047+#endif
79048+ return 0;
79049+}
79050+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
79051diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
79052new file mode 100644
79053index 0000000..158b330
79054--- /dev/null
79055+++ b/grsecurity/grsum.c
79056@@ -0,0 +1,64 @@
79057+#include <linux/err.h>
79058+#include <linux/kernel.h>
79059+#include <linux/sched.h>
79060+#include <linux/mm.h>
79061+#include <linux/scatterlist.h>
79062+#include <linux/crypto.h>
79063+#include <linux/gracl.h>
79064+
79065+
79066+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
79067+#error "crypto and sha256 must be built into the kernel"
79068+#endif
79069+
79070+int
79071+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
79072+{
79073+ struct crypto_hash *tfm;
79074+ struct hash_desc desc;
79075+ struct scatterlist sg[2];
79076+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
79077+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
79078+ unsigned long *sumptr = (unsigned long *)sum;
79079+ int cryptres;
79080+ int retval = 1;
79081+ volatile int mismatched = 0;
79082+ volatile int dummy = 0;
79083+ unsigned int i;
79084+
79085+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
79086+ if (IS_ERR(tfm)) {
79087+ /* should never happen, since sha256 should be built in */
79088+ memset(entry->pw, 0, GR_PW_LEN);
79089+ return 1;
79090+ }
79091+
79092+ sg_init_table(sg, 2);
79093+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
79094+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
79095+
79096+ desc.tfm = tfm;
79097+ desc.flags = 0;
79098+
79099+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
79100+ temp_sum);
79101+
79102+ memset(entry->pw, 0, GR_PW_LEN);
79103+
79104+ if (cryptres)
79105+ goto out;
79106+
79107+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
79108+ if (sumptr[i] != tmpsumptr[i])
79109+ mismatched = 1;
79110+ else
79111+ dummy = 1; // waste a cycle
79112+
79113+ if (!mismatched)
79114+ retval = dummy - 1;
79115+
79116+out:
79117+ crypto_free_hash(tfm);
79118+
79119+ return retval;
79120+}
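Editor's note: the compare loop above is deliberately not memcmp() -- it visits every word of the digest and only records whether a mismatch occurred, so the comparison takes the same time whether the first or the last word differs, and retval can only reach 0 through the dummy accumulator when nothing mismatched. The same idea in a standalone byte-wise form:

#include <stdio.h>

/* accumulate differences instead of returning early; runtime is
 * independent of where (or whether) the inputs differ */
static int consttime_diff(const unsigned char *a, const unsigned char *b,
			  unsigned int len)
{
	unsigned char acc = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		acc |= a[i] ^ b[i];
	return acc != 0;	/* 1 on mismatch, 0 on match */
}

int main(void)
{
	unsigned char x[] = "abcd", y[] = "abce";

	printf("diff=%d\n", consttime_diff(x, y, 4));	/* 1 */
	printf("diff=%d\n", consttime_diff(x, x, 4));	/* 0 */
	return 0;
}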
79121diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
79122index 77ff547..181834f 100644
79123--- a/include/asm-generic/4level-fixup.h
79124+++ b/include/asm-generic/4level-fixup.h
79125@@ -13,8 +13,10 @@
79126 #define pmd_alloc(mm, pud, address) \
79127 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
79128 NULL: pmd_offset(pud, address))
79129+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
79130
79131 #define pud_alloc(mm, pgd, address) (pgd)
79132+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
79133 #define pud_offset(pgd, start) (pgd)
79134 #define pud_none(pud) 0
79135 #define pud_bad(pud) 0
79136diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
79137index b7babf0..1e4b4f1 100644
79138--- a/include/asm-generic/atomic-long.h
79139+++ b/include/asm-generic/atomic-long.h
79140@@ -22,6 +22,12 @@
79141
79142 typedef atomic64_t atomic_long_t;
79143
79144+#ifdef CONFIG_PAX_REFCOUNT
79145+typedef atomic64_unchecked_t atomic_long_unchecked_t;
79146+#else
79147+typedef atomic64_t atomic_long_unchecked_t;
79148+#endif
79149+
79150 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
79151
79152 static inline long atomic_long_read(atomic_long_t *l)
79153@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79154 return (long)atomic64_read(v);
79155 }
79156
79157+#ifdef CONFIG_PAX_REFCOUNT
79158+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79159+{
79160+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79161+
79162+ return (long)atomic64_read_unchecked(v);
79163+}
79164+#endif
79165+
79166 static inline void atomic_long_set(atomic_long_t *l, long i)
79167 {
79168 atomic64_t *v = (atomic64_t *)l;
79169@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79170 atomic64_set(v, i);
79171 }
79172
79173+#ifdef CONFIG_PAX_REFCOUNT
79174+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79175+{
79176+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79177+
79178+ atomic64_set_unchecked(v, i);
79179+}
79180+#endif
79181+
79182 static inline void atomic_long_inc(atomic_long_t *l)
79183 {
79184 atomic64_t *v = (atomic64_t *)l;
79185@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79186 atomic64_inc(v);
79187 }
79188
79189+#ifdef CONFIG_PAX_REFCOUNT
79190+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79191+{
79192+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79193+
79194+ atomic64_inc_unchecked(v);
79195+}
79196+#endif
79197+
79198 static inline void atomic_long_dec(atomic_long_t *l)
79199 {
79200 atomic64_t *v = (atomic64_t *)l;
79201@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79202 atomic64_dec(v);
79203 }
79204
79205+#ifdef CONFIG_PAX_REFCOUNT
79206+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79207+{
79208+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79209+
79210+ atomic64_dec_unchecked(v);
79211+}
79212+#endif
79213+
79214 static inline void atomic_long_add(long i, atomic_long_t *l)
79215 {
79216 atomic64_t *v = (atomic64_t *)l;
79217@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79218 atomic64_add(i, v);
79219 }
79220
79221+#ifdef CONFIG_PAX_REFCOUNT
79222+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79223+{
79224+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79225+
79226+ atomic64_add_unchecked(i, v);
79227+}
79228+#endif
79229+
79230 static inline void atomic_long_sub(long i, atomic_long_t *l)
79231 {
79232 atomic64_t *v = (atomic64_t *)l;
79233@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79234 atomic64_sub(i, v);
79235 }
79236
79237+#ifdef CONFIG_PAX_REFCOUNT
79238+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79239+{
79240+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79241+
79242+ atomic64_sub_unchecked(i, v);
79243+}
79244+#endif
79245+
79246 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79247 {
79248 atomic64_t *v = (atomic64_t *)l;
79249@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79250 return atomic64_add_negative(i, v);
79251 }
79252
79253-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79254+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79255 {
79256 atomic64_t *v = (atomic64_t *)l;
79257
79258 return (long)atomic64_add_return(i, v);
79259 }
79260
79261+#ifdef CONFIG_PAX_REFCOUNT
79262+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79263+{
79264+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79265+
79266+ return (long)atomic64_add_return_unchecked(i, v);
79267+}
79268+#endif
79269+
79270 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79271 {
79272 atomic64_t *v = (atomic64_t *)l;
79273@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79274 return (long)atomic64_inc_return(v);
79275 }
79276
79277+#ifdef CONFIG_PAX_REFCOUNT
79278+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79279+{
79280+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79281+
79282+ return (long)atomic64_inc_return_unchecked(v);
79283+}
79284+#endif
79285+
79286 static inline long atomic_long_dec_return(atomic_long_t *l)
79287 {
79288 atomic64_t *v = (atomic64_t *)l;
79289@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79290
79291 typedef atomic_t atomic_long_t;
79292
79293+#ifdef CONFIG_PAX_REFCOUNT
79294+typedef atomic_unchecked_t atomic_long_unchecked_t;
79295+#else
79296+typedef atomic_t atomic_long_unchecked_t;
79297+#endif
79298+
79299 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
79300 static inline long atomic_long_read(atomic_long_t *l)
79301 {
79302@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79303 return (long)atomic_read(v);
79304 }
79305
79306+#ifdef CONFIG_PAX_REFCOUNT
79307+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79308+{
79309+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79310+
79311+ return (long)atomic_read_unchecked(v);
79312+}
79313+#endif
79314+
79315 static inline void atomic_long_set(atomic_long_t *l, long i)
79316 {
79317 atomic_t *v = (atomic_t *)l;
79318@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79319 atomic_set(v, i);
79320 }
79321
79322+#ifdef CONFIG_PAX_REFCOUNT
79323+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79324+{
79325+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79326+
79327+ atomic_set_unchecked(v, i);
79328+}
79329+#endif
79330+
79331 static inline void atomic_long_inc(atomic_long_t *l)
79332 {
79333 atomic_t *v = (atomic_t *)l;
79334@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79335 atomic_inc(v);
79336 }
79337
79338+#ifdef CONFIG_PAX_REFCOUNT
79339+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79340+{
79341+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79342+
79343+ atomic_inc_unchecked(v);
79344+}
79345+#endif
79346+
79347 static inline void atomic_long_dec(atomic_long_t *l)
79348 {
79349 atomic_t *v = (atomic_t *)l;
79350@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79351 atomic_dec(v);
79352 }
79353
79354+#ifdef CONFIG_PAX_REFCOUNT
79355+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79356+{
79357+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79358+
79359+ atomic_dec_unchecked(v);
79360+}
79361+#endif
79362+
79363 static inline void atomic_long_add(long i, atomic_long_t *l)
79364 {
79365 atomic_t *v = (atomic_t *)l;
79366@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79367 atomic_add(i, v);
79368 }
79369
79370+#ifdef CONFIG_PAX_REFCOUNT
79371+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79372+{
79373+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79374+
79375+ atomic_add_unchecked(i, v);
79376+}
79377+#endif
79378+
79379 static inline void atomic_long_sub(long i, atomic_long_t *l)
79380 {
79381 atomic_t *v = (atomic_t *)l;
79382@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79383 atomic_sub(i, v);
79384 }
79385
79386+#ifdef CONFIG_PAX_REFCOUNT
79387+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79388+{
79389+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79390+
79391+ atomic_sub_unchecked(i, v);
79392+}
79393+#endif
79394+
79395 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79396 {
79397 atomic_t *v = (atomic_t *)l;
79398@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79399 return atomic_add_negative(i, v);
79400 }
79401
79402-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79403+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79404 {
79405 atomic_t *v = (atomic_t *)l;
79406
79407 return (long)atomic_add_return(i, v);
79408 }
79409
79410+#ifdef CONFIG_PAX_REFCOUNT
79411+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79412+{
79413+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79414+
79415+ return (long)atomic_add_return_unchecked(i, v);
79416+}
79417+
79418+#endif
79419+
79420 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79421 {
79422 atomic_t *v = (atomic_t *)l;
79423@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79424 return (long)atomic_inc_return(v);
79425 }
79426
79427+#ifdef CONFIG_PAX_REFCOUNT
79428+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79429+{
79430+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79431+
79432+ return (long)atomic_inc_return_unchecked(v);
79433+}
79434+#endif
79435+
79436 static inline long atomic_long_dec_return(atomic_long_t *l)
79437 {
79438 atomic_t *v = (atomic_t *)l;
79439@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79440
79441 #endif /* BITS_PER_LONG == 64 */
79442
79443+#ifdef CONFIG_PAX_REFCOUNT
79444+static inline void pax_refcount_needs_these_functions(void)
79445+{
79446+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
79447+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
79448+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
79449+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
79450+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
79451+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
79452+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
79453+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
79454+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
79455+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
79456+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
79457+#ifdef CONFIG_X86
79458+ atomic_clear_mask_unchecked(0, NULL);
79459+ atomic_set_mask_unchecked(0, NULL);
79460+#endif
79461+
79462+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
79463+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
79464+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
79465+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
79466+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
79467+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
79468+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
79469+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
79470+}
79471+#else
79472+#define atomic_read_unchecked(v) atomic_read(v)
79473+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
79474+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
79475+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
79476+#define atomic_inc_unchecked(v) atomic_inc(v)
79477+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
79478+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
79479+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
79480+#define atomic_dec_unchecked(v) atomic_dec(v)
79481+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
79482+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
79483+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
79484+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
79485+
79486+#define atomic_long_read_unchecked(v) atomic_long_read(v)
79487+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
79488+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
79489+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
79490+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
79491+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
79492+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
79493+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
79494+#endif
79495+
79496 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
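The hunk above is mechanical: under CONFIG_PAX_REFCOUNT every atomic_long operation gains an _unchecked twin that casts atomic_long_unchecked_t down to atomic_unchecked_t and forwards, while the #else branch at the end of the header aliases the _unchecked names straight to the plain atomics (and pax_refcount_needs_these_functions() merely forces every arch to provide the full set). A minimal usage sketch, assuming a kernel translation unit with this header applied; dropped_frames and the helpers are illustrative names, not from the patch:

/* A pure statistics counter that may legitimately wrap, so it uses
 * the _unchecked API and is exempt from PAX_REFCOUNT's overflow trap. */
static atomic_long_unchecked_t dropped_frames = ATOMIC_LONG_INIT(0);

static void note_dropped_frame(void)
{
	atomic_long_inc_unchecked(&dropped_frames);
}

static long dropped_frame_count(void)
{
	return atomic_long_read_unchecked(&dropped_frames);
}
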
79497diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
79498index 30ad9c8..c70c170 100644
79499--- a/include/asm-generic/atomic64.h
79500+++ b/include/asm-generic/atomic64.h
79501@@ -16,6 +16,8 @@ typedef struct {
79502 long long counter;
79503 } atomic64_t;
79504
79505+typedef atomic64_t atomic64_unchecked_t;
79506+
79507 #define ATOMIC64_INIT(i) { (i) }
79508
79509 extern long long atomic64_read(const atomic64_t *v);
79510@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
79511 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
79512 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
79513
79514+#define atomic64_read_unchecked(v) atomic64_read(v)
79515+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
79516+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
79517+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
79518+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
79519+#define atomic64_inc_unchecked(v) atomic64_inc(v)
79520+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
79521+#define atomic64_dec_unchecked(v) atomic64_dec(v)
79522+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
79523+
79524 #endif /* _ASM_GENERIC_ATOMIC64_H */
79525diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
79526index f5c40b0..e902f9d 100644
79527--- a/include/asm-generic/barrier.h
79528+++ b/include/asm-generic/barrier.h
79529@@ -82,7 +82,7 @@
79530 do { \
79531 compiletime_assert_atomic_type(*p); \
79532 smp_mb(); \
79533- ACCESS_ONCE(*p) = (v); \
79534+ ACCESS_ONCE_RW(*p) = (v); \
79535 } while (0)
79536
79537 #define smp_load_acquire(p) \
79538diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
79539index a60a7cc..0fe12f2 100644
79540--- a/include/asm-generic/bitops/__fls.h
79541+++ b/include/asm-generic/bitops/__fls.h
79542@@ -9,7 +9,7 @@
79543 *
79544 * Undefined if no set bit exists, so code should check against 0 first.
79545 */
79546-static __always_inline unsigned long __fls(unsigned long word)
79547+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
79548 {
79549 int num = BITS_PER_LONG - 1;
79550
79551diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
79552index 0576d1f..dad6c71 100644
79553--- a/include/asm-generic/bitops/fls.h
79554+++ b/include/asm-generic/bitops/fls.h
79555@@ -9,7 +9,7 @@
79556 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
79557 */
79558
79559-static __always_inline int fls(int x)
79560+static __always_inline int __intentional_overflow(-1) fls(int x)
79561 {
79562 int r = 32;
79563
79564diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
79565index b097cf8..3d40e14 100644
79566--- a/include/asm-generic/bitops/fls64.h
79567+++ b/include/asm-generic/bitops/fls64.h
79568@@ -15,7 +15,7 @@
79569 * at position 64.
79570 */
79571 #if BITS_PER_LONG == 32
79572-static __always_inline int fls64(__u64 x)
79573+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79574 {
79575 __u32 h = x >> 32;
79576 if (h)
79577@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
79578 return fls(x);
79579 }
79580 #elif BITS_PER_LONG == 64
79581-static __always_inline int fls64(__u64 x)
79582+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79583 {
79584 if (x == 0)
79585 return 0;
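fls(), __fls() and fls64() deliberately rely on wrapping shift-and-subtract arithmetic, so they are annotated __intentional_overflow(-1): with the size_overflow gcc plugin enabled this tells the instrumentation to leave the function's arithmetic alone, and without the plugin the attribute expands to nothing (see the compiler.h hunk below). A sketch of an annotated bit scan under those assumed semantics; my_fls is an illustrative name:

static __always_inline int __intentional_overflow(-1) my_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
	if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
	if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
	if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
	if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
	return r;	/* the shifts wrap on purpose; the plugin must not trap them */
}
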
79586diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
79587index 1bfcfe5..e04c5c9 100644
79588--- a/include/asm-generic/cache.h
79589+++ b/include/asm-generic/cache.h
79590@@ -6,7 +6,7 @@
79591 * cache lines need to provide their own cache.h.
79592 */
79593
79594-#define L1_CACHE_SHIFT 5
79595-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
79596+#define L1_CACHE_SHIFT 5UL
79597+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
79598
79599 #endif /* __ASM_GENERIC_CACHE_H */
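The UL suffix keeps every expression built from L1_CACHE_BYTES in unsigned long instead of promoting through signed int, which matters once the constant feeds alignment masks and size arithmetic that the size_overflow plugin instruments. A small illustration; cache_align is an illustrative helper, not a kernel API:

#define L1_CACHE_SHIFT	5UL
#define L1_CACHE_BYTES	(1UL << L1_CACHE_SHIFT)

static unsigned long cache_align(unsigned long len)
{
	/* whole computation stays in unsigned long with the UL suffix */
	return (len + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
}
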
79600diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
79601index 0d68a1e..b74a761 100644
79602--- a/include/asm-generic/emergency-restart.h
79603+++ b/include/asm-generic/emergency-restart.h
79604@@ -1,7 +1,7 @@
79605 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
79606 #define _ASM_GENERIC_EMERGENCY_RESTART_H
79607
79608-static inline void machine_emergency_restart(void)
79609+static inline __noreturn void machine_emergency_restart(void)
79610 {
79611 machine_restart(NULL);
79612 }
79613diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
79614index 90f99c7..00ce236 100644
79615--- a/include/asm-generic/kmap_types.h
79616+++ b/include/asm-generic/kmap_types.h
79617@@ -2,9 +2,9 @@
79618 #define _ASM_GENERIC_KMAP_TYPES_H
79619
79620 #ifdef __WITH_KM_FENCE
79621-# define KM_TYPE_NR 41
79622+# define KM_TYPE_NR 42
79623 #else
79624-# define KM_TYPE_NR 20
79625+# define KM_TYPE_NR 21
79626 #endif
79627
79628 #endif
79629diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
79630index 9ceb03b..62b0b8f 100644
79631--- a/include/asm-generic/local.h
79632+++ b/include/asm-generic/local.h
79633@@ -23,24 +23,37 @@ typedef struct
79634 atomic_long_t a;
79635 } local_t;
79636
79637+typedef struct {
79638+ atomic_long_unchecked_t a;
79639+} local_unchecked_t;
79640+
79641 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
79642
79643 #define local_read(l) atomic_long_read(&(l)->a)
79644+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
79645 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
79646+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
79647 #define local_inc(l) atomic_long_inc(&(l)->a)
79648+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
79649 #define local_dec(l) atomic_long_dec(&(l)->a)
79650+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
79651 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
79652+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
79653 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
79654+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
79655
79656 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
79657 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
79658 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
79659 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
79660 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
79661+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
79662 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
79663 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
79664+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
79665
79666 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79667+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79668 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
79669 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
79670 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
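local_t gets the same split as the atomics: a local_unchecked_t twin plus _unchecked wrappers, and the hunk also adds the previously missing local_dec_return. Note that local_cmpxchg_unchecked maps to the plain atomic_long_cmpxchg, since cmpxchg carries no overflow instrumentation to bypass. A usage sketch in the usual per-CPU idiom; irq_events is an illustrative name:

static DEFINE_PER_CPU(local_unchecked_t, irq_events);

static void count_irq_event(void)
{
	/* wrapping event counter, called with preemption disabled
	 * (e.g. from IRQ context); opts out of refcount checking */
	local_inc_unchecked(this_cpu_ptr(&irq_events));
}
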
79671diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
79672index 725612b..9cc513a 100644
79673--- a/include/asm-generic/pgtable-nopmd.h
79674+++ b/include/asm-generic/pgtable-nopmd.h
79675@@ -1,14 +1,19 @@
79676 #ifndef _PGTABLE_NOPMD_H
79677 #define _PGTABLE_NOPMD_H
79678
79679-#ifndef __ASSEMBLY__
79680-
79681 #include <asm-generic/pgtable-nopud.h>
79682
79683-struct mm_struct;
79684-
79685 #define __PAGETABLE_PMD_FOLDED
79686
79687+#define PMD_SHIFT PUD_SHIFT
79688+#define PTRS_PER_PMD 1
79689+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
79690+#define PMD_MASK (~(PMD_SIZE-1))
79691+
79692+#ifndef __ASSEMBLY__
79693+
79694+struct mm_struct;
79695+
79696 /*
79697 * Having the pmd type consist of a pud gets the size right, and allows
79698 * us to conceptually access the pud entry that this pmd is folded into
79699@@ -16,11 +21,6 @@ struct mm_struct;
79700 */
79701 typedef struct { pud_t pud; } pmd_t;
79702
79703-#define PMD_SHIFT PUD_SHIFT
79704-#define PTRS_PER_PMD 1
79705-#define PMD_SIZE (1UL << PMD_SHIFT)
79706-#define PMD_MASK (~(PMD_SIZE-1))
79707-
79708 /*
79709 * The "pud_xxx()" functions here are trivial for a folded two-level
79710 * setup: the pmd is never bad, and a pmd always exists (as it's folded
79711diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
79712index 810431d..0ec4804f 100644
79713--- a/include/asm-generic/pgtable-nopud.h
79714+++ b/include/asm-generic/pgtable-nopud.h
79715@@ -1,10 +1,15 @@
79716 #ifndef _PGTABLE_NOPUD_H
79717 #define _PGTABLE_NOPUD_H
79718
79719-#ifndef __ASSEMBLY__
79720-
79721 #define __PAGETABLE_PUD_FOLDED
79722
79723+#define PUD_SHIFT PGDIR_SHIFT
79724+#define PTRS_PER_PUD 1
79725+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
79726+#define PUD_MASK (~(PUD_SIZE-1))
79727+
79728+#ifndef __ASSEMBLY__
79729+
79730 /*
79731 * Having the pud type consist of a pgd gets the size right, and allows
79732 * us to conceptually access the pgd entry that this pud is folded into
79733@@ -12,11 +17,6 @@
79734 */
79735 typedef struct { pgd_t pgd; } pud_t;
79736
79737-#define PUD_SHIFT PGDIR_SHIFT
79738-#define PTRS_PER_PUD 1
79739-#define PUD_SIZE (1UL << PUD_SHIFT)
79740-#define PUD_MASK (~(PUD_SIZE-1))
79741-
79742 /*
79743 * The "pgd_xxx()" functions here are trivial for a folded two-level
79744 * setup: the pud is never bad, and a pud always exists (as it's folded
79745@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
79746 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
79747
79748 #define pgd_populate(mm, pgd, pud) do { } while (0)
79749+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
79750 /*
79751 * (puds are folded into pgds so this doesn't get actually called,
79752 * but the define is needed for a generic inline function.)
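The pgtable-nopmd.h and pgtable-nopud.h hunks follow one idea: the PMD_*/PUD_* geometry macros are hoisted above the #ifndef __ASSEMBLY__ guard and switched from 1UL to _AC(1,UL), so assembler files can consume them too. _AC() (from include/uapi/linux/const.h) pastes the type suffix only when compiling C:

#ifdef __ASSEMBLY__
#define _AC(X,Y)	X		/* asm: plain constant */
#else
#define __AC(X,Y)	(X##Y)
#define _AC(X,Y)	__AC(X,Y)	/* C: 1UL, 1ULL, ... */
#endif

/* usable from both .c and .S files after this patch: */
#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
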
79753diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
79754index 177d597..2826237 100644
79755--- a/include/asm-generic/pgtable.h
79756+++ b/include/asm-generic/pgtable.h
79757@@ -839,6 +839,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
79758 }
79759 #endif /* CONFIG_NUMA_BALANCING */
79760
79761+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
79762+#ifdef CONFIG_PAX_KERNEXEC
79763+#error KERNEXEC requires pax_open_kernel
79764+#else
79765+static inline unsigned long pax_open_kernel(void) { return 0; }
79766+#endif
79767+#endif
79768+
79769+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
79770+#ifdef CONFIG_PAX_KERNEXEC
79771+#error KERNEXEC requires pax_close_kernel
79772+#else
79773+static inline unsigned long pax_close_kernel(void) { return 0; }
79774+#endif
79775+#endif
79776+
79777 #endif /* CONFIG_MMU */
79778
79779 #endif /* !__ASSEMBLY__ */
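The contract here: an architecture that supports KERNEXEC defines __HAVE_ARCH_PAX_OPEN_KERNEL and supplies real implementations; everyone else gets no-op stubs, and the #error makes it impossible to enable CONFIG_PAX_KERNEXEC without arch support. The typical call shape elsewhere in this patch looks roughly like the following sketch; some_setting and update_setting are illustrative names:

static int some_setting __read_only;

static void update_setting(int v)
{
	pax_open_kernel();	/* lift write protection on read-only data */
	some_setting = v;
	pax_close_kernel();	/* restore it */
}
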
79780diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
79781index 72d8803..cb9749c 100644
79782--- a/include/asm-generic/uaccess.h
79783+++ b/include/asm-generic/uaccess.h
79784@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
79785 return __clear_user(to, n);
79786 }
79787
79788+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
79789+#ifdef CONFIG_PAX_MEMORY_UDEREF
79790+#error UDEREF requires pax_open_userland
79791+#else
79792+static inline unsigned long pax_open_userland(void) { return 0; }
79793+#endif
79794+#endif
79795+
79796+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
79797+#ifdef CONFIG_PAX_MEMORY_UDEREF
79798+#error UDEREF requires pax_close_userland
79799+#else
79800+static inline unsigned long pax_close_userland(void) { return 0; }
79801+#endif
79802+#endif
79803+
79804 #endif /* __ASM_GENERIC_UACCESS_H */
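Same pattern as the KERNEXEC pair above, but for UDEREF: architectures that separate the userland address space bracket their legitimate user accesses with pax_open_userland()/pax_close_userland(), and the no-op stubs keep non-UDEREF builds compiling. Roughly, as a hedged sketch rather than any arch's real implementation:

static inline void sketch_user_access(void)
{
	pax_open_userland();	/* make userland temporarily accessible */
	/* the raw dereference of a __user pointer happens here,
	 * inside the arch's uaccess primitives */
	pax_close_userland();
}
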
79805diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
79806index bee5d68..8d362d1 100644
79807--- a/include/asm-generic/vmlinux.lds.h
79808+++ b/include/asm-generic/vmlinux.lds.h
79809@@ -234,6 +234,7 @@
79810 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
79811 VMLINUX_SYMBOL(__start_rodata) = .; \
79812 *(.rodata) *(.rodata.*) \
79813+ *(.data..read_only) \
79814 *(__vermagic) /* Kernel version magic */ \
79815 . = ALIGN(8); \
79816 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
79817@@ -726,17 +727,18 @@
79818 * section in the linker script will go there too. @phdr should have
79819 * a leading colon.
79820 *
79821- * Note that this macros defines __per_cpu_load as an absolute symbol.
79822+ * Note that this macro defines per_cpu_load as an absolute symbol.

79823 * If there is no need to put the percpu section at a predetermined
79824 * address, use PERCPU_SECTION.
79825 */
79826 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
79827- VMLINUX_SYMBOL(__per_cpu_load) = .; \
79828- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
79829+ per_cpu_load = .; \
79830+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
79831 - LOAD_OFFSET) { \
79832+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
79833 PERCPU_INPUT(cacheline) \
79834 } phdr \
79835- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
79836+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
79837
79838 /**
79839 * PERCPU_SECTION - define output section for percpu area, simple version
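Two independent changes share this linker-script hunk: the new .data..read_only input section is folded into the kernel's read-only output section, and PERCPU_VADDR is reworked so __per_cpu_load is computed relative to a local per_cpu_load symbol instead of being absolute, which matters once KERNEXEC relocates things. For the first part, a variable reaches the new section through the __read_only marker; on architectures that implement KERNEXEC it is presumably defined along these lines (a sketch, not quoted from the patch):

#define __read_only __attribute__((__section__(".data..read_only")))

/* lands in .data..read_only, which the rule above pulls into rodata: */
static int sysctl_enabled __read_only = 1;
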
79840diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
79841index 623a59c..1e79ab9 100644
79842--- a/include/crypto/algapi.h
79843+++ b/include/crypto/algapi.h
79844@@ -34,7 +34,7 @@ struct crypto_type {
79845 unsigned int maskclear;
79846 unsigned int maskset;
79847 unsigned int tfmsize;
79848-};
79849+} __do_const;
79850
79851 struct crypto_instance {
79852 struct crypto_alg alg;
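From here on, most hunks apply one recurring annotation: structs that are pure tables of function pointers get __do_const, so the constify plugin turns every instance const and lets them live in rodata, while structs that must be filled in at runtime either get __no_const or a *_no_const typedef escape hatch (as with drm_info_list just below). The shape of the pattern, with illustrative names:

struct foo_ops {
	int (*probe)(void);
} __do_const;				/* all instances constified */
typedef struct foo_ops __no_const foo_ops_no_const;

static int noop_probe(void) { return 0; }

static struct foo_ops default_ops = {	/* effectively rodata */
	.probe = noop_probe,
};

static foo_ops_no_const runtime_ops;	/* writable variant */

static void wire_up(void)
{
	runtime_ops.probe = noop_probe;
}
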
79853diff --git a/include/drm/drmP.h b/include/drm/drmP.h
79854index e1b2e8b..2697bd2 100644
79855--- a/include/drm/drmP.h
79856+++ b/include/drm/drmP.h
79857@@ -59,6 +59,7 @@
79858
79859 #include <asm/mman.h>
79860 #include <asm/pgalloc.h>
79861+#include <asm/local.h>
79862 #include <asm/uaccess.h>
79863
79864 #include <uapi/drm/drm.h>
79865@@ -223,10 +224,12 @@ void drm_err(const char *format, ...);
79866 * \param cmd command.
79867 * \param arg argument.
79868 */
79869-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
79870+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
79871+ struct drm_file *file_priv);
79872+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
79873 struct drm_file *file_priv);
79874
79875-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
79876+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
79877 unsigned long arg);
79878
79879 #define DRM_IOCTL_NR(n) _IOC_NR(n)
79880@@ -242,10 +245,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
79881 struct drm_ioctl_desc {
79882 unsigned int cmd;
79883 int flags;
79884- drm_ioctl_t *func;
79885+ drm_ioctl_t func;
79886 unsigned int cmd_drv;
79887 const char *name;
79888-};
79889+} __do_const;
79890
79891 /**
79892 * Creates a driver or general drm_ioctl_desc array entry for the given
79893@@ -629,7 +632,8 @@ struct drm_info_list {
79894 int (*show)(struct seq_file*, void*); /** show callback */
79895 u32 driver_features; /**< Required driver features for this entry */
79896 void *data;
79897-};
79898+} __do_const;
79899+typedef struct drm_info_list __no_const drm_info_list_no_const;
79900
79901 /**
79902 * debugfs node structure. This structure represents a debugfs file.
79903@@ -713,7 +717,7 @@ struct drm_device {
79904
79905 /** \name Usage Counters */
79906 /*@{ */
79907- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
79908+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
79909 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
79910 int buf_use; /**< Buffers in use -- cannot alloc */
79911 atomic_t buf_alloc; /**< Buffer allocation in progress */
79912diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
79913index 7adbb65..2a1eb1f 100644
79914--- a/include/drm/drm_crtc_helper.h
79915+++ b/include/drm/drm_crtc_helper.h
79916@@ -116,7 +116,7 @@ struct drm_encoder_helper_funcs {
79917 struct drm_connector *connector);
79918 /* disable encoder when not in use - more explicit than dpms off */
79919 void (*disable)(struct drm_encoder *encoder);
79920-};
79921+} __no_const;
79922
79923 /**
79924 * drm_connector_helper_funcs - helper operations for connectors
79925diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
79926index 180ad0e..53cdacf 100644
79927--- a/include/drm/i915_pciids.h
79928+++ b/include/drm/i915_pciids.h
79929@@ -37,7 +37,7 @@
79930 */
79931 #define INTEL_VGA_DEVICE(id, info) { \
79932 0x8086, id, \
79933- ~0, ~0, \
79934+ PCI_ANY_ID, PCI_ANY_ID, \
79935 0x030000, 0xff0000, \
79936 (unsigned long) info }
79937
79938diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
79939index 72dcbe8..8db58d7 100644
79940--- a/include/drm/ttm/ttm_memory.h
79941+++ b/include/drm/ttm/ttm_memory.h
79942@@ -48,7 +48,7 @@
79943
79944 struct ttm_mem_shrink {
79945 int (*do_shrink) (struct ttm_mem_shrink *);
79946-};
79947+} __no_const;
79948
79949 /**
79950 * struct ttm_mem_global - Global memory accounting structure.
79951diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
79952index 49a8284..9643967 100644
79953--- a/include/drm/ttm/ttm_page_alloc.h
79954+++ b/include/drm/ttm/ttm_page_alloc.h
79955@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
79956 */
79957 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
79958
79959+struct device;
79960 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
79961 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
79962
79963diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
79964index 4b840e8..155d235 100644
79965--- a/include/keys/asymmetric-subtype.h
79966+++ b/include/keys/asymmetric-subtype.h
79967@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
79968 /* Verify the signature on a key of this subtype (optional) */
79969 int (*verify_signature)(const struct key *key,
79970 const struct public_key_signature *sig);
79971-};
79972+} __do_const;
79973
79974 /**
79975 * asymmetric_key_subtype - Get the subtype from an asymmetric key
79976diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
79977index c1da539..1dcec55 100644
79978--- a/include/linux/atmdev.h
79979+++ b/include/linux/atmdev.h
79980@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
79981 #endif
79982
79983 struct k_atm_aal_stats {
79984-#define __HANDLE_ITEM(i) atomic_t i
79985+#define __HANDLE_ITEM(i) atomic_unchecked_t i
79986 __AAL_STAT_ITEMS
79987 #undef __HANDLE_ITEM
79988 };
79989@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
79990 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
79991 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
79992 struct module *owner;
79993-};
79994+} __do_const;
79995
79996 struct atmphy_ops {
79997 int (*start)(struct atm_dev *dev);
79998diff --git a/include/linux/atomic.h b/include/linux/atomic.h
79999index 5b08a85..60922fb 100644
80000--- a/include/linux/atomic.h
80001+++ b/include/linux/atomic.h
80002@@ -12,7 +12,7 @@
80003 * Atomically adds @a to @v, so long as @v was not already @u.
80004 * Returns non-zero if @v was not @u, and zero otherwise.
80005 */
80006-static inline int atomic_add_unless(atomic_t *v, int a, int u)
80007+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
80008 {
80009 return __atomic_add_unless(v, a, u) != u;
80010 }
80011diff --git a/include/linux/audit.h b/include/linux/audit.h
80012index af84234..4177a40 100644
80013--- a/include/linux/audit.h
80014+++ b/include/linux/audit.h
80015@@ -225,7 +225,7 @@ static inline void audit_ptrace(struct task_struct *t)
80016 extern unsigned int audit_serial(void);
80017 extern int auditsc_get_stamp(struct audit_context *ctx,
80018 struct timespec *t, unsigned int *serial);
80019-extern int audit_set_loginuid(kuid_t loginuid);
80020+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
80021
80022 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
80023 {
80024diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
80025index 576e463..28fd926 100644
80026--- a/include/linux/binfmts.h
80027+++ b/include/linux/binfmts.h
80028@@ -44,7 +44,7 @@ struct linux_binprm {
80029 unsigned interp_flags;
80030 unsigned interp_data;
80031 unsigned long loader, exec;
80032-};
80033+} __randomize_layout;
80034
80035 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
80036 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
80037@@ -77,8 +77,10 @@ struct linux_binfmt {
80038 int (*load_binary)(struct linux_binprm *);
80039 int (*load_shlib)(struct file *);
80040 int (*core_dump)(struct coredump_params *cprm);
80041+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
80042+ void (*handle_mmap)(struct file *);
80043 unsigned long min_coredump; /* minimal dump size */
80044-};
80045+} __do_const __randomize_layout;
80046
80047 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
80048
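Two grsecurity features meet in this header: linux_binprm and linux_binfmt are tagged __randomize_layout so the randstruct plugin can permute their field order per build, and linux_binfmt gains handle_mprotect/handle_mmap hooks that let a binary-format handler intervene when its mappings change protection or when its file is mapped. Layout randomization needs only the one tag; the caveat is that nothing outside the build may assume field offsets, e.g. (illustrative struct):

struct creds_blob {
	void *key;
	unsigned long serial;
	int usage;
} __randomize_layout;	/* member order is a per-build permutation;
			 * offsetof() results are not a stable ABI */
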
80049diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
80050index 202e403..16e6617 100644
80051--- a/include/linux/bitmap.h
80052+++ b/include/linux/bitmap.h
80053@@ -302,7 +302,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
80054 return __bitmap_full(src, nbits);
80055 }
80056
80057-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
80058+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
80059 {
80060 if (small_const_nbits(nbits))
80061 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
80062diff --git a/include/linux/bitops.h b/include/linux/bitops.h
80063index 5d858e0..336c1d9 100644
80064--- a/include/linux/bitops.h
80065+++ b/include/linux/bitops.h
80066@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
80067 * @word: value to rotate
80068 * @shift: bits to roll
80069 */
80070-static inline __u32 rol32(__u32 word, unsigned int shift)
80071+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
80072 {
80073 return (word << shift) | (word >> (32 - shift));
80074 }
80075@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
80076 * @word: value to rotate
80077 * @shift: bits to roll
80078 */
80079-static inline __u32 ror32(__u32 word, unsigned int shift)
80080+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
80081 {
80082 return (word >> shift) | (word << (32 - shift));
80083 }
80084@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
80085 return (__s32)(value << shift) >> shift;
80086 }
80087
80088-static inline unsigned fls_long(unsigned long l)
80089+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
80090 {
80091 if (sizeof(l) == 4)
80092 return fls(l);
80093diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
80094index 92f4b4b..483d537 100644
80095--- a/include/linux/blkdev.h
80096+++ b/include/linux/blkdev.h
80097@@ -1613,7 +1613,7 @@ struct block_device_operations {
80098 /* this callback is with swap_lock and sometimes page table lock held */
80099 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
80100 struct module *owner;
80101-};
80102+} __do_const;
80103
80104 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
80105 unsigned long);
80106diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
80107index afc1343..9735539 100644
80108--- a/include/linux/blktrace_api.h
80109+++ b/include/linux/blktrace_api.h
80110@@ -25,7 +25,7 @@ struct blk_trace {
80111 struct dentry *dropped_file;
80112 struct dentry *msg_file;
80113 struct list_head running_list;
80114- atomic_t dropped;
80115+ atomic_unchecked_t dropped;
80116 };
80117
80118 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
80119diff --git a/include/linux/cache.h b/include/linux/cache.h
80120index 17e7e82..1d7da26 100644
80121--- a/include/linux/cache.h
80122+++ b/include/linux/cache.h
80123@@ -16,6 +16,14 @@
80124 #define __read_mostly
80125 #endif
80126
80127+#ifndef __read_only
80128+#ifdef CONFIG_PAX_KERNEXEC
80129+#error KERNEXEC requires __read_only
80130+#else
80131+#define __read_only __read_mostly
80132+#endif
80133+#endif
80134+
80135 #ifndef ____cacheline_aligned
80136 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
80137 #endif
80138diff --git a/include/linux/capability.h b/include/linux/capability.h
80139index aa93e5e..985a1b0 100644
80140--- a/include/linux/capability.h
80141+++ b/include/linux/capability.h
80142@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
80143 extern bool capable(int cap);
80144 extern bool ns_capable(struct user_namespace *ns, int cap);
80145 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
80146+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
80147 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
80148+extern bool capable_nolog(int cap);
80149+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
80150
80151 /* audit system wants to get cap info from files as well */
80152 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
80153
80154+extern int is_privileged_binary(const struct dentry *dentry);
80155+
80156 #endif /* !_LINUX_CAPABILITY_H */
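The _nolog capability variants behave like capable()/ns_capable() but skip grsecurity's capability-use logging; they are meant for speculative checks that would otherwise flood the log, while is_privileged_binary() reports whether a dentry refers to a privileged (setuid/setgid or file-capability) binary. A hedged usage sketch; the helper name is illustrative:

static bool may_show_extra_info(void)
{
	/* advisory probe: a denied CAP_SYS_ADMIN here is not logged */
	return capable_nolog(CAP_SYS_ADMIN);
}
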
80157diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
80158index 8609d57..86e4d79 100644
80159--- a/include/linux/cdrom.h
80160+++ b/include/linux/cdrom.h
80161@@ -87,7 +87,6 @@ struct cdrom_device_ops {
80162
80163 /* driver specifications */
80164 const int capability; /* capability flags */
80165- int n_minors; /* number of active minor devices */
80166 /* handle uniform packets for scsi type devices (scsi,atapi) */
80167 int (*generic_packet) (struct cdrom_device_info *,
80168 struct packet_command *);
80169diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
80170index 4ce9056..86caac6 100644
80171--- a/include/linux/cleancache.h
80172+++ b/include/linux/cleancache.h
80173@@ -31,7 +31,7 @@ struct cleancache_ops {
80174 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
80175 void (*invalidate_inode)(int, struct cleancache_filekey);
80176 void (*invalidate_fs)(int);
80177-};
80178+} __no_const;
80179
80180 extern struct cleancache_ops *
80181 cleancache_register_ops(struct cleancache_ops *ops);
80182diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
80183index d936409..ce9f842 100644
80184--- a/include/linux/clk-provider.h
80185+++ b/include/linux/clk-provider.h
80186@@ -191,6 +191,7 @@ struct clk_ops {
80187 void (*init)(struct clk_hw *hw);
80188 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
80189 };
80190+typedef struct clk_ops __no_const clk_ops_no_const;
80191
80192 /**
80193 * struct clk_init_data - holds init data that's common to all clocks and is
80194diff --git a/include/linux/compat.h b/include/linux/compat.h
80195index 7450ca2..a824b81 100644
80196--- a/include/linux/compat.h
80197+++ b/include/linux/compat.h
80198@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
80199 compat_size_t __user *len_ptr);
80200
80201 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
80202-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
80203+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
80204 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
80205 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
80206 compat_ssize_t msgsz, int msgflg);
80207@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
80208 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
80209 compat_ulong_t addr, compat_ulong_t data);
80210 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
80211- compat_long_t addr, compat_long_t data);
80212+ compat_ulong_t addr, compat_ulong_t data);
80213
80214 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
80215 /*
80216diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
80217index d1a5582..4424efa 100644
80218--- a/include/linux/compiler-gcc4.h
80219+++ b/include/linux/compiler-gcc4.h
80220@@ -39,9 +39,34 @@
80221 # define __compiletime_warning(message) __attribute__((warning(message)))
80222 # define __compiletime_error(message) __attribute__((error(message)))
80223 #endif /* __CHECKER__ */
80224+
80225+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80226+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80227+#define __bos0(ptr) __bos((ptr), 0)
80228+#define __bos1(ptr) __bos((ptr), 1)
80229 #endif /* GCC_VERSION >= 40300 */
80230
80231 #if GCC_VERSION >= 40500
80232+
80233+#ifdef RANDSTRUCT_PLUGIN
80234+#define __randomize_layout __attribute__((randomize_layout))
80235+#define __no_randomize_layout __attribute__((no_randomize_layout))
80236+#endif
80237+
80238+#ifdef CONSTIFY_PLUGIN
80239+#define __no_const __attribute__((no_const))
80240+#define __do_const __attribute__((do_const))
80241+#endif
80242+
80243+#ifdef SIZE_OVERFLOW_PLUGIN
80244+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80245+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80246+#endif
80247+
80248+#ifdef LATENT_ENTROPY_PLUGIN
80249+#define __latent_entropy __attribute__((latent_entropy))
80250+#endif
80251+
80252 /*
80253 * Mark a position in code as unreachable. This can be used to
80254 * suppress control flow warnings after asm blocks that transfer
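Besides wiring up the per-plugin attributes (randomize_layout, constify, size_overflow, latent_entropy), the gcc-4 block defines __alloc_size and the __bos*/__builtin_object_size helpers that the patch's fortified copy routines build on. A sketch of what __bos feeds into, assuming LP64 where size_t is unsigned long; checked_copy is illustrative, not the kernel's copy_*_user:

#define __bos(ptr, arg)	__builtin_object_size((ptr), (arg))
#define __bos0(ptr)	__bos((ptr), 0)

static inline void checked_copy(void *dst, const void *src, unsigned long n)
{
	/* (size_t)-1 means "object size unknown at compile time" */
	if (__bos0(dst) != (unsigned long)-1 && n > __bos0(dst))
		__builtin_trap();
	__builtin_memcpy(dst, src, n);
}
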
80255diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
80256index c8c5659..d09f2ad 100644
80257--- a/include/linux/compiler-gcc5.h
80258+++ b/include/linux/compiler-gcc5.h
80259@@ -28,6 +28,28 @@
80260 # define __compiletime_error(message) __attribute__((error(message)))
80261 #endif /* __CHECKER__ */
80262
80263+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80264+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80265+#define __bos0(ptr) __bos((ptr), 0)
80266+#define __bos1(ptr) __bos((ptr), 1)
80267+
80268+#ifdef CONSTIFY_PLUGIN
80269+#error not yet
80270+#define __no_const __attribute__((no_const))
80271+#define __do_const __attribute__((do_const))
80272+#endif
80273+
80274+#ifdef SIZE_OVERFLOW_PLUGIN
80275+#error not yet
80276+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80277+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80278+#endif
80279+
80280+#ifdef LATENT_ENTROPY_PLUGIN
80281+#error not yet
80282+#define __latent_entropy __attribute__((latent_entropy))
80283+#endif
80284+
80285 /*
80286 * Mark a position in code as unreachable. This can be used to
80287 * suppress control flow warnings after asm blocks that transfer
80288diff --git a/include/linux/compiler.h b/include/linux/compiler.h
80289index fa6a314..752a6ef 100644
80290--- a/include/linux/compiler.h
80291+++ b/include/linux/compiler.h
80292@@ -5,11 +5,14 @@
80293
80294 #ifdef __CHECKER__
80295 # define __user __attribute__((noderef, address_space(1)))
80296+# define __force_user __force __user
80297 # define __kernel __attribute__((address_space(0)))
80298+# define __force_kernel __force __kernel
80299 # define __safe __attribute__((safe))
80300 # define __force __attribute__((force))
80301 # define __nocast __attribute__((nocast))
80302 # define __iomem __attribute__((noderef, address_space(2)))
80303+# define __force_iomem __force __iomem
80304 # define __must_hold(x) __attribute__((context(x,1,1)))
80305 # define __acquires(x) __attribute__((context(x,0,1)))
80306 # define __releases(x) __attribute__((context(x,1,0)))
80307@@ -17,20 +20,37 @@
80308 # define __release(x) __context__(x,-1)
80309 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
80310 # define __percpu __attribute__((noderef, address_space(3)))
80311+# define __force_percpu __force __percpu
80312 #ifdef CONFIG_SPARSE_RCU_POINTER
80313 # define __rcu __attribute__((noderef, address_space(4)))
80314+# define __force_rcu __force __rcu
80315 #else
80316 # define __rcu
80317+# define __force_rcu
80318 #endif
80319 extern void __chk_user_ptr(const volatile void __user *);
80320 extern void __chk_io_ptr(const volatile void __iomem *);
80321 #else
80322-# define __user
80323-# define __kernel
80324+# ifdef CHECKER_PLUGIN
80325+//# define __user
80326+//# define __force_user
80327+//# define __kernel
80328+//# define __force_kernel
80329+# else
80330+# ifdef STRUCTLEAK_PLUGIN
80331+# define __user __attribute__((user))
80332+# else
80333+# define __user
80334+# endif
80335+# define __force_user
80336+# define __kernel
80337+# define __force_kernel
80338+# endif
80339 # define __safe
80340 # define __force
80341 # define __nocast
80342 # define __iomem
80343+# define __force_iomem
80344 # define __chk_user_ptr(x) (void)0
80345 # define __chk_io_ptr(x) (void)0
80346 # define __builtin_warning(x, y...) (1)
80347@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
80348 # define __release(x) (void)0
80349 # define __cond_lock(x,c) (c)
80350 # define __percpu
80351+# define __force_percpu
80352 # define __rcu
80353+# define __force_rcu
80354 #endif
80355
80356 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
80357@@ -201,32 +223,32 @@ static __always_inline void data_access_exceeds_word_size(void)
80358 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
80359 {
80360 switch (size) {
80361- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
80362- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
80363- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
80364+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
80365+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
80366+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
80367 #ifdef CONFIG_64BIT
80368- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
80369+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
80370 #endif
80371 default:
80372 barrier();
80373- __builtin_memcpy((void *)res, (const void *)p, size);
80374+ __builtin_memcpy(res, (const void *)p, size);
80375 data_access_exceeds_word_size();
80376 barrier();
80377 }
80378 }
80379
80380-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
80381+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
80382 {
80383 switch (size) {
80384- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
80385- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
80386- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
80387+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
80388+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
80389+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
80390 #ifdef CONFIG_64BIT
80391- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
80392+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
80393 #endif
80394 default:
80395 barrier();
80396- __builtin_memcpy((void *)p, (const void *)res, size);
80397+ __builtin_memcpy((void *)p, res, size);
80398 data_access_exceeds_word_size();
80399 barrier();
80400 }
80401@@ -360,6 +382,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80402 # define __attribute_const__ /* unimplemented */
80403 #endif
80404
80405+#ifndef __randomize_layout
80406+# define __randomize_layout
80407+#endif
80408+
80409+#ifndef __no_randomize_layout
80410+# define __no_randomize_layout
80411+#endif
80412+
80413+#ifndef __no_const
80414+# define __no_const
80415+#endif
80416+
80417+#ifndef __do_const
80418+# define __do_const
80419+#endif
80420+
80421+#ifndef __size_overflow
80422+# define __size_overflow(...)
80423+#endif
80424+
80425+#ifndef __intentional_overflow
80426+# define __intentional_overflow(...)
80427+#endif
80428+
80429+#ifndef __latent_entropy
80430+# define __latent_entropy
80431+#endif
80432+
80433 /*
80434 * Tell gcc if a function is cold. The compiler will assume any path
80435 * directly leading to the call is unlikely.
80436@@ -369,6 +419,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80437 #define __cold
80438 #endif
80439
80440+#ifndef __alloc_size
80441+#define __alloc_size(...)
80442+#endif
80443+
80444+#ifndef __bos
80445+#define __bos(ptr, arg)
80446+#endif
80447+
80448+#ifndef __bos0
80449+#define __bos0(ptr)
80450+#endif
80451+
80452+#ifndef __bos1
80453+#define __bos1(ptr)
80454+#endif
80455+
80456 /* Simple shorthand for a section definition */
80457 #ifndef __section
80458 # define __section(S) __attribute__ ((__section__(#S)))
80459@@ -462,8 +528,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80460 */
80461 #define __ACCESS_ONCE(x) ({ \
80462 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
80463- (volatile typeof(x) *)&(x); })
80464+ (volatile const typeof(x) *)&(x); })
80465 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
80466+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
80467
80468 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
80469 #ifdef CONFIG_KPROBES
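The net effect of the __ACCESS_ONCE change: ACCESS_ONCE() now yields a const-qualified volatile lvalue, so it still reads fine but any write through it becomes a compile error, and intentional writes must switch to the new ACCESS_ONCE_RW() (as the smp_store_release hunk earlier in this patch did). A tiny demonstration of the split:

static int shared;

static void demo(void)
{
	int v = ACCESS_ONCE(shared);	/* reads: unchanged */

	ACCESS_ONCE_RW(shared) = v + 1;	/* writes: explicit RW form */
	/* ACCESS_ONCE(shared) = 0;	   no longer compiles:
	 *				   assignment to a const lvalue */
}
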
80470diff --git a/include/linux/completion.h b/include/linux/completion.h
80471index 5d5aaae..0ea9b84 100644
80472--- a/include/linux/completion.h
80473+++ b/include/linux/completion.h
80474@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
80475
80476 extern void wait_for_completion(struct completion *);
80477 extern void wait_for_completion_io(struct completion *);
80478-extern int wait_for_completion_interruptible(struct completion *x);
80479-extern int wait_for_completion_killable(struct completion *x);
80480+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
80481+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
80482 extern unsigned long wait_for_completion_timeout(struct completion *x,
80483- unsigned long timeout);
80484+ unsigned long timeout) __intentional_overflow(-1);
80485 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
80486- unsigned long timeout);
80487+ unsigned long timeout) __intentional_overflow(-1);
80488 extern long wait_for_completion_interruptible_timeout(
80489- struct completion *x, unsigned long timeout);
80490+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80491 extern long wait_for_completion_killable_timeout(
80492- struct completion *x, unsigned long timeout);
80493+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80494 extern bool try_wait_for_completion(struct completion *x);
80495 extern bool completion_done(struct completion *x);
80496
80497diff --git a/include/linux/configfs.h b/include/linux/configfs.h
80498index 34025df..d94bbbc 100644
80499--- a/include/linux/configfs.h
80500+++ b/include/linux/configfs.h
80501@@ -125,7 +125,7 @@ struct configfs_attribute {
80502 const char *ca_name;
80503 struct module *ca_owner;
80504 umode_t ca_mode;
80505-};
80506+} __do_const;
80507
80508 /*
80509 * Users often need to create attribute structures for their configurable
80510diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
80511index 4d078ce..c970f4d 100644
80512--- a/include/linux/cpufreq.h
80513+++ b/include/linux/cpufreq.h
80514@@ -206,6 +206,7 @@ struct global_attr {
80515 ssize_t (*store)(struct kobject *a, struct attribute *b,
80516 const char *c, size_t count);
80517 };
80518+typedef struct global_attr __no_const global_attr_no_const;
80519
80520 #define define_one_global_ro(_name) \
80521 static struct global_attr _name = \
80522@@ -277,7 +278,7 @@ struct cpufreq_driver {
80523 bool boost_supported;
80524 bool boost_enabled;
80525 int (*set_boost)(int state);
80526-};
80527+} __do_const;
80528
80529 /* flags */
80530 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
80531diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
80532index ab70f3b..3ef7771 100644
80533--- a/include/linux/cpuidle.h
80534+++ b/include/linux/cpuidle.h
80535@@ -50,7 +50,8 @@ struct cpuidle_state {
80536 int index);
80537
80538 int (*enter_dead) (struct cpuidle_device *dev, int index);
80539-};
80540+} __do_const;
80541+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
80542
80543 /* Idle State Flags */
80544 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
80545@@ -206,7 +207,7 @@ struct cpuidle_governor {
80546 void (*reflect) (struct cpuidle_device *dev, int index);
80547
80548 struct module *owner;
80549-};
80550+} __do_const;
80551
80552 #ifdef CONFIG_CPU_IDLE
80553 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
80554diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
80555index b950e9d..63810aa 100644
80556--- a/include/linux/cpumask.h
80557+++ b/include/linux/cpumask.h
80558@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80559 }
80560
80561 /* Valid inputs for n are -1 and 0. */
80562-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80563+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80564 {
80565 return n+1;
80566 }
80567
80568-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80569+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80570 {
80571 return n+1;
80572 }
80573
80574-static inline unsigned int cpumask_next_and(int n,
80575+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
80576 const struct cpumask *srcp,
80577 const struct cpumask *andp)
80578 {
80579@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80580 *
80581 * Returns >= nr_cpu_ids if no further cpus set.
80582 */
80583-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80584+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80585 {
80586 /* -1 is a legal arg here. */
80587 if (n != -1)
80588@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80589 *
80590 * Returns >= nr_cpu_ids if no further cpus unset.
80591 */
80592-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80593+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80594 {
80595 /* -1 is a legal arg here. */
80596 if (n != -1)
80597@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80598 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
80599 }
80600
80601-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
80602+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
80603 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
80604 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
80605
80606@@ -464,7 +464,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
80607 * cpumask_weight - Count of bits in *srcp
80608 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
80609 */
80610-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
80611+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
80612 {
80613 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
80614 }
80615diff --git a/include/linux/cred.h b/include/linux/cred.h
80616index 2fb2ca2..d6a3340 100644
80617--- a/include/linux/cred.h
80618+++ b/include/linux/cred.h
80619@@ -35,7 +35,7 @@ struct group_info {
80620 int nblocks;
80621 kgid_t small_block[NGROUPS_SMALL];
80622 kgid_t *blocks[0];
80623-};
80624+} __randomize_layout;
80625
80626 /**
80627 * get_group_info - Get a reference to a group info structure
80628@@ -137,7 +137,7 @@ struct cred {
80629 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
80630 struct group_info *group_info; /* supplementary groups for euid/fsgid */
80631 struct rcu_head rcu; /* RCU deletion hook */
80632-};
80633+} __randomize_layout;
80634
80635 extern void __put_cred(struct cred *);
80636 extern void exit_creds(struct task_struct *);
80637@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
80638 static inline void validate_process_creds(void)
80639 {
80640 }
80641+static inline void validate_task_creds(struct task_struct *task)
80642+{
80643+}
80644 #endif
80645
80646 /**
80647@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
80648
80649 #define task_uid(task) (task_cred_xxx((task), uid))
80650 #define task_euid(task) (task_cred_xxx((task), euid))
80651+#define task_securebits(task) (task_cred_xxx((task), securebits))
80652
80653 #define current_cred_xxx(xxx) \
80654 ({ \
80655diff --git a/include/linux/crypto.h b/include/linux/crypto.h
80656index 9c8776d..8c526c2 100644
80657--- a/include/linux/crypto.h
80658+++ b/include/linux/crypto.h
80659@@ -626,7 +626,7 @@ struct cipher_tfm {
80660 const u8 *key, unsigned int keylen);
80661 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80662 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80663-};
80664+} __no_const;
80665
80666 struct hash_tfm {
80667 int (*init)(struct hash_desc *desc);
80668@@ -647,13 +647,13 @@ struct compress_tfm {
80669 int (*cot_decompress)(struct crypto_tfm *tfm,
80670 const u8 *src, unsigned int slen,
80671 u8 *dst, unsigned int *dlen);
80672-};
80673+} __no_const;
80674
80675 struct rng_tfm {
80676 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
80677 unsigned int dlen);
80678 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
80679-};
80680+} __no_const;
80681
80682 #define crt_ablkcipher crt_u.ablkcipher
80683 #define crt_aead crt_u.aead
80684diff --git a/include/linux/ctype.h b/include/linux/ctype.h
80685index 653589e..4ef254a 100644
80686--- a/include/linux/ctype.h
80687+++ b/include/linux/ctype.h
80688@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
80689 * Fast implementation of tolower() for internal usage. Do not use in your
80690 * code.
80691 */
80692-static inline char _tolower(const char c)
80693+static inline unsigned char _tolower(const unsigned char c)
80694 {
80695 return c | 0x20;
80696 }
80697diff --git a/include/linux/dcache.h b/include/linux/dcache.h
80698index 5a81398..6bbee30 100644
80699--- a/include/linux/dcache.h
80700+++ b/include/linux/dcache.h
80701@@ -123,6 +123,9 @@ struct dentry {
80702 unsigned long d_time; /* used by d_revalidate */
80703 void *d_fsdata; /* fs-specific data */
80704
80705+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
80706+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
80707+#endif
80708 struct list_head d_lru; /* LRU list */
80709 struct list_head d_child; /* child of parent list */
80710 struct list_head d_subdirs; /* our children */
80711@@ -133,7 +136,7 @@ struct dentry {
80712 struct hlist_node d_alias; /* inode alias list */
80713 struct rcu_head d_rcu;
80714 } d_u;
80715-};
80716+} __randomize_layout;
80717
80718 /*
80719 * dentry->d_lock spinlock nesting subclasses:
80720diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
80721index 7925bf0..d5143d2 100644
80722--- a/include/linux/decompress/mm.h
80723+++ b/include/linux/decompress/mm.h
80724@@ -77,7 +77,7 @@ static void free(void *where)
80725 * warnings when not needed (indeed large_malloc / large_free are not
80726 * needed by inflate */
80727
80728-#define malloc(a) kmalloc(a, GFP_KERNEL)
80729+#define malloc(a) kmalloc((a), GFP_KERNEL)
80730 #define free(a) kfree(a)
80731
80732 #define large_malloc(a) vmalloc(a)
80733diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
80734index ce447f0..83c66bd 100644
80735--- a/include/linux/devfreq.h
80736+++ b/include/linux/devfreq.h
80737@@ -114,7 +114,7 @@ struct devfreq_governor {
80738 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
80739 int (*event_handler)(struct devfreq *devfreq,
80740 unsigned int event, void *data);
80741-};
80742+} __do_const;
80743
80744 /**
80745 * struct devfreq - Device devfreq structure
80746diff --git a/include/linux/device.h b/include/linux/device.h
80747index fb50673..ec0b35b 100644
80748--- a/include/linux/device.h
80749+++ b/include/linux/device.h
80750@@ -311,7 +311,7 @@ struct subsys_interface {
80751 struct list_head node;
80752 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
80753 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
80754-};
80755+} __do_const;
80756
80757 int subsys_interface_register(struct subsys_interface *sif);
80758 void subsys_interface_unregister(struct subsys_interface *sif);
80759@@ -507,7 +507,7 @@ struct device_type {
80760 void (*release)(struct device *dev);
80761
80762 const struct dev_pm_ops *pm;
80763-};
80764+} __do_const;
80765
80766 /* interface for exporting device attributes */
80767 struct device_attribute {
80768@@ -517,11 +517,12 @@ struct device_attribute {
80769 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
80770 const char *buf, size_t count);
80771 };
80772+typedef struct device_attribute __no_const device_attribute_no_const;
80773
80774 struct dev_ext_attribute {
80775 struct device_attribute attr;
80776 void *var;
80777-};
80778+} __do_const;
80779
80780 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
80781 char *buf);
80782diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
80783index c3007cb..43efc8c 100644
80784--- a/include/linux/dma-mapping.h
80785+++ b/include/linux/dma-mapping.h
80786@@ -60,7 +60,7 @@ struct dma_map_ops {
80787 u64 (*get_required_mask)(struct device *dev);
80788 #endif
80789 int is_phys;
80790-};
80791+} __do_const;
80792
80793 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
80794
80795diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
80796index 40cd75e..38572a9 100644
80797--- a/include/linux/dmaengine.h
80798+++ b/include/linux/dmaengine.h
80799@@ -1137,9 +1137,9 @@ struct dma_pinned_list {
80800 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
80801 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
80802
80803-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80804+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80805 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
80806-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80807+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80808 struct dma_pinned_list *pinned_list, struct page *page,
80809 unsigned int offset, size_t len);
80810
80811diff --git a/include/linux/efi.h b/include/linux/efi.h
80812index 0238d61..34a758f 100644
80813--- a/include/linux/efi.h
80814+++ b/include/linux/efi.h
80815@@ -1054,6 +1054,7 @@ struct efivar_operations {
80816 efi_set_variable_nonblocking_t *set_variable_nonblocking;
80817 efi_query_variable_store_t *query_variable_store;
80818 };
80819+typedef struct efivar_operations __no_const efivar_operations_no_const;
80820
80821 struct efivars {
80822 /*
80823diff --git a/include/linux/elf.h b/include/linux/elf.h
80824index 20fa8d8..3d0dd18 100644
80825--- a/include/linux/elf.h
80826+++ b/include/linux/elf.h
80827@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
80828 #define elf_note elf32_note
80829 #define elf_addr_t Elf32_Off
80830 #define Elf_Half Elf32_Half
80831+#define elf_dyn Elf32_Dyn
80832
80833 #else
80834
80835@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
80836 #define elf_note elf64_note
80837 #define elf_addr_t Elf64_Off
80838 #define Elf_Half Elf64_Half
80839+#define elf_dyn Elf64_Dyn
80840
80841 #endif
80842
80843diff --git a/include/linux/err.h b/include/linux/err.h
80844index a729120..6ede2c9 100644
80845--- a/include/linux/err.h
80846+++ b/include/linux/err.h
80847@@ -20,12 +20,12 @@
80848
80849 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
80850
80851-static inline void * __must_check ERR_PTR(long error)
80852+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
80853 {
80854 return (void *) error;
80855 }
80856
80857-static inline long __must_check PTR_ERR(__force const void *ptr)
80858+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
80859 {
80860 return (long) ptr;
80861 }
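ERR_PTR()/PTR_ERR() deliberately round-trip negative errno values through a pointer, a conversion the PaX size_overflow plugin would otherwise report; __intentional_overflow(-1) whitelists the whole function. A user-space model of the idiom being whitelisted (MAX_ERRNO and the casts mirror err.h; lookup() and its payload are scaffolding for illustration):

#include <stdio.h>

#define MAX_ERRNO       4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)  { return IS_ERR_VALUE(ptr); }

static void *lookup(int fail)
{
        /* the last page of the address space encodes -errno values */
        return fail ? ERR_PTR(-2 /* ENOENT */) : (void *)"payload";
}

int main(void)
{
        void *p = lookup(1);

        if (IS_ERR(p))
                printf("lookup failed: %ld\n", PTR_ERR(p));
        return 0;
}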
80862diff --git a/include/linux/extcon.h b/include/linux/extcon.h
80863index 36f49c4..a2a1f4c 100644
80864--- a/include/linux/extcon.h
80865+++ b/include/linux/extcon.h
80866@@ -135,7 +135,7 @@ struct extcon_dev {
80867 /* /sys/class/extcon/.../mutually_exclusive/... */
80868 struct attribute_group attr_g_muex;
80869 struct attribute **attrs_muex;
80870- struct device_attribute *d_attrs_muex;
80871+ device_attribute_no_const *d_attrs_muex;
80872 };
80873
80874 /**
80875diff --git a/include/linux/fb.h b/include/linux/fb.h
80876index 09bb7a1..d98870a 100644
80877--- a/include/linux/fb.h
80878+++ b/include/linux/fb.h
80879@@ -305,7 +305,7 @@ struct fb_ops {
80880 /* called at KDB enter and leave time to prepare the console */
80881 int (*fb_debug_enter)(struct fb_info *info);
80882 int (*fb_debug_leave)(struct fb_info *info);
80883-};
80884+} __do_const;
80885
80886 #ifdef CONFIG_FB_TILEBLITTING
80887 #define FB_TILE_CURSOR_NONE 0
80888diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
80889index 230f87b..1fd0485 100644
80890--- a/include/linux/fdtable.h
80891+++ b/include/linux/fdtable.h
80892@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
80893 void put_files_struct(struct files_struct *fs);
80894 void reset_files_struct(struct files_struct *);
80895 int unshare_files(struct files_struct **);
80896-struct files_struct *dup_fd(struct files_struct *, int *);
80897+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
80898 void do_close_on_exec(struct files_struct *);
80899 int iterate_fd(struct files_struct *, unsigned,
80900 int (*)(const void *, struct file *, unsigned),
80901diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
80902index 8293262..2b3b8bd 100644
80903--- a/include/linux/frontswap.h
80904+++ b/include/linux/frontswap.h
80905@@ -11,7 +11,7 @@ struct frontswap_ops {
80906 int (*load)(unsigned, pgoff_t, struct page *);
80907 void (*invalidate_page)(unsigned, pgoff_t);
80908 void (*invalidate_area)(unsigned);
80909-};
80910+} __no_const;
80911
80912 extern bool frontswap_enabled;
80913 extern struct frontswap_ops *
80914diff --git a/include/linux/fs.h b/include/linux/fs.h
80915index 42efe13..72d42ee 100644
80916--- a/include/linux/fs.h
80917+++ b/include/linux/fs.h
80918@@ -413,7 +413,7 @@ struct address_space {
80919 spinlock_t private_lock; /* for use by the address_space */
80920 struct list_head private_list; /* ditto */
80921 void *private_data; /* ditto */
80922-} __attribute__((aligned(sizeof(long))));
80923+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
80924 /*
80925 * On most architectures that alignment is already the case; but
80926 * must be enforced here for CRIS, to let the least significant bit
80927@@ -456,7 +456,7 @@ struct block_device {
80928 int bd_fsfreeze_count;
80929 /* Mutex for freeze */
80930 struct mutex bd_fsfreeze_mutex;
80931-};
80932+} __randomize_layout;
80933
80934 /*
80935 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
80936@@ -642,7 +642,7 @@ struct inode {
80937 #endif
80938
80939 void *i_private; /* fs or device private pointer */
80940-};
80941+} __randomize_layout;
80942
80943 static inline int inode_unhashed(struct inode *inode)
80944 {
80945@@ -837,7 +837,7 @@ struct file {
80946 struct list_head f_tfile_llink;
80947 #endif /* #ifdef CONFIG_EPOLL */
80948 struct address_space *f_mapping;
80949-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
80950+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
80951
80952 struct file_handle {
80953 __u32 handle_bytes;
80954@@ -962,7 +962,7 @@ struct file_lock {
80955 int state; /* state of grant or error if -ve */
80956 } afs;
80957 } fl_u;
80958-};
80959+} __randomize_layout;
80960
80961 /* The following constant reflects the upper bound of the file/locking space */
80962 #ifndef OFFSET_MAX
80963@@ -1305,7 +1305,7 @@ struct super_block {
80964 * Indicates how deep in a filesystem stack this SB is
80965 */
80966 int s_stack_depth;
80967-};
80968+} __randomize_layout;
80969
80970 extern struct timespec current_fs_time(struct super_block *sb);
80971
80972@@ -1536,7 +1536,8 @@ struct file_operations {
80973 long (*fallocate)(struct file *file, int mode, loff_t offset,
80974 loff_t len);
80975 void (*show_fdinfo)(struct seq_file *m, struct file *f);
80976-};
80977+} __do_const __randomize_layout;
80978+typedef struct file_operations __no_const file_operations_no_const;
80979
80980 struct inode_operations {
80981 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
80982@@ -2854,4 +2855,14 @@ static inline bool dir_relax(struct inode *inode)
80983 return !IS_DEADDIR(inode);
80984 }
80985
80986+static inline bool is_sidechannel_device(const struct inode *inode)
80987+{
80988+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
80989+ umode_t mode = inode->i_mode;
80990+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
80991+#else
80992+ return false;
80993+#endif
80994+}
80995+
80996 #endif /* _LINUX_FS_H */
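is_sidechannel_device() flags world-readable or world-writable character and block devices, such as ttys, whose access patterns can leak another user's activity; under CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL the fsnotify hunks below use it to suppress access/modify events on them. The same predicate restated as a stand-alone user-space checker, for illustration only:

#include <stdio.h>
#include <sys/stat.h>

/* mirrors the mode test in is_sidechannel_device() */
static int is_sidechannel_mode(mode_t mode)
{
        return (S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH));
}

int main(int argc, char **argv)
{
        struct stat st;

        if (argc < 2 || stat(argv[1], &st))
                return 1;
        printf("%s: %s\n", argv[1],
               is_sidechannel_mode(st.st_mode) ? "side-channel device"
                                               : "not flagged");
        return 0;
}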
80997diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
80998index 0efc3e6..fd23610 100644
80999--- a/include/linux/fs_struct.h
81000+++ b/include/linux/fs_struct.h
81001@@ -6,13 +6,13 @@
81002 #include <linux/seqlock.h>
81003
81004 struct fs_struct {
81005- int users;
81006+ atomic_t users;
81007 spinlock_t lock;
81008 seqcount_t seq;
81009 int umask;
81010 int in_exec;
81011 struct path root, pwd;
81012-};
81013+} __randomize_layout;
81014
81015 extern struct kmem_cache *fs_cachep;
81016
81017diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
81018index 7714849..a4a5c7a 100644
81019--- a/include/linux/fscache-cache.h
81020+++ b/include/linux/fscache-cache.h
81021@@ -113,7 +113,7 @@ struct fscache_operation {
81022 fscache_operation_release_t release;
81023 };
81024
81025-extern atomic_t fscache_op_debug_id;
81026+extern atomic_unchecked_t fscache_op_debug_id;
81027 extern void fscache_op_work_func(struct work_struct *work);
81028
81029 extern void fscache_enqueue_operation(struct fscache_operation *);
81030@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
81031 INIT_WORK(&op->work, fscache_op_work_func);
81032 atomic_set(&op->usage, 1);
81033 op->state = FSCACHE_OP_ST_INITIALISED;
81034- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
81035+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
81036 op->processor = processor;
81037 op->release = release;
81038 INIT_LIST_HEAD(&op->pend_link);
81039diff --git a/include/linux/fscache.h b/include/linux/fscache.h
81040index 115bb81..e7b812b 100644
81041--- a/include/linux/fscache.h
81042+++ b/include/linux/fscache.h
81043@@ -152,7 +152,7 @@ struct fscache_cookie_def {
81044 * - this is mandatory for any object that may have data
81045 */
81046 void (*now_uncached)(void *cookie_netfs_data);
81047-};
81048+} __do_const;
81049
81050 /*
81051 * fscache cached network filesystem type
81052diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
81053index 7ee1774..72505b8 100644
81054--- a/include/linux/fsnotify.h
81055+++ b/include/linux/fsnotify.h
81056@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
81057 struct inode *inode = file_inode(file);
81058 __u32 mask = FS_ACCESS;
81059
81060+ if (is_sidechannel_device(inode))
81061+ return;
81062+
81063 if (S_ISDIR(inode->i_mode))
81064 mask |= FS_ISDIR;
81065
81066@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
81067 struct inode *inode = file_inode(file);
81068 __u32 mask = FS_MODIFY;
81069
81070+ if (is_sidechannel_device(inode))
81071+ return;
81072+
81073 if (S_ISDIR(inode->i_mode))
81074 mask |= FS_ISDIR;
81075
81076@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
81077 */
81078 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
81079 {
81080- return kstrdup(name, GFP_KERNEL);
81081+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
81082 }
81083
81084 /*
81085diff --git a/include/linux/genhd.h b/include/linux/genhd.h
81086index ec274e0..e678159 100644
81087--- a/include/linux/genhd.h
81088+++ b/include/linux/genhd.h
81089@@ -194,7 +194,7 @@ struct gendisk {
81090 struct kobject *slave_dir;
81091
81092 struct timer_rand_state *random;
81093- atomic_t sync_io; /* RAID */
81094+ atomic_unchecked_t sync_io; /* RAID */
81095 struct disk_events *ev;
81096 #ifdef CONFIG_BLK_DEV_INTEGRITY
81097 struct blk_integrity *integrity;
81098@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
81099 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
81100
81101 /* drivers/char/random.c */
81102-extern void add_disk_randomness(struct gendisk *disk);
81103+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
81104 extern void rand_initialize_disk(struct gendisk *disk);
81105
81106 static inline sector_t get_start_sect(struct block_device *bdev)
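__latent_entropy (applied here to add_disk_randomness(), and earlier to dup_fd() in the fdtable hunk) asks the latent_entropy GCC plugin to instrument the function: a per-build random constant is perturbed along the executed control-flow paths and folded into a global pool on return, harvesting entropy from call patterns. A hand-written model of the injected code; the constant, pool name, and mixing steps are illustrative, since the real transformation happens inside the compiler:

#include <stdio.h>

static unsigned long latent_entropy_pool;       /* models the kernel's global pool */

static void add_disk_randomness_model(unsigned int events)
{
        unsigned long latent_entropy = 0x9e3779b9UL;    /* per-build random constant */

        latent_entropy ^= events;               /* mixed at control-flow points */
        latent_entropy_pool ^= latent_entropy;  /* folded into the pool on return */
}

int main(void)
{
        add_disk_randomness_model(42);
        printf("pool: %#lx\n", latent_entropy_pool);
        return 0;
}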
81107diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
81108index 667c311..abac2a7 100644
81109--- a/include/linux/genl_magic_func.h
81110+++ b/include/linux/genl_magic_func.h
81111@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
81112 },
81113
81114 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
81115-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
81116+static struct genl_ops ZZZ_genl_ops[] = {
81117 #include GENL_MAGIC_INCLUDE_FILE
81118 };
81119
81120diff --git a/include/linux/gfp.h b/include/linux/gfp.h
81121index b840e3b..aeaeef9 100644
81122--- a/include/linux/gfp.h
81123+++ b/include/linux/gfp.h
81124@@ -34,6 +34,13 @@ struct vm_area_struct;
81125 #define ___GFP_NO_KSWAPD 0x400000u
81126 #define ___GFP_OTHER_NODE 0x800000u
81127 #define ___GFP_WRITE 0x1000000u
81128+
81129+#ifdef CONFIG_PAX_USERCOPY_SLABS
81130+#define ___GFP_USERCOPY 0x2000000u
81131+#else
81132+#define ___GFP_USERCOPY 0
81133+#endif
81134+
81135 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
81136
81137 /*
81138@@ -90,6 +97,7 @@ struct vm_area_struct;
81139 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
81140 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
81141 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
81142+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
81143
81144 /*
81145 * This may seem redundant, but it's a way of annotating false positives vs.
81146@@ -97,7 +105,7 @@ struct vm_area_struct;
81147 */
81148 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
81149
81150-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
81151+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
81152 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
81153
81154 /* This equals 0, but use constants in case they ever change */
81155@@ -152,6 +160,8 @@ struct vm_area_struct;
81156 /* 4GB DMA on some platforms */
81157 #define GFP_DMA32 __GFP_DMA32
81158
81159+#define GFP_USERCOPY __GFP_USERCOPY
81160+
81161 /* Convert GFP flags to their corresponding migrate type */
81162 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
81163 {
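___GFP_USERCOPY claims bit 25, the next free GFP bit after ___GFP_WRITE, which is why __GFP_BITS_SHIFT grows from 25 to 26; under PAX_USERCOPY_SLABS, slab caches created with this flag are segregated as the only ones eligible for copies to or from user space. A small sanity check of that bit arithmetic, with the constants copied from the hunk above:

#include <assert.h>

#define ___GFP_WRITE            0x1000000u
#define ___GFP_USERCOPY         0x2000000u      /* == 1u << 25 */
#define __GFP_BITS_SHIFT        26
#define __GFP_BITS_MASK         ((1u << __GFP_BITS_SHIFT) - 1)

int main(void)
{
        assert(___GFP_USERCOPY == ___GFP_WRITE << 1);   /* next free bit */
        assert(___GFP_USERCOPY & __GFP_BITS_MASK);      /* inside the widened mask */
        return 0;
}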
81164diff --git a/include/linux/gracl.h b/include/linux/gracl.h
81165new file mode 100644
81166index 0000000..91858e4
81167--- /dev/null
81168+++ b/include/linux/gracl.h
81169@@ -0,0 +1,342 @@
81170+#ifndef GR_ACL_H
81171+#define GR_ACL_H
81172+
81173+#include <linux/grdefs.h>
81174+#include <linux/resource.h>
81175+#include <linux/capability.h>
81176+#include <linux/dcache.h>
81177+#include <asm/resource.h>
81178+
81179+/* Major status information */
81180+
81181+#define GR_VERSION "grsecurity 3.1"
81182+#define GRSECURITY_VERSION 0x3100
81183+
81184+enum {
81185+ GR_SHUTDOWN = 0,
81186+ GR_ENABLE = 1,
81187+ GR_SPROLE = 2,
81188+ GR_OLDRELOAD = 3,
81189+ GR_SEGVMOD = 4,
81190+ GR_STATUS = 5,
81191+ GR_UNSPROLE = 6,
81192+ GR_PASSSET = 7,
81193+ GR_SPROLEPAM = 8,
81194+ GR_RELOAD = 9,
81195+};
81196+
81197+/* Password setup definitions
81198+ * kernel/grhash.c */
81199+enum {
81200+ GR_PW_LEN = 128,
81201+ GR_SALT_LEN = 16,
81202+ GR_SHA_LEN = 32,
81203+};
81204+
81205+enum {
81206+ GR_SPROLE_LEN = 64,
81207+};
81208+
81209+enum {
81210+ GR_NO_GLOB = 0,
81211+ GR_REG_GLOB,
81212+ GR_CREATE_GLOB
81213+};
81214+
81215+#define GR_NLIMITS 32
81216+
81217+/* Begin Data Structures */
81218+
81219+struct sprole_pw {
81220+ unsigned char *rolename;
81221+ unsigned char salt[GR_SALT_LEN];
81222+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
81223+};
81224+
81225+struct name_entry {
81226+ __u32 key;
81227+ u64 inode;
81228+ dev_t device;
81229+ char *name;
81230+ __u16 len;
81231+ __u8 deleted;
81232+ struct name_entry *prev;
81233+ struct name_entry *next;
81234+};
81235+
81236+struct inodev_entry {
81237+ struct name_entry *nentry;
81238+ struct inodev_entry *prev;
81239+ struct inodev_entry *next;
81240+};
81241+
81242+struct acl_role_db {
81243+ struct acl_role_label **r_hash;
81244+ __u32 r_size;
81245+};
81246+
81247+struct inodev_db {
81248+ struct inodev_entry **i_hash;
81249+ __u32 i_size;
81250+};
81251+
81252+struct name_db {
81253+ struct name_entry **n_hash;
81254+ __u32 n_size;
81255+};
81256+
81257+struct crash_uid {
81258+ uid_t uid;
81259+ unsigned long expires;
81260+};
81261+
81262+struct gr_hash_struct {
81263+ void **table;
81264+ void **nametable;
81265+ void *first;
81266+ __u32 table_size;
81267+ __u32 used_size;
81268+ int type;
81269+};
81270+
81271+/* Userspace Grsecurity ACL data structures */
81272+
81273+struct acl_subject_label {
81274+ char *filename;
81275+ u64 inode;
81276+ dev_t device;
81277+ __u32 mode;
81278+ kernel_cap_t cap_mask;
81279+ kernel_cap_t cap_lower;
81280+ kernel_cap_t cap_invert_audit;
81281+
81282+ struct rlimit res[GR_NLIMITS];
81283+ __u32 resmask;
81284+
81285+ __u8 user_trans_type;
81286+ __u8 group_trans_type;
81287+ uid_t *user_transitions;
81288+ gid_t *group_transitions;
81289+ __u16 user_trans_num;
81290+ __u16 group_trans_num;
81291+
81292+ __u32 sock_families[2];
81293+ __u32 ip_proto[8];
81294+ __u32 ip_type;
81295+ struct acl_ip_label **ips;
81296+ __u32 ip_num;
81297+ __u32 inaddr_any_override;
81298+
81299+ __u32 crashes;
81300+ unsigned long expires;
81301+
81302+ struct acl_subject_label *parent_subject;
81303+ struct gr_hash_struct *hash;
81304+ struct acl_subject_label *prev;
81305+ struct acl_subject_label *next;
81306+
81307+ struct acl_object_label **obj_hash;
81308+ __u32 obj_hash_size;
81309+ __u16 pax_flags;
81310+};
81311+
81312+struct role_allowed_ip {
81313+ __u32 addr;
81314+ __u32 netmask;
81315+
81316+ struct role_allowed_ip *prev;
81317+ struct role_allowed_ip *next;
81318+};
81319+
81320+struct role_transition {
81321+ char *rolename;
81322+
81323+ struct role_transition *prev;
81324+ struct role_transition *next;
81325+};
81326+
81327+struct acl_role_label {
81328+ char *rolename;
81329+ uid_t uidgid;
81330+ __u16 roletype;
81331+
81332+ __u16 auth_attempts;
81333+ unsigned long expires;
81334+
81335+ struct acl_subject_label *root_label;
81336+ struct gr_hash_struct *hash;
81337+
81338+ struct acl_role_label *prev;
81339+ struct acl_role_label *next;
81340+
81341+ struct role_transition *transitions;
81342+ struct role_allowed_ip *allowed_ips;
81343+ uid_t *domain_children;
81344+ __u16 domain_child_num;
81345+
81346+ umode_t umask;
81347+
81348+ struct acl_subject_label **subj_hash;
81349+ __u32 subj_hash_size;
81350+};
81351+
81352+struct user_acl_role_db {
81353+ struct acl_role_label **r_table;
81354+ __u32 num_pointers; /* Number of allocations to track */
81355+ __u32 num_roles; /* Number of roles */
81356+ __u32 num_domain_children; /* Number of domain children */
81357+ __u32 num_subjects; /* Number of subjects */
81358+ __u32 num_objects; /* Number of objects */
81359+};
81360+
81361+struct acl_object_label {
81362+ char *filename;
81363+ u64 inode;
81364+ dev_t device;
81365+ __u32 mode;
81366+
81367+ struct acl_subject_label *nested;
81368+ struct acl_object_label *globbed;
81369+
81370+ /* next two structures not used */
81371+
81372+ struct acl_object_label *prev;
81373+ struct acl_object_label *next;
81374+};
81375+
81376+struct acl_ip_label {
81377+ char *iface;
81378+ __u32 addr;
81379+ __u32 netmask;
81380+ __u16 low, high;
81381+ __u8 mode;
81382+ __u32 type;
81383+ __u32 proto[8];
81384+
81385+ /* next two structures not used */
81386+
81387+ struct acl_ip_label *prev;
81388+ struct acl_ip_label *next;
81389+};
81390+
81391+struct gr_arg {
81392+ struct user_acl_role_db role_db;
81393+ unsigned char pw[GR_PW_LEN];
81394+ unsigned char salt[GR_SALT_LEN];
81395+ unsigned char sum[GR_SHA_LEN];
81396+ unsigned char sp_role[GR_SPROLE_LEN];
81397+ struct sprole_pw *sprole_pws;
81398+ dev_t segv_device;
81399+ u64 segv_inode;
81400+ uid_t segv_uid;
81401+ __u16 num_sprole_pws;
81402+ __u16 mode;
81403+};
81404+
81405+struct gr_arg_wrapper {
81406+ struct gr_arg *arg;
81407+ __u32 version;
81408+ __u32 size;
81409+};
81410+
81411+struct subject_map {
81412+ struct acl_subject_label *user;
81413+ struct acl_subject_label *kernel;
81414+ struct subject_map *prev;
81415+ struct subject_map *next;
81416+};
81417+
81418+struct acl_subj_map_db {
81419+ struct subject_map **s_hash;
81420+ __u32 s_size;
81421+};
81422+
81423+struct gr_policy_state {
81424+ struct sprole_pw **acl_special_roles;
81425+ __u16 num_sprole_pws;
81426+ struct acl_role_label *kernel_role;
81427+ struct acl_role_label *role_list;
81428+ struct acl_role_label *default_role;
81429+ struct acl_role_db acl_role_set;
81430+ struct acl_subj_map_db subj_map_set;
81431+ struct name_db name_set;
81432+ struct inodev_db inodev_set;
81433+};
81434+
81435+struct gr_alloc_state {
81436+ unsigned long alloc_stack_next;
81437+ unsigned long alloc_stack_size;
81438+ void **alloc_stack;
81439+};
81440+
81441+struct gr_reload_state {
81442+ struct gr_policy_state oldpolicy;
81443+ struct gr_alloc_state oldalloc;
81444+ struct gr_policy_state newpolicy;
81445+ struct gr_alloc_state newalloc;
81446+ struct gr_policy_state *oldpolicy_ptr;
81447+ struct gr_alloc_state *oldalloc_ptr;
81448+ unsigned char oldmode;
81449+};
81450+
81451+/* End Data Structures Section */
81452+
81453+/* Hash functions generated by empirical testing by Brad Spengler.
81454+   They make good use of the low bits of the inode.  Generally 0-1 loop
81455+   iterations for a successful match, 0-3 for an unsuccessful match.
81456+   Shift/add algorithm with modulus of table size and an XOR. */
81457+
81458+static __inline__ unsigned int
81459+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
81460+{
81461+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
81462+}
81463+
81464+static __inline__ unsigned int
81465+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
81466+{
81467+ return ((const unsigned long)userp % sz);
81468+}
81469+
81470+static __inline__ unsigned int
81471+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
81472+{
81473+ unsigned int rem;
81474+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
81475+ return rem;
81476+}
81477+
81478+static __inline__ unsigned int
81479+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
81480+{
81481+ return full_name_hash((const unsigned char *)name, len) % sz;
81482+}
81483+
81484+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
81485+ subj = NULL; \
81486+ iter = 0; \
81487+ while (iter < role->subj_hash_size) { \
81488+ if (subj == NULL) \
81489+ subj = role->subj_hash[iter]; \
81490+ if (subj == NULL) { \
81491+ iter++; \
81492+ continue; \
81493+ }
81494+
81495+#define FOR_EACH_SUBJECT_END(subj,iter) \
81496+ subj = subj->next; \
81497+ if (subj == NULL) \
81498+ iter++; \
81499+ }
81500+
81501+
81502+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
81503+ subj = role->hash->first; \
81504+ while (subj != NULL) {
81505+
81506+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
81507+ subj = subj->next; \
81508+ }
81509+
81510+#endif
81511+
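gracl.h lays out the RBAC policy as open-hashed tables (roles, subjects, objects, names, inode/device pairs) whose buckets are chained through ->next, and the FOR_EACH_SUBJECT_* macros walk every chain of every bucket. A stand-alone model of gr_rhash() and of the hand-expanded iteration, using simplified stand-in types and made-up data:

#include <stdio.h>

struct subj {
        const char *filename;
        struct subj *next;
};

#define SUBJ_HASH_SIZE 8

static unsigned int gr_rhash_model(unsigned int uid, unsigned short type,
                                   unsigned int sz)
{
        return (((uid + type) << (16 + type)) ^ uid) % sz;
}

int main(void)
{
        struct subj a = { "/bin/su", NULL }, b = { "/sbin/init", &a };
        struct subj *subj_hash[SUBJ_HASH_SIZE] = { [3] = &b };
        struct subj *subj;
        unsigned int iter;

        printf("bucket for uid 1000: %u\n",
               gr_rhash_model(1000, 0x01 /* GR_ID_USER-style type */,
                              SUBJ_HASH_SIZE));

        /* hand expansion of FOR_EACH_SUBJECT_START/END over subj_hash */
        subj = NULL;
        iter = 0;
        while (iter < SUBJ_HASH_SIZE) {
                if (subj == NULL)
                        subj = subj_hash[iter];
                if (subj == NULL) {
                        iter++;
                        continue;
                }
                printf("subject: %s\n", subj->filename);        /* loop body */
                subj = subj->next;
                if (subj == NULL)
                        iter++;
        }
        return 0;
}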
81512diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
81513new file mode 100644
81514index 0000000..af64092
81515--- /dev/null
81516+++ b/include/linux/gracl_compat.h
81517@@ -0,0 +1,156 @@
81518+#ifndef GR_ACL_COMPAT_H
81519+#define GR_ACL_COMPAT_H
81520+
81521+#include <linux/resource.h>
81522+#include <asm/resource.h>
81523+
81524+struct sprole_pw_compat {
81525+ compat_uptr_t rolename;
81526+ unsigned char salt[GR_SALT_LEN];
81527+ unsigned char sum[GR_SHA_LEN];
81528+};
81529+
81530+struct gr_hash_struct_compat {
81531+ compat_uptr_t table;
81532+ compat_uptr_t nametable;
81533+ compat_uptr_t first;
81534+ __u32 table_size;
81535+ __u32 used_size;
81536+ int type;
81537+};
81538+
81539+struct acl_subject_label_compat {
81540+ compat_uptr_t filename;
81541+ compat_u64 inode;
81542+ __u32 device;
81543+ __u32 mode;
81544+ kernel_cap_t cap_mask;
81545+ kernel_cap_t cap_lower;
81546+ kernel_cap_t cap_invert_audit;
81547+
81548+ struct compat_rlimit res[GR_NLIMITS];
81549+ __u32 resmask;
81550+
81551+ __u8 user_trans_type;
81552+ __u8 group_trans_type;
81553+ compat_uptr_t user_transitions;
81554+ compat_uptr_t group_transitions;
81555+ __u16 user_trans_num;
81556+ __u16 group_trans_num;
81557+
81558+ __u32 sock_families[2];
81559+ __u32 ip_proto[8];
81560+ __u32 ip_type;
81561+ compat_uptr_t ips;
81562+ __u32 ip_num;
81563+ __u32 inaddr_any_override;
81564+
81565+ __u32 crashes;
81566+ compat_ulong_t expires;
81567+
81568+ compat_uptr_t parent_subject;
81569+ compat_uptr_t hash;
81570+ compat_uptr_t prev;
81571+ compat_uptr_t next;
81572+
81573+ compat_uptr_t obj_hash;
81574+ __u32 obj_hash_size;
81575+ __u16 pax_flags;
81576+};
81577+
81578+struct role_allowed_ip_compat {
81579+ __u32 addr;
81580+ __u32 netmask;
81581+
81582+ compat_uptr_t prev;
81583+ compat_uptr_t next;
81584+};
81585+
81586+struct role_transition_compat {
81587+ compat_uptr_t rolename;
81588+
81589+ compat_uptr_t prev;
81590+ compat_uptr_t next;
81591+};
81592+
81593+struct acl_role_label_compat {
81594+ compat_uptr_t rolename;
81595+ uid_t uidgid;
81596+ __u16 roletype;
81597+
81598+ __u16 auth_attempts;
81599+ compat_ulong_t expires;
81600+
81601+ compat_uptr_t root_label;
81602+ compat_uptr_t hash;
81603+
81604+ compat_uptr_t prev;
81605+ compat_uptr_t next;
81606+
81607+ compat_uptr_t transitions;
81608+ compat_uptr_t allowed_ips;
81609+ compat_uptr_t domain_children;
81610+ __u16 domain_child_num;
81611+
81612+ umode_t umask;
81613+
81614+ compat_uptr_t subj_hash;
81615+ __u32 subj_hash_size;
81616+};
81617+
81618+struct user_acl_role_db_compat {
81619+ compat_uptr_t r_table;
81620+ __u32 num_pointers;
81621+ __u32 num_roles;
81622+ __u32 num_domain_children;
81623+ __u32 num_subjects;
81624+ __u32 num_objects;
81625+};
81626+
81627+struct acl_object_label_compat {
81628+ compat_uptr_t filename;
81629+ compat_u64 inode;
81630+ __u32 device;
81631+ __u32 mode;
81632+
81633+ compat_uptr_t nested;
81634+ compat_uptr_t globbed;
81635+
81636+ compat_uptr_t prev;
81637+ compat_uptr_t next;
81638+};
81639+
81640+struct acl_ip_label_compat {
81641+ compat_uptr_t iface;
81642+ __u32 addr;
81643+ __u32 netmask;
81644+ __u16 low, high;
81645+ __u8 mode;
81646+ __u32 type;
81647+ __u32 proto[8];
81648+
81649+ compat_uptr_t prev;
81650+ compat_uptr_t next;
81651+};
81652+
81653+struct gr_arg_compat {
81654+ struct user_acl_role_db_compat role_db;
81655+ unsigned char pw[GR_PW_LEN];
81656+ unsigned char salt[GR_SALT_LEN];
81657+ unsigned char sum[GR_SHA_LEN];
81658+ unsigned char sp_role[GR_SPROLE_LEN];
81659+ compat_uptr_t sprole_pws;
81660+ __u32 segv_device;
81661+ compat_u64 segv_inode;
81662+ uid_t segv_uid;
81663+ __u16 num_sprole_pws;
81664+ __u16 mode;
81665+};
81666+
81667+struct gr_arg_wrapper_compat {
81668+ compat_uptr_t arg;
81669+ __u32 version;
81670+ __u32 size;
81671+};
81672+
81673+#endif
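Every pointer-bearing field of the native structures shrinks to a compat_uptr_t in these _compat twins, so a 64-bit kernel can take a gr_arg from a 32-bit userland and widen it field by field. A sketch of that widening for the wrapper struct, following the kernel's usual compat_ptr() convention; the widen() helper and the *_model types are hypothetical names for illustration:

#include <stdint.h>

typedef uint32_t compat_uptr_t;         /* 32-bit user pointer handle */

static inline void *compat_ptr(compat_uptr_t uptr)
{
        return (void *)(uintptr_t)uptr; /* widen back to a native pointer */
}

struct gr_arg_wrapper_model        { void *arg; uint32_t version, size; };
struct gr_arg_wrapper_compat_model { compat_uptr_t arg; uint32_t version, size; };

static void widen(struct gr_arg_wrapper_model *out,
                  const struct gr_arg_wrapper_compat_model *in)
{
        out->arg     = compat_ptr(in->arg);     /* handle -> pointer */
        out->version = in->version;
        out->size    = in->size;
}

int main(void)
{
        struct gr_arg_wrapper_compat_model in = { 0x1000u, 1, 24 };
        struct gr_arg_wrapper_model out;

        widen(&out, &in);
        return out.version == 1 ? 0 : 1;
}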
81674diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
81675new file mode 100644
81676index 0000000..323ecf2
81677--- /dev/null
81678+++ b/include/linux/gralloc.h
81679@@ -0,0 +1,9 @@
81680+#ifndef __GRALLOC_H
81681+#define __GRALLOC_H
81682+
81683+void acl_free_all(void);
81684+int acl_alloc_stack_init(unsigned long size);
81685+void *acl_alloc(unsigned long len);
81686+void *acl_alloc_num(unsigned long num, unsigned long len);
81687+
81688+#endif
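These four entry points describe a region allocator: init sizes a stack of pointers, acl_alloc() records every allocation on it, and acl_free_all() releases the entire policy in one sweep at teardown or reload time. A minimal user-space model of that contract (the in-kernel version is added elsewhere in this patch):

#include <stdlib.h>

static void **alloc_stack;
static unsigned long alloc_stack_next, alloc_stack_size;

int acl_alloc_stack_init(unsigned long size)
{
        alloc_stack = calloc(size, sizeof(void *));
        alloc_stack_size = size;
        alloc_stack_next = 0;
        return alloc_stack != NULL;
}

void *acl_alloc(unsigned long len)
{
        void *p;

        if (alloc_stack_next >= alloc_stack_size)
                return NULL;                    /* tracking stack is full */
        p = calloc(1, len);
        if (p)
                alloc_stack[alloc_stack_next++] = p;
        return p;
}

void acl_free_all(void)
{
        while (alloc_stack_next)
                free(alloc_stack[--alloc_stack_next]);
        free(alloc_stack);
        alloc_stack = NULL;
}

int main(void)
{
        if (!acl_alloc_stack_init(16))
                return 1;
        acl_alloc(64);          /* e.g. a role label */
        acl_alloc(128);         /* e.g. a subject label */
        acl_free_all();         /* whole policy released in one call */
        return 0;
}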
81689diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
81690new file mode 100644
81691index 0000000..be66033
81692--- /dev/null
81693+++ b/include/linux/grdefs.h
81694@@ -0,0 +1,140 @@
81695+#ifndef GRDEFS_H
81696+#define GRDEFS_H
81697+
81698+/* Begin grsecurity status declarations */
81699+
81700+enum {
81701+ GR_READY = 0x01,
81702+ GR_STATUS_INIT = 0x00 // disabled state
81703+};
81704+
81705+/* Begin ACL declarations */
81706+
81707+/* Role flags */
81708+
81709+enum {
81710+ GR_ROLE_USER = 0x0001,
81711+ GR_ROLE_GROUP = 0x0002,
81712+ GR_ROLE_DEFAULT = 0x0004,
81713+ GR_ROLE_SPECIAL = 0x0008,
81714+ GR_ROLE_AUTH = 0x0010,
81715+ GR_ROLE_NOPW = 0x0020,
81716+ GR_ROLE_GOD = 0x0040,
81717+ GR_ROLE_LEARN = 0x0080,
81718+ GR_ROLE_TPE = 0x0100,
81719+ GR_ROLE_DOMAIN = 0x0200,
81720+ GR_ROLE_PAM = 0x0400,
81721+ GR_ROLE_PERSIST = 0x0800
81722+};
81723+
81724+/* ACL Subject and Object mode flags */
81725+enum {
81726+ GR_DELETED = 0x80000000
81727+};
81728+
81729+/* ACL Object-only mode flags */
81730+enum {
81731+ GR_READ = 0x00000001,
81732+ GR_APPEND = 0x00000002,
81733+ GR_WRITE = 0x00000004,
81734+ GR_EXEC = 0x00000008,
81735+ GR_FIND = 0x00000010,
81736+ GR_INHERIT = 0x00000020,
81737+ GR_SETID = 0x00000040,
81738+ GR_CREATE = 0x00000080,
81739+ GR_DELETE = 0x00000100,
81740+ GR_LINK = 0x00000200,
81741+ GR_AUDIT_READ = 0x00000400,
81742+ GR_AUDIT_APPEND = 0x00000800,
81743+ GR_AUDIT_WRITE = 0x00001000,
81744+ GR_AUDIT_EXEC = 0x00002000,
81745+ GR_AUDIT_FIND = 0x00004000,
81746+ GR_AUDIT_INHERIT= 0x00008000,
81747+ GR_AUDIT_SETID = 0x00010000,
81748+ GR_AUDIT_CREATE = 0x00020000,
81749+ GR_AUDIT_DELETE = 0x00040000,
81750+ GR_AUDIT_LINK = 0x00080000,
81751+ GR_PTRACERD = 0x00100000,
81752+ GR_NOPTRACE = 0x00200000,
81753+ GR_SUPPRESS = 0x00400000,
81754+ GR_NOLEARN = 0x00800000,
81755+ GR_INIT_TRANSFER= 0x01000000
81756+};
81757+
81758+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
81759+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
81760+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
81761+
81762+/* ACL subject-only mode flags */
81763+enum {
81764+ GR_KILL = 0x00000001,
81765+ GR_VIEW = 0x00000002,
81766+ GR_PROTECTED = 0x00000004,
81767+ GR_LEARN = 0x00000008,
81768+ GR_OVERRIDE = 0x00000010,
81769+ /* just a placeholder; this mode is only used in userspace */
81770+ GR_DUMMY = 0x00000020,
81771+ GR_PROTSHM = 0x00000040,
81772+ GR_KILLPROC = 0x00000080,
81773+ GR_KILLIPPROC = 0x00000100,
81774+ /* just a placeholder; this mode is only used in userspace */
81775+ GR_NOTROJAN = 0x00000200,
81776+ GR_PROTPROCFD = 0x00000400,
81777+ GR_PROCACCT = 0x00000800,
81778+ GR_RELAXPTRACE = 0x00001000,
81779+ //GR_NESTED = 0x00002000,
81780+ GR_INHERITLEARN = 0x00004000,
81781+ GR_PROCFIND = 0x00008000,
81782+ GR_POVERRIDE = 0x00010000,
81783+ GR_KERNELAUTH = 0x00020000,
81784+ GR_ATSECURE = 0x00040000,
81785+ GR_SHMEXEC = 0x00080000
81786+};
81787+
81788+enum {
81789+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
81790+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
81791+ GR_PAX_ENABLE_MPROTECT = 0x0004,
81792+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
81793+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
81794+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
81795+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
81796+ GR_PAX_DISABLE_MPROTECT = 0x0400,
81797+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
81798+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
81799+};
81800+
81801+enum {
81802+ GR_ID_USER = 0x01,
81803+ GR_ID_GROUP = 0x02,
81804+};
81805+
81806+enum {
81807+ GR_ID_ALLOW = 0x01,
81808+ GR_ID_DENY = 0x02,
81809+};
81810+
81811+#define GR_CRASH_RES 31
81812+#define GR_UIDTABLE_MAX 500
81813+
81814+/* begin resource learning section */
81815+enum {
81816+ GR_RLIM_CPU_BUMP = 60,
81817+ GR_RLIM_FSIZE_BUMP = 50000,
81818+ GR_RLIM_DATA_BUMP = 10000,
81819+ GR_RLIM_STACK_BUMP = 1000,
81820+ GR_RLIM_CORE_BUMP = 10000,
81821+ GR_RLIM_RSS_BUMP = 500000,
81822+ GR_RLIM_NPROC_BUMP = 1,
81823+ GR_RLIM_NOFILE_BUMP = 5,
81824+ GR_RLIM_MEMLOCK_BUMP = 50000,
81825+ GR_RLIM_AS_BUMP = 500000,
81826+ GR_RLIM_LOCKS_BUMP = 2,
81827+ GR_RLIM_SIGPENDING_BUMP = 5,
81828+ GR_RLIM_MSGQUEUE_BUMP = 10000,
81829+ GR_RLIM_NICE_BUMP = 1,
81830+ GR_RLIM_RTPRIO_BUMP = 1,
81831+ GR_RLIM_RTTIME_BUMP = 1000000
81832+};
81833+
81834+#endif
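The object audit flags sit exactly ten bits above their access counterparts (GR_READ 0x00000001 vs GR_AUDIT_READ 0x00000400, through GR_LINK 0x00000200 vs GR_AUDIT_LINK 0x00080000), so converting requested access bits to audit bits is a shift and a mask. A sketch of a to_gr_audit()-style conversion built on that layout; the real helper is only declared, in grinternal.h below, so its exact shape is an assumption:

#include <assert.h>

#define GR_READ         0x00000001u
#define GR_LINK         0x00000200u
#define GR_AUDIT_READ   0x00000400u
#define GR_AUDIT_LINK   0x00080000u
#define GR_AUDITS       0x000ffc00u     /* the ten audit bits, as above */

static unsigned int to_gr_audit_model(unsigned int reqmode)
{
        return (reqmode << 10) & GR_AUDITS;     /* access bit -> audit bit */
}

int main(void)
{
        assert(to_gr_audit_model(GR_READ) == GR_AUDIT_READ);
        assert(to_gr_audit_model(GR_LINK) == GR_AUDIT_LINK);
        return 0;
}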
81835diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
81836new file mode 100644
81837index 0000000..fb1de5d
81838--- /dev/null
81839+++ b/include/linux/grinternal.h
81840@@ -0,0 +1,230 @@
81841+#ifndef __GRINTERNAL_H
81842+#define __GRINTERNAL_H
81843+
81844+#ifdef CONFIG_GRKERNSEC
81845+
81846+#include <linux/fs.h>
81847+#include <linux/mnt_namespace.h>
81848+#include <linux/nsproxy.h>
81849+#include <linux/gracl.h>
81850+#include <linux/grdefs.h>
81851+#include <linux/grmsg.h>
81852+
81853+void gr_add_learn_entry(const char *fmt, ...)
81854+ __attribute__ ((format (printf, 1, 2)));
81855+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
81856+ const struct vfsmount *mnt);
81857+__u32 gr_check_create(const struct dentry *new_dentry,
81858+ const struct dentry *parent,
81859+ const struct vfsmount *mnt, const __u32 mode);
81860+int gr_check_protected_task(const struct task_struct *task);
81861+__u32 to_gr_audit(const __u32 reqmode);
81862+int gr_set_acls(const int type);
81863+int gr_acl_is_enabled(void);
81864+char gr_roletype_to_char(void);
81865+
81866+void gr_handle_alertkill(struct task_struct *task);
81867+char *gr_to_filename(const struct dentry *dentry,
81868+ const struct vfsmount *mnt);
81869+char *gr_to_filename1(const struct dentry *dentry,
81870+ const struct vfsmount *mnt);
81871+char *gr_to_filename2(const struct dentry *dentry,
81872+ const struct vfsmount *mnt);
81873+char *gr_to_filename3(const struct dentry *dentry,
81874+ const struct vfsmount *mnt);
81875+
81876+extern int grsec_enable_ptrace_readexec;
81877+extern int grsec_enable_harden_ptrace;
81878+extern int grsec_enable_link;
81879+extern int grsec_enable_fifo;
81880+extern int grsec_enable_execve;
81881+extern int grsec_enable_shm;
81882+extern int grsec_enable_execlog;
81883+extern int grsec_enable_signal;
81884+extern int grsec_enable_audit_ptrace;
81885+extern int grsec_enable_forkfail;
81886+extern int grsec_enable_time;
81887+extern int grsec_enable_rofs;
81888+extern int grsec_deny_new_usb;
81889+extern int grsec_enable_chroot_shmat;
81890+extern int grsec_enable_chroot_mount;
81891+extern int grsec_enable_chroot_double;
81892+extern int grsec_enable_chroot_pivot;
81893+extern int grsec_enable_chroot_chdir;
81894+extern int grsec_enable_chroot_chmod;
81895+extern int grsec_enable_chroot_mknod;
81896+extern int grsec_enable_chroot_fchdir;
81897+extern int grsec_enable_chroot_nice;
81898+extern int grsec_enable_chroot_execlog;
81899+extern int grsec_enable_chroot_caps;
81900+extern int grsec_enable_chroot_rename;
81901+extern int grsec_enable_chroot_sysctl;
81902+extern int grsec_enable_chroot_unix;
81903+extern int grsec_enable_symlinkown;
81904+extern kgid_t grsec_symlinkown_gid;
81905+extern int grsec_enable_tpe;
81906+extern kgid_t grsec_tpe_gid;
81907+extern int grsec_enable_tpe_all;
81908+extern int grsec_enable_tpe_invert;
81909+extern int grsec_enable_socket_all;
81910+extern kgid_t grsec_socket_all_gid;
81911+extern int grsec_enable_socket_client;
81912+extern kgid_t grsec_socket_client_gid;
81913+extern int grsec_enable_socket_server;
81914+extern kgid_t grsec_socket_server_gid;
81915+extern kgid_t grsec_audit_gid;
81916+extern int grsec_enable_group;
81917+extern int grsec_enable_log_rwxmaps;
81918+extern int grsec_enable_mount;
81919+extern int grsec_enable_chdir;
81920+extern int grsec_resource_logging;
81921+extern int grsec_enable_blackhole;
81922+extern int grsec_lastack_retries;
81923+extern int grsec_enable_brute;
81924+extern int grsec_enable_harden_ipc;
81925+extern int grsec_lock;
81926+
81927+extern spinlock_t grsec_alert_lock;
81928+extern unsigned long grsec_alert_wtime;
81929+extern unsigned long grsec_alert_fyet;
81930+
81931+extern spinlock_t grsec_audit_lock;
81932+
81933+extern rwlock_t grsec_exec_file_lock;
81934+
81935+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
81936+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
81937+ (tsk)->exec_file->f_path.mnt) : "/")
81938+
81939+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
81940+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
81941+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
81942+
81943+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
81944+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
81945+ (tsk)->exec_file->f_path.mnt) : "/")
81946+
81947+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
81948+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
81949+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
81950+
81951+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
81952+
81953+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
81954+
81955+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
81956+{
81957+ if (file1 && file2) {
81958+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
81959+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
81960+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
81961+ return true;
81962+ }
81963+
81964+ return false;
81965+}
81966+
81967+#define GR_CHROOT_CAPS {{ \
81968+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
81969+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
81970+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
81971+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
81972+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
81973+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
81974+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
81975+
81976+#define security_learn(normal_msg,args...) \
81977+({ \
81978+ read_lock(&grsec_exec_file_lock); \
81979+ gr_add_learn_entry(normal_msg "\n", ## args); \
81980+ read_unlock(&grsec_exec_file_lock); \
81981+})
81982+
81983+enum {
81984+ GR_DO_AUDIT,
81985+ GR_DONT_AUDIT,
81986+ /* used for non-audit messages that we shouldn't kill the task on */
81987+ GR_DONT_AUDIT_GOOD
81988+};
81989+
81990+enum {
81991+ GR_TTYSNIFF,
81992+ GR_RBAC,
81993+ GR_RBAC_STR,
81994+ GR_STR_RBAC,
81995+ GR_RBAC_MODE2,
81996+ GR_RBAC_MODE3,
81997+ GR_FILENAME,
81998+ GR_SYSCTL_HIDDEN,
81999+ GR_NOARGS,
82000+ GR_ONE_INT,
82001+ GR_ONE_INT_TWO_STR,
82002+ GR_ONE_STR,
82003+ GR_STR_INT,
82004+ GR_TWO_STR_INT,
82005+ GR_TWO_INT,
82006+ GR_TWO_U64,
82007+ GR_THREE_INT,
82008+ GR_FIVE_INT_TWO_STR,
82009+ GR_TWO_STR,
82010+ GR_THREE_STR,
82011+ GR_FOUR_STR,
82012+ GR_STR_FILENAME,
82013+ GR_FILENAME_STR,
82014+ GR_FILENAME_TWO_INT,
82015+ GR_FILENAME_TWO_INT_STR,
82016+ GR_TEXTREL,
82017+ GR_PTRACE,
82018+ GR_RESOURCE,
82019+ GR_CAP,
82020+ GR_SIG,
82021+ GR_SIG2,
82022+ GR_CRASH1,
82023+ GR_CRASH2,
82024+ GR_PSACCT,
82025+ GR_RWXMAP,
82026+ GR_RWXMAPVMA
82027+};
82028+
82029+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
82030+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
82031+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
82032+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
82033+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
82034+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
82035+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
82036+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
82037+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
82038+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
82039+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
82040+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
82041+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
82042+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
82043+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
82044+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
82045+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
82046+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
82047+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
82048+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
82049+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
82050+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
82051+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
82052+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
82053+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
82054+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
82055+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
82056+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
82057+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
82058+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
82059+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
82060+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
82061+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
82062+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
82063+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
82064+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
82065+
82066+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
82067+
82068+#endif
82069+
82070+#endif
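All of the gr_log_* wrappers funnel into the variadic gr_log_varargs() sink, whose argtypes tag tells it how to pull and format the arguments, keeping every call site a one-liner. A stand-alone model of that dispatch with two of the tags; the message text only mimics grmsg.h, and appending the task details is sketched as a placeholder:

#include <stdarg.h>
#include <stdio.h>

enum { GR_ONE_INT, GR_TWO_STR };        /* two of the argtype tags above */

static void gr_log_varargs_model(const char *msg, int argtypes, ...)
{
        va_list ap;

        va_start(ap, argtypes);
        switch (argtypes) {
        case GR_ONE_INT:
                printf(msg, va_arg(ap, int));
                break;
        case GR_TWO_STR: {
                const char *s1 = va_arg(ap, const char *);
                const char *s2 = va_arg(ap, const char *);

                printf(msg, s1, s2);
                break;
        }
        }
        va_end(ap);
        printf("<process info>\n");     /* real sink appends task details */
}

int main(void)
{
        gr_log_varargs_model("failed fork with errno %d by ", GR_ONE_INT, 12);
        gr_log_varargs_model("mount of %s to %s by ", GR_TWO_STR,
                             "/dev/sda1", "/mnt");
        return 0;
}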
82071diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
82072new file mode 100644
82073index 0000000..26ef560
82074--- /dev/null
82075+++ b/include/linux/grmsg.h
82076@@ -0,0 +1,118 @@
82077+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
82078+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
82079+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
82080+#define GR_STOPMOD_MSG "denied modification of module state by "
82081+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
82082+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
82083+#define GR_IOPERM_MSG "denied use of ioperm() by "
82084+#define GR_IOPL_MSG "denied use of iopl() by "
82085+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
82086+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
82087+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
82088+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
82089+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
82090+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
82091+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
82092+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
82093+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
82094+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
82095+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
82096+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
82097+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
82098+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
82099+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
82100+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
82101+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
82102+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
82103+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
82104+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
82105+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
82106+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
82107+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
82108+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
82109+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
82110+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
82111+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
82112+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
82113+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
82114+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
82115+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
82116+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
82117+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
82118+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
82119+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
82120+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
82121+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
82122+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
82123+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
82124+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
82125+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
82126+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
82127+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
82128+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
82129+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
82130+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
82131+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
82132+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
82133+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
82134+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
82135+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
82136+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
82137+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
82138+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
82139+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
82140+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
82141+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
82142+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
82143+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
82144+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
82145+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
82146+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
82147+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
82148+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
82149+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
82150+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
82151+#define GR_FAILFORK_MSG "failed fork with errno %s by "
82152+#define GR_NICE_CHROOT_MSG "denied priority change by "
82153+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
82154+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
82155+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
82156+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
82157+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
82158+#define GR_TIME_MSG "time set by "
82159+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
82160+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
82161+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
82162+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
82163+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
82164+#define GR_BIND_MSG "denied bind() by "
82165+#define GR_CONNECT_MSG "denied connect() by "
82166+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
82167+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
82168+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
82169+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
82170+#define GR_CAP_ACL_MSG "use of %s denied for "
82171+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
82172+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
82173+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
82174+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
82175+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
82176+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
82177+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
82178+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
82179+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
82180+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
82181+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
82182+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
82183+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
82184+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
82185+#define GR_VM86_MSG "denied use of vm86 by "
82186+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
82187+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
82188+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
82189+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
82190+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
82191+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
82192+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
82193+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
82194+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
82195diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
82196new file mode 100644
82197index 0000000..63c1850
82198--- /dev/null
82199+++ b/include/linux/grsecurity.h
82200@@ -0,0 +1,250 @@
82201+#ifndef GR_SECURITY_H
82202+#define GR_SECURITY_H
82203+#include <linux/fs.h>
82204+#include <linux/fs_struct.h>
82205+#include <linux/binfmts.h>
82206+#include <linux/gracl.h>
82207+
82208+/* notify of brain-dead configs */
82209+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82210+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
82211+#endif
82212+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82213+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
82214+#endif
82215+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
82216+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
82217+#endif
82218+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
82219+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
82220+#endif
82221+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
82222+#error "CONFIG_PAX enabled, but no PaX options are enabled."
82223+#endif
82224+
82225+int gr_handle_new_usb(void);
82226+
82227+void gr_handle_brute_attach(int dumpable);
82228+void gr_handle_brute_check(void);
82229+void gr_handle_kernel_exploit(void);
82230+
82231+char gr_roletype_to_char(void);
82232+
82233+int gr_proc_is_restricted(void);
82234+
82235+int gr_acl_enable_at_secure(void);
82236+
82237+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
82238+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
82239+
82240+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
82241+
82242+void gr_del_task_from_ip_table(struct task_struct *p);
82243+
82244+int gr_pid_is_chrooted(struct task_struct *p);
82245+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
82246+int gr_handle_chroot_nice(void);
82247+int gr_handle_chroot_sysctl(const int op);
82248+int gr_handle_chroot_setpriority(struct task_struct *p,
82249+ const int niceval);
82250+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
82251+int gr_chroot_fhandle(void);
82252+int gr_handle_chroot_chroot(const struct dentry *dentry,
82253+ const struct vfsmount *mnt);
82254+void gr_handle_chroot_chdir(const struct path *path);
82255+int gr_handle_chroot_chmod(const struct dentry *dentry,
82256+ const struct vfsmount *mnt, const int mode);
82257+int gr_handle_chroot_mknod(const struct dentry *dentry,
82258+ const struct vfsmount *mnt, const int mode);
82259+int gr_handle_chroot_mount(const struct dentry *dentry,
82260+ const struct vfsmount *mnt,
82261+ const char *dev_name);
82262+int gr_handle_chroot_pivot(void);
82263+int gr_handle_chroot_unix(const pid_t pid);
82264+
82265+int gr_handle_rawio(const struct inode *inode);
82266+
82267+void gr_handle_ioperm(void);
82268+void gr_handle_iopl(void);
82269+void gr_handle_msr_write(void);
82270+
82271+umode_t gr_acl_umask(void);
82272+
82273+int gr_tpe_allow(const struct file *file);
82274+
82275+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
82276+void gr_clear_chroot_entries(struct task_struct *task);
82277+
82278+void gr_log_forkfail(const int retval);
82279+void gr_log_timechange(void);
82280+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
82281+void gr_log_chdir(const struct dentry *dentry,
82282+ const struct vfsmount *mnt);
82283+void gr_log_chroot_exec(const struct dentry *dentry,
82284+ const struct vfsmount *mnt);
82285+void gr_log_remount(const char *devname, const int retval);
82286+void gr_log_unmount(const char *devname, const int retval);
82287+void gr_log_mount(const char *from, struct path *to, const int retval);
82288+void gr_log_textrel(struct vm_area_struct *vma);
82289+void gr_log_ptgnustack(struct file *file);
82290+void gr_log_rwxmmap(struct file *file);
82291+void gr_log_rwxmprotect(struct vm_area_struct *vma);
82292+
82293+int gr_handle_follow_link(const struct inode *parent,
82294+ const struct inode *inode,
82295+ const struct dentry *dentry,
82296+ const struct vfsmount *mnt);
82297+int gr_handle_fifo(const struct dentry *dentry,
82298+ const struct vfsmount *mnt,
82299+ const struct dentry *dir, const int flag,
82300+ const int acc_mode);
82301+int gr_handle_hardlink(const struct dentry *dentry,
82302+ const struct vfsmount *mnt,
82303+ struct inode *inode,
82304+ const int mode, const struct filename *to);
82305+
82306+int gr_is_capable(const int cap);
82307+int gr_is_capable_nolog(const int cap);
82308+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
82309+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
82310+
82311+void gr_copy_label(struct task_struct *tsk);
82312+void gr_handle_crash(struct task_struct *task, const int sig);
82313+int gr_handle_signal(const struct task_struct *p, const int sig);
82314+int gr_check_crash_uid(const kuid_t uid);
82315+int gr_check_protected_task(const struct task_struct *task);
82316+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
82317+int gr_acl_handle_mmap(const struct file *file,
82318+ const unsigned long prot);
82319+int gr_acl_handle_mprotect(const struct file *file,
82320+ const unsigned long prot);
82321+int gr_check_hidden_task(const struct task_struct *tsk);
82322+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
82323+ const struct vfsmount *mnt);
82324+__u32 gr_acl_handle_utime(const struct dentry *dentry,
82325+ const struct vfsmount *mnt);
82326+__u32 gr_acl_handle_access(const struct dentry *dentry,
82327+ const struct vfsmount *mnt, const int fmode);
82328+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
82329+ const struct vfsmount *mnt, umode_t *mode);
82330+__u32 gr_acl_handle_chown(const struct dentry *dentry,
82331+ const struct vfsmount *mnt);
82332+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
82333+ const struct vfsmount *mnt);
82334+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
82335+ const struct vfsmount *mnt);
82336+int gr_handle_ptrace(struct task_struct *task, const long request);
82337+int gr_handle_proc_ptrace(struct task_struct *task);
82338+__u32 gr_acl_handle_execve(const struct dentry *dentry,
82339+ const struct vfsmount *mnt);
82340+int gr_check_crash_exec(const struct file *filp);
82341+int gr_acl_is_enabled(void);
82342+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
82343+ const kgid_t gid);
82344+int gr_set_proc_label(const struct dentry *dentry,
82345+ const struct vfsmount *mnt,
82346+ const int unsafe_flags);
82347+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
82348+ const struct vfsmount *mnt);
82349+__u32 gr_acl_handle_open(const struct dentry *dentry,
82350+ const struct vfsmount *mnt, int acc_mode);
82351+__u32 gr_acl_handle_creat(const struct dentry *dentry,
82352+ const struct dentry *p_dentry,
82353+ const struct vfsmount *p_mnt,
82354+ int open_flags, int acc_mode, const int imode);
82355+void gr_handle_create(const struct dentry *dentry,
82356+ const struct vfsmount *mnt);
82357+void gr_handle_proc_create(const struct dentry *dentry,
82358+ const struct inode *inode);
82359+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
82360+ const struct dentry *parent_dentry,
82361+ const struct vfsmount *parent_mnt,
82362+ const int mode);
82363+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
82364+ const struct dentry *parent_dentry,
82365+ const struct vfsmount *parent_mnt);
82366+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
82367+ const struct vfsmount *mnt);
82368+void gr_handle_delete(const u64 ino, const dev_t dev);
82369+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
82370+ const struct vfsmount *mnt);
82371+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
82372+ const struct dentry *parent_dentry,
82373+ const struct vfsmount *parent_mnt,
82374+ const struct filename *from);
82375+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
82376+ const struct dentry *parent_dentry,
82377+ const struct vfsmount *parent_mnt,
82378+ const struct dentry *old_dentry,
82379+ const struct vfsmount *old_mnt, const struct filename *to);
82380+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
82381+int gr_acl_handle_rename(struct dentry *new_dentry,
82382+ struct dentry *parent_dentry,
82383+ const struct vfsmount *parent_mnt,
82384+ struct dentry *old_dentry,
82385+ struct inode *old_parent_inode,
82386+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
82387+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
82388+ struct dentry *old_dentry,
82389+ struct dentry *new_dentry,
82390+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
82391+__u32 gr_check_link(const struct dentry *new_dentry,
82392+ const struct dentry *parent_dentry,
82393+ const struct vfsmount *parent_mnt,
82394+ const struct dentry *old_dentry,
82395+ const struct vfsmount *old_mnt);
82396+int gr_acl_handle_filldir(const struct file *file, const char *name,
82397+ const unsigned int namelen, const u64 ino);
82398+
82399+__u32 gr_acl_handle_unix(const struct dentry *dentry,
82400+ const struct vfsmount *mnt);
82401+void gr_acl_handle_exit(void);
82402+void gr_acl_handle_psacct(struct task_struct *task, const long code);
82403+int gr_acl_handle_procpidmem(const struct task_struct *task);
82404+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
82405+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
82406+void gr_audit_ptrace(struct task_struct *task);
82407+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
82408+u64 gr_get_ino_from_dentry(struct dentry *dentry);
82409+void gr_put_exec_file(struct task_struct *task);
82410+
82411+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
82412+
82413+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82414+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82415+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
82416+ struct dentry *newdentry, struct vfsmount *newmnt);
82417+
82418+#ifdef CONFIG_GRKERNSEC_RESLOG
82419+extern void gr_log_resource(const struct task_struct *task, const int res,
82420+ const unsigned long wanted, const int gt);
82421+#else
82422+static inline void gr_log_resource(const struct task_struct *task, const int res,
82423+ const unsigned long wanted, const int gt)
82424+{
82425+}
82426+#endif
82427+
82428+#ifdef CONFIG_GRKERNSEC
82429+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
82430+void gr_handle_vm86(void);
82431+void gr_handle_mem_readwrite(u64 from, u64 to);
82432+
82433+void gr_log_badprocpid(const char *entry);
82434+
82435+extern int grsec_enable_dmesg;
82436+extern int grsec_disable_privio;
82437+
82438+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
82439+extern kgid_t grsec_proc_gid;
82440+#endif
82441+
82442+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82443+extern int grsec_enable_chroot_findtask;
82444+#endif
82445+#ifdef CONFIG_GRKERNSEC_SETXID
82446+extern int grsec_enable_setxid;
82447+#endif
82448+#endif
82449+
82450+#endif
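A minimal sketch (not part of the patch) of the stub pattern the CONFIG_GRKERNSEC_RESLOG block above uses, and which recurs throughout grsecurity: when the option is off, the hook becomes an empty static inline and the compiler deletes every call site. CONFIG_DEMO_HOOK and demo_hook() are invented names:

#include <stdio.h>

#ifdef CONFIG_DEMO_HOOK
void demo_hook(unsigned long wanted)
{
	printf("resource request: %lu\n", wanted);
}
#else
static inline void demo_hook(unsigned long wanted)
{
	/* empty body: the call below compiles to nothing */
}
#endif

int main(void)
{
	demo_hook(42);	/* costs nothing unless CONFIG_DEMO_HOOK is defined */
	return 0;
}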
82451diff --git a/include/linux/grsock.h b/include/linux/grsock.h
82452new file mode 100644
82453index 0000000..e7ffaaf
82454--- /dev/null
82455+++ b/include/linux/grsock.h
82456@@ -0,0 +1,19 @@
82457+#ifndef __GRSOCK_H
82458+#define __GRSOCK_H
82459+
82460+extern void gr_attach_curr_ip(const struct sock *sk);
82461+extern int gr_handle_sock_all(const int family, const int type,
82462+ const int protocol);
82463+extern int gr_handle_sock_server(const struct sockaddr *sck);
82464+extern int gr_handle_sock_server_other(const struct sock *sck);
82465+extern int gr_handle_sock_client(const struct sockaddr *sck);
82466+extern int gr_search_connect(struct socket * sock,
82467+ struct sockaddr_in * addr);
82468+extern int gr_search_bind(struct socket * sock,
82469+ struct sockaddr_in * addr);
82470+extern int gr_search_listen(struct socket * sock);
82471+extern int gr_search_accept(struct socket * sock);
82472+extern int gr_search_socket(const int domain, const int type,
82473+ const int protocol);
82474+
82475+#endif
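A sketch of how the socket hooks above would gate socket creation; the patch wires the real checks into net/socket.c, and the call-site shape below (stubbed, with invented demo_* names) is an assumption, not a quote from the patch:

/* gr_search_socket() is the RBAC allow/deny decision; gr_handle_sock_all()
 * models the "deny all sockets for this group" policy. Stubs here. */
static int gr_search_socket_stub(int domain, int type, int protocol) { return 1; }
static int gr_handle_sock_all_stub(int family, int type, int protocol) { return 0; }

int demo_socket_create(int family, int type, int protocol)
{
	if (!gr_search_socket_stub(family, type, protocol))
		return -13;		/* -EACCES: RBAC said no */
	if (gr_handle_sock_all_stub(family, type, protocol))
		return -13;		/* this socket class is disabled */
	/* ... proceed with normal socket creation ... */
	return 0;
}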
82476diff --git a/include/linux/highmem.h b/include/linux/highmem.h
82477index 9286a46..373f27f 100644
82478--- a/include/linux/highmem.h
82479+++ b/include/linux/highmem.h
82480@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
82481 kunmap_atomic(kaddr);
82482 }
82483
82484+static inline void sanitize_highpage(struct page *page)
82485+{
82486+ void *kaddr;
82487+ unsigned long flags;
82488+
82489+ local_irq_save(flags);
82490+ kaddr = kmap_atomic(page);
82491+ clear_page(kaddr);
82492+ kunmap_atomic(kaddr);
82493+ local_irq_restore(flags);
82494+}
82495+
82496 static inline void zero_user_segments(struct page *page,
82497 unsigned start1, unsigned end1,
82498 unsigned start2, unsigned end2)
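sanitize_highpage() above is the scrub primitive behind PaX's memory sanitization: a page is cleared as it is freed so stale contents cannot leak into the next allocation. Interrupts are disabled around the kmap_atomic()/kunmap_atomic() pair because the atomic kmap slots are per-CPU and must not be re-entered. A userspace model of the free-time scrub (invented names; volatile keeps the wipe from being optimized away as a dead store before free()):

#include <stdlib.h>
#include <stddef.h>

#define DEMO_PAGE_SIZE 4096

void demo_sanitize_and_free(void *page)
{
	volatile unsigned char *p = page;
	size_t i;

	for (i = 0; i < DEMO_PAGE_SIZE; i++)	/* models clear_page(kaddr) */
		p[i] = 0;
	free(page);
}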
82499diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
82500index 1c7b89a..7dda400 100644
82501--- a/include/linux/hwmon-sysfs.h
82502+++ b/include/linux/hwmon-sysfs.h
82503@@ -25,7 +25,8 @@
82504 struct sensor_device_attribute{
82505 struct device_attribute dev_attr;
82506 int index;
82507-};
82508+} __do_const;
82509+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
82510 #define to_sensor_dev_attr(_dev_attr) \
82511 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
82512
82513@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
82514 struct device_attribute dev_attr;
82515 u8 index;
82516 u8 nr;
82517-};
82518+} __do_const;
82519+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
82520 #define to_sensor_dev_attr_2(_dev_attr) \
82521 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
82522
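The __do_const / __no_const pair above is consumed by PaX's constify gcc plugin, which moves otherwise-writable attribute and ops structures into read-only memory; the *_no_const typedef is the escape hatch for the few instances a driver must fill in at runtime. Modeled below with plain const and invented names:

struct demo_attr {
	int index;
};
typedef struct demo_attr demo_attr_no_const;	/* stays writable */

static const struct demo_attr fixed_attr = { .index = 0 };	/* .rodata */

static void demo_runtime_setup(demo_attr_no_const *a, int idx)
{
	a->index = idx;	/* only legal on the non-constified flavor */
}

int main(void)
{
	demo_attr_no_const a;

	demo_runtime_setup(&a, fixed_attr.index + 1);
	return a.index;
}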
82523diff --git a/include/linux/i2c.h b/include/linux/i2c.h
82524index 7c76959..153e597 100644
82525--- a/include/linux/i2c.h
82526+++ b/include/linux/i2c.h
82527@@ -413,6 +413,7 @@ struct i2c_algorithm {
82528 int (*unreg_slave)(struct i2c_client *client);
82529 #endif
82530 };
82531+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
82532
82533 /**
82534 * struct i2c_bus_recovery_info - I2C bus recovery information
82535diff --git a/include/linux/i2o.h b/include/linux/i2o.h
82536index d23c3c2..eb63c81 100644
82537--- a/include/linux/i2o.h
82538+++ b/include/linux/i2o.h
82539@@ -565,7 +565,7 @@ struct i2o_controller {
82540 struct i2o_device *exec; /* Executive */
82541 #if BITS_PER_LONG == 64
82542 spinlock_t context_list_lock; /* lock for context_list */
82543- atomic_t context_list_counter; /* needed for unique contexts */
82544+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
82545 struct list_head context_list; /* list of context id's
82546 and pointers */
82547 #endif
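Under CONFIG_PAX_REFCOUNT every atomic_t gains overflow detection so a reference count cannot be wrapped by an attacker; context_list_counter above only hands out unique ids, where wraparound is harmless, so it opts out via atomic_unchecked_t. A rough userspace model of such an unchecked counter, using C11 atomics (invented names):

#include <stdatomic.h>

static atomic_uint demo_context_counter;	/* wraparound is fine here */

unsigned int demo_next_context_id(void)
{
	/* ids only need to be distinct in practice; overflow just wraps */
	return (unsigned int)atomic_fetch_add_explicit(&demo_context_counter,
						       1, memory_order_relaxed);
}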
82548diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
82549index aff7ad8..3942bbd 100644
82550--- a/include/linux/if_pppox.h
82551+++ b/include/linux/if_pppox.h
82552@@ -76,7 +76,7 @@ struct pppox_proto {
82553 int (*ioctl)(struct socket *sock, unsigned int cmd,
82554 unsigned long arg);
82555 struct module *owner;
82556-};
82557+} __do_const;
82558
82559 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
82560 extern void unregister_pppox_proto(int proto_num);
82561diff --git a/include/linux/init.h b/include/linux/init.h
82562index 2df8e8d..3e1280d 100644
82563--- a/include/linux/init.h
82564+++ b/include/linux/init.h
82565@@ -37,9 +37,17 @@
82566 * section.
82567 */
82568
82569+#define add_init_latent_entropy __latent_entropy
82570+
82571+#ifdef CONFIG_MEMORY_HOTPLUG
82572+#define add_meminit_latent_entropy
82573+#else
82574+#define add_meminit_latent_entropy __latent_entropy
82575+#endif
82576+
82577 /* These are for everybody (although not all archs will actually
82578 discard it in modules) */
82579-#define __init __section(.init.text) __cold notrace
82580+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
82581 #define __initdata __section(.init.data)
82582 #define __initconst __constsection(.init.rodata)
82583 #define __exitdata __section(.exit.data)
82584@@ -100,7 +108,7 @@
82585 #define __cpuexitconst
82586
82587 /* Used for MEMORY_HOTPLUG */
82588-#define __meminit __section(.meminit.text) __cold notrace
82589+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
82590 #define __meminitdata __section(.meminit.data)
82591 #define __meminitconst __constsection(.meminit.rodata)
82592 #define __memexit __section(.memexit.text) __exitused __cold notrace
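add_init_latent_entropy expands to __latent_entropy, the marker for PaX's latent_entropy gcc plugin: every __init function (and __meminit, when memory hotplug is off and that code is boot-only) is instrumented to mix a build-time-random value into a global pool as it runs, so early boot accumulates some entropy before real sources come up. A hand-written model of the instrumented result (the real stores are emitted by the plugin; names and the constant are invented):

static unsigned long demo_latent_entropy_pool;

static void demo_initcall(void)
{
	demo_latent_entropy_pool ^= 0x9e3779b97f4a7c15UL;	/* per-function value */
	/* ... the function's real initialization work ... */
}

int main(void)
{
	demo_initcall();
	return (int)(demo_latent_entropy_pool & 0xff);
}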
82593diff --git a/include/linux/init_task.h b/include/linux/init_task.h
82594index 3037fc0..c6527ce 100644
82595--- a/include/linux/init_task.h
82596+++ b/include/linux/init_task.h
82597@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
82598
82599 #define INIT_TASK_COMM "swapper"
82600
82601+#ifdef CONFIG_X86
82602+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
82603+#else
82604+#define INIT_TASK_THREAD_INFO
82605+#endif
82606+
82607 #ifdef CONFIG_RT_MUTEXES
82608 # define INIT_RT_MUTEXES(tsk) \
82609 .pi_waiters = RB_ROOT, \
82610@@ -214,6 +220,7 @@ extern struct task_group root_task_group;
82611 RCU_POINTER_INITIALIZER(cred, &init_cred), \
82612 .comm = INIT_TASK_COMM, \
82613 .thread = INIT_THREAD, \
82614+ INIT_TASK_THREAD_INFO \
82615 .fs = &init_fs, \
82616 .files = &init_files, \
82617 .signal = &init_signals, \
82618diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
82619index d9b05b5..e5f5b7b 100644
82620--- a/include/linux/interrupt.h
82621+++ b/include/linux/interrupt.h
82622@@ -413,8 +413,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
82623
82624 struct softirq_action
82625 {
82626- void (*action)(struct softirq_action *);
82627-};
82628+ void (*action)(void);
82629+} __no_const;
82630
82631 asmlinkage void do_softirq(void);
82632 asmlinkage void __do_softirq(void);
82633@@ -428,7 +428,7 @@ static inline void do_softirq_own_stack(void)
82634 }
82635 #endif
82636
82637-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
82638+extern void open_softirq(int nr, void (*action)(void));
82639 extern void softirq_init(void);
82640 extern void __raise_softirq_irqoff(unsigned int nr);
82641
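The softirq_action callback above loses its argument (handlers never received useful data through it), and the struct is marked __no_const: softirq_vec is filled in at boot via open_softirq(), so the constify plugin must leave it writable. A runnable model of registration and dispatch under the new signature (demo_* names invented; slot 6 mirrors the kernel's TASKLET_SOFTIRQ):

#include <stdio.h>

#define DEMO_NR_SOFTIRQS 10

static void (*demo_softirq_vec[DEMO_NR_SOFTIRQS])(void);

static void demo_open_softirq(int nr, void (*action)(void))
{
	demo_softirq_vec[nr] = action;
}

static void demo_tasklet_action(void)	/* was: (struct softirq_action *a) */
{
	puts("tasklet softirq ran");
}

int main(void)
{
	demo_open_softirq(6, demo_tasklet_action);
	demo_softirq_vec[6]();	/* models __do_softirq() dispatch */
	return 0;
}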
82642diff --git a/include/linux/iommu.h b/include/linux/iommu.h
82643index 38daa45..4de4317 100644
82644--- a/include/linux/iommu.h
82645+++ b/include/linux/iommu.h
82646@@ -147,7 +147,7 @@ struct iommu_ops {
82647
82648 unsigned long pgsize_bitmap;
82649 void *priv;
82650-};
82651+} __do_const;
82652
82653 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
82654 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
82655diff --git a/include/linux/ioport.h b/include/linux/ioport.h
82656index 2c525022..345b106 100644
82657--- a/include/linux/ioport.h
82658+++ b/include/linux/ioport.h
82659@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
82660 int adjust_resource(struct resource *res, resource_size_t start,
82661 resource_size_t size);
82662 resource_size_t resource_alignment(struct resource *res);
82663-static inline resource_size_t resource_size(const struct resource *res)
82664+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
82665 {
82666 return res->end - res->start + 1;
82667 }
82668diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
82669index 1eee6bc..9cf4912 100644
82670--- a/include/linux/ipc_namespace.h
82671+++ b/include/linux/ipc_namespace.h
82672@@ -60,7 +60,7 @@ struct ipc_namespace {
82673 struct user_namespace *user_ns;
82674
82675 struct ns_common ns;
82676-};
82677+} __randomize_layout;
82678
82679 extern struct ipc_namespace init_ipc_ns;
82680 extern atomic_t nr_ipc_ns;
82681diff --git a/include/linux/irq.h b/include/linux/irq.h
82682index d09ec7a..f373eb5 100644
82683--- a/include/linux/irq.h
82684+++ b/include/linux/irq.h
82685@@ -364,7 +364,8 @@ struct irq_chip {
82686 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
82687
82688 unsigned long flags;
82689-};
82690+} __do_const;
82691+typedef struct irq_chip __no_const irq_chip_no_const;
82692
82693 /*
82694 * irq_chip specific flags
82695diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
82696index 71d706d..817cdec 100644
82697--- a/include/linux/irqchip/arm-gic.h
82698+++ b/include/linux/irqchip/arm-gic.h
82699@@ -95,7 +95,7 @@
82700
82701 struct device_node;
82702
82703-extern struct irq_chip gic_arch_extn;
82704+extern irq_chip_no_const gic_arch_extn;
82705
82706 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
82707 u32 offset, struct device_node *);
82708diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
82709index faf433a..7dcb186 100644
82710--- a/include/linux/irqdesc.h
82711+++ b/include/linux/irqdesc.h
82712@@ -61,7 +61,7 @@ struct irq_desc {
82713 unsigned int irq_count; /* For detecting broken IRQs */
82714 unsigned long last_unhandled; /* Aging timer for unhandled count */
82715 unsigned int irqs_unhandled;
82716- atomic_t threads_handled;
82717+ atomic_unchecked_t threads_handled;
82718 int threads_handled_last;
82719 raw_spinlock_t lock;
82720 struct cpumask *percpu_enabled;
82721diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
82722index c367cbd..c9b79e6 100644
82723--- a/include/linux/jiffies.h
82724+++ b/include/linux/jiffies.h
82725@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
82726 /*
82727 * Convert various time units to each other:
82728 */
82729-extern unsigned int jiffies_to_msecs(const unsigned long j);
82730-extern unsigned int jiffies_to_usecs(const unsigned long j);
82731+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
82732+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
82733
82734-static inline u64 jiffies_to_nsecs(const unsigned long j)
82735+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
82736 {
82737 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
82738 }
82739
82740-extern unsigned long msecs_to_jiffies(const unsigned int m);
82741-extern unsigned long usecs_to_jiffies(const unsigned int u);
82742+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
82743+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
82744 extern unsigned long timespec_to_jiffies(const struct timespec *value);
82745 extern void jiffies_to_timespec(const unsigned long jiffies,
82746- struct timespec *value);
82747-extern unsigned long timeval_to_jiffies(const struct timeval *value);
82748+ struct timespec *value) __intentional_overflow(-1);
82749+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
82750 extern void jiffies_to_timeval(const unsigned long jiffies,
82751 struct timeval *value);
82752
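__intentional_overflow(-1) is an annotation for PaX's size_overflow gcc plugin, which instruments integer arithmetic and flags unexpected overflow; conversions like jiffies_to_msecs() may legitimately wrap on extreme inputs, so the marker whitelists them. Without the plugin the attribute must expand to nothing; a compilable sketch (HZ fixed at 250 for the example, names invented):

#ifndef __intentional_overflow
#define __intentional_overflow(...)	/* no plugin: annotation vanishes */
#endif

static inline unsigned int __intentional_overflow(-1)
demo_jiffies_to_msecs(const unsigned long j)
{
	return (unsigned int)(j * (1000 / 250));	/* may wrap; accepted */
}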
82753diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
82754index 6883e19..e854fcb 100644
82755--- a/include/linux/kallsyms.h
82756+++ b/include/linux/kallsyms.h
82757@@ -15,7 +15,8 @@
82758
82759 struct module;
82760
82761-#ifdef CONFIG_KALLSYMS
82762+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
82763+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
82764 /* Lookup the address for a symbol. Returns 0 if not found. */
82765 unsigned long kallsyms_lookup_name(const char *name);
82766
82767@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
82768 /* Stupid that this does nothing, but I didn't create this mess. */
82769 #define __print_symbol(fmt, addr)
82770 #endif /*CONFIG_KALLSYMS*/
82771+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
82772+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
82773+extern unsigned long kallsyms_lookup_name(const char *name);
82774+extern void __print_symbol(const char *fmt, unsigned long address);
82775+extern int sprint_backtrace(char *buffer, unsigned long address);
82776+extern int sprint_symbol(char *buffer, unsigned long address);
82777+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
82778+const char *kallsyms_lookup(unsigned long addr,
82779+ unsigned long *symbolsize,
82780+ unsigned long *offset,
82781+ char **modname, char *namebuf);
82782+extern int kallsyms_lookup_size_offset(unsigned long addr,
82783+ unsigned long *symbolsize,
82784+ unsigned long *offset);
82785+#endif
82786
82787 /* This macro allows us to keep printk typechecking */
82788 static __printf(1, 2)
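A reading aid (not in the patch) for the nested guard above:

/*
 * HIDESYM off:  CONFIG_KALLSYMS alone picks real prototypes vs. stubs,
 *               exactly as in the unpatched header.
 * HIDESYM on:   only files that define __INCLUDED_BY_HIDESYM before the
 *               #include (kallsyms.c, vsnprintf.c, kprobes.c,
 *               dumpstack.c, per the comment above) take the outer #else
 *               and keep the real prototypes; every other includer gets
 *               the empty stubs, so ordinary code can no longer resolve
 *               kernel addresses to symbol names.
 */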
82789diff --git a/include/linux/kernel.h b/include/linux/kernel.h
82790index 64ce58b..6bcdbfa 100644
82791--- a/include/linux/kernel.h
82792+++ b/include/linux/kernel.h
82793@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
82794 /* Obsolete, do not use. Use kstrto<foo> instead */
82795
82796 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
82797-extern long simple_strtol(const char *,char **,unsigned int);
82798+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
82799 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
82800 extern long long simple_strtoll(const char *,char **,unsigned int);
82801
82802diff --git a/include/linux/key-type.h b/include/linux/key-type.h
82803index ff9f1d3..6712be5 100644
82804--- a/include/linux/key-type.h
82805+++ b/include/linux/key-type.h
82806@@ -152,7 +152,7 @@ struct key_type {
82807 /* internal fields */
82808 struct list_head link; /* link in types list */
82809 struct lock_class_key lock_class; /* key->sem lock class */
82810-};
82811+} __do_const;
82812
82813 extern struct key_type key_type_keyring;
82814
82815diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
82816index e465bb1..19f605fd 100644
82817--- a/include/linux/kgdb.h
82818+++ b/include/linux/kgdb.h
82819@@ -52,7 +52,7 @@ extern int kgdb_connected;
82820 extern int kgdb_io_module_registered;
82821
82822 extern atomic_t kgdb_setting_breakpoint;
82823-extern atomic_t kgdb_cpu_doing_single_step;
82824+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
82825
82826 extern struct task_struct *kgdb_usethread;
82827 extern struct task_struct *kgdb_contthread;
82828@@ -254,7 +254,7 @@ struct kgdb_arch {
82829 void (*correct_hw_break)(void);
82830
82831 void (*enable_nmi)(bool on);
82832-};
82833+} __do_const;
82834
82835 /**
82836 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
82837@@ -279,7 +279,7 @@ struct kgdb_io {
82838 void (*pre_exception) (void);
82839 void (*post_exception) (void);
82840 int is_console;
82841-};
82842+} __do_const;
82843
82844 extern struct kgdb_arch arch_kgdb_ops;
82845
82846diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
82847index e705467..a92471d 100644
82848--- a/include/linux/kmemleak.h
82849+++ b/include/linux/kmemleak.h
82850@@ -27,7 +27,7 @@
82851
82852 extern void kmemleak_init(void) __ref;
82853 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
82854- gfp_t gfp) __ref;
82855+ gfp_t gfp) __ref __size_overflow(2);
82856 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
82857 extern void kmemleak_free(const void *ptr) __ref;
82858 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
82859@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
82860 static inline void kmemleak_init(void)
82861 {
82862 }
82863-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
82864+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
82865 gfp_t gfp)
82866 {
82867 }
82868diff --git a/include/linux/kmod.h b/include/linux/kmod.h
82869index 0555cc6..40116ce 100644
82870--- a/include/linux/kmod.h
82871+++ b/include/linux/kmod.h
82872@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
82873 * usually useless though. */
82874 extern __printf(2, 3)
82875 int __request_module(bool wait, const char *name, ...);
82876+extern __printf(3, 4)
82877+int ___request_module(bool wait, char *param_name, const char *name, ...);
82878 #define request_module(mod...) __request_module(true, mod)
82879 #define request_module_nowait(mod...) __request_module(false, mod)
82880 #define try_then_request_module(x, mod...) \
82881@@ -57,6 +59,9 @@ struct subprocess_info {
82882 struct work_struct work;
82883 struct completion *complete;
82884 char *path;
82885+#ifdef CONFIG_GRKERNSEC
82886+ char *origpath;
82887+#endif
82888 char **argv;
82889 char **envp;
82890 int wait;
82891diff --git a/include/linux/kobject.h b/include/linux/kobject.h
82892index 2d61b90..a1d0a13 100644
82893--- a/include/linux/kobject.h
82894+++ b/include/linux/kobject.h
82895@@ -118,7 +118,7 @@ struct kobj_type {
82896 struct attribute **default_attrs;
82897 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
82898 const void *(*namespace)(struct kobject *kobj);
82899-};
82900+} __do_const;
82901
82902 struct kobj_uevent_env {
82903 char *argv[3];
82904@@ -142,6 +142,7 @@ struct kobj_attribute {
82905 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
82906 const char *buf, size_t count);
82907 };
82908+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
82909
82910 extern const struct sysfs_ops kobj_sysfs_ops;
82911
82912@@ -169,7 +170,7 @@ struct kset {
82913 spinlock_t list_lock;
82914 struct kobject kobj;
82915 const struct kset_uevent_ops *uevent_ops;
82916-};
82917+} __randomize_layout;
82918
82919 extern void kset_init(struct kset *kset);
82920 extern int __must_check kset_register(struct kset *kset);
82921diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
82922index df32d25..fb52e27 100644
82923--- a/include/linux/kobject_ns.h
82924+++ b/include/linux/kobject_ns.h
82925@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
82926 const void *(*netlink_ns)(struct sock *sk);
82927 const void *(*initial_ns)(void);
82928 void (*drop_ns)(void *);
82929-};
82930+} __do_const;
82931
82932 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
82933 int kobj_ns_type_registered(enum kobj_ns_type type);
82934diff --git a/include/linux/kref.h b/include/linux/kref.h
82935index 484604d..0f6c5b6 100644
82936--- a/include/linux/kref.h
82937+++ b/include/linux/kref.h
82938@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
82939 static inline int kref_sub(struct kref *kref, unsigned int count,
82940 void (*release)(struct kref *kref))
82941 {
82942- WARN_ON(release == NULL);
82943+ BUG_ON(release == NULL);
82944
82945 if (atomic_sub_and_test((int) count, &kref->refcount)) {
82946 release(kref);
82947diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
82948index 26f1060..bafc04a 100644
82949--- a/include/linux/kvm_host.h
82950+++ b/include/linux/kvm_host.h
82951@@ -470,7 +470,7 @@ static inline void kvm_irqfd_exit(void)
82952 {
82953 }
82954 #endif
82955-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
82956+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
82957 struct module *module);
82958 void kvm_exit(void);
82959
82960@@ -639,7 +639,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
82961 struct kvm_guest_debug *dbg);
82962 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
82963
82964-int kvm_arch_init(void *opaque);
82965+int kvm_arch_init(const void *opaque);
82966 void kvm_arch_exit(void);
82967
82968 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
82969diff --git a/include/linux/libata.h b/include/linux/libata.h
82970index 91f705d..24be831 100644
82971--- a/include/linux/libata.h
82972+++ b/include/linux/libata.h
82973@@ -979,7 +979,7 @@ struct ata_port_operations {
82974 * fields must be pointers.
82975 */
82976 const struct ata_port_operations *inherits;
82977-};
82978+} __do_const;
82979
82980 struct ata_port_info {
82981 unsigned long flags;
82982diff --git a/include/linux/linkage.h b/include/linux/linkage.h
82983index a6a42dd..6c5ebce 100644
82984--- a/include/linux/linkage.h
82985+++ b/include/linux/linkage.h
82986@@ -36,6 +36,7 @@
82987 #endif
82988
82989 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
82990+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
82991 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
82992
82993 /*
82994diff --git a/include/linux/list.h b/include/linux/list.h
82995index feb773c..98f3075 100644
82996--- a/include/linux/list.h
82997+++ b/include/linux/list.h
82998@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
82999 extern void list_del(struct list_head *entry);
83000 #endif
83001
83002+extern void __pax_list_add(struct list_head *new,
83003+ struct list_head *prev,
83004+ struct list_head *next);
83005+static inline void pax_list_add(struct list_head *new, struct list_head *head)
83006+{
83007+ __pax_list_add(new, head, head->next);
83008+}
83009+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
83010+{
83011+ __pax_list_add(new, head->prev, head);
83012+}
83013+extern void pax_list_del(struct list_head *entry);
83014+
83015 /**
83016 * list_replace - replace old entry by new one
83017 * @old : the element to be replaced
83018@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
83019 INIT_LIST_HEAD(entry);
83020 }
83021
83022+extern void pax_list_del_init(struct list_head *entry);
83023+
83024 /**
83025 * list_move - delete from one list and add as another's head
83026 * @list: the entry to move
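The pax_list_* helpers above mirror list_add()/list_del() for lists whose nodes the constify/KERNEXEC machinery keeps in read-only memory: the pointer writes happen inside a window where write protection is lifted. A rough userspace model of __pax_list_add(), which is implemented out of line elsewhere in this patch (open/close are stubs here and demo_* names are invented):

struct demo_list_head {
	struct demo_list_head *next, *prev;
};

/* in the kernel these toggle write access to read-only data */
static void demo_pax_open_kernel(void) { }
static void demo_pax_close_kernel(void) { }

void demo_pax_list_add(struct demo_list_head *new,
		       struct demo_list_head *prev,
		       struct demo_list_head *next)
{
	demo_pax_open_kernel();
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
	demo_pax_close_kernel();
}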
83027diff --git a/include/linux/lockref.h b/include/linux/lockref.h
83028index 4bfde0e..d6e2e09 100644
83029--- a/include/linux/lockref.h
83030+++ b/include/linux/lockref.h
83031@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l)
83032 return ((int)l->count < 0);
83033 }
83034
83035+static inline unsigned int __lockref_read(struct lockref *lockref)
83036+{
83037+ return lockref->count;
83038+}
83039+
83040+static inline void __lockref_set(struct lockref *lockref, unsigned int count)
83041+{
83042+ lockref->count = count;
83043+}
83044+
83045+static inline void __lockref_inc(struct lockref *lockref)
83046+{
83047+
83048+#ifdef CONFIG_PAX_REFCOUNT
83049+ atomic_inc((atomic_t *)&lockref->count);
83050+#else
83051+ lockref->count++;
83052+#endif
83053+
83054+}
83055+
83056+static inline void __lockref_dec(struct lockref *lockref)
83057+{
83058+
83059+#ifdef CONFIG_PAX_REFCOUNT
83060+ atomic_dec((atomic_t *)&lockref->count);
83061+#else
83062+ lockref->count--;
83063+#endif
83064+
83065+}
83066+
83067 #endif /* __LINUX_LOCKREF_H */
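The __lockref_* accessors above route the count through atomic_inc()/atomic_dec() when PAX_REFCOUNT is on, so lockref updates inherit the same overflow checking as every other atomic_t, and compile back to a plain ++/-- otherwise. Call sites elsewhere in the patch change roughly like this (sketch):

/* before: dentry->d_lockref.count++;         */
/* after:  __lockref_inc(&dentry->d_lockref); */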
83068diff --git a/include/linux/math64.h b/include/linux/math64.h
83069index c45c089..298841c 100644
83070--- a/include/linux/math64.h
83071+++ b/include/linux/math64.h
83072@@ -15,7 +15,7 @@
83073 * This is commonly provided by 32bit archs to provide an optimized 64bit
83074 * divide.
83075 */
83076-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83077+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83078 {
83079 *remainder = dividend % divisor;
83080 return dividend / divisor;
83081@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
83082 /**
83083 * div64_u64 - unsigned 64bit divide with 64bit divisor
83084 */
83085-static inline u64 div64_u64(u64 dividend, u64 divisor)
83086+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83087 {
83088 return dividend / divisor;
83089 }
83090@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
83091 #define div64_ul(x, y) div_u64((x), (y))
83092
83093 #ifndef div_u64_rem
83094-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83095+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83096 {
83097 *remainder = do_div(dividend, divisor);
83098 return dividend;
83099@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
83100 #endif
83101
83102 #ifndef div64_u64
83103-extern u64 div64_u64(u64 dividend, u64 divisor);
83104+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
83105 #endif
83106
83107 #ifndef div64_s64
83108@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
83109 * divide.
83110 */
83111 #ifndef div_u64
83112-static inline u64 div_u64(u64 dividend, u32 divisor)
83113+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
83114 {
83115 u32 remainder;
83116 return div_u64_rem(dividend, divisor, &remainder);
83117diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
83118index 3d385c8..deacb6a 100644
83119--- a/include/linux/mempolicy.h
83120+++ b/include/linux/mempolicy.h
83121@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
83122 }
83123
83124 #define vma_policy(vma) ((vma)->vm_policy)
83125+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83126+{
83127+ vma->vm_policy = pol;
83128+}
83129
83130 static inline void mpol_get(struct mempolicy *pol)
83131 {
83132@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
83133 }
83134
83135 #define vma_policy(vma) NULL
83136+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83137+{
83138+}
83139
83140 static inline int
83141 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
83142diff --git a/include/linux/mm.h b/include/linux/mm.h
83143index dd5ea30..cf81cd1 100644
83144--- a/include/linux/mm.h
83145+++ b/include/linux/mm.h
83146@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
83147
83148 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
83149 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
83150+
83151+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
83152+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
83153+#endif
83154+
83155 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
83156 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
83157 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
83158@@ -256,8 +261,8 @@ struct vm_operations_struct {
83159 /* called by access_process_vm when get_user_pages() fails, typically
83160 * for use by special VMAs that can switch between memory and hardware
83161 */
83162- int (*access)(struct vm_area_struct *vma, unsigned long addr,
83163- void *buf, int len, int write);
83164+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
83165+ void *buf, size_t len, int write);
83166
83167 /* Called by the /proc/PID/maps code to ask the vma whether it
83168 * has a special name. Returning non-NULL will also cause this
83169@@ -291,6 +296,7 @@ struct vm_operations_struct {
83170 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
83171 unsigned long size, pgoff_t pgoff);
83172 };
83173+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
83174
83175 struct mmu_gather;
83176 struct inode;
83177@@ -1183,8 +1189,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
83178 unsigned long *pfn);
83179 int follow_phys(struct vm_area_struct *vma, unsigned long address,
83180 unsigned int flags, unsigned long *prot, resource_size_t *phys);
83181-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83182- void *buf, int len, int write);
83183+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83184+ void *buf, size_t len, int write);
83185
83186 static inline void unmap_shared_mapping_range(struct address_space *mapping,
83187 loff_t const holebegin, loff_t const holelen)
83188@@ -1224,9 +1230,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
83189 }
83190 #endif
83191
83192-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
83193-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83194- void *buf, int len, int write);
83195+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
83196+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83197+ void *buf, size_t len, int write);
83198
83199 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
83200 unsigned long start, unsigned long nr_pages,
83201@@ -1258,34 +1264,6 @@ int set_page_dirty_lock(struct page *page);
83202 int clear_page_dirty_for_io(struct page *page);
83203 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
83204
83205-/* Is the vma a continuation of the stack vma above it? */
83206-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
83207-{
83208- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
83209-}
83210-
83211-static inline int stack_guard_page_start(struct vm_area_struct *vma,
83212- unsigned long addr)
83213-{
83214- return (vma->vm_flags & VM_GROWSDOWN) &&
83215- (vma->vm_start == addr) &&
83216- !vma_growsdown(vma->vm_prev, addr);
83217-}
83218-
83219-/* Is the vma a continuation of the stack vma below it? */
83220-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
83221-{
83222- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
83223-}
83224-
83225-static inline int stack_guard_page_end(struct vm_area_struct *vma,
83226- unsigned long addr)
83227-{
83228- return (vma->vm_flags & VM_GROWSUP) &&
83229- (vma->vm_end == addr) &&
83230- !vma_growsup(vma->vm_next, addr);
83231-}
83232-
83233 extern struct task_struct *task_of_stack(struct task_struct *task,
83234 struct vm_area_struct *vma, bool in_group);
83235
83236@@ -1403,8 +1381,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
83237 {
83238 return 0;
83239 }
83240+
83241+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
83242+ unsigned long address)
83243+{
83244+ return 0;
83245+}
83246 #else
83247 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83248+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83249 #endif
83250
83251 #ifdef __PAGETABLE_PMD_FOLDED
83252@@ -1413,8 +1398,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
83253 {
83254 return 0;
83255 }
83256+
83257+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
83258+ unsigned long address)
83259+{
83260+ return 0;
83261+}
83262 #else
83263 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
83264+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
83265 #endif
83266
83267 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
83268@@ -1432,11 +1424,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
83269 NULL: pud_offset(pgd, address);
83270 }
83271
83272+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
83273+{
83274+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
83275+ NULL: pud_offset(pgd, address);
83276+}
83277+
83278 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
83279 {
83280 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
83281 NULL: pmd_offset(pud, address);
83282 }
83283+
83284+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
83285+{
83286+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
83287+ NULL: pmd_offset(pud, address);
83288+}
83289 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
83290
83291 #if USE_SPLIT_PTE_PTLOCKS
83292@@ -1819,12 +1823,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
83293 bool *need_rmap_locks);
83294 extern void exit_mmap(struct mm_struct *);
83295
83296+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
83297+extern void gr_learn_resource(const struct task_struct *task, const int res,
83298+ const unsigned long wanted, const int gt);
83299+#else
83300+static inline void gr_learn_resource(const struct task_struct *task, const int res,
83301+ const unsigned long wanted, const int gt)
83302+{
83303+}
83304+#endif
83305+
83306 static inline int check_data_rlimit(unsigned long rlim,
83307 unsigned long new,
83308 unsigned long start,
83309 unsigned long end_data,
83310 unsigned long start_data)
83311 {
83312+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
83313 if (rlim < RLIM_INFINITY) {
83314 if (((new - start) + (end_data - start_data)) > rlim)
83315 return -ENOSPC;
83316@@ -1849,7 +1864,7 @@ extern int install_special_mapping(struct mm_struct *mm,
83317 unsigned long addr, unsigned long len,
83318 unsigned long flags, struct page **pages);
83319
83320-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
83321+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
83322
83323 extern unsigned long mmap_region(struct file *file, unsigned long addr,
83324 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
83325@@ -1857,6 +1872,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
83326 unsigned long len, unsigned long prot, unsigned long flags,
83327 unsigned long pgoff, unsigned long *populate);
83328 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
83329+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
83330
83331 #ifdef CONFIG_MMU
83332 extern int __mm_populate(unsigned long addr, unsigned long len,
83333@@ -1885,10 +1901,11 @@ struct vm_unmapped_area_info {
83334 unsigned long high_limit;
83335 unsigned long align_mask;
83336 unsigned long align_offset;
83337+ unsigned long threadstack_offset;
83338 };
83339
83340-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
83341-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83342+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
83343+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
83344
83345 /*
83346 * Search for an unmapped address range.
83347@@ -1900,7 +1917,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83348 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
83349 */
83350 static inline unsigned long
83351-vm_unmapped_area(struct vm_unmapped_area_info *info)
83352+vm_unmapped_area(const struct vm_unmapped_area_info *info)
83353 {
83354 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
83355 return unmapped_area(info);
83356@@ -1962,6 +1979,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
83357 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
83358 struct vm_area_struct **pprev);
83359
83360+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
83361+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
83362+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
83363+
83364 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
83365 NULL if none. Assume start_addr < end_addr. */
83366 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
83367@@ -1991,10 +2012,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
83368 }
83369
83370 #ifdef CONFIG_MMU
83371-pgprot_t vm_get_page_prot(unsigned long vm_flags);
83372+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
83373 void vma_set_page_prot(struct vm_area_struct *vma);
83374 #else
83375-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
83376+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
83377 {
83378 return __pgprot(0);
83379 }
83380@@ -2056,6 +2077,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
83381 static inline void vm_stat_account(struct mm_struct *mm,
83382 unsigned long flags, struct file *file, long pages)
83383 {
83384+
83385+#ifdef CONFIG_PAX_RANDMMAP
83386+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
83387+#endif
83388+
83389 mm->total_vm += pages;
83390 }
83391 #endif /* CONFIG_PROC_FS */
83392@@ -2159,7 +2185,7 @@ extern int unpoison_memory(unsigned long pfn);
83393 extern int sysctl_memory_failure_early_kill;
83394 extern int sysctl_memory_failure_recovery;
83395 extern void shake_page(struct page *p, int access);
83396-extern atomic_long_t num_poisoned_pages;
83397+extern atomic_long_unchecked_t num_poisoned_pages;
83398 extern int soft_offline_page(struct page *page, int flags);
83399
83400 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
83401@@ -2210,5 +2236,11 @@ void __init setup_nr_node_ids(void);
83402 static inline void setup_nr_node_ids(void) {}
83403 #endif
83404
83405+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83406+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
83407+#else
83408+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
83409+#endif
83410+
83411 #endif /* __KERNEL__ */
83412 #endif /* _LINUX_MM_H */
83413diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
83414index 6d34aa2..d73d848 100644
83415--- a/include/linux/mm_types.h
83416+++ b/include/linux/mm_types.h
83417@@ -309,7 +309,9 @@ struct vm_area_struct {
83418 #ifdef CONFIG_NUMA
83419 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
83420 #endif
83421-};
83422+
83423+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
83424+} __randomize_layout;
83425
83426 struct core_thread {
83427 struct task_struct *task;
83428@@ -459,7 +461,25 @@ struct mm_struct {
83429 /* address of the bounds directory */
83430 void __user *bd_addr;
83431 #endif
83432-};
83433+
83434+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
83435+ unsigned long pax_flags;
83436+#endif
83437+
83438+#ifdef CONFIG_PAX_DLRESOLVE
83439+ unsigned long call_dl_resolve;
83440+#endif
83441+
83442+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
83443+ unsigned long call_syscall;
83444+#endif
83445+
83446+#ifdef CONFIG_PAX_ASLR
83447+ unsigned long delta_mmap; /* randomized offset */
83448+ unsigned long delta_stack; /* randomized offset */
83449+#endif
83450+
83451+} __randomize_layout;
83452
83453 static inline void mm_init_cpumask(struct mm_struct *mm)
83454 {
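The pax_flags and delta_mmap/delta_stack fields added to mm_struct above hold per-exec ASLR state: the deltas are picked once at exec time and folded into layout decisions by arch code elsewhere in the patch. A condensed model of the consumer side (all demo_* names and constants are invented):

#define DEMO_MF_PAX_RANDMMAP	0x8UL
#define DEMO_TASK_UNMAPPED_BASE	0x40000000UL

struct demo_mm {
	unsigned long pax_flags;
	unsigned long delta_mmap;	/* chosen once at exec */
	unsigned long mmap_base;
};

unsigned long demo_pick_mmap_base(struct demo_mm *mm)
{
	mm->mmap_base = DEMO_TASK_UNMAPPED_BASE;
	if (mm->pax_flags & DEMO_MF_PAX_RANDMMAP)
		mm->mmap_base += mm->delta_mmap;
	return mm->mmap_base;
}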
83455diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
83456index c5d5278..f0b68c8 100644
83457--- a/include/linux/mmiotrace.h
83458+++ b/include/linux/mmiotrace.h
83459@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
83460 /* Called from ioremap.c */
83461 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
83462 void __iomem *addr);
83463-extern void mmiotrace_iounmap(volatile void __iomem *addr);
83464+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
83465
83466 /* For anyone to insert markers. Remember trailing newline. */
83467 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
83468@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
83469 {
83470 }
83471
83472-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
83473+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
83474 {
83475 }
83476
83477diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
83478index 2f0856d..5a4bc1e 100644
83479--- a/include/linux/mmzone.h
83480+++ b/include/linux/mmzone.h
83481@@ -527,7 +527,7 @@ struct zone {
83482
83483 ZONE_PADDING(_pad3_)
83484 /* Zone statistics */
83485- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83486+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83487 } ____cacheline_internodealigned_in_smp;
83488
83489 enum zone_flags {
83490diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
83491index 745def8..08a820b 100644
83492--- a/include/linux/mod_devicetable.h
83493+++ b/include/linux/mod_devicetable.h
83494@@ -139,7 +139,7 @@ struct usb_device_id {
83495 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
83496 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
83497
83498-#define HID_ANY_ID (~0)
83499+#define HID_ANY_ID (~0U)
83500 #define HID_BUS_ANY 0xffff
83501 #define HID_GROUP_ANY 0x0000
83502
83503@@ -475,7 +475,7 @@ struct dmi_system_id {
83504 const char *ident;
83505 struct dmi_strmatch matches[4];
83506 void *driver_data;
83507-};
83508+} __do_const;
83509 /*
83510 * struct dmi_device_id appears during expansion of
83511 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
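The (~0) -> (~0U) change above is a signedness fix: ~0 has type int and value -1, while the HID id fields it is matched against are unsigned, so the old spelling relied on an implicit sign conversion that the hardening plugins (and -Wsign-compare) object to. Same bit pattern, unambiguous type:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t any_id = ~0U;	/* HID_ANY_ID after the change */

	assert(any_id == 0xffffffffu);
	assert(~0 == -1);	/* old spelling: a negative int (two's complement) */
	return 0;
}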
83512diff --git a/include/linux/module.h b/include/linux/module.h
83513index b653d7c..22a238f 100644
83514--- a/include/linux/module.h
83515+++ b/include/linux/module.h
83516@@ -17,9 +17,11 @@
83517 #include <linux/moduleparam.h>
83518 #include <linux/jump_label.h>
83519 #include <linux/export.h>
83520+#include <linux/fs.h>
83521
83522 #include <linux/percpu.h>
83523 #include <asm/module.h>
83524+#include <asm/pgtable.h>
83525
83526 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
83527 #define MODULE_SIG_STRING "~Module signature appended~\n"
83528@@ -42,7 +44,7 @@ struct module_kobject {
83529 struct kobject *drivers_dir;
83530 struct module_param_attrs *mp;
83531 struct completion *kobj_completion;
83532-};
83533+} __randomize_layout;
83534
83535 struct module_attribute {
83536 struct attribute attr;
83537@@ -54,12 +56,13 @@ struct module_attribute {
83538 int (*test)(struct module *);
83539 void (*free)(struct module *);
83540 };
83541+typedef struct module_attribute __no_const module_attribute_no_const;
83542
83543 struct module_version_attribute {
83544 struct module_attribute mattr;
83545 const char *module_name;
83546 const char *version;
83547-} __attribute__ ((__aligned__(sizeof(void *))));
83548+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
83549
83550 extern ssize_t __modver_version_show(struct module_attribute *,
83551 struct module_kobject *, char *);
83552@@ -221,7 +224,7 @@ struct module {
83553
83554 /* Sysfs stuff. */
83555 struct module_kobject mkobj;
83556- struct module_attribute *modinfo_attrs;
83557+ module_attribute_no_const *modinfo_attrs;
83558 const char *version;
83559 const char *srcversion;
83560 struct kobject *holders_dir;
83561@@ -270,19 +273,16 @@ struct module {
83562 int (*init)(void);
83563
83564 /* If this is non-NULL, vfree after init() returns */
83565- void *module_init;
83566+ void *module_init_rx, *module_init_rw;
83567
83568 /* Here is the actual code + data, vfree'd on unload. */
83569- void *module_core;
83570+ void *module_core_rx, *module_core_rw;
83571
83572 /* Here are the sizes of the init and core sections */
83573- unsigned int init_size, core_size;
83574+ unsigned int init_size_rw, core_size_rw;
83575
83576 /* The size of the executable code in each section. */
83577- unsigned int init_text_size, core_text_size;
83578-
83579- /* Size of RO sections of the module (text+rodata) */
83580- unsigned int init_ro_size, core_ro_size;
83581+ unsigned int init_size_rx, core_size_rx;
83582
83583 /* Arch-specific module values */
83584 struct mod_arch_specific arch;
83585@@ -338,6 +338,10 @@ struct module {
83586 #ifdef CONFIG_EVENT_TRACING
83587 struct ftrace_event_call **trace_events;
83588 unsigned int num_trace_events;
83589+ struct file_operations trace_id;
83590+ struct file_operations trace_enable;
83591+ struct file_operations trace_format;
83592+ struct file_operations trace_filter;
83593 #endif
83594 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
83595 unsigned int num_ftrace_callsites;
83596@@ -361,7 +365,7 @@ struct module {
83597 ctor_fn_t *ctors;
83598 unsigned int num_ctors;
83599 #endif
83600-};
83601+} __randomize_layout;
83602 #ifndef MODULE_ARCH_INIT
83603 #define MODULE_ARCH_INIT {}
83604 #endif
83605@@ -382,18 +386,48 @@ bool is_module_address(unsigned long addr);
83606 bool is_module_percpu_address(unsigned long addr);
83607 bool is_module_text_address(unsigned long addr);
83608
83609+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
83610+{
83611+
83612+#ifdef CONFIG_PAX_KERNEXEC
83613+ if (ktla_ktva(addr) >= (unsigned long)start &&
83614+ ktla_ktva(addr) < (unsigned long)start + size)
83615+ return 1;
83616+#endif
83617+
83618+ return ((void *)addr >= start && (void *)addr < start + size);
83619+}
83620+
83621+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
83622+{
83623+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
83624+}
83625+
83626+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
83627+{
83628+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
83629+}
83630+
83631+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
83632+{
83633+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
83634+}
83635+
83636+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
83637+{
83638+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
83639+}
83640+
83641 static inline bool within_module_core(unsigned long addr,
83642 const struct module *mod)
83643 {
83644- return (unsigned long)mod->module_core <= addr &&
83645- addr < (unsigned long)mod->module_core + mod->core_size;
83646+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
83647 }
83648
83649 static inline bool within_module_init(unsigned long addr,
83650 const struct module *mod)
83651 {
83652- return (unsigned long)mod->module_init <= addr &&
83653- addr < (unsigned long)mod->module_init + mod->init_size;
83654+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
83655 }
83656
83657 static inline bool within_module(unsigned long addr, const struct module *mod)
83658diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
83659index f755626..641f822 100644
83660--- a/include/linux/moduleloader.h
83661+++ b/include/linux/moduleloader.h
83662@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
83663 sections. Returns NULL on failure. */
83664 void *module_alloc(unsigned long size);
83665
83666+#ifdef CONFIG_PAX_KERNEXEC
83667+void *module_alloc_exec(unsigned long size);
83668+#else
83669+#define module_alloc_exec(x) module_alloc(x)
83670+#endif
83671+
83672 /* Free memory returned from module_alloc. */
83673 void module_memfree(void *module_region);
83674
83675+#ifdef CONFIG_PAX_KERNEXEC
83676+void module_memfree_exec(void *module_region);
83677+#else
83678+#define module_memfree_exec(x) module_memfree((x))
83679+#endif
83680+
83681 /*
83682 * Apply the given relocation to the (simplified) ELF. Return -error
83683 * or 0.
83684@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
83685 unsigned int relsec,
83686 struct module *me)
83687 {
83688+#ifdef CONFIG_MODULES
83689 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83690 module_name(me));
83691+#endif
83692 return -ENOEXEC;
83693 }
83694 #endif
83695@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
83696 unsigned int relsec,
83697 struct module *me)
83698 {
83699+#ifdef CONFIG_MODULES
83700 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83701 module_name(me));
83702+#endif
83703 return -ENOEXEC;
83704 }
83705 #endif
83706diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
83707index 1c9effa..1160bdd 100644
83708--- a/include/linux/moduleparam.h
83709+++ b/include/linux/moduleparam.h
83710@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
83711 * @len is usually just sizeof(string).
83712 */
83713 #define module_param_string(name, string, len, perm) \
83714- static const struct kparam_string __param_string_##name \
83715+ static const struct kparam_string __param_string_##name __used \
83716 = { len, string }; \
83717 __module_param_call(MODULE_PARAM_PREFIX, name, \
83718 &param_ops_string, \
83719@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
83720 */
83721 #define module_param_array_named(name, array, type, nump, perm) \
83722 param_check_##type(name, &(array)[0]); \
83723- static const struct kparam_array __param_arr_##name \
83724+ static const struct kparam_array __param_arr_##name __used \
83725 = { .max = ARRAY_SIZE(array), .num = nump, \
83726 .ops = &param_ops_##type, \
83727 .elemsize = sizeof(array[0]), .elem = array }; \
83728diff --git a/include/linux/mount.h b/include/linux/mount.h
83729index c2c561d..a5f2a8c 100644
83730--- a/include/linux/mount.h
83731+++ b/include/linux/mount.h
83732@@ -66,7 +66,7 @@ struct vfsmount {
83733 struct dentry *mnt_root; /* root of the mounted tree */
83734 struct super_block *mnt_sb; /* pointer to superblock */
83735 int mnt_flags;
83736-};
83737+} __randomize_layout;
83738
83739 struct file; /* forward dec */
83740 struct path;
83741diff --git a/include/linux/namei.h b/include/linux/namei.h
83742index c899077..b9a2010 100644
83743--- a/include/linux/namei.h
83744+++ b/include/linux/namei.h
83745@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
83746 extern void unlock_rename(struct dentry *, struct dentry *);
83747
83748 extern void nd_jump_link(struct nameidata *nd, struct path *path);
83749-extern void nd_set_link(struct nameidata *nd, char *path);
83750-extern char *nd_get_link(struct nameidata *nd);
83751+extern void nd_set_link(struct nameidata *nd, const char *path);
83752+extern const char *nd_get_link(const struct nameidata *nd);
83753
83754 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
83755 {
83756diff --git a/include/linux/net.h b/include/linux/net.h
83757index 17d8339..81656c0 100644
83758--- a/include/linux/net.h
83759+++ b/include/linux/net.h
83760@@ -192,7 +192,7 @@ struct net_proto_family {
83761 int (*create)(struct net *net, struct socket *sock,
83762 int protocol, int kern);
83763 struct module *owner;
83764-};
83765+} __do_const;
83766
83767 struct iovec;
83768 struct kvec;
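
`__do_const` is a marker for the PaX constify gcc plugin (assumed to be wired up elsewhere in this patch): instances of the tagged structure are forced const so they land in .rodata, and an attacker who hijacks an ops pointer can no longer stage a fake table in writable memory. The same pattern recurs below for nf_sockopt_ops, nfnl_callback, nls_table, notifier_block and many others. Where code legitimately needs a writable instance, the patch pairs the marker with a `__no_const` typedef (net_device_ops_no_const in the next hunk); a sketch of that idiom, with a hypothetical template:

    /* Sketch: one writable ops instance, cloned from a constified
     * template and patched before registration ("template_ops" is
     * hypothetical). */
    static net_device_ops_no_const my_ops;      /* __no_const: writable */

    static void init_ops(const struct net_device_ops *template_ops)
    {
        my_ops = *template_ops;                 /* copy out of .rodata  */
        my_ops.ndo_set_mac_address = NULL;      /* adjust one hook      */
    }
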
83769diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
83770index 52fd8e8..19430a1 100644
83771--- a/include/linux/netdevice.h
83772+++ b/include/linux/netdevice.h
83773@@ -1191,6 +1191,7 @@ struct net_device_ops {
83774 u8 state);
83775 #endif
83776 };
83777+typedef struct net_device_ops __no_const net_device_ops_no_const;
83778
83779 /**
83780 * enum net_device_priv_flags - &struct net_device priv_flags
83781@@ -1537,10 +1538,10 @@ struct net_device {
83782
83783 struct net_device_stats stats;
83784
83785- atomic_long_t rx_dropped;
83786- atomic_long_t tx_dropped;
83787+ atomic_long_unchecked_t rx_dropped;
83788+ atomic_long_unchecked_t tx_dropped;
83789
83790- atomic_t carrier_changes;
83791+ atomic_unchecked_t carrier_changes;
83792
83793 #ifdef CONFIG_WIRELESS_EXT
83794 const struct iw_handler_def * wireless_handlers;
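
The atomic_unchecked_t conversions are the flip side of PAX_REFCOUNT: once plain atomic_t traps on overflow to catch reference-count bugs, counters whose wraparound is harmless must opt out or they would raise false positives. Drop and carrier-change statistics only ever increment and are reported modulo the type width, so they move to the unchecked variants, which are assumed (per PaX convention) to match their checked counterparts minus the trap. Sketch of a call site:

    /* Sketch: statistics may wrap benignly, so the unchecked op is
     * correct here; a true reference count would stay atomic_t. */
    static void record_drop(struct net_device *dev)
    {
        atomic_long_inc_unchecked(&dev->rx_dropped);  /* no overflow trap */
    }
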
83795diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
83796index 2517ece..0bbfcfb 100644
83797--- a/include/linux/netfilter.h
83798+++ b/include/linux/netfilter.h
83799@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
83800 #endif
83801 /* Use the module struct to lock set/get code in place */
83802 struct module *owner;
83803-};
83804+} __do_const;
83805
83806 /* Function to register/unregister hook points. */
83807 int nf_register_hook(struct nf_hook_ops *reg);
83808diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
83809index e955d47..04a5338 100644
83810--- a/include/linux/netfilter/nfnetlink.h
83811+++ b/include/linux/netfilter/nfnetlink.h
83812@@ -19,7 +19,7 @@ struct nfnl_callback {
83813 const struct nlattr * const cda[]);
83814 const struct nla_policy *policy; /* netlink attribute policy */
83815 const u_int16_t attr_count; /* number of nlattr's */
83816-};
83817+} __do_const;
83818
83819 struct nfnetlink_subsystem {
83820 const char *name;
83821diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
83822new file mode 100644
83823index 0000000..33f4af8
83824--- /dev/null
83825+++ b/include/linux/netfilter/xt_gradm.h
83826@@ -0,0 +1,9 @@
83827+#ifndef _LINUX_NETFILTER_XT_GRADM_H
83828+#define _LINUX_NETFILTER_XT_GRADM_H 1
83829+
83830+struct xt_gradm_mtinfo {
83831+ __u16 flags;
83832+ __u16 invflags;
83833+};
83834+
83835+#endif
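
This new header is the userspace-visible match data for a `gradm` xtables match, letting iptables rules key on whether the grsecurity RBAC system is active (the classic use being a fail-closed ruleset while gradm is disabled). The match module itself lives in the netfilter portion of the patch; a simplified sketch of a consumer, with the invflags semantics assumed:

    /* Sketch (semantics assumed): match when RBAC is enabled, inverted
     * via invflags; gr_acl_is_enabled() is a grsecurity helper. */
    static bool gradm_mt(const struct sk_buff *skb,
                         struct xt_action_param *par)
    {
        const struct xt_gradm_mtinfo *info = par->matchinfo;

        return gr_acl_is_enabled() ^ !!info->invflags;
    }
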
83836diff --git a/include/linux/nls.h b/include/linux/nls.h
83837index 520681b..2b7fabb 100644
83838--- a/include/linux/nls.h
83839+++ b/include/linux/nls.h
83840@@ -31,7 +31,7 @@ struct nls_table {
83841 const unsigned char *charset2upper;
83842 struct module *owner;
83843 struct nls_table *next;
83844-};
83845+} __do_const;
83846
83847 /* this value hold the maximum octet of charset */
83848 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
83849@@ -46,7 +46,7 @@ enum utf16_endian {
83850 /* nls_base.c */
83851 extern int __register_nls(struct nls_table *, struct module *);
83852 extern int unregister_nls(struct nls_table *);
83853-extern struct nls_table *load_nls(char *);
83854+extern struct nls_table *load_nls(const char *);
83855 extern void unload_nls(struct nls_table *);
83856 extern struct nls_table *load_nls_default(void);
83857 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
83858diff --git a/include/linux/notifier.h b/include/linux/notifier.h
83859index d14a4c3..a078786 100644
83860--- a/include/linux/notifier.h
83861+++ b/include/linux/notifier.h
83862@@ -54,7 +54,8 @@ struct notifier_block {
83863 notifier_fn_t notifier_call;
83864 struct notifier_block __rcu *next;
83865 int priority;
83866-};
83867+} __do_const;
83868+typedef struct notifier_block __no_const notifier_block_no_const;
83869
83870 struct atomic_notifier_head {
83871 spinlock_t lock;
83872diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
83873index b2a0f15..4d7da32 100644
83874--- a/include/linux/oprofile.h
83875+++ b/include/linux/oprofile.h
83876@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
83877 int oprofilefs_create_ro_ulong(struct dentry * root,
83878 char const * name, ulong * val);
83879
83880-/** Create a file for read-only access to an atomic_t. */
83881+/** Create a file for read-only access to an atomic_unchecked_t. */
83882 int oprofilefs_create_ro_atomic(struct dentry * root,
83883- char const * name, atomic_t * val);
83884+ char const * name, atomic_unchecked_t * val);
83885
83886 /** create a directory */
83887 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
83888diff --git a/include/linux/padata.h b/include/linux/padata.h
83889index 4386946..f50c615 100644
83890--- a/include/linux/padata.h
83891+++ b/include/linux/padata.h
83892@@ -129,7 +129,7 @@ struct parallel_data {
83893 struct padata_serial_queue __percpu *squeue;
83894 atomic_t reorder_objects;
83895 atomic_t refcnt;
83896- atomic_t seq_nr;
83897+ atomic_unchecked_t seq_nr;
83898 struct padata_cpumask cpumask;
83899 spinlock_t lock ____cacheline_aligned;
83900 unsigned int processed;
83901diff --git a/include/linux/path.h b/include/linux/path.h
83902index d137218..be0c176 100644
83903--- a/include/linux/path.h
83904+++ b/include/linux/path.h
83905@@ -1,13 +1,15 @@
83906 #ifndef _LINUX_PATH_H
83907 #define _LINUX_PATH_H
83908
83909+#include <linux/compiler.h>
83910+
83911 struct dentry;
83912 struct vfsmount;
83913
83914 struct path {
83915 struct vfsmount *mnt;
83916 struct dentry *dentry;
83917-};
83918+} __randomize_layout;
83919
83920 extern void path_get(const struct path *);
83921 extern void path_put(const struct path *);
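
`__randomize_layout` (here on struct path, on struct vfsmount earlier, and on pid_namespace, signal_struct, user_struct, task_struct and others below) tags structures for the RANDSTRUCT gcc plugin, which permutes member order per build so exploit payloads cannot rely on fixed offsets; the new `#include <linux/compiler.h>` makes the marker visible in this header. When the plugin is off the marker has to compile away, so the definitions are assumed to look roughly like:

    /* Assumed fallback shape (the real definitions sit in the compiler
     * headers this patch modifies elsewhere): */
    #ifdef RANDSTRUCT_PLUGIN
    #define __randomize_layout      __attribute__((randomize_layout))
    #define __no_randomize_layout   __attribute__((no_randomize_layout))
    #else
    #define __randomize_layout
    #define __no_randomize_layout
    #endif
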
83922diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
83923index 8c78950..0d74ed9 100644
83924--- a/include/linux/pci_hotplug.h
83925+++ b/include/linux/pci_hotplug.h
83926@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
83927 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
83928 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
83929 int (*reset_slot) (struct hotplug_slot *slot, int probe);
83930-};
83931+} __do_const;
83932+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
83933
83934 /**
83935 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
83936diff --git a/include/linux/percpu.h b/include/linux/percpu.h
83937index caebf2a..4c3ae9d 100644
83938--- a/include/linux/percpu.h
83939+++ b/include/linux/percpu.h
83940@@ -34,7 +34,7 @@
83941 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
83942 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
83943 */
83944-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
83945+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
83946 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
83947
83948 /*
83949diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
83950index 664de5a..b3e1bf4 100644
83951--- a/include/linux/perf_event.h
83952+++ b/include/linux/perf_event.h
83953@@ -336,8 +336,8 @@ struct perf_event {
83954
83955 enum perf_event_active_state state;
83956 unsigned int attach_state;
83957- local64_t count;
83958- atomic64_t child_count;
83959+ local64_t count; /* PaX: fix it one day */
83960+ atomic64_unchecked_t child_count;
83961
83962 /*
83963 * These are the total time in nanoseconds that the event
83964@@ -388,8 +388,8 @@ struct perf_event {
83965 * These accumulate total time (in nanoseconds) that children
83966 * events have been enabled and running, respectively.
83967 */
83968- atomic64_t child_total_time_enabled;
83969- atomic64_t child_total_time_running;
83970+ atomic64_unchecked_t child_total_time_enabled;
83971+ atomic64_unchecked_t child_total_time_running;
83972
83973 /*
83974 * Protect attach/detach and child_list:
83975@@ -733,7 +733,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
83976 entry->ip[entry->nr++] = ip;
83977 }
83978
83979-extern int sysctl_perf_event_paranoid;
83980+extern int sysctl_perf_event_legitimately_concerned;
83981 extern int sysctl_perf_event_mlock;
83982 extern int sysctl_perf_event_sample_rate;
83983 extern int sysctl_perf_cpu_time_max_percent;
83984@@ -748,19 +748,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
83985 loff_t *ppos);
83986
83987
83988+static inline bool perf_paranoid_any(void)
83989+{
83990+ return sysctl_perf_event_legitimately_concerned > 2;
83991+}
83992+
83993 static inline bool perf_paranoid_tracepoint_raw(void)
83994 {
83995- return sysctl_perf_event_paranoid > -1;
83996+ return sysctl_perf_event_legitimately_concerned > -1;
83997 }
83998
83999 static inline bool perf_paranoid_cpu(void)
84000 {
84001- return sysctl_perf_event_paranoid > 0;
84002+ return sysctl_perf_event_legitimately_concerned > 0;
84003 }
84004
84005 static inline bool perf_paranoid_kernel(void)
84006 {
84007- return sysctl_perf_event_paranoid > 1;
84008+ return sysctl_perf_event_legitimately_concerned > 1;
84009 }
84010
84011 extern void perf_event_init(void);
84012@@ -891,7 +896,7 @@ struct perf_pmu_events_attr {
84013 struct device_attribute attr;
84014 u64 id;
84015 const char *event_str;
84016-};
84017+} __do_const;
84018
84019 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
84020 static struct perf_pmu_events_attr _var = { \
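
Besides the pointed rename of sysctl_perf_event_paranoid, the functional addition is a fourth paranoia level: perf_paranoid_any() is true at level 3 and above, and the perf core (patched elsewhere) is expected to use it to refuse perf_event_open() to unprivileged callers altogether. The /proc name presumably stays perf_event_paranoid for compatibility, so the new mode would be selected with `sysctl kernel.perf_event_paranoid=3`. Sketch of the assumed gate:

    /* Assumed shape of the consumer in kernel/events/core.c: */
    static int perf_open_allowed(void)
    {
        if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
            return -EACCES;     /* level >= 3: no unprivileged perf */
        return 0;
    }
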
84021diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
84022index b9cf6c5..5462472 100644
84023--- a/include/linux/pid_namespace.h
84024+++ b/include/linux/pid_namespace.h
84025@@ -45,7 +45,7 @@ struct pid_namespace {
84026 int hide_pid;
84027 int reboot; /* group exit code if this pidns was rebooted */
84028 struct ns_common ns;
84029-};
84030+} __randomize_layout;
84031
84032 extern struct pid_namespace init_pid_ns;
84033
84034diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
84035index eb8b8ac..62649e1 100644
84036--- a/include/linux/pipe_fs_i.h
84037+++ b/include/linux/pipe_fs_i.h
84038@@ -47,10 +47,10 @@ struct pipe_inode_info {
84039 struct mutex mutex;
84040 wait_queue_head_t wait;
84041 unsigned int nrbufs, curbuf, buffers;
84042- unsigned int readers;
84043- unsigned int writers;
84044- unsigned int files;
84045- unsigned int waiting_writers;
84046+ atomic_t readers;
84047+ atomic_t writers;
84048+ atomic_t files;
84049+ atomic_t waiting_writers;
84050 unsigned int r_counter;
84051 unsigned int w_counter;
84052 struct page *tmp_page;
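
The pipe reader/writer/file counts become atomic_t. The arithmetic is unchanged; the plausible motivation is that grsecurity code elsewhere in the patch inspects or adjusts these counts without holding pipe->mutex, which plain ints would make racy. The type change forces a mechanical rewrite of every call site in fs/pipe.c along these lines:

    /* Sketch of the forced call-site change (performed elsewhere in the
     * patch): */
    static void open_reader(struct pipe_inode_info *pipe)
    {
        atomic_inc(&pipe->readers);           /* was: pipe->readers++ */
        if (!atomic_read(&pipe->writers))     /* was: !pipe->writers  */
            wake_up_interruptible(&pipe->wait);
    }
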
84053diff --git a/include/linux/pm.h b/include/linux/pm.h
84054index 8b59763..8a05939 100644
84055--- a/include/linux/pm.h
84056+++ b/include/linux/pm.h
84057@@ -608,6 +608,7 @@ struct dev_pm_domain {
84058 struct dev_pm_ops ops;
84059 void (*detach)(struct device *dev, bool power_off);
84060 };
84061+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
84062
84063 /*
84064 * The PM_EVENT_ messages are also used by drivers implementing the legacy
84065diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
84066index a9edab2..8bada56 100644
84067--- a/include/linux/pm_domain.h
84068+++ b/include/linux/pm_domain.h
84069@@ -39,11 +39,11 @@ struct gpd_dev_ops {
84070 int (*save_state)(struct device *dev);
84071 int (*restore_state)(struct device *dev);
84072 bool (*active_wakeup)(struct device *dev);
84073-};
84074+} __no_const;
84075
84076 struct gpd_cpuidle_data {
84077 unsigned int saved_exit_latency;
84078- struct cpuidle_state *idle_state;
84079+ cpuidle_state_no_const *idle_state;
84080 };
84081
84082 struct generic_pm_domain {
84083diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
84084index 30e84d4..22278b4 100644
84085--- a/include/linux/pm_runtime.h
84086+++ b/include/linux/pm_runtime.h
84087@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
84088
84089 static inline void pm_runtime_mark_last_busy(struct device *dev)
84090 {
84091- ACCESS_ONCE(dev->power.last_busy) = jiffies;
84092+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
84093 }
84094
84095 static inline bool pm_runtime_is_irq_safe(struct device *dev)
84096diff --git a/include/linux/pnp.h b/include/linux/pnp.h
84097index 195aafc..49a7bc2 100644
84098--- a/include/linux/pnp.h
84099+++ b/include/linux/pnp.h
84100@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
84101 struct pnp_fixup {
84102 char id[7];
84103 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
84104-};
84105+} __do_const;
84106
84107 /* config parameters */
84108 #define PNP_CONFIG_NORMAL 0x0001
84109diff --git a/include/linux/poison.h b/include/linux/poison.h
84110index 2110a81..13a11bb 100644
84111--- a/include/linux/poison.h
84112+++ b/include/linux/poison.h
84113@@ -19,8 +19,8 @@
84114 * under normal circumstances, used to verify that nobody uses
84115 * non-initialized list entries.
84116 */
84117-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
84118-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
84119+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
84120+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
84121
84122 /********** include/linux/timer.h **********/
84123 /*
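
The stock poison values sit at low virtual addresses, and on configurations where userland can map low memory a use-after-list_del dereferences attacker-controlled data instead of faulting. The replacements sit at the very top of the 32-bit range, inside the kernel-reserved area on an i386-style split, so the same bug faults deterministically. A runnable check of the 32-bit reasoning:

    #include <stdio.h>

    int main(void)
    {
        unsigned long old_p = 0x00100100UL;                    /* stock   */
        unsigned long new_p = (unsigned long)(long)0xFFFFFF01; /* patched */

        /* classic i386 3G/1G split: userland maps below 0xC0000000 */
        printf("old poison user-mappable: %s\n",
               old_p < 0xC0000000UL ? "yes" : "no");   /* yes */
        printf("new poison user-mappable: %s\n",
               new_p < 0xC0000000UL ? "yes" : "no");   /* no  */
        return 0;
    }
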
84124diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
84125index d8b187c3..9a9257a 100644
84126--- a/include/linux/power/smartreflex.h
84127+++ b/include/linux/power/smartreflex.h
84128@@ -238,7 +238,7 @@ struct omap_sr_class_data {
84129 int (*notify)(struct omap_sr *sr, u32 status);
84130 u8 notify_flags;
84131 u8 class_type;
84132-};
84133+} __do_const;
84134
84135 /**
84136 * struct omap_sr_nvalue_table - Smartreflex n-target value info
84137diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
84138index 4ea1d37..80f4b33 100644
84139--- a/include/linux/ppp-comp.h
84140+++ b/include/linux/ppp-comp.h
84141@@ -84,7 +84,7 @@ struct compressor {
84142 struct module *owner;
84143 /* Extra skb space needed by the compressor algorithm */
84144 unsigned int comp_extra;
84145-};
84146+} __do_const;
84147
84148 /*
84149 * The return value from decompress routine is the length of the
84150diff --git a/include/linux/preempt.h b/include/linux/preempt.h
84151index de83b4e..c4b997d 100644
84152--- a/include/linux/preempt.h
84153+++ b/include/linux/preempt.h
84154@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
84155 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
84156 #endif
84157
84158+#define raw_preempt_count_add(val) __preempt_count_add(val)
84159+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
84160+
84161 #define __preempt_count_inc() __preempt_count_add(1)
84162 #define __preempt_count_dec() __preempt_count_sub(1)
84163
84164 #define preempt_count_inc() preempt_count_add(1)
84165+#define raw_preempt_count_inc() raw_preempt_count_add(1)
84166 #define preempt_count_dec() preempt_count_sub(1)
84167+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
84168
84169 #ifdef CONFIG_PREEMPT_COUNT
84170
84171@@ -41,6 +46,12 @@ do { \
84172 barrier(); \
84173 } while (0)
84174
84175+#define raw_preempt_disable() \
84176+do { \
84177+ raw_preempt_count_inc(); \
84178+ barrier(); \
84179+} while (0)
84180+
84181 #define sched_preempt_enable_no_resched() \
84182 do { \
84183 barrier(); \
84184@@ -49,6 +60,12 @@ do { \
84185
84186 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
84187
84188+#define raw_preempt_enable_no_resched() \
84189+do { \
84190+ barrier(); \
84191+ raw_preempt_count_dec(); \
84192+} while (0)
84193+
84194 #ifdef CONFIG_PREEMPT
84195 #define preempt_enable() \
84196 do { \
84197@@ -113,8 +130,10 @@ do { \
84198 * region.
84199 */
84200 #define preempt_disable() barrier()
84201+#define raw_preempt_disable() barrier()
84202 #define sched_preempt_enable_no_resched() barrier()
84203 #define preempt_enable_no_resched() barrier()
84204+#define raw_preempt_enable_no_resched() barrier()
84205 #define preempt_enable() barrier()
84206 #define preempt_check_resched() do { } while (0)
84207
84208@@ -128,11 +147,13 @@ do { \
84209 /*
84210 * Modules have no business playing preemption tricks.
84211 */
84212+#ifndef CONFIG_PAX_KERNEXEC
84213 #undef sched_preempt_enable_no_resched
84214 #undef preempt_enable_no_resched
84215 #undef preempt_enable_no_resched_notrace
84216 #undef preempt_check_resched
84217 #endif
84218+#endif
84219
84220 #define preempt_set_need_resched() \
84221 do { \
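
The raw_preempt_* family mirrors the regular API but calls the double-underscore primitives directly, skipping preempt tracing and debug instrumentation; PaX paths such as pax_open_kernel()/pax_close_kernel() (per-arch, elsewhere in the patch) need that because they can themselves be reached from inside the instrumentation. The last hunk keeps preempt_enable_no_resched() and friends visible to modules when KERNEXEC is on, since those same helpers are inlined into module code. The pairing rule is the usual one:

    /* Sketch: a critical section that must not recurse into preempt
     * instrumentation. */
    static void open_kernel_sketch(void)
    {
        raw_preempt_disable();
        /* ... flip kernel write protection, touch per-cpu state ... */
        raw_preempt_enable_no_resched();
    }
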
84222diff --git a/include/linux/printk.h b/include/linux/printk.h
84223index 4d5bf57..d94eccf 100644
84224--- a/include/linux/printk.h
84225+++ b/include/linux/printk.h
84226@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
84227 #endif
84228
84229 typedef int(*printk_func_t)(const char *fmt, va_list args);
84230+extern int kptr_restrict;
84231
84232 #ifdef CONFIG_PRINTK
84233 asmlinkage __printf(5, 0)
84234@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
84235
84236 extern int printk_delay_msec;
84237 extern int dmesg_restrict;
84238-extern int kptr_restrict;
84239
84240 extern void wake_up_klogd(void);
84241
84242diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
84243index b97bf2e..f14c92d4 100644
84244--- a/include/linux/proc_fs.h
84245+++ b/include/linux/proc_fs.h
84246@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
84247 extern struct proc_dir_entry *proc_symlink(const char *,
84248 struct proc_dir_entry *, const char *);
84249 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
84250+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
84251 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
84252 struct proc_dir_entry *, void *);
84253+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
84254+ struct proc_dir_entry *, void *);
84255 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
84256 struct proc_dir_entry *);
84257
84258@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
84259 return proc_create_data(name, mode, parent, proc_fops, NULL);
84260 }
84261
84262+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
84263+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
84264+{
84265+#ifdef CONFIG_GRKERNSEC_PROC_USER
84266+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
84267+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84268+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
84269+#else
84270+ return proc_create_data(name, mode, parent, proc_fops, NULL);
84271+#endif
84272+}
84273+
84274+
84275 extern void proc_set_size(struct proc_dir_entry *, loff_t);
84276 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
84277 extern void *PDE_DATA(const struct inode *);
84278@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
84279 struct proc_dir_entry *parent,const char *dest) { return NULL;}
84280 static inline struct proc_dir_entry *proc_mkdir(const char *name,
84281 struct proc_dir_entry *parent) {return NULL;}
84282+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
84283+ struct proc_dir_entry *parent) { return NULL; }
84284 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
84285 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84286+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
84287+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84288 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
84289 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
84290 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
84291@@ -79,7 +99,7 @@ struct net;
84292 static inline struct proc_dir_entry *proc_net_mkdir(
84293 struct net *net, const char *name, struct proc_dir_entry *parent)
84294 {
84295- return proc_mkdir_data(name, 0, parent, net);
84296+ return proc_mkdir_data_restrict(name, 0, parent, net);
84297 }
84298
84299 #endif /* _LINUX_PROC_FS_H */
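
proc_create_grsec() and the *_restrict variants route procfs visibility through the GRKERNSEC_PROC_USER/USERGROUP options: with PROC_USER the entry becomes 0400 (root only), with PROC_USERGROUP 0440 (root plus the configured group), otherwise the caller's mode survives. Switching proc_net_mkdir() to the restrict variant is what hides /proc/net contents from unprivileged users. A hedged usage sketch:

    /* Sketch: a stats file that follows the grsec /proc policy
     * automatically ("my_stats_fops" is hypothetical). */
    static const struct file_operations my_stats_fops;

    static int __init my_proc_init(void)
    {
        if (!proc_create_grsec("my_stats", 0444, NULL, &my_stats_fops))
            return -ENOMEM;     /* 0444 collapses to 0400/0440 here */
        return 0;
    }
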
84300diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
84301index 42dfc61..8113a99 100644
84302--- a/include/linux/proc_ns.h
84303+++ b/include/linux/proc_ns.h
84304@@ -16,7 +16,7 @@ struct proc_ns_operations {
84305 struct ns_common *(*get)(struct task_struct *task);
84306 void (*put)(struct ns_common *ns);
84307 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
84308-};
84309+} __do_const __randomize_layout;
84310
84311 extern const struct proc_ns_operations netns_operations;
84312 extern const struct proc_ns_operations utsns_operations;
84313diff --git a/include/linux/quota.h b/include/linux/quota.h
84314index b86df49..8002997 100644
84315--- a/include/linux/quota.h
84316+++ b/include/linux/quota.h
84317@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
84318
84319 extern bool qid_eq(struct kqid left, struct kqid right);
84320 extern bool qid_lt(struct kqid left, struct kqid right);
84321-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
84322+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
84323 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
84324 extern bool qid_valid(struct kqid qid);
84325
84326diff --git a/include/linux/random.h b/include/linux/random.h
84327index b05856e..0a9f14e 100644
84328--- a/include/linux/random.h
84329+++ b/include/linux/random.h
84330@@ -9,9 +9,19 @@
84331 #include <uapi/linux/random.h>
84332
84333 extern void add_device_randomness(const void *, unsigned int);
84334+
84335+static inline void add_latent_entropy(void)
84336+{
84337+
84338+#ifdef LATENT_ENTROPY_PLUGIN
84339+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84340+#endif
84341+
84342+}
84343+
84344 extern void add_input_randomness(unsigned int type, unsigned int code,
84345- unsigned int value);
84346-extern void add_interrupt_randomness(int irq, int irq_flags);
84347+ unsigned int value) __latent_entropy;
84348+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
84349
84350 extern void get_random_bytes(void *buf, int nbytes);
84351 extern void get_random_bytes_arch(void *buf, int nbytes);
84352@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
84353 extern const struct file_operations random_fops, urandom_fops;
84354 #endif
84355
84356-unsigned int get_random_int(void);
84357+unsigned int __intentional_overflow(-1) get_random_int(void);
84358 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
84359
84360-u32 prandom_u32(void);
84361+u32 prandom_u32(void) __intentional_overflow(-1);
84362 void prandom_bytes(void *buf, size_t nbytes);
84363 void prandom_seed(u32 seed);
84364 void prandom_reseed_late(void);
84365@@ -37,6 +47,11 @@ struct rnd_state {
84366 u32 prandom_u32_state(struct rnd_state *state);
84367 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84368
84369+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
84370+{
84371+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
84372+}
84373+
84374 /**
84375 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
84376 * @ep_ro: right open interval endpoint
84377@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84378 *
84379 * Returns: pseudo-random number in interval [0, ep_ro)
84380 */
84381-static inline u32 prandom_u32_max(u32 ep_ro)
84382+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
84383 {
84384 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
84385 }
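
Three related changes: add_latent_entropy() feeds the latent-entropy plugin's per-boot value into the pool and compiles to nothing without the plugin; the `__intentional_overflow(-1)` annotations tell the size_overflow plugin that wraparound in these RNG primitives is by design; and pax_get_random_long() widens prandom_u32() to a full long, drawing a second 32-bit value for the high half only when `sizeof(long) > 4`. A runnable userspace model of the widening:

    #include <stdio.h>
    #include <stdlib.h>

    /* Model of pax_get_random_long(); rand() stands in for prandom_u32()
     * purely for illustration. */
    static unsigned long get_random_long_model(void)
    {
        unsigned long r = (unsigned int)rand();

        /* On LP64 this fills bits 32..63; with a 32-bit long the double
         * shift is well defined and contributes 0, matching the kernel's
         * sizeof(long) > 4 test. */
        r += ((unsigned long)(unsigned int)rand() << 16) << 16;
        return r;
    }

    int main(void)
    {
        printf("%#lx\n", get_random_long_model());
        return 0;
    }
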
84386diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
84387index 378c5ee..aa84a47 100644
84388--- a/include/linux/rbtree_augmented.h
84389+++ b/include/linux/rbtree_augmented.h
84390@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
84391 old->rbaugmented = rbcompute(old); \
84392 } \
84393 rbstatic const struct rb_augment_callbacks rbname = { \
84394- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
84395+ .propagate = rbname ## _propagate, \
84396+ .copy = rbname ## _copy, \
84397+ .rotate = rbname ## _rotate \
84398 };
84399
84400
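
Positional initializers silently break once a plugin may reorder or extend structure members, and the constify/randstruct plugins introduced by this patch do exactly that; naming the members makes the rb_augment_callbacks macro order-independent. The same transformation in miniature, runnable:

    #include <stdio.h>

    struct callbacks {
        void (*propagate)(int);
        void (*copy)(int);
        void (*rotate)(int);
    };

    static void my_propagate(int v) { printf("propagate %d\n", v); }
    static void my_copy(int v)      { printf("copy %d\n", v); }
    static void my_rotate(int v)    { printf("rotate %d\n", v); }

    /* Designated form: still correct if a plugin permutes the members. */
    static const struct callbacks cb = {
        .propagate = my_propagate,
        .copy      = my_copy,
        .rotate    = my_rotate,
    };

    int main(void) { cb.rotate(1); return 0; }
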
84401diff --git a/include/linux/rculist.h b/include/linux/rculist.h
84402index 529bc94..82ce778 100644
84403--- a/include/linux/rculist.h
84404+++ b/include/linux/rculist.h
84405@@ -29,8 +29,8 @@
84406 */
84407 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
84408 {
84409- ACCESS_ONCE(list->next) = list;
84410- ACCESS_ONCE(list->prev) = list;
84411+ ACCESS_ONCE_RW(list->next) = list;
84412+ ACCESS_ONCE_RW(list->prev) = list;
84413 }
84414
84415 /*
84416@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
84417 struct list_head *prev, struct list_head *next);
84418 #endif
84419
84420+void __pax_list_add_rcu(struct list_head *new,
84421+ struct list_head *prev, struct list_head *next);
84422+
84423 /**
84424 * list_add_rcu - add a new entry to rcu-protected list
84425 * @new: new entry to be added
84426@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
84427 __list_add_rcu(new, head, head->next);
84428 }
84429
84430+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
84431+{
84432+ __pax_list_add_rcu(new, head, head->next);
84433+}
84434+
84435 /**
84436 * list_add_tail_rcu - add a new entry to rcu-protected list
84437 * @new: new entry to be added
84438@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
84439 __list_add_rcu(new, head->prev, head);
84440 }
84441
84442+static inline void pax_list_add_tail_rcu(struct list_head *new,
84443+ struct list_head *head)
84444+{
84445+ __pax_list_add_rcu(new, head->prev, head);
84446+}
84447+
84448 /**
84449 * list_del_rcu - deletes entry from list without re-initialization
84450 * @entry: the element to delete from the list.
84451@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
84452 entry->prev = LIST_POISON2;
84453 }
84454
84455+extern void pax_list_del_rcu(struct list_head *entry);
84456+
84457 /**
84458 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
84459 * @n: the element to delete from the hash list.
84460diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
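
Constification has a knock-on effect visible here: notifier blocks and ops structures live on RCU lists, and inserting a neighbour writes to next/prev pointers that may now sit in read-only memory. ACCESS_ONCE_RW is the PaX spelling for a deliberate write through ACCESS_ONCE (the plain form is made const-strict elsewhere in the patch), and the out-of-line pax_list_add_rcu()/pax_list_del_rcu() helpers are assumed to wrap the normal linkage in a window where kernel write protection is lifted:

    /* Assumed shape of the out-of-line helper (implemented in the lib/
     * portion of this patch); pax_open_kernel()/pax_close_kernel() are
     * PaX primitives that toggle kernel write protection. */
    void __pax_list_add_rcu(struct list_head *new,
                            struct list_head *prev, struct list_head *next)
    {
        pax_open_kernel();
        __list_add_rcu(new, prev, next);
        pax_close_kernel();
    }
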
84461index ed4f593..8a51501 100644
84462--- a/include/linux/rcupdate.h
84463+++ b/include/linux/rcupdate.h
84464@@ -332,7 +332,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
84465 #define rcu_note_voluntary_context_switch(t) \
84466 do { \
84467 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
84468- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
84469+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
84470 } while (0)
84471 #else /* #ifdef CONFIG_TASKS_RCU */
84472 #define TASKS_RCU(x) do { } while (0)
84473diff --git a/include/linux/reboot.h b/include/linux/reboot.h
84474index 67fc8fc..a90f7d8 100644
84475--- a/include/linux/reboot.h
84476+++ b/include/linux/reboot.h
84477@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
84478 */
84479
84480 extern void migrate_to_reboot_cpu(void);
84481-extern void machine_restart(char *cmd);
84482-extern void machine_halt(void);
84483-extern void machine_power_off(void);
84484+extern void machine_restart(char *cmd) __noreturn;
84485+extern void machine_halt(void) __noreturn;
84486+extern void machine_power_off(void) __noreturn;
84487
84488 extern void machine_shutdown(void);
84489 struct pt_regs;
84490@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
84491 */
84492
84493 extern void kernel_restart_prepare(char *cmd);
84494-extern void kernel_restart(char *cmd);
84495-extern void kernel_halt(void);
84496-extern void kernel_power_off(void);
84497+extern void kernel_restart(char *cmd) __noreturn;
84498+extern void kernel_halt(void) __noreturn;
84499+extern void kernel_power_off(void) __noreturn;
84500
84501 extern int C_A_D; /* for sysctl */
84502 void ctrl_alt_del(void);
84503@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
84504 * Emergency restart, callable from an interrupt handler.
84505 */
84506
84507-extern void emergency_restart(void);
84508+extern void emergency_restart(void) __noreturn;
84509 #include <asm/emergency-restart.h>
84510
84511 #endif /* _LINUX_REBOOT_H */
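
Marking the halt/restart/power-off entry points `__noreturn` is both documentation and a checked contract: the compiler prunes dead code behind the calls and warns if an implementation could fall out the bottom, and accurate control-flow facts matter to the gcc plugins this patch adds. A minimal runnable illustration:

    #include <stdio.h>
    #include <stdlib.h>

    /* Userspace model; the kernel spells the attribute __noreturn. */
    static void my_power_off(void) __attribute__((noreturn));

    static void my_power_off(void)
    {
        puts("powering off");
        exit(0);    /* must not return; gcc diagnoses a fall-through */
    }

    int main(void)
    {
        my_power_off();
        /* unreachable: everything after the call can be discarded */
    }
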
84512diff --git a/include/linux/regset.h b/include/linux/regset.h
84513index 8e0c9fe..ac4d221 100644
84514--- a/include/linux/regset.h
84515+++ b/include/linux/regset.h
84516@@ -161,7 +161,8 @@ struct user_regset {
84517 unsigned int align;
84518 unsigned int bias;
84519 unsigned int core_note_type;
84520-};
84521+} __do_const;
84522+typedef struct user_regset __no_const user_regset_no_const;
84523
84524 /**
84525 * struct user_regset_view - available regsets
84526diff --git a/include/linux/relay.h b/include/linux/relay.h
84527index d7c8359..818daf5 100644
84528--- a/include/linux/relay.h
84529+++ b/include/linux/relay.h
84530@@ -157,7 +157,7 @@ struct rchan_callbacks
84531 * The callback should return 0 if successful, negative if not.
84532 */
84533 int (*remove_buf_file)(struct dentry *dentry);
84534-};
84535+} __no_const;
84536
84537 /*
84538 * CONFIG_RELAY kernel API, kernel/relay.c
84539diff --git a/include/linux/rio.h b/include/linux/rio.h
84540index 6bda06f..bf39a9b 100644
84541--- a/include/linux/rio.h
84542+++ b/include/linux/rio.h
84543@@ -358,7 +358,7 @@ struct rio_ops {
84544 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
84545 u64 rstart, u32 size, u32 flags);
84546 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
84547-};
84548+} __no_const;
84549
84550 #define RIO_RESOURCE_MEM 0x00000100
84551 #define RIO_RESOURCE_DOORBELL 0x00000200
84552diff --git a/include/linux/rmap.h b/include/linux/rmap.h
84553index d9d7e7e..86f47ac 100644
84554--- a/include/linux/rmap.h
84555+++ b/include/linux/rmap.h
84556@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
84557 void anon_vma_init(void); /* create anon_vma_cachep */
84558 int anon_vma_prepare(struct vm_area_struct *);
84559 void unlink_anon_vmas(struct vm_area_struct *);
84560-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
84561-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
84562+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
84563+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
84564
84565 static inline void anon_vma_merge(struct vm_area_struct *vma,
84566 struct vm_area_struct *next)
84567diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
84568index ed8f9e7..999bc96 100644
84569--- a/include/linux/scatterlist.h
84570+++ b/include/linux/scatterlist.h
84571@@ -1,6 +1,7 @@
84572 #ifndef _LINUX_SCATTERLIST_H
84573 #define _LINUX_SCATTERLIST_H
84574
84575+#include <linux/sched.h>
84576 #include <linux/string.h>
84577 #include <linux/bug.h>
84578 #include <linux/mm.h>
84579@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
84580 #ifdef CONFIG_DEBUG_SG
84581 BUG_ON(!virt_addr_valid(buf));
84582 #endif
84583+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84584+ if (object_starts_on_stack(buf)) {
84585+ void *adjbuf = buf - current->stack + current->lowmem_stack;
84586+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
84587+ } else
84588+#endif
84589 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
84590 }
84591
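
Under GRKERNSEC_KSTACKOVERFLOW, task stacks move into vmalloc space (see the lowmem_stack field added to task_struct in the next file), and vmalloc pages must not go through virt_to_page() for scatterlist/DMA use. sg_set_buf() therefore detects an on-stack buffer and rewrites it into the physically contiguous lowmem alias that backs the same data; the arithmetic is a plain linear re-basing. Runnable model of the pointer math:

    #include <stdio.h>

    /* Model: two equal-sized regions back the same task stack; translate
     * a pointer in one to the corresponding byte of the other. */
    int main(void)
    {
        char vmap_stack[256];           /* stands in for current->stack */
        char lowmem_stack[256];         /* DMA-safe alias               */
        char *buf = vmap_stack + 100;   /* an on-stack I/O buffer       */

        char *adjbuf = buf - vmap_stack + lowmem_stack;

        /* same offset, different base -- the computation in the hunk */
        printf("offset preserved: %td == %td\n",
               buf - vmap_stack, adjbuf - lowmem_stack);
        return 0;
    }
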
84592diff --git a/include/linux/sched.h b/include/linux/sched.h
84593index 8db31ef..0af1f81 100644
84594--- a/include/linux/sched.h
84595+++ b/include/linux/sched.h
84596@@ -133,6 +133,7 @@ struct fs_struct;
84597 struct perf_event_context;
84598 struct blk_plug;
84599 struct filename;
84600+struct linux_binprm;
84601
84602 #define VMACACHE_BITS 2
84603 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
84604@@ -415,7 +416,7 @@ extern char __sched_text_start[], __sched_text_end[];
84605 extern int in_sched_functions(unsigned long addr);
84606
84607 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
84608-extern signed long schedule_timeout(signed long timeout);
84609+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
84610 extern signed long schedule_timeout_interruptible(signed long timeout);
84611 extern signed long schedule_timeout_killable(signed long timeout);
84612 extern signed long schedule_timeout_uninterruptible(signed long timeout);
84613@@ -426,6 +427,19 @@ struct nsproxy;
84614 struct user_namespace;
84615
84616 #ifdef CONFIG_MMU
84617+
84618+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
84619+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
84620+#else
84621+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
84622+{
84623+ return 0;
84624+}
84625+#endif
84626+
84627+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
84628+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
84629+
84630 extern void arch_pick_mmap_layout(struct mm_struct *mm);
84631 extern unsigned long
84632 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
84633@@ -724,6 +738,17 @@ struct signal_struct {
84634 #ifdef CONFIG_TASKSTATS
84635 struct taskstats *stats;
84636 #endif
84637+
84638+#ifdef CONFIG_GRKERNSEC
84639+ u32 curr_ip;
84640+ u32 saved_ip;
84641+ u32 gr_saddr;
84642+ u32 gr_daddr;
84643+ u16 gr_sport;
84644+ u16 gr_dport;
84645+ u8 used_accept:1;
84646+#endif
84647+
84648 #ifdef CONFIG_AUDIT
84649 unsigned audit_tty;
84650 unsigned audit_tty_log_passwd;
84651@@ -750,7 +775,7 @@ struct signal_struct {
84652 struct mutex cred_guard_mutex; /* guard against foreign influences on
84653 * credential calculations
84654 * (notably. ptrace) */
84655-};
84656+} __randomize_layout;
84657
84658 /*
84659 * Bits in flags field of signal_struct.
84660@@ -803,6 +828,14 @@ struct user_struct {
84661 struct key *session_keyring; /* UID's default session keyring */
84662 #endif
84663
84664+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
84665+ unsigned char kernel_banned;
84666+#endif
84667+#ifdef CONFIG_GRKERNSEC_BRUTE
84668+ unsigned char suid_banned;
84669+ unsigned long suid_ban_expires;
84670+#endif
84671+
84672 /* Hash table maintenance information */
84673 struct hlist_node uidhash_node;
84674 kuid_t uid;
84675@@ -810,7 +843,7 @@ struct user_struct {
84676 #ifdef CONFIG_PERF_EVENTS
84677 atomic_long_t locked_vm;
84678 #endif
84679-};
84680+} __randomize_layout;
84681
84682 extern int uids_sysfs_init(void);
84683
84684@@ -1274,6 +1307,9 @@ enum perf_event_task_context {
84685 struct task_struct {
84686 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
84687 void *stack;
84688+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84689+ void *lowmem_stack;
84690+#endif
84691 atomic_t usage;
84692 unsigned int flags; /* per process flags, defined below */
84693 unsigned int ptrace;
84694@@ -1405,8 +1441,8 @@ struct task_struct {
84695 struct list_head thread_node;
84696
84697 struct completion *vfork_done; /* for vfork() */
84698- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
84699- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84700+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
84701+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84702
84703 cputime_t utime, stime, utimescaled, stimescaled;
84704 cputime_t gtime;
84705@@ -1431,11 +1467,6 @@ struct task_struct {
84706 struct task_cputime cputime_expires;
84707 struct list_head cpu_timers[3];
84708
84709-/* process credentials */
84710- const struct cred __rcu *real_cred; /* objective and real subjective task
84711- * credentials (COW) */
84712- const struct cred __rcu *cred; /* effective (overridable) subjective task
84713- * credentials (COW) */
84714 char comm[TASK_COMM_LEN]; /* executable name excluding path
84715 - access with [gs]et_task_comm (which lock
84716 it with task_lock())
84717@@ -1453,6 +1484,10 @@ struct task_struct {
84718 #endif
84719 /* CPU-specific state of this task */
84720 struct thread_struct thread;
84721+/* thread_info moved to task_struct */
84722+#ifdef CONFIG_X86
84723+ struct thread_info tinfo;
84724+#endif
84725 /* filesystem information */
84726 struct fs_struct *fs;
84727 /* open file information */
84728@@ -1527,6 +1562,10 @@ struct task_struct {
84729 gfp_t lockdep_reclaim_gfp;
84730 #endif
84731
84732+/* process credentials */
84733+ const struct cred __rcu *real_cred; /* objective and real subjective task
84734+ * credentials (COW) */
84735+
84736 /* journalling filesystem info */
84737 void *journal_info;
84738
84739@@ -1565,6 +1604,10 @@ struct task_struct {
84740 /* cg_list protected by css_set_lock and tsk->alloc_lock */
84741 struct list_head cg_list;
84742 #endif
84743+
84744+ const struct cred __rcu *cred; /* effective (overridable) subjective task
84745+ * credentials (COW) */
84746+
84747 #ifdef CONFIG_FUTEX
84748 struct robust_list_head __user *robust_list;
84749 #ifdef CONFIG_COMPAT
84750@@ -1673,7 +1716,7 @@ struct task_struct {
84751 * Number of functions that haven't been traced
84752 * because of depth overrun.
84753 */
84754- atomic_t trace_overrun;
84755+ atomic_unchecked_t trace_overrun;
84756 /* Pause for the tracing */
84757 atomic_t tracing_graph_pause;
84758 #endif
84759@@ -1701,7 +1744,78 @@ struct task_struct {
84760 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
84761 unsigned long task_state_change;
84762 #endif
84763-};
84764+
84765+#ifdef CONFIG_GRKERNSEC
84766+ /* grsecurity */
84767+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
84768+ u64 exec_id;
84769+#endif
84770+#ifdef CONFIG_GRKERNSEC_SETXID
84771+ const struct cred *delayed_cred;
84772+#endif
84773+ struct dentry *gr_chroot_dentry;
84774+ struct acl_subject_label *acl;
84775+ struct acl_subject_label *tmpacl;
84776+ struct acl_role_label *role;
84777+ struct file *exec_file;
84778+ unsigned long brute_expires;
84779+ u16 acl_role_id;
84780+ u8 inherited;
84781+ /* is this the task that authenticated to the special role */
84782+ u8 acl_sp_role;
84783+ u8 is_writable;
84784+ u8 brute;
84785+ u8 gr_is_chrooted;
84786+#endif
84787+
84788+} __randomize_layout;
84789+
84790+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
84791+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
84792+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
84793+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
84794+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
84795+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
84796+
84797+#ifdef CONFIG_PAX_SOFTMODE
84798+extern int pax_softmode;
84799+#endif
84800+
84801+extern int pax_check_flags(unsigned long *);
84802+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
84803+
84804+/* if tsk != current then task_lock must be held on it */
84805+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
84806+static inline unsigned long pax_get_flags(struct task_struct *tsk)
84807+{
84808+ if (likely(tsk->mm))
84809+ return tsk->mm->pax_flags;
84810+ else
84811+ return 0UL;
84812+}
84813+
84814+/* if tsk != current then task_lock must be held on it */
84815+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
84816+{
84817+ if (likely(tsk->mm)) {
84818+ tsk->mm->pax_flags = flags;
84819+ return 0;
84820+ }
84821+ return -EINVAL;
84822+}
84823+#endif
84824+
84825+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
84826+extern void pax_set_initial_flags(struct linux_binprm *bprm);
84827+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
84828+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
84829+#endif
84830+
84831+struct path;
84832+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
84833+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
84834+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
84835+extern void pax_report_refcount_overflow(struct pt_regs *regs);
84836
84837 /* Future-safe accessor for struct task_struct's cpus_allowed. */
84838 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
84839@@ -1783,7 +1897,7 @@ struct pid_namespace;
84840 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
84841 struct pid_namespace *ns);
84842
84843-static inline pid_t task_pid_nr(struct task_struct *tsk)
84844+static inline pid_t task_pid_nr(const struct task_struct *tsk)
84845 {
84846 return tsk->pid;
84847 }
84848@@ -2150,6 +2264,25 @@ extern u64 sched_clock_cpu(int cpu);
84849
84850 extern void sched_clock_init(void);
84851
84852+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84853+static inline void populate_stack(void)
84854+{
84855+ struct task_struct *curtask = current;
84856+ int c;
84857+ int *ptr = curtask->stack;
84858+ int *end = curtask->stack + THREAD_SIZE;
84859+
84860+ while (ptr < end) {
84861+ c = *(volatile int *)ptr;
84862+ ptr += PAGE_SIZE/sizeof(int);
84863+ }
84864+}
84865+#else
84866+static inline void populate_stack(void)
84867+{
84868+}
84869+#endif
84870+
84871 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
84872 static inline void sched_clock_tick(void)
84873 {
84874@@ -2283,7 +2416,9 @@ void yield(void);
84875 extern struct exec_domain default_exec_domain;
84876
84877 union thread_union {
84878+#ifndef CONFIG_X86
84879 struct thread_info thread_info;
84880+#endif
84881 unsigned long stack[THREAD_SIZE/sizeof(long)];
84882 };
84883
84884@@ -2316,6 +2451,7 @@ extern struct pid_namespace init_pid_ns;
84885 */
84886
84887 extern struct task_struct *find_task_by_vpid(pid_t nr);
84888+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
84889 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
84890 struct pid_namespace *ns);
84891
84892@@ -2480,7 +2616,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
84893 extern void exit_itimers(struct signal_struct *);
84894 extern void flush_itimer_signals(void);
84895
84896-extern void do_group_exit(int);
84897+extern __noreturn void do_group_exit(int);
84898
84899 extern int do_execve(struct filename *,
84900 const char __user * const __user *,
84901@@ -2701,9 +2837,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
84902 #define task_stack_end_corrupted(task) \
84903 (*(end_of_stack(task)) != STACK_END_MAGIC)
84904
84905-static inline int object_is_on_stack(void *obj)
84906+static inline int object_starts_on_stack(const void *obj)
84907 {
84908- void *stack = task_stack_page(current);
84909+ const void *stack = task_stack_page(current);
84910
84911 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
84912 }
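
This is the task-side core of the patch: real_cred and cred are deliberately pulled apart so the two credential pointers no longer sit adjacent in the (now __randomize_layout'd) task_struct, presumably to frustrate exploits that locate and overwrite both at once; per-task RBAC and PaX state is appended; set_child_tid/clear_child_tid become pid_t pointers to match what is actually stored through them; and on x86 thread_info moves into task_struct so it no longer shares the overflowable stack it describes, which is why union thread_union drops it there. populate_stack() supports KSTACKOVERFLOW: a vmalloc'd stack is demand-faulted, and faulting is not tolerable in some low-level paths, so the helper touches one word per page to fault the whole range in up front. The walk in isolation:

    /* Model of the pre-fault walk: read one word per page so the range
     * is resident before use in a context that must not fault. */
    static void prefault_range(void *start, unsigned long size,
                               unsigned long page_size)
    {
        volatile int *ptr = start;
        int sink;

        while ((char *)ptr < (char *)start + size) {
            sink = *ptr;                     /* touch this page */
            (void)sink;
            ptr += page_size / sizeof(int);  /* advance a page  */
        }
    }
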
84913diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
84914index 596a0e0..bea77ec 100644
84915--- a/include/linux/sched/sysctl.h
84916+++ b/include/linux/sched/sysctl.h
84917@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
84918 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
84919
84920 extern int sysctl_max_map_count;
84921+extern unsigned long sysctl_heap_stack_gap;
84922
84923 extern unsigned int sysctl_sched_latency;
84924 extern unsigned int sysctl_sched_min_granularity;
84925diff --git a/include/linux/security.h b/include/linux/security.h
84926index ba96471..74fb3f6 100644
84927--- a/include/linux/security.h
84928+++ b/include/linux/security.h
84929@@ -27,6 +27,7 @@
84930 #include <linux/slab.h>
84931 #include <linux/err.h>
84932 #include <linux/string.h>
84933+#include <linux/grsecurity.h>
84934
84935 struct linux_binprm;
84936 struct cred;
84937@@ -116,8 +117,6 @@ struct seq_file;
84938
84939 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
84940
84941-void reset_security_ops(void);
84942-
84943 #ifdef CONFIG_MMU
84944 extern unsigned long mmap_min_addr;
84945 extern unsigned long dac_mmap_min_addr;
84946@@ -1729,7 +1728,7 @@ struct security_operations {
84947 struct audit_context *actx);
84948 void (*audit_rule_free) (void *lsmrule);
84949 #endif /* CONFIG_AUDIT */
84950-};
84951+} __randomize_layout;
84952
84953 /* prototypes */
84954 extern int security_init(void);
84955diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
84956index dc368b8..e895209 100644
84957--- a/include/linux/semaphore.h
84958+++ b/include/linux/semaphore.h
84959@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
84960 }
84961
84962 extern void down(struct semaphore *sem);
84963-extern int __must_check down_interruptible(struct semaphore *sem);
84964+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
84965 extern int __must_check down_killable(struct semaphore *sem);
84966 extern int __must_check down_trylock(struct semaphore *sem);
84967 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
84968diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
84969index cf6a9da..bd86b1f 100644
84970--- a/include/linux/seq_file.h
84971+++ b/include/linux/seq_file.h
84972@@ -27,6 +27,9 @@ struct seq_file {
84973 struct mutex lock;
84974 const struct seq_operations *op;
84975 int poll_event;
84976+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
84977+ u64 exec_id;
84978+#endif
84979 #ifdef CONFIG_USER_NS
84980 struct user_namespace *user_ns;
84981 #endif
84982@@ -39,6 +42,7 @@ struct seq_operations {
84983 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
84984 int (*show) (struct seq_file *m, void *v);
84985 };
84986+typedef struct seq_operations __no_const seq_operations_no_const;
84987
84988 #define SEQ_SKIP 1
84989
84990@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
84991
84992 char *mangle_path(char *s, const char *p, const char *esc);
84993 int seq_open(struct file *, const struct seq_operations *);
84994+int seq_open_restrict(struct file *, const struct seq_operations *);
84995 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
84996 loff_t seq_lseek(struct file *, loff_t, int);
84997 int seq_release(struct inode *, struct file *);
84998@@ -153,6 +158,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
84999 }
85000
85001 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
85002+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
85003 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
85004 int single_release(struct inode *, struct file *);
85005 void *__seq_open_private(struct file *, const struct seq_operations *, int);
85006diff --git a/include/linux/shm.h b/include/linux/shm.h
85007index 6fb8016..ab4465e 100644
85008--- a/include/linux/shm.h
85009+++ b/include/linux/shm.h
85010@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
85011 /* The task created the shm object. NULL if the task is dead. */
85012 struct task_struct *shm_creator;
85013 struct list_head shm_clist; /* list by creator */
85014+#ifdef CONFIG_GRKERNSEC
85015+ u64 shm_createtime;
85016+ pid_t shm_lapid;
85017+#endif
85018 };
85019
85020 /* shm_mode upper byte flags */
85021diff --git a/include/linux/signal.h b/include/linux/signal.h
85022index ab1e039..ad4229e 100644
85023--- a/include/linux/signal.h
85024+++ b/include/linux/signal.h
85025@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
85026 * know it'll be handled, so that they don't get converted to
85027 * SIGKILL or just silently dropped.
85028 */
85029- kernel_sigaction(sig, (__force __sighandler_t)2);
85030+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
85031 }
85032
85033 static inline void disallow_signal(int sig)
85034diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
85035index 85ab7d7..eb1585a 100644
85036--- a/include/linux/skbuff.h
85037+++ b/include/linux/skbuff.h
85038@@ -763,7 +763,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
85039 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
85040 int node);
85041 struct sk_buff *build_skb(void *data, unsigned int frag_size);
85042-static inline struct sk_buff *alloc_skb(unsigned int size,
85043+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
85044 gfp_t priority)
85045 {
85046 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
85047@@ -1952,7 +1952,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
85048 return skb->inner_transport_header - skb->inner_network_header;
85049 }
85050
85051-static inline int skb_network_offset(const struct sk_buff *skb)
85052+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
85053 {
85054 return skb_network_header(skb) - skb->data;
85055 }
85056@@ -2012,7 +2012,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
85057 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
85058 */
85059 #ifndef NET_SKB_PAD
85060-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
85061+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
85062 #endif
85063
85064 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
85065@@ -2655,9 +2655,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
85066 int *err);
85067 unsigned int datagram_poll(struct file *file, struct socket *sock,
85068 struct poll_table_struct *wait);
85069-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85070+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85071 struct iov_iter *to, int size);
85072-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85073+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85074 struct msghdr *msg, int size)
85075 {
85076 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
85077@@ -3131,6 +3131,9 @@ static inline void nf_reset(struct sk_buff *skb)
85078 nf_bridge_put(skb->nf_bridge);
85079 skb->nf_bridge = NULL;
85080 #endif
85081+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
85082+ skb->nf_trace = 0;
85083+#endif
85084 }
85085
85086 static inline void nf_reset_trace(struct sk_buff *skb)
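
Three independent tweaks here: the `__intentional_overflow()` annotations exempt deliberate wraparound in these helpers from the size_overflow plugin (the indices appear to name parameter positions, with -1 standing for the return value); nf_reset() additionally clears nf_trace when the TRACE target is built, so a recycled skb cannot carry a stale trace flag; and NET_SKB_PAD's 32 becomes `_AC(32,UL)`, presumably to keep the max() operands type-consistent on configurations where L1_CACHE_BYTES is an unsigned long expression. _AC() comes from <uapi/linux/const.h> and pastes a suffix in C while dropping it for assembly:

    /* Simplified from include/uapi/linux/const.h: */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X             /* assembler: bare constant */
    #else
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)    /* C: _AC(32,UL) -> (32UL)  */
    #endif
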
85087diff --git a/include/linux/slab.h b/include/linux/slab.h
85088index 9a139b6..aab37b4 100644
85089--- a/include/linux/slab.h
85090+++ b/include/linux/slab.h
85091@@ -14,15 +14,29 @@
85092 #include <linux/gfp.h>
85093 #include <linux/types.h>
85094 #include <linux/workqueue.h>
85095-
85096+#include <linux/err.h>
85097
85098 /*
85099 * Flags to pass to kmem_cache_create().
85100 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
85101 */
85102 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
85103+
85104+#ifdef CONFIG_PAX_USERCOPY_SLABS
85105+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
85106+#else
85107+#define SLAB_USERCOPY 0x00000000UL
85108+#endif
85109+
85110 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
85111 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
85112+
85113+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85114+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
85115+#else
85116+#define SLAB_NO_SANITIZE 0x00000000UL
85117+#endif
85118+
85119 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
85120 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
85121 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
85122@@ -98,10 +112,13 @@
85123 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
85124 * Both make kfree a no-op.
85125 */
85126-#define ZERO_SIZE_PTR ((void *)16)
85127+#define ZERO_SIZE_PTR \
85128+({ \
85129+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
85130+ (void *)(-MAX_ERRNO-1L); \
85131+})
85132
85133-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
85134- (unsigned long)ZERO_SIZE_PTR)
85135+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
85136
85137 #include <linux/kmemleak.h>
85138
85139@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
85140 void kfree(const void *);
85141 void kzfree(const void *);
85142 size_t ksize(const void *);
85143+const char *check_heap_object(const void *ptr, unsigned long n);
85144+bool is_usercopy_object(const void *ptr);
85145
85146 /*
85147 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
85148@@ -236,6 +255,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
85149 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85150 #endif
85151
85152+#ifdef CONFIG_PAX_USERCOPY_SLABS
85153+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
85154+#endif
85155+
85156 /*
85157 * Figure out which kmalloc slab an allocation of a certain size
85158 * belongs to.
85159@@ -244,7 +267,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85160 * 2 = 120 .. 192 bytes
85161 * n = 2^(n-1) .. 2^n -1
85162 */
85163-static __always_inline int kmalloc_index(size_t size)
85164+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
85165 {
85166 if (!size)
85167 return 0;
85168@@ -287,14 +310,14 @@ static __always_inline int kmalloc_index(size_t size)
85169 }
85170 #endif /* !CONFIG_SLOB */
85171
85172-void *__kmalloc(size_t size, gfp_t flags);
85173+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
85174 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
85175
85176 #ifdef CONFIG_NUMA
85177-void *__kmalloc_node(size_t size, gfp_t flags, int node);
85178+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
85179 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
85180 #else
85181-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
85182+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
85183 {
85184 return __kmalloc(size, flags);
85185 }
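
The headline change is ZERO_SIZE_PTR: stock kernels hand back (void *)16 for kmalloc(0), a low address that may be reachable through a mapping, whereas `(void *)(-MAX_ERRNO-1L)` sits just below the error-pointer window in the never-mappable top page (the BUILD_BUG_ON is a compile-time sanity check tying MAX_ERRNO to the page layout, hence the new <linux/err.h> include). The rewritten ZERO_OR_NULL_PTR then tests "NULL, ZERO_SIZE_PTR, or any error pointer above it" in a single unsigned compare by letting x - 1 wrap. The remaining hunks add the PAX_USERCOPY_SLABS whitelist flag (SLAB_USERCOPY), the PAX_MEMORY_SANITIZE opt-out (SLAB_NO_SANITIZE), and size_overflow/alloc_size annotations on the kmalloc entry points. A runnable check of the pointer trick:

    #include <stdio.h>

    #define MAX_ERRNO 4095UL
    #define ZSP ((unsigned long)(-(long)MAX_ERRNO - 1L))   /* 0x...f000 */

    /* one-compare form from the hunk: x - 1 wraps NULL to ULONG_MAX */
    #define ZERO_OR_NULL(x) ((unsigned long)(x) - 1 >= ZSP - 1)

    int main(void)
    {
        printf("ZERO_SIZE_PTR   = %#lx\n", ZSP);
        printf("NULL          -> %d\n", ZERO_OR_NULL(0UL));      /* 1 */
        printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL(ZSP));      /* 1 */
        printf("ERR_PTR(-22)  -> %d\n", ZERO_OR_NULL(-22UL));    /* 1 */
        printf("real pointer  -> %d\n", ZERO_OR_NULL(0x1000UL)); /* 0 */
        return 0;
    }
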
85186diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
85187index b869d16..1453c73 100644
85188--- a/include/linux/slab_def.h
85189+++ b/include/linux/slab_def.h
85190@@ -40,7 +40,7 @@ struct kmem_cache {
85191 /* 4) cache creation/removal */
85192 const char *name;
85193 struct list_head list;
85194- int refcount;
85195+ atomic_t refcount;
85196 int object_size;
85197 int align;
85198
85199@@ -56,10 +56,14 @@ struct kmem_cache {
85200 unsigned long node_allocs;
85201 unsigned long node_frees;
85202 unsigned long node_overflow;
85203- atomic_t allochit;
85204- atomic_t allocmiss;
85205- atomic_t freehit;
85206- atomic_t freemiss;
85207+ atomic_unchecked_t allochit;
85208+ atomic_unchecked_t allocmiss;
85209+ atomic_unchecked_t freehit;
85210+ atomic_unchecked_t freemiss;
85211+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85212+ atomic_unchecked_t sanitized;
85213+ atomic_unchecked_t not_sanitized;
85214+#endif
85215
85216 /*
85217 * If debugging is enabled, then the allocator can add additional
85218diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
85219index d82abd4..408c3a0 100644
85220--- a/include/linux/slub_def.h
85221+++ b/include/linux/slub_def.h
85222@@ -74,7 +74,7 @@ struct kmem_cache {
85223 struct kmem_cache_order_objects max;
85224 struct kmem_cache_order_objects min;
85225 gfp_t allocflags; /* gfp flags to use on each alloc */
85226- int refcount; /* Refcount for slab cache destroy */
85227+ atomic_t refcount; /* Refcount for slab cache destroy */
85228 void (*ctor)(void *);
85229 int inuse; /* Offset to metadata */
85230 int align; /* Alignment */
85231diff --git a/include/linux/smp.h b/include/linux/smp.h
85232index 93dff5f..933c561 100644
85233--- a/include/linux/smp.h
85234+++ b/include/linux/smp.h
85235@@ -176,7 +176,9 @@ static inline void wake_up_all_idle_cpus(void) { }
85236 #endif
85237
85238 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
85239+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
85240 #define put_cpu() preempt_enable()
85241+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
85242
85243 /*
85244 * Callback to arch code if there's nosmp or maxcpus=0 on the
85245diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
85246index 46cca4c..3323536 100644
85247--- a/include/linux/sock_diag.h
85248+++ b/include/linux/sock_diag.h
85249@@ -11,7 +11,7 @@ struct sock;
85250 struct sock_diag_handler {
85251 __u8 family;
85252 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
85253-};
85254+} __do_const;
85255
85256 int sock_diag_register(const struct sock_diag_handler *h);
85257 void sock_diag_unregister(const struct sock_diag_handler *h);
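
__do_const is a marker for the constify GCC plugin: every instance of the tagged structure is treated as const, so ops tables like this one land in .rodata and cannot be retargeted by a runtime write. Registration is unaffected because the register function already takes a const pointer; a sketch with hypothetical names:

    static int my_diag_dump(struct sk_buff *skb, struct nlmsghdr *nlh); /* hypothetical */

    static const struct sock_diag_handler my_diag_handler = {
            .family = AF_INET,
            .dump   = my_diag_dump,
    };

    static int __init my_diag_init(void)
    {
            /* const parameter, so the .rodata-resident table registers fine */
            return sock_diag_register(&my_diag_handler);
    }
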
85258diff --git a/include/linux/sonet.h b/include/linux/sonet.h
85259index 680f9a3..f13aeb0 100644
85260--- a/include/linux/sonet.h
85261+++ b/include/linux/sonet.h
85262@@ -7,7 +7,7 @@
85263 #include <uapi/linux/sonet.h>
85264
85265 struct k_sonet_stats {
85266-#define __HANDLE_ITEM(i) atomic_t i
85267+#define __HANDLE_ITEM(i) atomic_unchecked_t i
85268 __SONET_ITEMS
85269 #undef __HANDLE_ITEM
85270 };
85271diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
85272index 07d8e53..dc934c9 100644
85273--- a/include/linux/sunrpc/addr.h
85274+++ b/include/linux/sunrpc/addr.h
85275@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
85276 {
85277 switch (sap->sa_family) {
85278 case AF_INET:
85279- return ntohs(((struct sockaddr_in *)sap)->sin_port);
85280+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
85281 case AF_INET6:
85282- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
85283+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
85284 }
85285 return 0;
85286 }
85287@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
85288 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
85289 const struct sockaddr *src)
85290 {
85291- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
85292+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
85293 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
85294
85295 dsin->sin_family = ssin->sin_family;
85296@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
85297 if (sa->sa_family != AF_INET6)
85298 return 0;
85299
85300- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
85301+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
85302 }
85303
85304 #endif /* _LINUX_SUNRPC_ADDR_H */
85305diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
85306index 598ba80..d90cba6 100644
85307--- a/include/linux/sunrpc/clnt.h
85308+++ b/include/linux/sunrpc/clnt.h
85309@@ -100,7 +100,7 @@ struct rpc_procinfo {
85310 unsigned int p_timer; /* Which RTT timer to use */
85311 u32 p_statidx; /* Which procedure to account */
85312 const char * p_name; /* name of procedure */
85313-};
85314+} __do_const;
85315
85316 #ifdef __KERNEL__
85317
85318diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
85319index 6f22cfe..9fd0909 100644
85320--- a/include/linux/sunrpc/svc.h
85321+++ b/include/linux/sunrpc/svc.h
85322@@ -420,7 +420,7 @@ struct svc_procedure {
85323 unsigned int pc_count; /* call count */
85324 unsigned int pc_cachetype; /* cache info (NFS) */
85325 unsigned int pc_xdrressize; /* maximum size of XDR reply */
85326-};
85327+} __do_const;
85328
85329 /*
85330 * Function prototypes.
85331diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
85332index 975da75..318c083 100644
85333--- a/include/linux/sunrpc/svc_rdma.h
85334+++ b/include/linux/sunrpc/svc_rdma.h
85335@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
85336 extern unsigned int svcrdma_max_requests;
85337 extern unsigned int svcrdma_max_req_size;
85338
85339-extern atomic_t rdma_stat_recv;
85340-extern atomic_t rdma_stat_read;
85341-extern atomic_t rdma_stat_write;
85342-extern atomic_t rdma_stat_sq_starve;
85343-extern atomic_t rdma_stat_rq_starve;
85344-extern atomic_t rdma_stat_rq_poll;
85345-extern atomic_t rdma_stat_rq_prod;
85346-extern atomic_t rdma_stat_sq_poll;
85347-extern atomic_t rdma_stat_sq_prod;
85348+extern atomic_unchecked_t rdma_stat_recv;
85349+extern atomic_unchecked_t rdma_stat_read;
85350+extern atomic_unchecked_t rdma_stat_write;
85351+extern atomic_unchecked_t rdma_stat_sq_starve;
85352+extern atomic_unchecked_t rdma_stat_rq_starve;
85353+extern atomic_unchecked_t rdma_stat_rq_poll;
85354+extern atomic_unchecked_t rdma_stat_rq_prod;
85355+extern atomic_unchecked_t rdma_stat_sq_poll;
85356+extern atomic_unchecked_t rdma_stat_sq_prod;
85357
85358 #define RPCRDMA_VERSION 1
85359
85360diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
85361index 8d71d65..f79586e 100644
85362--- a/include/linux/sunrpc/svcauth.h
85363+++ b/include/linux/sunrpc/svcauth.h
85364@@ -120,7 +120,7 @@ struct auth_ops {
85365 int (*release)(struct svc_rqst *rq);
85366 void (*domain_release)(struct auth_domain *);
85367 int (*set_client)(struct svc_rqst *rq);
85368-};
85369+} __do_const;
85370
85371 #define SVC_GARBAGE 1
85372 #define SVC_SYSERR 2
85373diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
85374index e7a018e..49f8b17 100644
85375--- a/include/linux/swiotlb.h
85376+++ b/include/linux/swiotlb.h
85377@@ -60,7 +60,8 @@ extern void
85378
85379 extern void
85380 swiotlb_free_coherent(struct device *hwdev, size_t size,
85381- void *vaddr, dma_addr_t dma_handle);
85382+ void *vaddr, dma_addr_t dma_handle,
85383+ struct dma_attrs *attrs);
85384
85385 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
85386 unsigned long offset, size_t size,
85387diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
85388index 85893d7..4923581 100644
85389--- a/include/linux/syscalls.h
85390+++ b/include/linux/syscalls.h
85391@@ -99,10 +99,16 @@ union bpf_attr;
85392 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
85393
85394 #define __SC_DECL(t, a) t a
85395+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
85396 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
85397 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
85398 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
85399-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
85400+#define __SC_LONG(t, a) __typeof( \
85401+ __builtin_choose_expr( \
85402+ sizeof(t) > sizeof(int), \
85403+ (t) 0, \
85404+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
85405+ )) a
85406 #define __SC_CAST(t, a) (t) a
85407 #define __SC_ARGS(t, a) a
85408 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
85409@@ -384,11 +390,11 @@ asmlinkage long sys_sync(void);
85410 asmlinkage long sys_fsync(unsigned int fd);
85411 asmlinkage long sys_fdatasync(unsigned int fd);
85412 asmlinkage long sys_bdflush(int func, long data);
85413-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
85414- char __user *type, unsigned long flags,
85415+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
85416+ const char __user *type, unsigned long flags,
85417 void __user *data);
85418-asmlinkage long sys_umount(char __user *name, int flags);
85419-asmlinkage long sys_oldumount(char __user *name);
85420+asmlinkage long sys_umount(const char __user *name, int flags);
85421+asmlinkage long sys_oldumount(const char __user *name);
85422 asmlinkage long sys_truncate(const char __user *path, long length);
85423 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
85424 asmlinkage long sys_stat(const char __user *filename,
85425@@ -600,7 +606,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
85426 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
85427 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
85428 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
85429- struct sockaddr __user *, int);
85430+ struct sockaddr __user *, int) __intentional_overflow(0);
85431 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
85432 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
85433 unsigned int vlen, unsigned flags);
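
The __SC_LONG rework changes how sub-long syscall arguments are widened in the wrapper: upstream widened everything through signed long, which sign-extends unsigned 32-bit values on 64-bit kernels, whereas __TYPE_IS_U routes unsigned types through unsigned long so the widened type keeps the real argument's signedness (which is what the size_overflow plugin's tracking cares about). The difference, reduced to a standalone sketch:

    void widen_demo(void)
    {
            unsigned int len = 0xffffffffU;

            long          as_signed   = (int)len;  /* old path: sign-extends to -1 */
            unsigned long as_unsigned = len;       /* new path: stays 0xffffffff   */

            (void)as_signed; (void)as_unsigned;
    }
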
85434diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
85435index 27b3b0b..e093dd9 100644
85436--- a/include/linux/syscore_ops.h
85437+++ b/include/linux/syscore_ops.h
85438@@ -16,7 +16,7 @@ struct syscore_ops {
85439 int (*suspend)(void);
85440 void (*resume)(void);
85441 void (*shutdown)(void);
85442-};
85443+} __do_const;
85444
85445 extern void register_syscore_ops(struct syscore_ops *ops);
85446 extern void unregister_syscore_ops(struct syscore_ops *ops);
85447diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
85448index b7361f8..341a15a 100644
85449--- a/include/linux/sysctl.h
85450+++ b/include/linux/sysctl.h
85451@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
85452
85453 extern int proc_dostring(struct ctl_table *, int,
85454 void __user *, size_t *, loff_t *);
85455+extern int proc_dostring_modpriv(struct ctl_table *, int,
85456+ void __user *, size_t *, loff_t *);
85457 extern int proc_dointvec(struct ctl_table *, int,
85458 void __user *, size_t *, loff_t *);
85459 extern int proc_dointvec_minmax(struct ctl_table *, int,
85460@@ -113,7 +115,8 @@ struct ctl_table
85461 struct ctl_table_poll *poll;
85462 void *extra1;
85463 void *extra2;
85464-};
85465+} __do_const __randomize_layout;
85466+typedef struct ctl_table __no_const ctl_table_no_const;
85467
85468 struct ctl_node {
85469 struct rb_node node;
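
Because struct ctl_table is now constified, code that legitimately builds sysctl tables at runtime needs an escape hatch, which is what the ctl_table_no_const typedef provides; the netns users later in this patch (nf_proto_net, netns_ipvs) switch to it. A sketch of the runtime-built pattern, with hypothetical names:

    static int example_value;

    static ctl_table_no_const *alloc_example_table(const char *procname)
    {
            ctl_table_no_const *t = kcalloc(2, sizeof(*t), GFP_KERNEL);

            if (!t)
                    return NULL;
            t[0].procname     = procname;       /* writable: not constified */
            t[0].data         = &example_value;
            t[0].maxlen       = sizeof(int);
            t[0].mode         = 0644;
            t[0].proc_handler = proc_dointvec;
            return t;                           /* t[1] stays the terminator */
    }
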
85470diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
85471index ddad161..a3efd26 100644
85472--- a/include/linux/sysfs.h
85473+++ b/include/linux/sysfs.h
85474@@ -34,7 +34,8 @@ struct attribute {
85475 struct lock_class_key *key;
85476 struct lock_class_key skey;
85477 #endif
85478-};
85479+} __do_const;
85480+typedef struct attribute __no_const attribute_no_const;
85481
85482 /**
85483 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
85484@@ -63,7 +64,8 @@ struct attribute_group {
85485 struct attribute *, int);
85486 struct attribute **attrs;
85487 struct bin_attribute **bin_attrs;
85488-};
85489+} __do_const;
85490+typedef struct attribute_group __no_const attribute_group_no_const;
85491
85492 /**
85493 * Use these macros to make defining attributes easier. See include/linux/device.h
85494@@ -137,7 +139,8 @@ struct bin_attribute {
85495 char *, loff_t, size_t);
85496 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
85497 struct vm_area_struct *vma);
85498-};
85499+} __do_const;
85500+typedef struct bin_attribute __no_const bin_attribute_no_const;
85501
85502 /**
85503 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
85504diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
85505index 387fa7d..3fcde6b 100644
85506--- a/include/linux/sysrq.h
85507+++ b/include/linux/sysrq.h
85508@@ -16,6 +16,7 @@
85509
85510 #include <linux/errno.h>
85511 #include <linux/types.h>
85512+#include <linux/compiler.h>
85513
85514 /* Possible values of bitmask for enabling sysrq functions */
85515 /* 0x0001 is reserved for enable everything */
85516@@ -33,7 +34,7 @@ struct sysrq_key_op {
85517 char *help_msg;
85518 char *action_msg;
85519 int enable_mask;
85520-};
85521+} __do_const;
85522
85523 #ifdef CONFIG_MAGIC_SYSRQ
85524
85525diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
85526index ff307b5..f1a4468 100644
85527--- a/include/linux/thread_info.h
85528+++ b/include/linux/thread_info.h
85529@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
85530 #error "no set_restore_sigmask() provided and default one won't work"
85531 #endif
85532
85533+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
85534+
85535+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
85536+{
85537+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
85538+}
85539+
85540 #endif /* __KERNEL__ */
85541
85542 #endif /* _LINUX_THREAD_INFO_H */
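
check_object_size() is the PAX_USERCOPY entry point: before a copy to or from userland, __check_object_size() verifies that the kernel buffer lies within a single slab object or the current stack frame, defeating exploits that pump an oversized length through copy_*_user(). The arch copy routines elsewhere in this patch call it roughly like this (sketch, helper name assumed):

    static inline unsigned long
    checked_copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            check_object_size(to, n, false);   /* false: kernel buffer is the dest */
            return __copy_from_user(to, from, n);
    }
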
85543diff --git a/include/linux/tty.h b/include/linux/tty.h
85544index 7d66ae5..0327149 100644
85545--- a/include/linux/tty.h
85546+++ b/include/linux/tty.h
85547@@ -202,7 +202,7 @@ struct tty_port {
85548 const struct tty_port_operations *ops; /* Port operations */
85549 spinlock_t lock; /* Lock protecting tty field */
85550 int blocked_open; /* Waiting to open */
85551- int count; /* Usage count */
85552+ atomic_t count; /* Usage count */
85553 wait_queue_head_t open_wait; /* Open waiters */
85554 wait_queue_head_t close_wait; /* Close waiters */
85555 wait_queue_head_t delta_msr_wait; /* Modem status change */
85556@@ -290,7 +290,7 @@ struct tty_struct {
85557 /* If the tty has a pending do_SAK, queue it here - akpm */
85558 struct work_struct SAK_work;
85559 struct tty_port *port;
85560-};
85561+} __randomize_layout;
85562
85563 /* Each of a tty's open files has private_data pointing to tty_file_private */
85564 struct tty_file_private {
85565@@ -549,7 +549,7 @@ extern int tty_port_open(struct tty_port *port,
85566 struct tty_struct *tty, struct file *filp);
85567 static inline int tty_port_users(struct tty_port *port)
85568 {
85569- return port->count + port->blocked_open;
85570+ return atomic_read(&port->count) + port->blocked_open;
85571 }
85572
85573 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
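
With tty_port->count converted to atomic_t, every bare ++/-- in the open/close paths becomes an atomic op, as the adjusted tty_port_users() above already shows. The call-site shape after the conversion, matching the tty_port changes elsewhere in this patch (sketch):

    /* open path */
    atomic_inc(&port->count);                  /* was: port->count++ */

    /* close path */
    if (atomic_dec_return(&port->count) == 0) {
            /* last reference dropped: proceed with port shutdown */
    }
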
85574diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
85575index 92e337c..f46757b 100644
85576--- a/include/linux/tty_driver.h
85577+++ b/include/linux/tty_driver.h
85578@@ -291,7 +291,7 @@ struct tty_operations {
85579 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
85580 #endif
85581 const struct file_operations *proc_fops;
85582-};
85583+} __do_const __randomize_layout;
85584
85585 struct tty_driver {
85586 int magic; /* magic number for this structure */
85587@@ -325,7 +325,7 @@ struct tty_driver {
85588
85589 const struct tty_operations *ops;
85590 struct list_head tty_drivers;
85591-};
85592+} __randomize_layout;
85593
85594 extern struct list_head tty_drivers;
85595
85596diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
85597index 00c9d68..bc0188b 100644
85598--- a/include/linux/tty_ldisc.h
85599+++ b/include/linux/tty_ldisc.h
85600@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
85601
85602 struct module *owner;
85603
85604- int refcount;
85605+ atomic_t refcount;
85606 };
85607
85608 struct tty_ldisc {
85609diff --git a/include/linux/types.h b/include/linux/types.h
85610index a0bb704..f511c77 100644
85611--- a/include/linux/types.h
85612+++ b/include/linux/types.h
85613@@ -177,10 +177,26 @@ typedef struct {
85614 int counter;
85615 } atomic_t;
85616
85617+#ifdef CONFIG_PAX_REFCOUNT
85618+typedef struct {
85619+ int counter;
85620+} atomic_unchecked_t;
85621+#else
85622+typedef atomic_t atomic_unchecked_t;
85623+#endif
85624+
85625 #ifdef CONFIG_64BIT
85626 typedef struct {
85627 long counter;
85628 } atomic64_t;
85629+
85630+#ifdef CONFIG_PAX_REFCOUNT
85631+typedef struct {
85632+ long counter;
85633+} atomic64_unchecked_t;
85634+#else
85635+typedef atomic64_t atomic64_unchecked_t;
85636+#endif
85637 #endif
85638
85639 struct list_head {
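
This is the core PAX_REFCOUNT split: plain atomic_t becomes overflow-checked (the arch code added by this patch traps on wrap), while atomic_unchecked_t keeps the old wrapping semantics for counters where overflow is harmless, such as statistics. When the feature is off, the two types are identical. The intended division of labor, as a sketch:

    static atomic_t           obj_refs   = ATOMIC_INIT(1);  /* wrap would trap  */
    static atomic_unchecked_t rx_packets = ATOMIC_INIT(0);  /* wrap is harmless */

    static void on_packet(void)
    {
            atomic_inc_unchecked(&rx_packets);  /* no overflow instrumentation */
    }
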
85640diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
85641index ecd3319..8a36ded 100644
85642--- a/include/linux/uaccess.h
85643+++ b/include/linux/uaccess.h
85644@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
85645 long ret; \
85646 mm_segment_t old_fs = get_fs(); \
85647 \
85648- set_fs(KERNEL_DS); \
85649 pagefault_disable(); \
85650- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
85651- pagefault_enable(); \
85652+ set_fs(KERNEL_DS); \
85653+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
85654 set_fs(old_fs); \
85655+ pagefault_enable(); \
85656 ret; \
85657 })
85658
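
The reordering in this macro nests the address-limit override strictly inside the pagefault-disabled window, so the task never runs preemptible while KERNEL_DS is in force; __force_user is the matching PaX sparse annotation. The new sequence, annotated (reading of the intent, not text from the patch):

    /* pagefault_disable();            enter atomic context first
     * set_fs(KERNEL_DS);              widen the limit only while atomic
     * __copy_from_user_inatomic(...);
     * set_fs(old_fs);                 limit restored before...
     * pagefault_enable();             ...preemption is possible again
     */
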
85659diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
85660index 2d1f9b6..d7a9fce 100644
85661--- a/include/linux/uidgid.h
85662+++ b/include/linux/uidgid.h
85663@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
85664
85665 #endif /* CONFIG_USER_NS */
85666
85667+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
85668+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
85669+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
85670+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
85671+
85672 #endif /* _LINUX_UIDGID_H */
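
The GR_GLOBAL_* helpers give grsecurity's logging and policy code a uniform way to reduce a namespaced kuid_t/kgid_t to an init-namespace id. Typical use, sketched with a hypothetical check:

    static bool gr_example_allowed(const struct cred *cred)
    {
            if (gr_is_global_root(cred->uid))
                    return true;
            printk(KERN_INFO "denied for uid %u\n", GR_GLOBAL_UID(cred->uid));
            return false;
    }
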
85673diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
85674index 32c0e83..671eb35 100644
85675--- a/include/linux/uio_driver.h
85676+++ b/include/linux/uio_driver.h
85677@@ -67,7 +67,7 @@ struct uio_device {
85678 struct module *owner;
85679 struct device *dev;
85680 int minor;
85681- atomic_t event;
85682+ atomic_unchecked_t event;
85683 struct fasync_struct *async_queue;
85684 wait_queue_head_t wait;
85685 struct uio_info *info;
85686diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
85687index 99c1b4d..562e6f3 100644
85688--- a/include/linux/unaligned/access_ok.h
85689+++ b/include/linux/unaligned/access_ok.h
85690@@ -4,34 +4,34 @@
85691 #include <linux/kernel.h>
85692 #include <asm/byteorder.h>
85693
85694-static inline u16 get_unaligned_le16(const void *p)
85695+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
85696 {
85697- return le16_to_cpup((__le16 *)p);
85698+ return le16_to_cpup((const __le16 *)p);
85699 }
85700
85701-static inline u32 get_unaligned_le32(const void *p)
85702+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
85703 {
85704- return le32_to_cpup((__le32 *)p);
85705+ return le32_to_cpup((const __le32 *)p);
85706 }
85707
85708-static inline u64 get_unaligned_le64(const void *p)
85709+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
85710 {
85711- return le64_to_cpup((__le64 *)p);
85712+ return le64_to_cpup((const __le64 *)p);
85713 }
85714
85715-static inline u16 get_unaligned_be16(const void *p)
85716+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
85717 {
85718- return be16_to_cpup((__be16 *)p);
85719+ return be16_to_cpup((const __be16 *)p);
85720 }
85721
85722-static inline u32 get_unaligned_be32(const void *p)
85723+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
85724 {
85725- return be32_to_cpup((__be32 *)p);
85726+ return be32_to_cpup((const __be32 *)p);
85727 }
85728
85729-static inline u64 get_unaligned_be64(const void *p)
85730+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
85731 {
85732- return be64_to_cpup((__be64 *)p);
85733+ return be64_to_cpup((const __be64 *)p);
85734 }
85735
85736 static inline void put_unaligned_le16(u16 val, void *p)
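
__intentional_overflow(-1) is a whitelist marker for the size_overflow plugin rather than a semantic change: the unaligned byte-order loads wrap by design, so instrumenting their arithmetic would only yield false positives. The same marker fits any helper built on deliberate modular arithmetic; a hypothetical example:

    /* Hypothetical 16-bit mixer; wrap-around is the point, so the
     * plugin is told to leave its arithmetic alone. */
    static inline u16 __intentional_overflow(-1) mix16(u16 a, u16 b)
    {
            return a * 31 + b;   /* deliberately mod 2^16 */
    }
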
85737diff --git a/include/linux/usb.h b/include/linux/usb.h
85738index 058a769..c17a1c2c 100644
85739--- a/include/linux/usb.h
85740+++ b/include/linux/usb.h
85741@@ -566,7 +566,7 @@ struct usb_device {
85742 int maxchild;
85743
85744 u32 quirks;
85745- atomic_t urbnum;
85746+ atomic_unchecked_t urbnum;
85747
85748 unsigned long active_duration;
85749
85750@@ -1650,7 +1650,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
85751
85752 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
85753 __u8 request, __u8 requesttype, __u16 value, __u16 index,
85754- void *data, __u16 size, int timeout);
85755+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
85756 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
85757 void *data, int len, int *actual_length, int timeout);
85758 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
85759diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
85760index 9fd9e48..e2c5f35 100644
85761--- a/include/linux/usb/renesas_usbhs.h
85762+++ b/include/linux/usb/renesas_usbhs.h
85763@@ -39,7 +39,7 @@ enum {
85764 */
85765 struct renesas_usbhs_driver_callback {
85766 int (*notify_hotplug)(struct platform_device *pdev);
85767-};
85768+} __no_const;
85769
85770 /*
85771 * callback functions for platform
85772diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
85773index 8297e5b..0dfae27 100644
85774--- a/include/linux/user_namespace.h
85775+++ b/include/linux/user_namespace.h
85776@@ -39,7 +39,7 @@ struct user_namespace {
85777 struct key *persistent_keyring_register;
85778 struct rw_semaphore persistent_keyring_register_sem;
85779 #endif
85780-};
85781+} __randomize_layout;
85782
85783 extern struct user_namespace init_user_ns;
85784
85785diff --git a/include/linux/utsname.h b/include/linux/utsname.h
85786index 5093f58..c103e58 100644
85787--- a/include/linux/utsname.h
85788+++ b/include/linux/utsname.h
85789@@ -25,7 +25,7 @@ struct uts_namespace {
85790 struct new_utsname name;
85791 struct user_namespace *user_ns;
85792 struct ns_common ns;
85793-};
85794+} __randomize_layout;
85795 extern struct uts_namespace init_uts_ns;
85796
85797 #ifdef CONFIG_UTS_NS
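
__randomize_layout, applied here and to many structures throughout the patch, hands the structure to the RANDSTRUCT plugin, which shuffles its field order per build using the seed hashed into the vermagic below. Code survives this only if it never assumes a fixed layout and initializes by field name; a sketch with a made-up structure:

    struct example_state {
            u32           id;
            void         *payload;
            unsigned int  len;
    } __randomize_layout;

    /* Designated initializers stay correct whatever order the plugin
     * picked; positional initializers or layout-dependent memcpy()s
     * would silently break. */
    static struct example_state st = { .id = 1, .len = 0 };
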
85798diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
85799index 6f8fbcf..4efc177 100644
85800--- a/include/linux/vermagic.h
85801+++ b/include/linux/vermagic.h
85802@@ -25,9 +25,42 @@
85803 #define MODULE_ARCH_VERMAGIC ""
85804 #endif
85805
85806+#ifdef CONFIG_PAX_REFCOUNT
85807+#define MODULE_PAX_REFCOUNT "REFCOUNT "
85808+#else
85809+#define MODULE_PAX_REFCOUNT ""
85810+#endif
85811+
85812+#ifdef CONSTIFY_PLUGIN
85813+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
85814+#else
85815+#define MODULE_CONSTIFY_PLUGIN ""
85816+#endif
85817+
85818+#ifdef STACKLEAK_PLUGIN
85819+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
85820+#else
85821+#define MODULE_STACKLEAK_PLUGIN ""
85822+#endif
85823+
85824+#ifdef RANDSTRUCT_PLUGIN
85825+#include <generated/randomize_layout_hash.h>
85826+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
85827+#else
85828+#define MODULE_RANDSTRUCT_PLUGIN
85829+#endif
85830+
85831+#ifdef CONFIG_GRKERNSEC
85832+#define MODULE_GRSEC "GRSEC "
85833+#else
85834+#define MODULE_GRSEC ""
85835+#endif
85836+
85837 #define VERMAGIC_STRING \
85838 UTS_RELEASE " " \
85839 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
85840 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
85841- MODULE_ARCH_VERMAGIC
85842+ MODULE_ARCH_VERMAGIC \
85843+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
85844+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
85845
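
Each feature flag contributes a tag to VERMAGIC_STRING, so a module built without the matching grsecurity/PaX configuration (or with a different RANDSTRUCT seed) fails the vermagic check at load time instead of corrupting a randomized or constified kernel. An illustrative result for an assumed configuration, not generated output:

    /* e.g.:  "3.19.1-grsec SMP mod_unload REFCOUNT CONSTIFY_PLUGIN "
     *         "STACKLEAK_PLUGIN GRSEC RANDSTRUCT_PLUGIN_<seed hash>"  */
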
85846diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
85847index b483abd..af305ad 100644
85848--- a/include/linux/vga_switcheroo.h
85849+++ b/include/linux/vga_switcheroo.h
85850@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
85851
85852 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
85853
85854-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
85855+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
85856 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
85857-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
85858+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
85859 #else
85860
85861 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
85862@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
85863
85864 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
85865
85866-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
85867+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
85868 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
85869-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
85870+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
85871
85872 #endif
85873 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
85874diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
85875index b87696f..1d11de7 100644
85876--- a/include/linux/vmalloc.h
85877+++ b/include/linux/vmalloc.h
85878@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
85879 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
85880 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
85881 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
85882+
85883+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85884+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
85885+#endif
85886+
85887 /* bits [20..32] reserved for arch specific ioremap internals */
85888
85889 /*
85890@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
85891 unsigned long flags, pgprot_t prot);
85892 extern void vunmap(const void *addr);
85893
85894+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85895+extern void unmap_process_stacks(struct task_struct *task);
85896+#endif
85897+
85898 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
85899 unsigned long uaddr, void *kaddr,
85900 unsigned long size);
85901@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
85902
85903 /* for /dev/kmem */
85904 extern long vread(char *buf, char *addr, unsigned long count);
85905-extern long vwrite(char *buf, char *addr, unsigned long count);
85906+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
85907
85908 /*
85909 * Internals. Dont't use..
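
Under KERNEXEC the vmalloc range is non-executable, so callers that genuinely need executable mappings must say so explicitly with VM_KERNEXEC. A sketch of the caller side, assuming x86 module-range bounds (the function and its use here are an illustration, not the patch's module loader):

    static struct vm_struct *alloc_exec_area(unsigned long size)
    {
            unsigned long flags = VM_ALLOC;

    #if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
            flags |= VM_KERNEXEC;        /* draw from the executable range */
    #endif
            return __get_vm_area(size, flags, MODULES_VADDR, MODULES_END);
    }
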
85910diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
85911index 82e7db7..f8ce3d0 100644
85912--- a/include/linux/vmstat.h
85913+++ b/include/linux/vmstat.h
85914@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
85915 /*
85916 * Zone based page accounting with per cpu differentials.
85917 */
85918-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85919+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85920
85921 static inline void zone_page_state_add(long x, struct zone *zone,
85922 enum zone_stat_item item)
85923 {
85924- atomic_long_add(x, &zone->vm_stat[item]);
85925- atomic_long_add(x, &vm_stat[item]);
85926+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
85927+ atomic_long_add_unchecked(x, &vm_stat[item]);
85928 }
85929
85930-static inline unsigned long global_page_state(enum zone_stat_item item)
85931+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
85932 {
85933- long x = atomic_long_read(&vm_stat[item]);
85934+ long x = atomic_long_read_unchecked(&vm_stat[item]);
85935 #ifdef CONFIG_SMP
85936 if (x < 0)
85937 x = 0;
85938@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
85939 return x;
85940 }
85941
85942-static inline unsigned long zone_page_state(struct zone *zone,
85943+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
85944 enum zone_stat_item item)
85945 {
85946- long x = atomic_long_read(&zone->vm_stat[item]);
85947+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
85948 #ifdef CONFIG_SMP
85949 if (x < 0)
85950 x = 0;
85951@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
85952 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
85953 enum zone_stat_item item)
85954 {
85955- long x = atomic_long_read(&zone->vm_stat[item]);
85956+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
85957
85958 #ifdef CONFIG_SMP
85959 int cpu;
85960@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
85961
85962 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
85963 {
85964- atomic_long_inc(&zone->vm_stat[item]);
85965- atomic_long_inc(&vm_stat[item]);
85966+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
85967+ atomic_long_inc_unchecked(&vm_stat[item]);
85968 }
85969
85970 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
85971 {
85972- atomic_long_dec(&zone->vm_stat[item]);
85973- atomic_long_dec(&vm_stat[item]);
85974+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
85975+ atomic_long_dec_unchecked(&vm_stat[item]);
85976 }
85977
85978 static inline void __inc_zone_page_state(struct page *page,
85979diff --git a/include/linux/xattr.h b/include/linux/xattr.h
85980index 91b0a68..0e9adf6 100644
85981--- a/include/linux/xattr.h
85982+++ b/include/linux/xattr.h
85983@@ -28,7 +28,7 @@ struct xattr_handler {
85984 size_t size, int handler_flags);
85985 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
85986 size_t size, int flags, int handler_flags);
85987-};
85988+} __do_const;
85989
85990 struct xattr {
85991 const char *name;
85992@@ -37,6 +37,9 @@ struct xattr {
85993 };
85994
85995 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
85996+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
85997+ssize_t pax_getxattr(struct dentry *, void *, size_t);
85998+#endif
85999 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
86000 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
86001 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
86002diff --git a/include/linux/zlib.h b/include/linux/zlib.h
86003index 92dbbd3..13ab0b3 100644
86004--- a/include/linux/zlib.h
86005+++ b/include/linux/zlib.h
86006@@ -31,6 +31,7 @@
86007 #define _ZLIB_H
86008
86009 #include <linux/zconf.h>
86010+#include <linux/compiler.h>
86011
86012 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
86013 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
86014@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
86015
86016 /* basic functions */
86017
86018-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
86019+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
86020 /*
86021 Returns the number of bytes that needs to be allocated for a per-
86022 stream workspace with the specified parameters. A pointer to this
86023diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
86024index eb76cfd..9fd0e7c 100644
86025--- a/include/media/v4l2-dev.h
86026+++ b/include/media/v4l2-dev.h
86027@@ -75,7 +75,7 @@ struct v4l2_file_operations {
86028 int (*mmap) (struct file *, struct vm_area_struct *);
86029 int (*open) (struct file *);
86030 int (*release) (struct file *);
86031-};
86032+} __do_const;
86033
86034 /*
86035 * Newer version of video_device, handled by videodev2.c
86036diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
86037index ffb69da..040393e 100644
86038--- a/include/media/v4l2-device.h
86039+++ b/include/media/v4l2-device.h
86040@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
86041 this function returns 0. If the name ends with a digit (e.g. cx18),
86042 then the name will be set to cx18-0 since cx180 looks really odd. */
86043 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
86044- atomic_t *instance);
86045+ atomic_unchecked_t *instance);
86046
86047 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
86048 Since the parent disappears this ensures that v4l2_dev doesn't have an
86049diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
86050index 2a25dec..bf6dd8a 100644
86051--- a/include/net/9p/transport.h
86052+++ b/include/net/9p/transport.h
86053@@ -62,7 +62,7 @@ struct p9_trans_module {
86054 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
86055 int (*zc_request)(struct p9_client *, struct p9_req_t *,
86056 char *, char *, int , int, int, int);
86057-};
86058+} __do_const;
86059
86060 void v9fs_register_trans(struct p9_trans_module *m);
86061 void v9fs_unregister_trans(struct p9_trans_module *m);
86062diff --git a/include/net/af_unix.h b/include/net/af_unix.h
86063index a175ba4..196eb8242 100644
86064--- a/include/net/af_unix.h
86065+++ b/include/net/af_unix.h
86066@@ -36,7 +36,7 @@ struct unix_skb_parms {
86067 u32 secid; /* Security ID */
86068 #endif
86069 u32 consumed;
86070-};
86071+} __randomize_layout;
86072
86073 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
86074 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
86075diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
86076index d1bb342..e12f7d2 100644
86077--- a/include/net/bluetooth/l2cap.h
86078+++ b/include/net/bluetooth/l2cap.h
86079@@ -608,7 +608,7 @@ struct l2cap_ops {
86080 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
86081 unsigned long hdr_len,
86082 unsigned long len, int nb);
86083-};
86084+} __do_const;
86085
86086 struct l2cap_conn {
86087 struct hci_conn *hcon;
86088diff --git a/include/net/bonding.h b/include/net/bonding.h
86089index 983a94b..7aa9b16 100644
86090--- a/include/net/bonding.h
86091+++ b/include/net/bonding.h
86092@@ -647,7 +647,7 @@ extern struct rtnl_link_ops bond_link_ops;
86093
86094 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
86095 {
86096- atomic_long_inc(&dev->tx_dropped);
86097+ atomic_long_inc_unchecked(&dev->tx_dropped);
86098 dev_kfree_skb_any(skb);
86099 }
86100
86101diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
86102index f2ae33d..c457cf0 100644
86103--- a/include/net/caif/cfctrl.h
86104+++ b/include/net/caif/cfctrl.h
86105@@ -52,7 +52,7 @@ struct cfctrl_rsp {
86106 void (*radioset_rsp)(void);
86107 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
86108 struct cflayer *client_layer);
86109-};
86110+} __no_const;
86111
86112 /* Link Setup Parameters for CAIF-Links. */
86113 struct cfctrl_link_param {
86114@@ -101,8 +101,8 @@ struct cfctrl_request_info {
86115 struct cfctrl {
86116 struct cfsrvl serv;
86117 struct cfctrl_rsp res;
86118- atomic_t req_seq_no;
86119- atomic_t rsp_seq_no;
86120+ atomic_unchecked_t req_seq_no;
86121+ atomic_unchecked_t rsp_seq_no;
86122 struct list_head list;
86123 /* Protects from simultaneous access to first_req list */
86124 spinlock_t info_list_lock;
86125diff --git a/include/net/flow.h b/include/net/flow.h
86126index 8109a15..504466d 100644
86127--- a/include/net/flow.h
86128+++ b/include/net/flow.h
86129@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
86130
86131 void flow_cache_flush(struct net *net);
86132 void flow_cache_flush_deferred(struct net *net);
86133-extern atomic_t flow_cache_genid;
86134+extern atomic_unchecked_t flow_cache_genid;
86135
86136 #endif
86137diff --git a/include/net/genetlink.h b/include/net/genetlink.h
86138index 6c92415..3a352d8 100644
86139--- a/include/net/genetlink.h
86140+++ b/include/net/genetlink.h
86141@@ -130,7 +130,7 @@ struct genl_ops {
86142 u8 cmd;
86143 u8 internal_flags;
86144 u8 flags;
86145-};
86146+} __do_const;
86147
86148 int __genl_register_family(struct genl_family *family);
86149
86150diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
86151index 734d9b5..48a9a4b 100644
86152--- a/include/net/gro_cells.h
86153+++ b/include/net/gro_cells.h
86154@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
86155 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
86156
86157 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
86158- atomic_long_inc(&dev->rx_dropped);
86159+ atomic_long_inc_unchecked(&dev->rx_dropped);
86160 kfree_skb(skb);
86161 return;
86162 }
86163diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
86164index 848e85c..051c7de 100644
86165--- a/include/net/inet_connection_sock.h
86166+++ b/include/net/inet_connection_sock.h
86167@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
86168 int (*bind_conflict)(const struct sock *sk,
86169 const struct inet_bind_bucket *tb, bool relax);
86170 void (*mtu_reduced)(struct sock *sk);
86171-};
86172+} __do_const;
86173
86174 /** inet_connection_sock - INET connection oriented sock
86175 *
86176diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
86177index 80479ab..0c3f647 100644
86178--- a/include/net/inetpeer.h
86179+++ b/include/net/inetpeer.h
86180@@ -47,7 +47,7 @@ struct inet_peer {
86181 */
86182 union {
86183 struct {
86184- atomic_t rid; /* Frag reception counter */
86185+ atomic_unchecked_t rid; /* Frag reception counter */
86186 };
86187 struct rcu_head rcu;
86188 struct inet_peer *gc_next;
86189diff --git a/include/net/ip.h b/include/net/ip.h
86190index 09cf5ae..ab62fcf 100644
86191--- a/include/net/ip.h
86192+++ b/include/net/ip.h
86193@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
86194 }
86195 }
86196
86197-u32 ip_idents_reserve(u32 hash, int segs);
86198+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
86199 void __ip_select_ident(struct iphdr *iph, int segs);
86200
86201 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
86202diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
86203index 09a819e..3ab9e14 100644
86204--- a/include/net/ip_fib.h
86205+++ b/include/net/ip_fib.h
86206@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
86207
86208 #define FIB_RES_SADDR(net, res) \
86209 ((FIB_RES_NH(res).nh_saddr_genid == \
86210- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
86211+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
86212 FIB_RES_NH(res).nh_saddr : \
86213 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
86214 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
86215diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
86216index 615b20b..fd4cbd8 100644
86217--- a/include/net/ip_vs.h
86218+++ b/include/net/ip_vs.h
86219@@ -534,7 +534,7 @@ struct ip_vs_conn {
86220 struct ip_vs_conn *control; /* Master control connection */
86221 atomic_t n_control; /* Number of controlled ones */
86222 struct ip_vs_dest *dest; /* real server */
86223- atomic_t in_pkts; /* incoming packet counter */
86224+ atomic_unchecked_t in_pkts; /* incoming packet counter */
86225
86226 /* Packet transmitter for different forwarding methods. If it
86227 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
86228@@ -682,7 +682,7 @@ struct ip_vs_dest {
86229 __be16 port; /* port number of the server */
86230 union nf_inet_addr addr; /* IP address of the server */
86231 volatile unsigned int flags; /* dest status flags */
86232- atomic_t conn_flags; /* flags to copy to conn */
86233+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
86234 atomic_t weight; /* server weight */
86235
86236 atomic_t refcnt; /* reference counter */
86237@@ -928,11 +928,11 @@ struct netns_ipvs {
86238 /* ip_vs_lblc */
86239 int sysctl_lblc_expiration;
86240 struct ctl_table_header *lblc_ctl_header;
86241- struct ctl_table *lblc_ctl_table;
86242+ ctl_table_no_const *lblc_ctl_table;
86243 /* ip_vs_lblcr */
86244 int sysctl_lblcr_expiration;
86245 struct ctl_table_header *lblcr_ctl_header;
86246- struct ctl_table *lblcr_ctl_table;
86247+ ctl_table_no_const *lblcr_ctl_table;
86248 /* ip_vs_est */
86249 struct list_head est_list; /* estimator list */
86250 spinlock_t est_lock;
86251diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
86252index 8d4f588..2e37ad2 100644
86253--- a/include/net/irda/ircomm_tty.h
86254+++ b/include/net/irda/ircomm_tty.h
86255@@ -33,6 +33,7 @@
86256 #include <linux/termios.h>
86257 #include <linux/timer.h>
86258 #include <linux/tty.h> /* struct tty_struct */
86259+#include <asm/local.h>
86260
86261 #include <net/irda/irias_object.h>
86262 #include <net/irda/ircomm_core.h>
86263diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
86264index 714cc9a..ea05f3e 100644
86265--- a/include/net/iucv/af_iucv.h
86266+++ b/include/net/iucv/af_iucv.h
86267@@ -149,7 +149,7 @@ struct iucv_skb_cb {
86268 struct iucv_sock_list {
86269 struct hlist_head head;
86270 rwlock_t lock;
86271- atomic_t autobind_name;
86272+ atomic_unchecked_t autobind_name;
86273 };
86274
86275 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
86276diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
86277index f3be818..bf46196 100644
86278--- a/include/net/llc_c_ac.h
86279+++ b/include/net/llc_c_ac.h
86280@@ -87,7 +87,7 @@
86281 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
86282 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
86283
86284-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86285+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86286
86287 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
86288 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
86289diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
86290index 3948cf1..83b28c4 100644
86291--- a/include/net/llc_c_ev.h
86292+++ b/include/net/llc_c_ev.h
86293@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
86294 return (struct llc_conn_state_ev *)skb->cb;
86295 }
86296
86297-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86298-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86299+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86300+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86301
86302 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
86303 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
86304diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
86305index 48f3f89..0e92c50 100644
86306--- a/include/net/llc_c_st.h
86307+++ b/include/net/llc_c_st.h
86308@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
86309 u8 next_state;
86310 const llc_conn_ev_qfyr_t *ev_qualifiers;
86311 const llc_conn_action_t *ev_actions;
86312-};
86313+} __do_const;
86314
86315 struct llc_conn_state {
86316 u8 current_state;
86317diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
86318index a61b98c..aade1eb 100644
86319--- a/include/net/llc_s_ac.h
86320+++ b/include/net/llc_s_ac.h
86321@@ -23,7 +23,7 @@
86322 #define SAP_ACT_TEST_IND 9
86323
86324 /* All action functions must look like this */
86325-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86326+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86327
86328 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
86329 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
86330diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
86331index c4359e2..76dbc4a 100644
86332--- a/include/net/llc_s_st.h
86333+++ b/include/net/llc_s_st.h
86334@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
86335 llc_sap_ev_t ev;
86336 u8 next_state;
86337 const llc_sap_action_t *ev_actions;
86338-};
86339+} __do_const;
86340
86341 struct llc_sap_state {
86342 u8 curr_state;
86343diff --git a/include/net/mac80211.h b/include/net/mac80211.h
86344index 29c7be8..746bd73 100644
86345--- a/include/net/mac80211.h
86346+++ b/include/net/mac80211.h
86347@@ -4869,7 +4869,7 @@ struct rate_control_ops {
86348 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
86349
86350 u32 (*get_expected_throughput)(void *priv_sta);
86351-};
86352+} __do_const;
86353
86354 static inline int rate_supported(struct ieee80211_sta *sta,
86355 enum ieee80211_band band,
86356diff --git a/include/net/neighbour.h b/include/net/neighbour.h
86357index 76f7084..8f36e39 100644
86358--- a/include/net/neighbour.h
86359+++ b/include/net/neighbour.h
86360@@ -163,7 +163,7 @@ struct neigh_ops {
86361 void (*error_report)(struct neighbour *, struct sk_buff *);
86362 int (*output)(struct neighbour *, struct sk_buff *);
86363 int (*connected_output)(struct neighbour *, struct sk_buff *);
86364-};
86365+} __do_const;
86366
86367 struct pneigh_entry {
86368 struct pneigh_entry *next;
86369@@ -217,7 +217,7 @@ struct neigh_table {
86370 struct neigh_statistics __percpu *stats;
86371 struct neigh_hash_table __rcu *nht;
86372 struct pneigh_entry **phash_buckets;
86373-};
86374+} __randomize_layout;
86375
86376 enum {
86377 NEIGH_ARP_TABLE = 0,
86378diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
86379index 2e8756b8..0bd0083 100644
86380--- a/include/net/net_namespace.h
86381+++ b/include/net/net_namespace.h
86382@@ -130,8 +130,8 @@ struct net {
86383 struct netns_ipvs *ipvs;
86384 #endif
86385 struct sock *diag_nlsk;
86386- atomic_t fnhe_genid;
86387-};
86388+ atomic_unchecked_t fnhe_genid;
86389+} __randomize_layout;
86390
86391 #include <linux/seq_file_net.h>
86392
86393@@ -287,7 +287,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
86394 #define __net_init __init
86395 #define __net_exit __exit_refok
86396 #define __net_initdata __initdata
86397+#ifdef CONSTIFY_PLUGIN
86398 #define __net_initconst __initconst
86399+#else
86400+#define __net_initconst __initdata
86401+#endif
86402 #endif
86403
86404 struct pernet_operations {
86405@@ -297,7 +301,7 @@ struct pernet_operations {
86406 void (*exit_batch)(struct list_head *net_exit_list);
86407 int *id;
86408 size_t size;
86409-};
86410+} __do_const;
86411
86412 /*
86413 * Use these carefully. If you implement a network device and it
86414@@ -345,12 +349,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
86415
86416 static inline int rt_genid_ipv4(struct net *net)
86417 {
86418- return atomic_read(&net->ipv4.rt_genid);
86419+ return atomic_read_unchecked(&net->ipv4.rt_genid);
86420 }
86421
86422 static inline void rt_genid_bump_ipv4(struct net *net)
86423 {
86424- atomic_inc(&net->ipv4.rt_genid);
86425+ atomic_inc_unchecked(&net->ipv4.rt_genid);
86426 }
86427
86428 extern void (*__fib6_flush_trees)(struct net *net);
86429@@ -377,12 +381,12 @@ static inline void rt_genid_bump_all(struct net *net)
86430
86431 static inline int fnhe_genid(struct net *net)
86432 {
86433- return atomic_read(&net->fnhe_genid);
86434+ return atomic_read_unchecked(&net->fnhe_genid);
86435 }
86436
86437 static inline void fnhe_genid_bump(struct net *net)
86438 {
86439- atomic_inc(&net->fnhe_genid);
86440+ atomic_inc_unchecked(&net->fnhe_genid);
86441 }
86442
86443 #endif /* __NET_NET_NAMESPACE_H */
86444diff --git a/include/net/netlink.h b/include/net/netlink.h
86445index 6415835..ab96d87 100644
86446--- a/include/net/netlink.h
86447+++ b/include/net/netlink.h
86448@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
86449 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
86450 {
86451 if (mark)
86452- skb_trim(skb, (unsigned char *) mark - skb->data);
86453+ skb_trim(skb, (const unsigned char *) mark - skb->data);
86454 }
86455
86456 /**
86457diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
86458index 29d6a94..235d3d84 100644
86459--- a/include/net/netns/conntrack.h
86460+++ b/include/net/netns/conntrack.h
86461@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
86462 struct nf_proto_net {
86463 #ifdef CONFIG_SYSCTL
86464 struct ctl_table_header *ctl_table_header;
86465- struct ctl_table *ctl_table;
86466+ ctl_table_no_const *ctl_table;
86467 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
86468 struct ctl_table_header *ctl_compat_header;
86469- struct ctl_table *ctl_compat_table;
86470+ ctl_table_no_const *ctl_compat_table;
86471 #endif
86472 #endif
86473 unsigned int users;
86474@@ -60,7 +60,7 @@ struct nf_ip_net {
86475 struct nf_icmp_net icmpv6;
86476 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
86477 struct ctl_table_header *ctl_table_header;
86478- struct ctl_table *ctl_table;
86479+ ctl_table_no_const *ctl_table;
86480 #endif
86481 };
86482
86483diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
86484index 0ffef1a..2ce1ceb 100644
86485--- a/include/net/netns/ipv4.h
86486+++ b/include/net/netns/ipv4.h
86487@@ -84,7 +84,7 @@ struct netns_ipv4 {
86488
86489 struct ping_group_range ping_group_range;
86490
86491- atomic_t dev_addr_genid;
86492+ atomic_unchecked_t dev_addr_genid;
86493
86494 #ifdef CONFIG_SYSCTL
86495 unsigned long *sysctl_local_reserved_ports;
86496@@ -98,6 +98,6 @@ struct netns_ipv4 {
86497 struct fib_rules_ops *mr_rules_ops;
86498 #endif
86499 #endif
86500- atomic_t rt_genid;
86501+ atomic_unchecked_t rt_genid;
86502 };
86503 #endif
86504diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
86505index 69ae41f..4f94868 100644
86506--- a/include/net/netns/ipv6.h
86507+++ b/include/net/netns/ipv6.h
86508@@ -75,8 +75,8 @@ struct netns_ipv6 {
86509 struct fib_rules_ops *mr6_rules_ops;
86510 #endif
86511 #endif
86512- atomic_t dev_addr_genid;
86513- atomic_t fib6_sernum;
86514+ atomic_unchecked_t dev_addr_genid;
86515+ atomic_unchecked_t fib6_sernum;
86516 };
86517
86518 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
86519diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
86520index 730d82a..045f2c4 100644
86521--- a/include/net/netns/xfrm.h
86522+++ b/include/net/netns/xfrm.h
86523@@ -78,7 +78,7 @@ struct netns_xfrm {
86524
86525 /* flow cache part */
86526 struct flow_cache flow_cache_global;
86527- atomic_t flow_cache_genid;
86528+ atomic_unchecked_t flow_cache_genid;
86529 struct list_head flow_cache_gc_list;
86530 spinlock_t flow_cache_gc_lock;
86531 struct work_struct flow_cache_gc_work;
86532diff --git a/include/net/ping.h b/include/net/ping.h
86533index f074060..830fba0 100644
86534--- a/include/net/ping.h
86535+++ b/include/net/ping.h
86536@@ -54,7 +54,7 @@ struct ping_iter_state {
86537
86538 extern struct proto ping_prot;
86539 #if IS_ENABLED(CONFIG_IPV6)
86540-extern struct pingv6_ops pingv6_ops;
86541+extern struct pingv6_ops *pingv6_ops;
86542 #endif
86543
86544 struct pingfakehdr {
86545diff --git a/include/net/protocol.h b/include/net/protocol.h
86546index d6fcc1f..ca277058 100644
86547--- a/include/net/protocol.h
86548+++ b/include/net/protocol.h
86549@@ -49,7 +49,7 @@ struct net_protocol {
86550 * socket lookup?
86551 */
86552 icmp_strict_tag_validation:1;
86553-};
86554+} __do_const;
86555
86556 #if IS_ENABLED(CONFIG_IPV6)
86557 struct inet6_protocol {
86558@@ -62,7 +62,7 @@ struct inet6_protocol {
86559 u8 type, u8 code, int offset,
86560 __be32 info);
86561 unsigned int flags; /* INET6_PROTO_xxx */
86562-};
86563+} __do_const;
86564
86565 #define INET6_PROTO_NOPOLICY 0x1
86566 #define INET6_PROTO_FINAL 0x2
86567diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
86568index e21b9f9..0191ef0 100644
86569--- a/include/net/rtnetlink.h
86570+++ b/include/net/rtnetlink.h
86571@@ -93,7 +93,7 @@ struct rtnl_link_ops {
86572 int (*fill_slave_info)(struct sk_buff *skb,
86573 const struct net_device *dev,
86574 const struct net_device *slave_dev);
86575-};
86576+} __do_const;
86577
86578 int __rtnl_link_register(struct rtnl_link_ops *ops);
86579 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
86580diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
86581index 4a5b9a3..ca27d73 100644
86582--- a/include/net/sctp/checksum.h
86583+++ b/include/net/sctp/checksum.h
86584@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
86585 unsigned int offset)
86586 {
86587 struct sctphdr *sh = sctp_hdr(skb);
86588- __le32 ret, old = sh->checksum;
86589- const struct skb_checksum_ops ops = {
86590+ __le32 ret, old = sh->checksum;
86591+ static const struct skb_checksum_ops ops = {
86592 .update = sctp_csum_update,
86593 .combine = sctp_csum_combine,
86594 };
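
Two changes ride together in sctp_compute_cksum(): the ops table gains static, so it is built once in .rodata rather than re-initialized on the stack at every call, and via the constify machinery it stays read-only. The same pattern fits any function-pointer table that never changes after build (callback names hypothetical):

    static const struct skb_checksum_ops my_csum_ops = {
            .update  = my_csum_update,
            .combine = my_csum_combine,
    };
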
86595diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
86596index 487ef34..d457f98 100644
86597--- a/include/net/sctp/sm.h
86598+++ b/include/net/sctp/sm.h
86599@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
86600 typedef struct {
86601 sctp_state_fn_t *fn;
86602 const char *name;
86603-} sctp_sm_table_entry_t;
86604+} __do_const sctp_sm_table_entry_t;
86605
86606 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
86607 * currently in use.
86608@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
86609 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
86610
86611 /* Extern declarations for major data structures. */
86612-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86613+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86614
86615
86616 /* Get the size of a DATA chunk payload. */
86617diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
86618index 2bb2fcf..d17c291 100644
86619--- a/include/net/sctp/structs.h
86620+++ b/include/net/sctp/structs.h
86621@@ -509,7 +509,7 @@ struct sctp_pf {
86622 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
86623 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
86624 struct sctp_af *af;
86625-};
86626+} __do_const;
86627
86628
86629 /* Structure to track chunk fragments that have been acked, but peer
86630diff --git a/include/net/sock.h b/include/net/sock.h
86631index 2210fec..2249ad0 100644
86632--- a/include/net/sock.h
86633+++ b/include/net/sock.h
86634@@ -362,7 +362,7 @@ struct sock {
86635 unsigned int sk_napi_id;
86636 unsigned int sk_ll_usec;
86637 #endif
86638- atomic_t sk_drops;
86639+ atomic_unchecked_t sk_drops;
86640 int sk_rcvbuf;
86641
86642 struct sk_filter __rcu *sk_filter;
86643@@ -1061,7 +1061,7 @@ struct proto {
86644 void (*destroy_cgroup)(struct mem_cgroup *memcg);
86645 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
86646 #endif
86647-};
86648+} __randomize_layout;
86649
86650 /*
86651 * Bits in struct cg_proto.flags
86652@@ -1239,7 +1239,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
86653 page_counter_uncharge(&prot->memory_allocated, amt);
86654 }
86655
86656-static inline long
86657+static inline long __intentional_overflow(-1)
86658 sk_memory_allocated(const struct sock *sk)
86659 {
86660 struct proto *prot = sk->sk_prot;
86661@@ -1385,7 +1385,7 @@ struct sock_iocb {
86662 struct scm_cookie *scm;
86663 struct msghdr *msg, async_msg;
86664 struct kiocb *kiocb;
86665-};
86666+} __randomize_layout;
86667
86668 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
86669 {
86670@@ -1826,7 +1826,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
86671 }
86672
86673 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
86674- char __user *from, char *to,
86675+ char __user *from, unsigned char *to,
86676 int copy, int offset)
86677 {
86678 if (skb->ip_summed == CHECKSUM_NONE) {
86679@@ -2075,7 +2075,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
86680 }
86681 }
86682
86683-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86684+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86685
86686 /**
86687 * sk_page_frag - return an appropriate page_frag
86688diff --git a/include/net/tcp.h b/include/net/tcp.h
86689index 9d9111e..349c847 100644
86690--- a/include/net/tcp.h
86691+++ b/include/net/tcp.h
86692@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
86693 void tcp_xmit_retransmit_queue(struct sock *);
86694 void tcp_simple_retransmit(struct sock *);
86695 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
86696-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86697+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86698
86699 void tcp_send_probe0(struct sock *);
86700 void tcp_send_partial(struct sock *);
86701@@ -689,8 +689,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
86702 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
86703 */
86704 struct tcp_skb_cb {
86705- __u32 seq; /* Starting sequence number */
86706- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
86707+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
86708+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
86709 union {
86710 /* Note : tcp_tw_isn is used in input path only
86711 * (isn chosen by tcp_timewait_state_process())
86712@@ -715,7 +715,7 @@ struct tcp_skb_cb {
86713
86714 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
86715 /* 1 byte hole */
86716- __u32 ack_seq; /* Sequence number ACK'd */
86717+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
86718 union {
86719 struct inet_skb_parm h4;
86720 #if IS_ENABLED(CONFIG_IPV6)
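
The __intentional_overflow(0) markings on seq, end_seq and ack_seq tell the size_overflow plugin not to instrument these fields: TCP sequence arithmetic is defined modulo 2^32, so wraparound is the intended behaviour. The kernel's classic wraparound-safe comparison (shown with stdint types so the sketch is self-contained) relies on exactly that:

#include <stdint.h>

/* true iff seq1 precedes seq2, valid across 2^32 wraparound */
static inline int before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}
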
86721diff --git a/include/net/xfrm.h b/include/net/xfrm.h
86722index dc4865e..152ee4c 100644
86723--- a/include/net/xfrm.h
86724+++ b/include/net/xfrm.h
86725@@ -285,7 +285,6 @@ struct xfrm_dst;
86726 struct xfrm_policy_afinfo {
86727 unsigned short family;
86728 struct dst_ops *dst_ops;
86729- void (*garbage_collect)(struct net *net);
86730 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
86731 const xfrm_address_t *saddr,
86732 const xfrm_address_t *daddr);
86733@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
86734 struct net_device *dev,
86735 const struct flowi *fl);
86736 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
86737-};
86738+} __do_const;
86739
86740 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
86741 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
86742@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
86743 int (*transport_finish)(struct sk_buff *skb,
86744 int async);
86745 void (*local_error)(struct sk_buff *skb, u32 mtu);
86746-};
86747+} __do_const;
86748
86749 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
86750 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
86751@@ -437,7 +436,7 @@ struct xfrm_mode {
86752 struct module *owner;
86753 unsigned int encap;
86754 int flags;
86755-};
86756+} __do_const;
86757
86758 /* Flags for xfrm_mode. */
86759 enum {
86760@@ -534,7 +533,7 @@ struct xfrm_policy {
86761 struct timer_list timer;
86762
86763 struct flow_cache_object flo;
86764- atomic_t genid;
86765+ atomic_unchecked_t genid;
86766 u32 priority;
86767 u32 index;
86768 struct xfrm_mark mark;
86769@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
86770 }
86771
86772 void xfrm_garbage_collect(struct net *net);
86773+void xfrm_garbage_collect_deferred(struct net *net);
86774
86775 #else
86776
86777@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
86778 static inline void xfrm_garbage_collect(struct net *net)
86779 {
86780 }
86781+static inline void xfrm_garbage_collect_deferred(struct net *net)
86782+{
86783+}
86784 #endif
86785
86786 static __inline__
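
The __do_const markings feed the constify plugin: ops tables whose function pointers are fixed at build time are forced into .rodata so they cannot be redirected at runtime, and dropping the garbage_collect callback (replaced by the exported xfrm_garbage_collect_deferred()) is what makes xfrm_policy_afinfo constifiable in the first place. The effect is the familiar const-ops pattern; example_afinfo below is an invented type for illustration:

struct example_afinfo {
	unsigned short family;
	int (*output)(void *skb);
};

static int example_output(void *skb)
{
	(void)skb;
	return 0;
}

/* const placement: the table lands in .rodata and stays immutable */
static const struct example_afinfo example_ipv4_afinfo = {
	.family	= 2,	/* AF_INET */
	.output	= example_output,
};
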
86787diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
86788index 1017e0b..227aa4d 100644
86789--- a/include/rdma/iw_cm.h
86790+++ b/include/rdma/iw_cm.h
86791@@ -122,7 +122,7 @@ struct iw_cm_verbs {
86792 int backlog);
86793
86794 int (*destroy_listen)(struct iw_cm_id *cm_id);
86795-};
86796+} __no_const;
86797
86798 /**
86799 * iw_create_cm_id - Create an IW CM identifier.
86800diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
86801index 93d14da..734b3d8 100644
86802--- a/include/scsi/libfc.h
86803+++ b/include/scsi/libfc.h
86804@@ -771,6 +771,7 @@ struct libfc_function_template {
86805 */
86806 void (*disc_stop_final) (struct fc_lport *);
86807 };
86808+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
86809
86810 /**
86811 * struct fc_disc - Discovery context
86812@@ -875,7 +876,7 @@ struct fc_lport {
86813 struct fc_vport *vport;
86814
86815 /* Operational Information */
86816- struct libfc_function_template tt;
86817+ libfc_function_template_no_const tt;
86818 u8 link_up;
86819 u8 qfull;
86820 enum fc_lport_state state;
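
libfc shows the opposite case: fc_lport copies the template into its tt field and patches entries at runtime, so that one embedded instance must stay writable. The __no_const typedef opts a single use out of constification while the template type itself stays const-eligible. Mechanically, assuming the annotation compiles away when the plugin is off:

#ifndef __no_const
#define __no_const	/* plugin disabled: annotation compiles away */
#endif

struct ops_template {
	void (*hook)(void);
};

typedef struct ops_template __no_const ops_template_no_const;

struct consumer {
	ops_template_no_const tt;	/* writable copy, filled in at runtime */
};
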
86821diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
86822index 3a4edd1..feb2e3e 100644
86823--- a/include/scsi/scsi_device.h
86824+++ b/include/scsi/scsi_device.h
86825@@ -185,9 +185,9 @@ struct scsi_device {
86826 unsigned int max_device_blocked; /* what device_blocked counts down from */
86827 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
86828
86829- atomic_t iorequest_cnt;
86830- atomic_t iodone_cnt;
86831- atomic_t ioerr_cnt;
86832+ atomic_unchecked_t iorequest_cnt;
86833+ atomic_unchecked_t iodone_cnt;
86834+ atomic_unchecked_t ioerr_cnt;
86835
86836 struct device sdev_gendev,
86837 sdev_dev;
86838diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
86839index 007a0bc..7188db8 100644
86840--- a/include/scsi/scsi_transport_fc.h
86841+++ b/include/scsi/scsi_transport_fc.h
86842@@ -756,7 +756,8 @@ struct fc_function_template {
86843 unsigned long show_host_system_hostname:1;
86844
86845 unsigned long disable_target_scan:1;
86846-};
86847+} __do_const;
86848+typedef struct fc_function_template __no_const fc_function_template_no_const;
86849
86850
86851 /**
86852diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
86853index 396e8f7..b037e89 100644
86854--- a/include/sound/compress_driver.h
86855+++ b/include/sound/compress_driver.h
86856@@ -129,7 +129,7 @@ struct snd_compr_ops {
86857 struct snd_compr_caps *caps);
86858 int (*get_codec_caps) (struct snd_compr_stream *stream,
86859 struct snd_compr_codec_caps *codec);
86860-};
86861+} __no_const;
86862
86863 /**
86864 * struct snd_compr: Compressed device
86865diff --git a/include/sound/soc.h b/include/sound/soc.h
86866index ac8b333..59c3692 100644
86867--- a/include/sound/soc.h
86868+++ b/include/sound/soc.h
86869@@ -853,7 +853,7 @@ struct snd_soc_codec_driver {
86870 enum snd_soc_dapm_type, int);
86871
86872 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
86873-};
86874+} __do_const;
86875
86876 /* SoC platform interface */
86877 struct snd_soc_platform_driver {
86878@@ -880,7 +880,7 @@ struct snd_soc_platform_driver {
86879 const struct snd_compr_ops *compr_ops;
86880
86881 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
86882-};
86883+} __do_const;
86884
86885 struct snd_soc_dai_link_component {
86886 const char *name;
86887diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
86888index 4a8795a..53d8119 100644
86889--- a/include/target/target_core_base.h
86890+++ b/include/target/target_core_base.h
86891@@ -767,7 +767,7 @@ struct se_device {
86892 atomic_long_t write_bytes;
86893 /* Active commands on this virtual SE device */
86894 atomic_t simple_cmds;
86895- atomic_t dev_ordered_id;
86896+ atomic_unchecked_t dev_ordered_id;
86897 atomic_t dev_ordered_sync;
86898 atomic_t dev_qf_count;
86899 int export_count;
86900diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
86901new file mode 100644
86902index 0000000..fb634b7
86903--- /dev/null
86904+++ b/include/trace/events/fs.h
86905@@ -0,0 +1,53 @@
86906+#undef TRACE_SYSTEM
86907+#define TRACE_SYSTEM fs
86908+
86909+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
86910+#define _TRACE_FS_H
86911+
86912+#include <linux/fs.h>
86913+#include <linux/tracepoint.h>
86914+
86915+TRACE_EVENT(do_sys_open,
86916+
86917+ TP_PROTO(const char *filename, int flags, int mode),
86918+
86919+ TP_ARGS(filename, flags, mode),
86920+
86921+ TP_STRUCT__entry(
86922+ __string( filename, filename )
86923+ __field( int, flags )
86924+ __field( int, mode )
86925+ ),
86926+
86927+ TP_fast_assign(
86928+ __assign_str(filename, filename);
86929+ __entry->flags = flags;
86930+ __entry->mode = mode;
86931+ ),
86932+
86933+ TP_printk("\"%s\" %x %o",
86934+ __get_str(filename), __entry->flags, __entry->mode)
86935+);
86936+
86937+TRACE_EVENT(open_exec,
86938+
86939+ TP_PROTO(const char *filename),
86940+
86941+ TP_ARGS(filename),
86942+
86943+ TP_STRUCT__entry(
86944+ __string( filename, filename )
86945+ ),
86946+
86947+ TP_fast_assign(
86948+ __assign_str(filename, filename);
86949+ ),
86950+
86951+ TP_printk("\"%s\"",
86952+ __get_str(filename))
86953+);
86954+
86955+#endif /* _TRACE_FS_H */
86956+
86957+/* This part must be outside protection */
86958+#include <trace/define_trace.h>
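
trace/events/fs.h is a new header rather than a modification; the two tracepoints it defines are consumed elsewhere in the patch. TRACE_EVENT(do_sys_open, ...) generates a trace_do_sys_open() helper, and the header must be expanded once with CREATE_TRACE_POINTS defined. A kernel-context sketch of a call site; example_do_sys_open is an invented wrapper:

/* in exactly one compilation unit: */
#define CREATE_TRACE_POINTS
#include <trace/events/fs.h>

static long example_do_sys_open(const char *filename, int flags, int mode)
{
	trace_do_sys_open(filename, flags, mode);	/* fires the tracepoint */
	return 0;
}
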
86959diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
86960index 3608beb..df39d8a 100644
86961--- a/include/trace/events/irq.h
86962+++ b/include/trace/events/irq.h
86963@@ -36,7 +36,7 @@ struct softirq_action;
86964 */
86965 TRACE_EVENT(irq_handler_entry,
86966
86967- TP_PROTO(int irq, struct irqaction *action),
86968+ TP_PROTO(int irq, const struct irqaction *action),
86969
86970 TP_ARGS(irq, action),
86971
86972@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
86973 */
86974 TRACE_EVENT(irq_handler_exit,
86975
86976- TP_PROTO(int irq, struct irqaction *action, int ret),
86977+ TP_PROTO(int irq, const struct irqaction *action, int ret),
86978
86979 TP_ARGS(irq, action, ret),
86980
86981diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
86982index 7caf44c..23c6f27 100644
86983--- a/include/uapi/linux/a.out.h
86984+++ b/include/uapi/linux/a.out.h
86985@@ -39,6 +39,14 @@ enum machine_type {
86986 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
86987 };
86988
86989+/* Constants for the N_FLAGS field */
86990+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
86991+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
86992+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
86993+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
86994+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
86995+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
86996+
86997 #if !defined (N_MAGIC)
86998 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
86999 #endif
87000diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
87001index 22b6ad3..aeba37e 100644
87002--- a/include/uapi/linux/bcache.h
87003+++ b/include/uapi/linux/bcache.h
87004@@ -5,6 +5,7 @@
87005 * Bcache on disk data structures
87006 */
87007
87008+#include <linux/compiler.h>
87009 #include <asm/types.h>
87010
87011 #define BITMASK(name, type, field, offset, size) \
87012@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
87013 /* Btree keys - all units are in sectors */
87014
87015 struct bkey {
87016- __u64 high;
87017- __u64 low;
87018+ __u64 high __intentional_overflow(-1);
87019+ __u64 low __intentional_overflow(-1);
87020 __u64 ptr[];
87021 };
87022
87023diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
87024index d876736..ccce5c0 100644
87025--- a/include/uapi/linux/byteorder/little_endian.h
87026+++ b/include/uapi/linux/byteorder/little_endian.h
87027@@ -42,51 +42,51 @@
87028
87029 static inline __le64 __cpu_to_le64p(const __u64 *p)
87030 {
87031- return (__force __le64)*p;
87032+ return (__force const __le64)*p;
87033 }
87034-static inline __u64 __le64_to_cpup(const __le64 *p)
87035+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
87036 {
87037- return (__force __u64)*p;
87038+ return (__force const __u64)*p;
87039 }
87040 static inline __le32 __cpu_to_le32p(const __u32 *p)
87041 {
87042- return (__force __le32)*p;
87043+ return (__force const __le32)*p;
87044 }
87045 static inline __u32 __le32_to_cpup(const __le32 *p)
87046 {
87047- return (__force __u32)*p;
87048+ return (__force const __u32)*p;
87049 }
87050 static inline __le16 __cpu_to_le16p(const __u16 *p)
87051 {
87052- return (__force __le16)*p;
87053+ return (__force const __le16)*p;
87054 }
87055 static inline __u16 __le16_to_cpup(const __le16 *p)
87056 {
87057- return (__force __u16)*p;
87058+ return (__force const __u16)*p;
87059 }
87060 static inline __be64 __cpu_to_be64p(const __u64 *p)
87061 {
87062- return (__force __be64)__swab64p(p);
87063+ return (__force const __be64)__swab64p(p);
87064 }
87065 static inline __u64 __be64_to_cpup(const __be64 *p)
87066 {
87067- return __swab64p((__u64 *)p);
87068+ return __swab64p((const __u64 *)p);
87069 }
87070 static inline __be32 __cpu_to_be32p(const __u32 *p)
87071 {
87072- return (__force __be32)__swab32p(p);
87073+ return (__force const __be32)__swab32p(p);
87074 }
87075-static inline __u32 __be32_to_cpup(const __be32 *p)
87076+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
87077 {
87078- return __swab32p((__u32 *)p);
87079+ return __swab32p((const __u32 *)p);
87080 }
87081 static inline __be16 __cpu_to_be16p(const __u16 *p)
87082 {
87083- return (__force __be16)__swab16p(p);
87084+ return (__force const __be16)__swab16p(p);
87085 }
87086 static inline __u16 __be16_to_cpup(const __be16 *p)
87087 {
87088- return __swab16p((__u16 *)p);
87089+ return __swab16p((const __u16 *)p);
87090 }
87091 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
87092 #define __le64_to_cpus(x) do { (void)(x); } while (0)
87093diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
87094index 71e1d0e..6cc9caf 100644
87095--- a/include/uapi/linux/elf.h
87096+++ b/include/uapi/linux/elf.h
87097@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
87098 #define PT_GNU_EH_FRAME 0x6474e550
87099
87100 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
87101+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
87102+
87103+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
87104+
87105+/* Constants for the e_flags field */
87106+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87107+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
87108+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
87109+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
87110+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87111+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87112
87113 /*
87114 * Extended Numbering
87115@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
87116 #define DT_DEBUG 21
87117 #define DT_TEXTREL 22
87118 #define DT_JMPREL 23
87119+#define DT_FLAGS 30
87120+ #define DF_TEXTREL 0x00000004
87121 #define DT_ENCODING 32
87122 #define OLD_DT_LOOS 0x60000000
87123 #define DT_LOOS 0x6000000d
87124@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
87125 #define PF_W 0x2
87126 #define PF_X 0x1
87127
87128+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
87129+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
87130+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
87131+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
87132+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
87133+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
87134+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
87135+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
87136+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
87137+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
87138+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
87139+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
87140+
87141 typedef struct elf32_phdr{
87142 Elf32_Word p_type;
87143 Elf32_Off p_offset;
87144@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
87145 #define EI_OSABI 7
87146 #define EI_PAD 8
87147
87148+#define EI_PAX 14
87149+
87150 #define ELFMAG0 0x7f /* EI_MAG */
87151 #define ELFMAG1 'E'
87152 #define ELFMAG2 'L'
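
PT_PAX_FLAGS gives the ELF loader a dedicated program header for per-binary PaX policy, with paired PF_*/PF_NO* bits so enable and disable are each explicit (soft mode, added later in this patch, can then choose a default for unmarked binaries). A hypothetical userspace-style check; the helper name is invented and the constants are restated from this header:

#include <elf.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS	(0x60000000 + 0x5041580)	/* PT_LOOS + 0x5041580 */
#endif
#define PF_MPROTECT	(1U << 8)
#define PF_NOMPROTECT	(1U << 9)

static int example_pax_mprotect_wanted(const Elf64_Phdr *phdr)
{
	if (phdr->p_type != PT_PAX_FLAGS)
		return -1;			/* not the PaX header */
	if (phdr->p_flags & PF_NOMPROTECT)
		return 0;			/* explicitly disabled */
	return !!(phdr->p_flags & PF_MPROTECT);
}
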
87153diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
87154index aa169c4..6a2771d 100644
87155--- a/include/uapi/linux/personality.h
87156+++ b/include/uapi/linux/personality.h
87157@@ -30,6 +30,7 @@ enum {
87158 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87159 ADDR_NO_RANDOMIZE | \
87160 ADDR_COMPAT_LAYOUT | \
87161+ ADDR_LIMIT_3GB | \
87162 MMAP_PAGE_ZERO)
87163
87164 /*
87165diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
87166index 7530e74..e714828 100644
87167--- a/include/uapi/linux/screen_info.h
87168+++ b/include/uapi/linux/screen_info.h
87169@@ -43,7 +43,8 @@ struct screen_info {
87170 __u16 pages; /* 0x32 */
87171 __u16 vesa_attributes; /* 0x34 */
87172 __u32 capabilities; /* 0x36 */
87173- __u8 _reserved[6]; /* 0x3a */
87174+ __u16 vesapm_size; /* 0x3a */
87175+ __u8 _reserved[4]; /* 0x3c */
87176 } __attribute__((packed));
87177
87178 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
87179diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
87180index 0e011eb..82681b1 100644
87181--- a/include/uapi/linux/swab.h
87182+++ b/include/uapi/linux/swab.h
87183@@ -43,7 +43,7 @@
87184 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
87185 */
87186
87187-static inline __attribute_const__ __u16 __fswab16(__u16 val)
87188+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
87189 {
87190 #ifdef __HAVE_BUILTIN_BSWAP16__
87191 return __builtin_bswap16(val);
87192@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
87193 #endif
87194 }
87195
87196-static inline __attribute_const__ __u32 __fswab32(__u32 val)
87197+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
87198 {
87199 #ifdef __HAVE_BUILTIN_BSWAP32__
87200 return __builtin_bswap32(val);
87201@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
87202 #endif
87203 }
87204
87205-static inline __attribute_const__ __u64 __fswab64(__u64 val)
87206+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
87207 {
87208 #ifdef __HAVE_BUILTIN_BSWAP64__
87209 return __builtin_bswap64(val);
87210diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
87211index 1590c49..5eab462 100644
87212--- a/include/uapi/linux/xattr.h
87213+++ b/include/uapi/linux/xattr.h
87214@@ -73,5 +73,9 @@
87215 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
87216 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
87217
87218+/* User namespace */
87219+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
87220+#define XATTR_PAX_FLAGS_SUFFIX "flags"
87221+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
87222
87223 #endif /* _UAPI_LINUX_XATTR_H */
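
XATTR_NAME_PAX_FLAGS expands to "user.pax.flags", the extended attribute PaX can consult as an alternative to ELF header marking. A self-contained userspace sketch that reads the attribute back with getxattr(2):

#include <sys/types.h>
#include <sys/xattr.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	char buf[32];
	ssize_t n;

	if (argc < 2)
		return 1;
	n = getxattr(argv[1], "user.pax.flags", buf, sizeof(buf) - 1);
	if (n < 0) {
		perror("getxattr");
		return 1;
	}
	buf[n] = '\0';
	printf("pax flags: %s\n", buf);	/* flag letters set by pax tooling */
	return 0;
}
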
87224diff --git a/include/video/udlfb.h b/include/video/udlfb.h
87225index f9466fa..f4e2b81 100644
87226--- a/include/video/udlfb.h
87227+++ b/include/video/udlfb.h
87228@@ -53,10 +53,10 @@ struct dlfb_data {
87229 u32 pseudo_palette[256];
87230 int blank_mode; /*one of FB_BLANK_ */
87231 /* blit-only rendering path metrics, exposed through sysfs */
87232- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87233- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
87234- atomic_t bytes_sent; /* to usb, after compression including overhead */
87235- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
87236+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87237+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
87238+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
87239+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
87240 };
87241
87242 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
87243diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
87244index 30f5362..8ed8ac9 100644
87245--- a/include/video/uvesafb.h
87246+++ b/include/video/uvesafb.h
87247@@ -122,6 +122,7 @@ struct uvesafb_par {
87248 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
87249 u8 pmi_setpal; /* PMI for palette changes */
87250 u16 *pmi_base; /* protected mode interface location */
87251+ u8 *pmi_code; /* protected mode code location */
87252 void *pmi_start;
87253 void *pmi_pal;
87254 u8 *vbe_state_orig; /*
87255diff --git a/init/Kconfig b/init/Kconfig
87256index 9afb971..27d6fca 100644
87257--- a/init/Kconfig
87258+++ b/init/Kconfig
87259@@ -1129,6 +1129,7 @@ endif # CGROUPS
87260
87261 config CHECKPOINT_RESTORE
87262 bool "Checkpoint/restore support" if EXPERT
87263+ depends on !GRKERNSEC
87264 default n
87265 help
87266 Enables additional kernel features in a sake of checkpoint/restore.
87267@@ -1654,7 +1655,7 @@ config SLUB_DEBUG
87268
87269 config COMPAT_BRK
87270 bool "Disable heap randomization"
87271- default y
87272+ default n
87273 help
87274 Randomizing heap placement makes heap exploits harder, but it
87275 also breaks ancient binaries (including anything libc5 based).
87276@@ -1985,7 +1986,7 @@ config INIT_ALL_POSSIBLE
87277 config STOP_MACHINE
87278 bool
87279 default y
87280- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
87281+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
87282 help
87283 Need stop_machine() primitive.
87284
87285diff --git a/init/Makefile b/init/Makefile
87286index 7bc47ee..6da2dc7 100644
87287--- a/init/Makefile
87288+++ b/init/Makefile
87289@@ -2,6 +2,9 @@
87290 # Makefile for the linux kernel.
87291 #
87292
87293+ccflags-y := $(GCC_PLUGINS_CFLAGS)
87294+asflags-y := $(GCC_PLUGINS_AFLAGS)
87295+
87296 obj-y := main.o version.o mounts.o
87297 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
87298 obj-y += noinitramfs.o
87299diff --git a/init/do_mounts.c b/init/do_mounts.c
87300index eb41008..f5dbbf9 100644
87301--- a/init/do_mounts.c
87302+++ b/init/do_mounts.c
87303@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
87304 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
87305 {
87306 struct super_block *s;
87307- int err = sys_mount(name, "/root", fs, flags, data);
87308+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
87309 if (err)
87310 return err;
87311
87312- sys_chdir("/root");
87313+ sys_chdir((const char __force_user *)"/root");
87314 s = current->fs->pwd.dentry->d_sb;
87315 ROOT_DEV = s->s_dev;
87316 printk(KERN_INFO
87317@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
87318 va_start(args, fmt);
87319 vsprintf(buf, fmt, args);
87320 va_end(args);
87321- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
87322+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
87323 if (fd >= 0) {
87324 sys_ioctl(fd, FDEJECT, 0);
87325 sys_close(fd);
87326 }
87327 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
87328- fd = sys_open("/dev/console", O_RDWR, 0);
87329+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
87330 if (fd >= 0) {
87331 sys_ioctl(fd, TCGETS, (long)&termios);
87332 termios.c_lflag &= ~ICANON;
87333 sys_ioctl(fd, TCSETSF, (long)&termios);
87334- sys_read(fd, &c, 1);
87335+ sys_read(fd, (char __user *)&c, 1);
87336 termios.c_lflag |= ICANON;
87337 sys_ioctl(fd, TCSETSF, (long)&termios);
87338 sys_close(fd);
87339@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
87340 mount_root();
87341 out:
87342 devtmpfs_mount("dev");
87343- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87344- sys_chroot(".");
87345+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87346+ sys_chroot((const char __force_user *)".");
87347 }
87348
87349 static bool is_tmpfs;
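
The do_mounts changes are the bulk pattern for early-boot code under UDEREF: sys_mount() and friends take __user pointers, but here the kernel passes its own strings, so each call site gains a __force_user cast to satisfy the address-space checking without a copy. A sketch of the annotations involved, assuming the usual sparse-style definitions; the combined __force_user form is an assumption restated from the patch's compiler.h conventions:

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user	__force __user

static long example_sys_unlink(const char __user *pathname)
{
	(void)pathname;		/* stub standing in for the real syscall */
	return 0;
}

static void example_boot_cleanup(void)
{
	/* kernel string, deliberately cast into the user address space */
	example_sys_unlink((const char __force_user *)"/initrd.image");
}
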
87350diff --git a/init/do_mounts.h b/init/do_mounts.h
87351index f5b978a..69dbfe8 100644
87352--- a/init/do_mounts.h
87353+++ b/init/do_mounts.h
87354@@ -15,15 +15,15 @@ extern int root_mountflags;
87355
87356 static inline int create_dev(char *name, dev_t dev)
87357 {
87358- sys_unlink(name);
87359- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
87360+ sys_unlink((char __force_user *)name);
87361+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
87362 }
87363
87364 #if BITS_PER_LONG == 32
87365 static inline u32 bstat(char *name)
87366 {
87367 struct stat64 stat;
87368- if (sys_stat64(name, &stat) != 0)
87369+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
87370 return 0;
87371 if (!S_ISBLK(stat.st_mode))
87372 return 0;
87373@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
87374 static inline u32 bstat(char *name)
87375 {
87376 struct stat stat;
87377- if (sys_newstat(name, &stat) != 0)
87378+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
87379 return 0;
87380 if (!S_ISBLK(stat.st_mode))
87381 return 0;
87382diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
87383index 3e0878e..8a9d7a0 100644
87384--- a/init/do_mounts_initrd.c
87385+++ b/init/do_mounts_initrd.c
87386@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
87387 {
87388 sys_unshare(CLONE_FS | CLONE_FILES);
87389 /* stdin/stdout/stderr for /linuxrc */
87390- sys_open("/dev/console", O_RDWR, 0);
87391+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
87392 sys_dup(0);
87393 sys_dup(0);
87394 /* move initrd over / and chdir/chroot in initrd root */
87395- sys_chdir("/root");
87396- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87397- sys_chroot(".");
87398+ sys_chdir((const char __force_user *)"/root");
87399+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87400+ sys_chroot((const char __force_user *)".");
87401 sys_setsid();
87402 return 0;
87403 }
87404@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
87405 create_dev("/dev/root.old", Root_RAM0);
87406 /* mount initrd on rootfs' /root */
87407 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
87408- sys_mkdir("/old", 0700);
87409- sys_chdir("/old");
87410+ sys_mkdir((const char __force_user *)"/old", 0700);
87411+ sys_chdir((const char __force_user *)"/old");
87412
87413 /* try loading default modules from initrd */
87414 load_default_modules();
87415@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
87416 current->flags &= ~PF_FREEZER_SKIP;
87417
87418 /* move initrd to rootfs' /old */
87419- sys_mount("..", ".", NULL, MS_MOVE, NULL);
87420+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
87421 /* switch root and cwd back to / of rootfs */
87422- sys_chroot("..");
87423+ sys_chroot((const char __force_user *)"..");
87424
87425 if (new_decode_dev(real_root_dev) == Root_RAM0) {
87426- sys_chdir("/old");
87427+ sys_chdir((const char __force_user *)"/old");
87428 return;
87429 }
87430
87431- sys_chdir("/");
87432+ sys_chdir((const char __force_user *)"/");
87433 ROOT_DEV = new_decode_dev(real_root_dev);
87434 mount_root();
87435
87436 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
87437- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
87438+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
87439 if (!error)
87440 printk("okay\n");
87441 else {
87442- int fd = sys_open("/dev/root.old", O_RDWR, 0);
87443+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
87444 if (error == -ENOENT)
87445 printk("/initrd does not exist. Ignored.\n");
87446 else
87447 printk("failed\n");
87448 printk(KERN_NOTICE "Unmounting old root\n");
87449- sys_umount("/old", MNT_DETACH);
87450+ sys_umount((char __force_user *)"/old", MNT_DETACH);
87451 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
87452 if (fd < 0) {
87453 error = fd;
87454@@ -127,11 +127,11 @@ int __init initrd_load(void)
87455 * mounted in the normal path.
87456 */
87457 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
87458- sys_unlink("/initrd.image");
87459+ sys_unlink((const char __force_user *)"/initrd.image");
87460 handle_initrd();
87461 return 1;
87462 }
87463 }
87464- sys_unlink("/initrd.image");
87465+ sys_unlink((const char __force_user *)"/initrd.image");
87466 return 0;
87467 }
87468diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
87469index 8cb6db5..d729f50 100644
87470--- a/init/do_mounts_md.c
87471+++ b/init/do_mounts_md.c
87472@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
87473 partitioned ? "_d" : "", minor,
87474 md_setup_args[ent].device_names);
87475
87476- fd = sys_open(name, 0, 0);
87477+ fd = sys_open((char __force_user *)name, 0, 0);
87478 if (fd < 0) {
87479 printk(KERN_ERR "md: open failed - cannot start "
87480 "array %s\n", name);
87481@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
87482 * array without it
87483 */
87484 sys_close(fd);
87485- fd = sys_open(name, 0, 0);
87486+ fd = sys_open((char __force_user *)name, 0, 0);
87487 sys_ioctl(fd, BLKRRPART, 0);
87488 }
87489 sys_close(fd);
87490@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
87491
87492 wait_for_device_probe();
87493
87494- fd = sys_open("/dev/md0", 0, 0);
87495+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
87496 if (fd >= 0) {
87497 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
87498 sys_close(fd);
87499diff --git a/init/init_task.c b/init/init_task.c
87500index ba0a7f36..2bcf1d5 100644
87501--- a/init/init_task.c
87502+++ b/init/init_task.c
87503@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
87504 * Initial thread structure. Alignment of this is handled by a special
87505 * linker map entry.
87506 */
87507+#ifdef CONFIG_X86
87508+union thread_union init_thread_union __init_task_data;
87509+#else
87510 union thread_union init_thread_union __init_task_data =
87511 { INIT_THREAD_INFO(init_task) };
87512+#endif
87513diff --git a/init/initramfs.c b/init/initramfs.c
87514index ad1bd77..dca2c1b 100644
87515--- a/init/initramfs.c
87516+++ b/init/initramfs.c
87517@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
87518
87519 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
87520 while (count) {
87521- ssize_t rv = sys_write(fd, p, count);
87522+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
87523
87524 if (rv < 0) {
87525 if (rv == -EINTR || rv == -EAGAIN)
87526@@ -107,7 +107,7 @@ static void __init free_hash(void)
87527 }
87528 }
87529
87530-static long __init do_utime(char *filename, time_t mtime)
87531+static long __init do_utime(char __force_user *filename, time_t mtime)
87532 {
87533 struct timespec t[2];
87534
87535@@ -142,7 +142,7 @@ static void __init dir_utime(void)
87536 struct dir_entry *de, *tmp;
87537 list_for_each_entry_safe(de, tmp, &dir_list, list) {
87538 list_del(&de->list);
87539- do_utime(de->name, de->mtime);
87540+ do_utime((char __force_user *)de->name, de->mtime);
87541 kfree(de->name);
87542 kfree(de);
87543 }
87544@@ -304,7 +304,7 @@ static int __init maybe_link(void)
87545 if (nlink >= 2) {
87546 char *old = find_link(major, minor, ino, mode, collected);
87547 if (old)
87548- return (sys_link(old, collected) < 0) ? -1 : 1;
87549+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
87550 }
87551 return 0;
87552 }
87553@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
87554 {
87555 struct stat st;
87556
87557- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
87558+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
87559 if (S_ISDIR(st.st_mode))
87560- sys_rmdir(path);
87561+ sys_rmdir((char __force_user *)path);
87562 else
87563- sys_unlink(path);
87564+ sys_unlink((char __force_user *)path);
87565 }
87566 }
87567
87568@@ -338,7 +338,7 @@ static int __init do_name(void)
87569 int openflags = O_WRONLY|O_CREAT;
87570 if (ml != 1)
87571 openflags |= O_TRUNC;
87572- wfd = sys_open(collected, openflags, mode);
87573+ wfd = sys_open((char __force_user *)collected, openflags, mode);
87574
87575 if (wfd >= 0) {
87576 sys_fchown(wfd, uid, gid);
87577@@ -350,17 +350,17 @@ static int __init do_name(void)
87578 }
87579 }
87580 } else if (S_ISDIR(mode)) {
87581- sys_mkdir(collected, mode);
87582- sys_chown(collected, uid, gid);
87583- sys_chmod(collected, mode);
87584+ sys_mkdir((char __force_user *)collected, mode);
87585+ sys_chown((char __force_user *)collected, uid, gid);
87586+ sys_chmod((char __force_user *)collected, mode);
87587 dir_add(collected, mtime);
87588 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
87589 S_ISFIFO(mode) || S_ISSOCK(mode)) {
87590 if (maybe_link() == 0) {
87591- sys_mknod(collected, mode, rdev);
87592- sys_chown(collected, uid, gid);
87593- sys_chmod(collected, mode);
87594- do_utime(collected, mtime);
87595+ sys_mknod((char __force_user *)collected, mode, rdev);
87596+ sys_chown((char __force_user *)collected, uid, gid);
87597+ sys_chmod((char __force_user *)collected, mode);
87598+ do_utime((char __force_user *)collected, mtime);
87599 }
87600 }
87601 return 0;
87602@@ -372,7 +372,7 @@ static int __init do_copy(void)
87603 if (xwrite(wfd, victim, body_len) != body_len)
87604 error("write error");
87605 sys_close(wfd);
87606- do_utime(vcollected, mtime);
87607+ do_utime((char __force_user *)vcollected, mtime);
87608 kfree(vcollected);
87609 eat(body_len);
87610 state = SkipIt;
87611@@ -390,9 +390,9 @@ static int __init do_symlink(void)
87612 {
87613 collected[N_ALIGN(name_len) + body_len] = '\0';
87614 clean_path(collected, 0);
87615- sys_symlink(collected + N_ALIGN(name_len), collected);
87616- sys_lchown(collected, uid, gid);
87617- do_utime(collected, mtime);
87618+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
87619+ sys_lchown((char __force_user *)collected, uid, gid);
87620+ do_utime((char __force_user *)collected, mtime);
87621 state = SkipIt;
87622 next_state = Reset;
87623 return 0;
87624diff --git a/init/main.c b/init/main.c
87625index 61b99376..85893612d 100644
87626--- a/init/main.c
87627+++ b/init/main.c
87628@@ -100,6 +100,8 @@ extern void radix_tree_init(void);
87629 static inline void mark_rodata_ro(void) { }
87630 #endif
87631
87632+extern void grsecurity_init(void);
87633+
87634 /*
87635 * Debug helper: via this flag we know that we are in 'early bootup code'
87636 * where only the boot processor is running with IRQ disabled. This means
87637@@ -161,6 +163,75 @@ static int __init set_reset_devices(char *str)
87638
87639 __setup("reset_devices", set_reset_devices);
87640
87641+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
87642+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
87643+static int __init setup_grsec_proc_gid(char *str)
87644+{
87645+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
87646+ return 1;
87647+}
87648+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
87649+#endif
87650+
87651+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
87652+unsigned long pax_user_shadow_base __read_only;
87653+EXPORT_SYMBOL(pax_user_shadow_base);
87654+extern char pax_enter_kernel_user[];
87655+extern char pax_exit_kernel_user[];
87656+#endif
87657+
87658+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
87659+static int __init setup_pax_nouderef(char *str)
87660+{
87661+#ifdef CONFIG_X86_32
87662+ unsigned int cpu;
87663+ struct desc_struct *gdt;
87664+
87665+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
87666+ gdt = get_cpu_gdt_table(cpu);
87667+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
87668+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
87669+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
87670+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
87671+ }
87672+ loadsegment(ds, __KERNEL_DS);
87673+ loadsegment(es, __KERNEL_DS);
87674+ loadsegment(ss, __KERNEL_DS);
87675+#else
87676+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
87677+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
87678+ clone_pgd_mask = ~(pgdval_t)0UL;
87679+ pax_user_shadow_base = 0UL;
87680+ setup_clear_cpu_cap(X86_FEATURE_PCID);
87681+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
87682+#endif
87683+
87684+ return 0;
87685+}
87686+early_param("pax_nouderef", setup_pax_nouderef);
87687+
87688+#ifdef CONFIG_X86_64
87689+static int __init setup_pax_weakuderef(char *str)
87690+{
87691+ if (clone_pgd_mask != ~(pgdval_t)0UL)
87692+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
87693+ return 1;
87694+}
87695+__setup("pax_weakuderef", setup_pax_weakuderef);
87696+#endif
87697+#endif
87698+
87699+#ifdef CONFIG_PAX_SOFTMODE
87700+int pax_softmode;
87701+
87702+static int __init setup_pax_softmode(char *str)
87703+{
87704+ get_option(&str, &pax_softmode);
87705+ return 1;
87706+}
87707+__setup("pax_softmode=", setup_pax_softmode);
87708+#endif
87709+
87710 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
87711 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
87712 static const char *panic_later, *panic_param;
87713@@ -735,7 +806,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
87714 struct blacklist_entry *entry;
87715 char *fn_name;
87716
87717- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
87718+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
87719 if (!fn_name)
87720 return false;
87721
87722@@ -787,7 +858,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
87723 {
87724 int count = preempt_count();
87725 int ret;
87726- char msgbuf[64];
87727+ const char *msg1 = "", *msg2 = "";
87728
87729 if (initcall_blacklisted(fn))
87730 return -EPERM;
87731@@ -797,18 +868,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
87732 else
87733 ret = fn();
87734
87735- msgbuf[0] = 0;
87736-
87737 if (preempt_count() != count) {
87738- sprintf(msgbuf, "preemption imbalance ");
87739+ msg1 = " preemption imbalance";
87740 preempt_count_set(count);
87741 }
87742 if (irqs_disabled()) {
87743- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
87744+ msg2 = " disabled interrupts";
87745 local_irq_enable();
87746 }
87747- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
87748+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
87749
87750+ add_latent_entropy();
87751 return ret;
87752 }
87753
87754@@ -914,8 +984,8 @@ static int run_init_process(const char *init_filename)
87755 {
87756 argv_init[0] = init_filename;
87757 return do_execve(getname_kernel(init_filename),
87758- (const char __user *const __user *)argv_init,
87759- (const char __user *const __user *)envp_init);
87760+ (const char __user *const __force_user *)argv_init,
87761+ (const char __user *const __force_user *)envp_init);
87762 }
87763
87764 static int try_to_run_init_process(const char *init_filename)
87765@@ -932,6 +1002,10 @@ static int try_to_run_init_process(const char *init_filename)
87766 return ret;
87767 }
87768
87769+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87770+extern int gr_init_ran;
87771+#endif
87772+
87773 static noinline void __init kernel_init_freeable(void);
87774
87775 static int __ref kernel_init(void *unused)
87776@@ -956,6 +1030,11 @@ static int __ref kernel_init(void *unused)
87777 ramdisk_execute_command, ret);
87778 }
87779
87780+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87781+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
87782+ gr_init_ran = 1;
87783+#endif
87784+
87785 /*
87786 * We try each of these until one succeeds.
87787 *
87788@@ -1016,7 +1095,7 @@ static noinline void __init kernel_init_freeable(void)
87789 do_basic_setup();
87790
87791 /* Open the /dev/console on the rootfs, this should never fail */
87792- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
87793+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
87794 pr_err("Warning: unable to open an initial console.\n");
87795
87796 (void) sys_dup(0);
87797@@ -1029,11 +1108,13 @@ static noinline void __init kernel_init_freeable(void)
87798 if (!ramdisk_execute_command)
87799 ramdisk_execute_command = "/init";
87800
87801- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
87802+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
87803 ramdisk_execute_command = NULL;
87804 prepare_namespace();
87805 }
87806
87807+ grsecurity_init();
87808+
87809 /*
87810 * Ok, we have completed the initial bootup, and
87811 * we're essentially up and running. Get rid of the
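
The grsec_proc_gid=, pax_softmode= and pax_weakuderef handlers added above all use the kernel's standard command-line machinery: a __setup() handler receives the text after the matched prefix and returns 1 once the option is consumed. The pattern in isolation, as a kernel-context sketch mirroring setup_pax_softmode():

static int example_mode;

static int __init setup_example_mode(char *str)
{
	get_option(&str, &example_mode);	/* parse the integer after "example_mode=" */
	return 1;				/* consumed: not passed on to init */
}
__setup("example_mode=", setup_example_mode);
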
87812diff --git a/ipc/compat.c b/ipc/compat.c
87813index 9b3c85f..1c4d897 100644
87814--- a/ipc/compat.c
87815+++ b/ipc/compat.c
87816@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
87817 COMPAT_SHMLBA);
87818 if (err < 0)
87819 return err;
87820- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
87821+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
87822 }
87823 case SHMDT:
87824 return sys_shmdt(compat_ptr(ptr));
87825diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
87826index 8ad93c2..efd80f8 100644
87827--- a/ipc/ipc_sysctl.c
87828+++ b/ipc/ipc_sysctl.c
87829@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
87830 static int proc_ipc_dointvec(struct ctl_table *table, int write,
87831 void __user *buffer, size_t *lenp, loff_t *ppos)
87832 {
87833- struct ctl_table ipc_table;
87834+ ctl_table_no_const ipc_table;
87835
87836 memcpy(&ipc_table, table, sizeof(ipc_table));
87837 ipc_table.data = get_ipc(table);
87838@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
87839 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
87840 void __user *buffer, size_t *lenp, loff_t *ppos)
87841 {
87842- struct ctl_table ipc_table;
87843+ ctl_table_no_const ipc_table;
87844
87845 memcpy(&ipc_table, table, sizeof(ipc_table));
87846 ipc_table.data = get_ipc(table);
87847@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
87848 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
87849 void __user *buffer, size_t *lenp, loff_t *ppos)
87850 {
87851- struct ctl_table ipc_table;
87852+ ctl_table_no_const ipc_table;
87853 memcpy(&ipc_table, table, sizeof(ipc_table));
87854 ipc_table.data = get_ipc(table);
87855
87856@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
87857 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
87858 void __user *buffer, size_t *lenp, loff_t *ppos)
87859 {
87860- struct ctl_table ipc_table;
87861+ ctl_table_no_const ipc_table;
87862 int dummy = 0;
87863
87864 memcpy(&ipc_table, table, sizeof(ipc_table));
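
ctl_table_no_const is the constify escape hatch again: these proc handlers need a writable stack copy of an otherwise const struct ctl_table so they can repoint .data before delegating. A kernel-context sketch of the presumed typedef and its use; example_get_data() is a hypothetical stand-in for get_ipc()/get_mq():

typedef struct ctl_table __no_const ctl_table_no_const;

static void *example_get_data(struct ctl_table *table);	/* hypothetical helper */

static int example_proc_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	ctl_table_no_const tmp;			/* writable copy on the stack */

	memcpy(&tmp, table, sizeof(tmp));
	tmp.data = example_get_data(table);
	return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}
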
87865diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
87866index 68d4e95..1477ded 100644
87867--- a/ipc/mq_sysctl.c
87868+++ b/ipc/mq_sysctl.c
87869@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
87870 static int proc_mq_dointvec(struct ctl_table *table, int write,
87871 void __user *buffer, size_t *lenp, loff_t *ppos)
87872 {
87873- struct ctl_table mq_table;
87874+ ctl_table_no_const mq_table;
87875 memcpy(&mq_table, table, sizeof(mq_table));
87876 mq_table.data = get_mq(table);
87877
87878@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
87879 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
87880 void __user *buffer, size_t *lenp, loff_t *ppos)
87881 {
87882- struct ctl_table mq_table;
87883+ ctl_table_no_const mq_table;
87884 memcpy(&mq_table, table, sizeof(mq_table));
87885 mq_table.data = get_mq(table);
87886
87887diff --git a/ipc/mqueue.c b/ipc/mqueue.c
87888index 7635a1c..7432cb6 100644
87889--- a/ipc/mqueue.c
87890+++ b/ipc/mqueue.c
87891@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
87892 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
87893 info->attr.mq_msgsize);
87894
87895+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
87896 spin_lock(&mq_lock);
87897 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
87898 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
87899diff --git a/ipc/shm.c b/ipc/shm.c
87900index 19633b4..d454904 100644
87901--- a/ipc/shm.c
87902+++ b/ipc/shm.c
87903@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
87904 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
87905 #endif
87906
87907+#ifdef CONFIG_GRKERNSEC
87908+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
87909+ const u64 shm_createtime, const kuid_t cuid,
87910+ const int shmid);
87911+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
87912+ const u64 shm_createtime);
87913+#endif
87914+
87915 void shm_init_ns(struct ipc_namespace *ns)
87916 {
87917 ns->shm_ctlmax = SHMMAX;
87918@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
87919 shp->shm_lprid = 0;
87920 shp->shm_atim = shp->shm_dtim = 0;
87921 shp->shm_ctim = get_seconds();
87922+#ifdef CONFIG_GRKERNSEC
87923+ shp->shm_createtime = ktime_get_ns();
87924+#endif
87925 shp->shm_segsz = size;
87926 shp->shm_nattch = 0;
87927 shp->shm_file = file;
87928@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
87929 f_mode = FMODE_READ | FMODE_WRITE;
87930 }
87931 if (shmflg & SHM_EXEC) {
87932+
87933+#ifdef CONFIG_PAX_MPROTECT
87934+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
87935+ goto out;
87936+#endif
87937+
87938 prot |= PROT_EXEC;
87939 acc_mode |= S_IXUGO;
87940 }
87941@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
87942 if (err)
87943 goto out_unlock;
87944
87945+#ifdef CONFIG_GRKERNSEC
87946+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
87947+ shp->shm_perm.cuid, shmid) ||
87948+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
87949+ err = -EACCES;
87950+ goto out_unlock;
87951+ }
87952+#endif
87953+
87954 ipc_lock_object(&shp->shm_perm);
87955
87956 /* check if shm_destroy() is tearing down shp */
87957@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
87958 path = shp->shm_file->f_path;
87959 path_get(&path);
87960 shp->shm_nattch++;
87961+#ifdef CONFIG_GRKERNSEC
87962+ shp->shm_lapid = current->pid;
87963+#endif
87964 size = i_size_read(path.dentry->d_inode);
87965 ipc_unlock_object(&shp->shm_perm);
87966 rcu_read_unlock();
87967diff --git a/ipc/util.c b/ipc/util.c
87968index 106bed0..f851429 100644
87969--- a/ipc/util.c
87970+++ b/ipc/util.c
87971@@ -71,6 +71,8 @@ struct ipc_proc_iface {
87972 int (*show)(struct seq_file *, void *);
87973 };
87974
87975+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
87976+
87977 /**
87978 * ipc_init - initialise ipc subsystem
87979 *
87980@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
87981 granted_mode >>= 6;
87982 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
87983 granted_mode >>= 3;
87984+
87985+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
87986+ return -1;
87987+
87988 /* is there some bit set in requested_mode but not in granted_mode? */
87989 if ((requested_mode & ~granted_mode & 0007) &&
87990 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
87991diff --git a/kernel/audit.c b/kernel/audit.c
87992index 72ab759..757deba 100644
87993--- a/kernel/audit.c
87994+++ b/kernel/audit.c
87995@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
87996 3) suppressed due to audit_rate_limit
87997 4) suppressed due to audit_backlog_limit
87998 */
87999-static atomic_t audit_lost = ATOMIC_INIT(0);
88000+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
88001
88002 /* The netlink socket. */
88003 static struct sock *audit_sock;
88004@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
88005 unsigned long now;
88006 int print;
88007
88008- atomic_inc(&audit_lost);
88009+ atomic_inc_unchecked(&audit_lost);
88010
88011 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
88012
88013@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
88014 if (print) {
88015 if (printk_ratelimit())
88016 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
88017- atomic_read(&audit_lost),
88018+ atomic_read_unchecked(&audit_lost),
88019 audit_rate_limit,
88020 audit_backlog_limit);
88021 audit_panic(message);
88022@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
88023 s.pid = audit_pid;
88024 s.rate_limit = audit_rate_limit;
88025 s.backlog_limit = audit_backlog_limit;
88026- s.lost = atomic_read(&audit_lost);
88027+ s.lost = atomic_read_unchecked(&audit_lost);
88028 s.backlog = skb_queue_len(&audit_skb_queue);
88029 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
88030 s.backlog_wait_time = audit_backlog_wait_time;
88031diff --git a/kernel/auditsc.c b/kernel/auditsc.c
88032index 072566d..1190489 100644
88033--- a/kernel/auditsc.c
88034+++ b/kernel/auditsc.c
88035@@ -2056,7 +2056,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
88036 }
88037
88038 /* global counter which is incremented every time something logs in */
88039-static atomic_t session_id = ATOMIC_INIT(0);
88040+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
88041
88042 static int audit_set_loginuid_perm(kuid_t loginuid)
88043 {
88044@@ -2123,7 +2123,7 @@ int audit_set_loginuid(kuid_t loginuid)
88045
88046 /* are we setting or clearing? */
88047 if (uid_valid(loginuid))
88048- sessionid = (unsigned int)atomic_inc_return(&session_id);
88049+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
88050
88051 task->sessionid = sessionid;
88052 task->loginuid = loginuid;
88053diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
88054index a64e7a2..2e69448 100644
88055--- a/kernel/bpf/core.c
88056+++ b/kernel/bpf/core.c
88057@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88058 * random section of illegal instructions.
88059 */
88060 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
88061- hdr = module_alloc(size);
88062+ hdr = module_alloc_exec(size);
88063 if (hdr == NULL)
88064 return NULL;
88065
88066 /* Fill space with illegal/arch-dep instructions. */
88067 bpf_fill_ill_insns(hdr, size);
88068
88069+ pax_open_kernel();
88070 hdr->pages = size / PAGE_SIZE;
88071+ pax_close_kernel();
88072+
88073 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
88074 PAGE_SIZE - sizeof(*hdr));
88075 start = (prandom_u32() % hole) & ~(alignment - 1);
88076@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88077
88078 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
88079 {
88080- module_memfree(hdr);
88081+ module_memfree_exec(hdr);
88082 }
88083 #endif /* CONFIG_BPF_JIT */
88084
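
With the JIT image coming from module_alloc_exec(), the header lives in memory that KERNEXEC keeps read-only through its normal mapping, so the single store to hdr->pages is bracketed by pax_open_kernel()/pax_close_kernel(). On x86 the underlying idea is toggling CR0.WP; a heavily simplified, illustrative-only sketch with invented names (the real helpers also handle preemption, barriers and PCID):

static inline unsigned long example_open_kernel(void)
{
	unsigned long cr0;

	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~0x10000UL));	/* clear CR0.WP */
	return cr0;
}

static inline void example_close_kernel(unsigned long cr0)
{
	asm volatile("mov %0, %%cr0" : : "r" (cr0));	/* restore write protection */
}
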
88085diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
88086index 536edc2..d28c85d 100644
88087--- a/kernel/bpf/syscall.c
88088+++ b/kernel/bpf/syscall.c
88089@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
88090 int err;
88091
88092 /* the syscall is limited to root temporarily. This restriction will be
88093- * lifted when security audit is clean. Note that eBPF+tracing must have
88094- * this restriction, since it may pass kernel data to user space
88095+ * lifted by upstream when a half-assed security audit is clean. Note
88096+ * that eBPF+tracing must have this restriction, since it may pass
88097+ * kernel data to user space
88098 */
88099 if (!capable(CAP_SYS_ADMIN))
88100 return -EPERM;
88101+#ifdef CONFIG_GRKERNSEC
88102+ return -EPERM;
88103+#endif
88104
88105 if (!access_ok(VERIFY_READ, uattr, 1))
88106 return -EFAULT;
88107diff --git a/kernel/capability.c b/kernel/capability.c
88108index 989f5bf..d317ca0 100644
88109--- a/kernel/capability.c
88110+++ b/kernel/capability.c
88111@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
88112 * before modification is attempted and the application
88113 * fails.
88114 */
88115+ if (tocopy > ARRAY_SIZE(kdata))
88116+ return -EFAULT;
88117+
88118 if (copy_to_user(dataptr, kdata, tocopy
88119 * sizeof(struct __user_cap_data_struct))) {
88120 return -EFAULT;
88121@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
88122 int ret;
88123
88124 rcu_read_lock();
88125- ret = security_capable(__task_cred(t), ns, cap);
88126+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
88127+ gr_task_is_capable(t, __task_cred(t), cap);
88128 rcu_read_unlock();
88129
88130- return (ret == 0);
88131+ return ret;
88132 }
88133
88134 /**
88135@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
88136 int ret;
88137
88138 rcu_read_lock();
88139- ret = security_capable_noaudit(__task_cred(t), ns, cap);
88140+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
88141 rcu_read_unlock();
88142
88143- return (ret == 0);
88144+ return ret;
88145 }
88146
88147 /**
88148@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
88149 BUG();
88150 }
88151
88152- if (security_capable(current_cred(), ns, cap) == 0) {
88153+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
88154 current->flags |= PF_SUPERPRIV;
88155 return true;
88156 }
88157@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
88158 }
88159 EXPORT_SYMBOL(ns_capable);
88160
88161+bool ns_capable_nolog(struct user_namespace *ns, int cap)
88162+{
88163+ if (unlikely(!cap_valid(cap))) {
88164+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
88165+ BUG();
88166+ }
88167+
88168+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
88169+ current->flags |= PF_SUPERPRIV;
88170+ return true;
88171+ }
88172+ return false;
88173+}
88174+EXPORT_SYMBOL(ns_capable_nolog);
88175+
88176 /**
88177 * file_ns_capable - Determine if the file's opener had a capability in effect
88178 * @file: The file we want to check
88179@@ -427,6 +446,12 @@ bool capable(int cap)
88180 }
88181 EXPORT_SYMBOL(capable);
88182
88183+bool capable_nolog(int cap)
88184+{
88185+ return ns_capable_nolog(&init_user_ns, cap);
88186+}
88187+EXPORT_SYMBOL(capable_nolog);
88188+
88189 /**
88190 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
88191 * @inode: The inode in question
88192@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
88193 kgid_has_mapping(ns, inode->i_gid);
88194 }
88195 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
88196+
88197+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
88198+{
88199+ struct user_namespace *ns = current_user_ns();
88200+
88201+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
88202+ kgid_has_mapping(ns, inode->i_gid);
88203+}
88204+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
88205diff --git a/kernel/cgroup.c b/kernel/cgroup.c
88206index 04cfe8a..adadcc0 100644
88207--- a/kernel/cgroup.c
88208+++ b/kernel/cgroup.c
88209@@ -5343,6 +5343,9 @@ static void cgroup_release_agent(struct work_struct *work)
88210 if (!pathbuf || !agentbuf)
88211 goto out;
88212
88213+ if (agentbuf[0] == '\0')
88214+ goto out;
88215+
88216 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
88217 if (!path)
88218 goto out;
88219@@ -5528,7 +5531,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
88220 struct task_struct *task;
88221 int count = 0;
88222
88223- seq_printf(seq, "css_set %p\n", cset);
88224+ seq_printf(seq, "css_set %pK\n", cset);
88225
88226 list_for_each_entry(task, &cset->tasks, cg_list) {
88227 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
88228diff --git a/kernel/compat.c b/kernel/compat.c
88229index ebb3c36..1df606e 100644
88230--- a/kernel/compat.c
88231+++ b/kernel/compat.c
88232@@ -13,6 +13,7 @@
88233
88234 #include <linux/linkage.h>
88235 #include <linux/compat.h>
88236+#include <linux/module.h>
88237 #include <linux/errno.h>
88238 #include <linux/time.h>
88239 #include <linux/signal.h>
88240@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
88241 mm_segment_t oldfs;
88242 long ret;
88243
88244- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
88245+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
88246 oldfs = get_fs();
88247 set_fs(KERNEL_DS);
88248 ret = hrtimer_nanosleep_restart(restart);
88249@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
88250 oldfs = get_fs();
88251 set_fs(KERNEL_DS);
88252 ret = hrtimer_nanosleep(&tu,
88253- rmtp ? (struct timespec __user *)&rmt : NULL,
88254+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
88255 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
88256 set_fs(oldfs);
88257
88258@@ -379,7 +380,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
88259 mm_segment_t old_fs = get_fs();
88260
88261 set_fs(KERNEL_DS);
88262- ret = sys_sigpending((old_sigset_t __user *) &s);
88263+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
88264 set_fs(old_fs);
88265 if (ret == 0)
88266 ret = put_user(s, set);
88267@@ -469,7 +470,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
88268 mm_segment_t old_fs = get_fs();
88269
88270 set_fs(KERNEL_DS);
88271- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
88272+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
88273 set_fs(old_fs);
88274
88275 if (!ret) {
88276@@ -551,8 +552,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
88277 set_fs (KERNEL_DS);
88278 ret = sys_wait4(pid,
88279 (stat_addr ?
88280- (unsigned int __user *) &status : NULL),
88281- options, (struct rusage __user *) &r);
88282+ (unsigned int __force_user *) &status : NULL),
88283+ options, (struct rusage __force_user *) &r);
88284 set_fs (old_fs);
88285
88286 if (ret > 0) {
88287@@ -578,8 +579,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
88288 memset(&info, 0, sizeof(info));
88289
88290 set_fs(KERNEL_DS);
88291- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
88292- uru ? (struct rusage __user *)&ru : NULL);
88293+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
88294+ uru ? (struct rusage __force_user *)&ru : NULL);
88295 set_fs(old_fs);
88296
88297 if ((ret < 0) || (info.si_signo == 0))
88298@@ -713,8 +714,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
88299 oldfs = get_fs();
88300 set_fs(KERNEL_DS);
88301 err = sys_timer_settime(timer_id, flags,
88302- (struct itimerspec __user *) &newts,
88303- (struct itimerspec __user *) &oldts);
88304+ (struct itimerspec __force_user *) &newts,
88305+ (struct itimerspec __force_user *) &oldts);
88306 set_fs(oldfs);
88307 if (!err && old && put_compat_itimerspec(old, &oldts))
88308 return -EFAULT;
88309@@ -731,7 +732,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
88310 oldfs = get_fs();
88311 set_fs(KERNEL_DS);
88312 err = sys_timer_gettime(timer_id,
88313- (struct itimerspec __user *) &ts);
88314+ (struct itimerspec __force_user *) &ts);
88315 set_fs(oldfs);
88316 if (!err && put_compat_itimerspec(setting, &ts))
88317 return -EFAULT;
88318@@ -750,7 +751,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
88319 oldfs = get_fs();
88320 set_fs(KERNEL_DS);
88321 err = sys_clock_settime(which_clock,
88322- (struct timespec __user *) &ts);
88323+ (struct timespec __force_user *) &ts);
88324 set_fs(oldfs);
88325 return err;
88326 }
88327@@ -765,7 +766,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
88328 oldfs = get_fs();
88329 set_fs(KERNEL_DS);
88330 err = sys_clock_gettime(which_clock,
88331- (struct timespec __user *) &ts);
88332+ (struct timespec __force_user *) &ts);
88333 set_fs(oldfs);
88334 if (!err && compat_put_timespec(&ts, tp))
88335 return -EFAULT;
88336@@ -785,7 +786,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
88337
88338 oldfs = get_fs();
88339 set_fs(KERNEL_DS);
88340- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
88341+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
88342 set_fs(oldfs);
88343
88344 err = compat_put_timex(utp, &txc);
88345@@ -805,7 +806,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
88346 oldfs = get_fs();
88347 set_fs(KERNEL_DS);
88348 err = sys_clock_getres(which_clock,
88349- (struct timespec __user *) &ts);
88350+ (struct timespec __force_user *) &ts);
88351 set_fs(oldfs);
88352 if (!err && tp && compat_put_timespec(&ts, tp))
88353 return -EFAULT;
88354@@ -819,7 +820,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
88355 struct timespec tu;
88356 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
88357
88358- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
88359+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
88360 oldfs = get_fs();
88361 set_fs(KERNEL_DS);
88362 err = clock_nanosleep_restart(restart);
88363@@ -851,8 +852,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
88364 oldfs = get_fs();
88365 set_fs(KERNEL_DS);
88366 err = sys_clock_nanosleep(which_clock, flags,
88367- (struct timespec __user *) &in,
88368- (struct timespec __user *) &out);
88369+ (struct timespec __force_user *) &in,
88370+ (struct timespec __force_user *) &out);
88371 set_fs(oldfs);
88372
88373 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
88374@@ -1146,7 +1147,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
88375 mm_segment_t old_fs = get_fs();
88376
88377 set_fs(KERNEL_DS);
88378- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
88379+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
88380 set_fs(old_fs);
88381 if (compat_put_timespec(&t, interval))
88382 return -EFAULT;
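
All of the kernel/compat.c hunks follow a single pattern: a compat syscall builds a kernel-space temporary, flips to KERNEL_DS with set_fs(), and calls the native syscall, which is declared to take __user pointers. Under grsecurity's stricter address-space separation a plain __user cast is flagged, so the casts become __force_user, marking the override as deliberate. A hedged sketch of the annotation machinery (sparse attributes; compiles as ordinary C when __CHECKER__ is unset):

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

struct ts_k { long tv_sec, tv_nsec; };

/* stand-in for the native syscall that expects a user pointer */
static long native_call(struct ts_k __user *tp) { (void)tp; return 0; }

long compat_wrapper(void)
{
    struct ts_k ts = { 0, 0 };
    /* under set_fs(KERNEL_DS) a kernel pointer may legally be passed;
     * __force_user documents and silences the address-space cast */
    return native_call((struct ts_k __force_user *)&ts);
}

int main(void) { return (int)compat_wrapper(); }
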
88383diff --git a/kernel/configs.c b/kernel/configs.c
88384index c18b1f1..b9a0132 100644
88385--- a/kernel/configs.c
88386+++ b/kernel/configs.c
88387@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
88388 struct proc_dir_entry *entry;
88389
88390 /* create the current config file */
88391+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
88392+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
88393+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
88394+ &ikconfig_file_ops);
88395+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
88396+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
88397+ &ikconfig_file_ops);
88398+#endif
88399+#else
88400 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
88401 &ikconfig_file_ops);
88402+#endif
88403+
88404 if (!entry)
88405 return -ENOMEM;
88406
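
The ifdef ladder above narrows /proc/config.gz from the stock world-readable 0444 to 0400 under GRKERNSEC_PROC_USER or GRKERNSEC_HIDESYM, or 0440 under GRKERNSEC_PROC_USERGROUP. Note that if GRKERNSEC_PROC_ADD were set without either PROC_USER or PROC_USERGROUP, entry would be left unassigned; the grsecurity Kconfig presumably forbids that combination. The mode selection, restated as a small standalone function (Kconfig symbols modeled as plain macros):

#include <stdio.h>
#include <sys/stat.h>

/* stand-ins for the Kconfig switches; enable exactly one scenario */
#define PROC_USER      1
#define PROC_USERGROUP 0
#define HIDESYM        0

static mode_t config_gz_mode(void)
{
#if PROC_USER || HIDESYM
    return S_IRUSR;                     /* 0400: root only          */
#elif PROC_USERGROUP
    return S_IRUSR | S_IRGRP;           /* 0440: root + proc group  */
#else
    return S_IRUSR | S_IRGRP | S_IROTH; /* 0444: stock kernel       */
#endif
}

int main(void)
{
    printf("/proc/config.gz mode: %04o\n", (unsigned)config_gz_mode());
    return 0;
}
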
88407diff --git a/kernel/cred.c b/kernel/cred.c
88408index e0573a4..26c0fd3 100644
88409--- a/kernel/cred.c
88410+++ b/kernel/cred.c
88411@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
88412 validate_creds(cred);
88413 alter_cred_subscribers(cred, -1);
88414 put_cred(cred);
88415+
88416+#ifdef CONFIG_GRKERNSEC_SETXID
88417+ cred = (struct cred *) tsk->delayed_cred;
88418+ if (cred != NULL) {
88419+ tsk->delayed_cred = NULL;
88420+ validate_creds(cred);
88421+ alter_cred_subscribers(cred, -1);
88422+ put_cred(cred);
88423+ }
88424+#endif
88425 }
88426
88427 /**
88428@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
88429 * Always returns 0 thus allowing this function to be tail-called at the end
88430 * of, say, sys_setgid().
88431 */
88432-int commit_creds(struct cred *new)
88433+static int __commit_creds(struct cred *new)
88434 {
88435 struct task_struct *task = current;
88436 const struct cred *old = task->real_cred;
88437@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
88438
88439 get_cred(new); /* we will require a ref for the subj creds too */
88440
88441+ gr_set_role_label(task, new->uid, new->gid);
88442+
88443 /* dumpability changes */
88444 if (!uid_eq(old->euid, new->euid) ||
88445 !gid_eq(old->egid, new->egid) ||
88446@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
88447 put_cred(old);
88448 return 0;
88449 }
88450+#ifdef CONFIG_GRKERNSEC_SETXID
88451+extern int set_user(struct cred *new);
88452+
88453+void gr_delayed_cred_worker(void)
88454+{
88455+ const struct cred *new = current->delayed_cred;
88456+ struct cred *ncred;
88457+
88458+ current->delayed_cred = NULL;
88459+
88460+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
88461+ // drop the reference taken by get_cred() when this cred was queued
88462+ put_cred(new);
88463+ return;
88464+ } else if (new == NULL)
88465+ return;
88466+
88467+ ncred = prepare_creds();
88468+ if (!ncred)
88469+ goto die;
88470+ // uids
88471+ ncred->uid = new->uid;
88472+ ncred->euid = new->euid;
88473+ ncred->suid = new->suid;
88474+ ncred->fsuid = new->fsuid;
88475+ // gids
88476+ ncred->gid = new->gid;
88477+ ncred->egid = new->egid;
88478+ ncred->sgid = new->sgid;
88479+ ncred->fsgid = new->fsgid;
88480+ // groups
88481+ set_groups(ncred, new->group_info);
88482+ // caps
88483+ ncred->securebits = new->securebits;
88484+ ncred->cap_inheritable = new->cap_inheritable;
88485+ ncred->cap_permitted = new->cap_permitted;
88486+ ncred->cap_effective = new->cap_effective;
88487+ ncred->cap_bset = new->cap_bset;
88488+
88489+ if (set_user(ncred)) {
88490+ abort_creds(ncred);
88491+ goto die;
88492+ }
88493+
88494+ // drop the reference taken by get_cred() when this cred was queued
88495+ put_cred(new);
88496+
88497+ __commit_creds(ncred);
88498+ return;
88499+die:
88500+ // drop the reference taken by get_cred() when this cred was queued
88501+ put_cred(new);
88502+ do_group_exit(SIGKILL);
88503+}
88504+#endif
88505+
88506+int commit_creds(struct cred *new)
88507+{
88508+#ifdef CONFIG_GRKERNSEC_SETXID
88509+ int ret;
88510+ int schedule_it = 0;
88511+ struct task_struct *t;
88512+ unsigned oldsecurebits = current_cred()->securebits;
88513+
88514+ /* we won't be called with tasklist_lock held for writing
88515+ and interrupts disabled, since the cred struct in that
88516+ case is init_cred
88517+ */
88518+ if (grsec_enable_setxid && !current_is_single_threaded() &&
88519+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
88520+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
88521+ schedule_it = 1;
88522+ }
88523+ ret = __commit_creds(new);
88524+ if (schedule_it) {
88525+ rcu_read_lock();
88526+ read_lock(&tasklist_lock);
88527+ for (t = next_thread(current); t != current;
88528+ t = next_thread(t)) {
88529+ /* we'll check if the thread has uid 0 in
88530+ * the delayed worker routine
88531+ */
88532+ if (task_securebits(t) == oldsecurebits &&
88533+ t->delayed_cred == NULL) {
88534+ t->delayed_cred = get_cred(new);
88535+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
88536+ set_tsk_need_resched(t);
88537+ }
88538+ }
88539+ read_unlock(&tasklist_lock);
88540+ rcu_read_unlock();
88541+ }
88542+
88543+ return ret;
88544+#else
88545+ return __commit_creds(new);
88546+#endif
88547+}
88548+
88549 EXPORT_SYMBOL(commit_creds);
88550
88551 /**
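
The kernel/cred.c changes close a classic multi-threaded setuid gap: commit_creds() affects only the calling thread, so when a root thread of a threaded program drops privileges, its siblings keep running as root until userspace propagation catches up. Under GRKERNSEC_SETXID, commit_creds() becomes a wrapper: when a multi-threaded root task drops to a non-root uid, each sibling thread gets a get_cred()'d copy parked in t->delayed_cred along with TIF_GRSEC_SETXID and a forced reschedule; gr_delayed_cred_worker() later rebuilds the creds via prepare_creds()/set_user() and applies them with __commit_creds(), killing the whole group if that fails. A userspace analogue of the queue-then-apply idea using pthreads (all names hypothetical; the kernel applies the queued cred at the return-to-user boundary, not by polling):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define NTHREADS 3

static atomic_int pending_uid = -1;  /* -1: no drop queued (delayed_cred) */
static _Thread_local int my_uid;     /* 0 == "root" for every thread */

/* analogue of gr_delayed_cred_worker(), run at a safe point */
static void check_delayed_cred(void)
{
    int uid = atomic_load(&pending_uid);
    if (uid >= 0 && my_uid == 0)
        my_uid = uid;                /* apply the queued drop */
}

static void *worker(void *arg)
{
    for (int i = 0; i < 100; i++) {
        check_delayed_cred();        /* kernel: forced via TIF flag + resched */
        usleep(1000);
    }
    printf("thread %ld finished with uid %d\n", (long)(intptr_t)arg, my_uid);
    return NULL;
}

int main(void)
{
    pthread_t tid[NTHREADS];

    for (long i = 0; i < NTHREADS; i++)
        pthread_create(&tid[i], NULL, worker, (void *)(intptr_t)i);

    usleep(10000);
    /* analogue of the commit_creds() wrapper queueing the drop */
    atomic_store(&pending_uid, 1000);

    for (int i = 0; i < NTHREADS; i++)
        pthread_join(tid[i], NULL);
    return 0;
}

The exit_creds() hunk completes the scheme by releasing a still-queued delayed_cred when a thread dies before applying it.
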
88552diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
88553index ac5c0f9..4b1c6c2 100644
88554--- a/kernel/debug/debug_core.c
88555+++ b/kernel/debug/debug_core.c
88556@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
88557 */
88558 static atomic_t masters_in_kgdb;
88559 static atomic_t slaves_in_kgdb;
88560-static atomic_t kgdb_break_tasklet_var;
88561+static atomic_unchecked_t kgdb_break_tasklet_var;
88562 atomic_t kgdb_setting_breakpoint;
88563
88564 struct task_struct *kgdb_usethread;
88565@@ -137,7 +137,7 @@ int kgdb_single_step;
88566 static pid_t kgdb_sstep_pid;
88567
88568 /* to keep track of the CPU which is doing the single stepping*/
88569-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88570+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88571
88572 /*
88573 * If you are debugging a problem where roundup (the collection of
88574@@ -552,7 +552,7 @@ return_normal:
88575 * kernel will only try for the value of sstep_tries before
88576 * giving up and continuing on.
88577 */
88578- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
88579+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
88580 (kgdb_info[cpu].task &&
88581 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
88582 atomic_set(&kgdb_active, -1);
88583@@ -654,8 +654,8 @@ cpu_master_loop:
88584 }
88585
88586 kgdb_restore:
88587- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
88588- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
88589+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
88590+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
88591 if (kgdb_info[sstep_cpu].task)
88592 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
88593 else
88594@@ -932,18 +932,18 @@ static void kgdb_unregister_callbacks(void)
88595 static void kgdb_tasklet_bpt(unsigned long ing)
88596 {
88597 kgdb_breakpoint();
88598- atomic_set(&kgdb_break_tasklet_var, 0);
88599+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
88600 }
88601
88602 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
88603
88604 void kgdb_schedule_breakpoint(void)
88605 {
88606- if (atomic_read(&kgdb_break_tasklet_var) ||
88607+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
88608 atomic_read(&kgdb_active) != -1 ||
88609 atomic_read(&kgdb_setting_breakpoint))
88610 return;
88611- atomic_inc(&kgdb_break_tasklet_var);
88612+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
88613 tasklet_schedule(&kgdb_tasklet_breakpoint);
88614 }
88615 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
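
The kgdb counters above are flipped to atomic_unchecked_t, part of PaX's REFCOUNT hardening: ordinary atomic_t operations gain overflow detection so a reference count that wraps (a common use-after-free primitive) is caught, while counters that are merely statistical or intentionally wrapping opt out through the *_unchecked API. A simplified sketch of the split, using GCC/Clang __atomic builtins where the real kernel uses arch-specific asm:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

static void refcount_overflow(void)
{
    fprintf(stderr, "refcount overflow detected\n");
    abort();                         /* kernel: report and contain */
}

/* checked: trips the handler if the increment wraps past INT_MAX */
static void atomic_inc(atomic_t *v)
{
    int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);
    if (old == INT_MAX)
        refcount_overflow();
}

/* unchecked: for counters that are allowed to wrap */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);
}

int main(void)
{
    atomic_unchecked_t stat = { INT_MAX };
    atomic_inc_unchecked(&stat);     /* wraps silently, by design */
    printf("stat wrapped to %d\n", stat.counter);

    atomic_t ref = { INT_MAX };
    atomic_inc(&ref);                /* aborts: would-be overflow */
    return 0;
}
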
88616diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
88617index 60f6bb8..104bb07 100644
88618--- a/kernel/debug/kdb/kdb_main.c
88619+++ b/kernel/debug/kdb/kdb_main.c
88620@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
88621 continue;
88622
88623 kdb_printf("%-20s%8u 0x%p ", mod->name,
88624- mod->core_size, (void *)mod);
88625+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
88626 #ifdef CONFIG_MODULE_UNLOAD
88627 kdb_printf("%4d ", module_refcount(mod));
88628 #endif
88629@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
88630 kdb_printf(" (Loading)");
88631 else
88632 kdb_printf(" (Live)");
88633- kdb_printf(" 0x%p", mod->module_core);
88634+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
88635
88636 #ifdef CONFIG_MODULE_UNLOAD
88637 {
88638diff --git a/kernel/events/core.c b/kernel/events/core.c
88639index 19efcf133..7c05c93 100644
88640--- a/kernel/events/core.c
88641+++ b/kernel/events/core.c
88642@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
88643 * 0 - disallow raw tracepoint access for unpriv
88644 * 1 - disallow cpu events for unpriv
88645 * 2 - disallow kernel profiling for unpriv
88646+ * 3 - disallow all unpriv perf event use
88647 */
88648-int sysctl_perf_event_paranoid __read_mostly = 1;
88649+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88650+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
88651+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
88652+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
88653+#else
88654+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
88655+#endif
88656
88657 /* Minimum for 512 kiB + 1 user control page */
88658 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
88659@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
88660
88661 tmp *= sysctl_perf_cpu_time_max_percent;
88662 do_div(tmp, 100);
88663- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
88664+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
88665 }
88666
88667 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
88668@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
88669 }
88670 }
88671
88672-static atomic64_t perf_event_id;
88673+static atomic64_unchecked_t perf_event_id;
88674
88675 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
88676 enum event_type_t event_type);
88677@@ -3102,7 +3109,7 @@ static void __perf_event_read(void *info)
88678
88679 static inline u64 perf_event_count(struct perf_event *event)
88680 {
88681- return local64_read(&event->count) + atomic64_read(&event->child_count);
88682+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
88683 }
88684
88685 static u64 perf_event_read(struct perf_event *event)
88686@@ -3528,9 +3535,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
88687 mutex_lock(&event->child_mutex);
88688 total += perf_event_read(event);
88689 *enabled += event->total_time_enabled +
88690- atomic64_read(&event->child_total_time_enabled);
88691+ atomic64_read_unchecked(&event->child_total_time_enabled);
88692 *running += event->total_time_running +
88693- atomic64_read(&event->child_total_time_running);
88694+ atomic64_read_unchecked(&event->child_total_time_running);
88695
88696 list_for_each_entry(child, &event->child_list, child_list) {
88697 total += perf_event_read(child);
88698@@ -3994,10 +4001,10 @@ void perf_event_update_userpage(struct perf_event *event)
88699 userpg->offset -= local64_read(&event->hw.prev_count);
88700
88701 userpg->time_enabled = enabled +
88702- atomic64_read(&event->child_total_time_enabled);
88703+ atomic64_read_unchecked(&event->child_total_time_enabled);
88704
88705 userpg->time_running = running +
88706- atomic64_read(&event->child_total_time_running);
88707+ atomic64_read_unchecked(&event->child_total_time_running);
88708
88709 arch_perf_update_userpage(userpg, now);
88710
88711@@ -4568,7 +4575,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
88712
88713 /* Data. */
88714 sp = perf_user_stack_pointer(regs);
88715- rem = __output_copy_user(handle, (void *) sp, dump_size);
88716+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
88717 dyn_size = dump_size - rem;
88718
88719 perf_output_skip(handle, rem);
88720@@ -4659,11 +4666,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
88721 values[n++] = perf_event_count(event);
88722 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
88723 values[n++] = enabled +
88724- atomic64_read(&event->child_total_time_enabled);
88725+ atomic64_read_unchecked(&event->child_total_time_enabled);
88726 }
88727 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
88728 values[n++] = running +
88729- atomic64_read(&event->child_total_time_running);
88730+ atomic64_read_unchecked(&event->child_total_time_running);
88731 }
88732 if (read_format & PERF_FORMAT_ID)
88733 values[n++] = primary_event_id(event);
88734@@ -6994,7 +7001,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
88735 event->parent = parent_event;
88736
88737 event->ns = get_pid_ns(task_active_pid_ns(current));
88738- event->id = atomic64_inc_return(&perf_event_id);
88739+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
88740
88741 event->state = PERF_EVENT_STATE_INACTIVE;
88742
88743@@ -7275,6 +7282,11 @@ SYSCALL_DEFINE5(perf_event_open,
88744 if (flags & ~PERF_FLAG_ALL)
88745 return -EINVAL;
88746
88747+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88748+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
88749+ return -EACCES;
88750+#endif
88751+
88752 err = perf_copy_attr(attr_uptr, &attr);
88753 if (err)
88754 return err;
88755@@ -7642,10 +7654,10 @@ static void sync_child_event(struct perf_event *child_event,
88756 /*
88757 * Add back the child's count to the parent's count:
88758 */
88759- atomic64_add(child_val, &parent_event->child_count);
88760- atomic64_add(child_event->total_time_enabled,
88761+ atomic64_add_unchecked(child_val, &parent_event->child_count);
88762+ atomic64_add_unchecked(child_event->total_time_enabled,
88763 &parent_event->child_total_time_enabled);
88764- atomic64_add(child_event->total_time_running,
88765+ atomic64_add_unchecked(child_event->total_time_running,
88766 &parent_event->child_total_time_running);
88767
88768 /*
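
Besides converting perf's child-count accumulators to the unchecked atomics described earlier, the kernel/events/core.c hunks add a paranoia level 3 on top of the stock 0-2 range: all unprivileged perf_event_open() use is refused, enforced by the early CAP_SYS_ADMIN check added to the syscall, and the sysctl variable is renamed sysctl_perf_event_legitimately_concerned with its default raised to 3 under GRKERNSEC_PERF_HARDEN or 2 under HIDESYM. A small userspace probe of the resulting policy (assumes the grsecurity semantics for level 3):

#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
    int level;

    if (!f) { perror("perf_event_paranoid"); return 1; }
    if (fscanf(f, "%d", &level) != 1) { fclose(f); return 1; }
    fclose(f);

    printf("paranoia level: %d\n", level);
    if (level >= 3)      /* the added level: all unprivileged use denied */
        puts("perf_event_open() needs CAP_SYS_ADMIN");
    else if (level >= 2)
        puts("no unprivileged kernel profiling");
    else if (level >= 1)
        puts("no unprivileged CPU-wide events");
    else
        puts("unprivileged use mostly allowed");
    return 0;
}
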
88769diff --git a/kernel/events/internal.h b/kernel/events/internal.h
88770index 569b2187..19940d9 100644
88771--- a/kernel/events/internal.h
88772+++ b/kernel/events/internal.h
88773@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
88774 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
88775 }
88776
88777-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
88778+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
88779 static inline unsigned long \
88780 func_name(struct perf_output_handle *handle, \
88781- const void *buf, unsigned long len) \
88782+ const void user *buf, unsigned long len) \
88783 { \
88784 unsigned long size, written; \
88785 \
88786@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
88787 return 0;
88788 }
88789
88790-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
88791+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
88792
88793 static inline unsigned long
88794 memcpy_skip(void *dst, const void *src, unsigned long n)
88795@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
88796 return 0;
88797 }
88798
88799-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
88800+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
88801
88802 #ifndef arch_perf_out_copy_user
88803 #define arch_perf_out_copy_user arch_perf_out_copy_user
88804@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
88805 }
88806 #endif
88807
88808-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
88809+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
88810
88811 /* Callchain handling */
88812 extern struct perf_callchain_entry *
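
The DEFINE_OUTPUT_COPY change is a token-splicing trick: the template gains a third parameter that is inserted into the generated prototype as an address-space qualifier, so __output_copy and __output_skip are emitted taking plain kernel pointers (empty argument) while __output_copy_user is emitted with a __user-qualified source. One template, two address spaces, each checkable by sparse. A stripped-down sketch of the same technique (userspace; __user expands to nothing outside a checker run):

#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

#define DEFINE_COPY(name, copy_fn, user)                          \
static unsigned long name(void *dst,                              \
                          const void user *src, unsigned long n)  \
{                                                                  \
    return copy_fn(dst, (const void *)src, n);                     \
}

static unsigned long memcpy_common(void *d, const void *s, unsigned long n)
{
    memcpy(d, s, n);
    return 0;                        /* bytes NOT copied, as in perf */
}

DEFINE_COPY(output_copy,      memcpy_common, )        /* kernel pointer */
DEFINE_COPY(output_copy_user, memcpy_common, __user)  /* user pointer   */

int main(void)
{
    char a[4] = "abc", b[4];
    output_copy(b, a, 4);
    output_copy_user(b, a, 4);
    puts(b);
    return 0;
}
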
88813diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
88814index cb346f2..e4dc317 100644
88815--- a/kernel/events/uprobes.c
88816+++ b/kernel/events/uprobes.c
88817@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
88818 {
88819 struct page *page;
88820 uprobe_opcode_t opcode;
88821- int result;
88822+ long result;
88823
88824 pagefault_disable();
88825 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
88826diff --git a/kernel/exit.c b/kernel/exit.c
88827index 6806c55..a5fb128 100644
88828--- a/kernel/exit.c
88829+++ b/kernel/exit.c
88830@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
88831 struct task_struct *leader;
88832 int zap_leader;
88833 repeat:
88834+#ifdef CONFIG_NET
88835+ gr_del_task_from_ip_table(p);
88836+#endif
88837+
88838 /* don't need to get the RCU readlock here - the process is dead and
88839 * can't be modifying its own credentials. But shut RCU-lockdep up */
88840 rcu_read_lock();
88841@@ -655,6 +659,8 @@ void do_exit(long code)
88842 int group_dead;
88843 TASKS_RCU(int tasks_rcu_i);
88844
88845+ set_fs(USER_DS);
88846+
88847 profile_task_exit(tsk);
88848
88849 WARN_ON(blk_needs_flush_plug(tsk));
88850@@ -671,7 +677,6 @@ void do_exit(long code)
88851 * mm_release()->clear_child_tid() from writing to a user-controlled
88852 * kernel address.
88853 */
88854- set_fs(USER_DS);
88855
88856 ptrace_event(PTRACE_EVENT_EXIT, code);
88857
88858@@ -729,6 +734,9 @@ void do_exit(long code)
88859 tsk->exit_code = code;
88860 taskstats_exit(tsk, group_dead);
88861
88862+ gr_acl_handle_psacct(tsk, code);
88863+ gr_acl_handle_exit();
88864+
88865 exit_mm(tsk);
88866
88867 if (group_dead)
88868@@ -848,7 +856,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
88869 * Take down every thread in the group. This is called by fatal signals
88870 * as well as by sys_exit_group (below).
88871 */
88872-void
88873+__noreturn void
88874 do_group_exit(int exit_code)
88875 {
88876 struct signal_struct *sig = current->signal;
88877diff --git a/kernel/fork.c b/kernel/fork.c
88878index 4dc2dda..651add0 100644
88879--- a/kernel/fork.c
88880+++ b/kernel/fork.c
88881@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
88882 void thread_info_cache_init(void)
88883 {
88884 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
88885- THREAD_SIZE, 0, NULL);
88886+ THREAD_SIZE, SLAB_USERCOPY, NULL);
88887 BUG_ON(thread_info_cache == NULL);
88888 }
88889 # endif
88890 #endif
88891
88892+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88893+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
88894+ int node, void **lowmem_stack)
88895+{
88896+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
88897+ void *ret = NULL;
88898+ unsigned int i;
88899+
88900+ *lowmem_stack = alloc_thread_info_node(tsk, node);
88901+ if (*lowmem_stack == NULL)
88902+ goto out;
88903+
88904+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
88905+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
88906+
88907+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
88908+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
88909+ if (ret == NULL) {
88910+ free_thread_info(*lowmem_stack);
88911+ *lowmem_stack = NULL;
88912+ }
88913+
88914+out:
88915+ return ret;
88916+}
88917+
88918+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
88919+{
88920+ unmap_process_stacks(tsk);
88921+}
88922+#else
88923+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
88924+ int node, void **lowmem_stack)
88925+{
88926+ return alloc_thread_info_node(tsk, node);
88927+}
88928+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
88929+{
88930+ free_thread_info(ti);
88931+}
88932+#endif
88933+
88934 /* SLAB cache for signal_struct structures (tsk->signal) */
88935 static struct kmem_cache *signal_cachep;
88936
88937@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
88938 /* SLAB cache for mm_struct structures (tsk->mm) */
88939 static struct kmem_cache *mm_cachep;
88940
88941-static void account_kernel_stack(struct thread_info *ti, int account)
88942+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
88943 {
88944+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88945+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
88946+#else
88947 struct zone *zone = page_zone(virt_to_page(ti));
88948+#endif
88949
88950 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
88951 }
88952
88953 void free_task(struct task_struct *tsk)
88954 {
88955- account_kernel_stack(tsk->stack, -1);
88956+ account_kernel_stack(tsk, tsk->stack, -1);
88957 arch_release_thread_info(tsk->stack);
88958- free_thread_info(tsk->stack);
88959+ gr_free_thread_info(tsk, tsk->stack);
88960 rt_mutex_debug_task_free(tsk);
88961 ftrace_graph_exit_task(tsk);
88962 put_seccomp_filter(tsk);
88963@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88964 {
88965 struct task_struct *tsk;
88966 struct thread_info *ti;
88967+ void *lowmem_stack;
88968 int node = tsk_fork_get_node(orig);
88969 int err;
88970
88971@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88972 if (!tsk)
88973 return NULL;
88974
88975- ti = alloc_thread_info_node(tsk, node);
88976+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
88977 if (!ti)
88978 goto free_tsk;
88979
88980@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88981 goto free_ti;
88982
88983 tsk->stack = ti;
88984+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88985+ tsk->lowmem_stack = lowmem_stack;
88986+#endif
88987 #ifdef CONFIG_SECCOMP
88988 /*
88989 * We must handle setting up seccomp filters once we're under
88990@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88991 set_task_stack_end_magic(tsk);
88992
88993 #ifdef CONFIG_CC_STACKPROTECTOR
88994- tsk->stack_canary = get_random_int();
88995+ tsk->stack_canary = pax_get_random_long();
88996 #endif
88997
88998 /*
88999@@ -352,24 +402,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89000 tsk->splice_pipe = NULL;
89001 tsk->task_frag.page = NULL;
89002
89003- account_kernel_stack(ti, 1);
89004+ account_kernel_stack(tsk, ti, 1);
89005
89006 return tsk;
89007
89008 free_ti:
89009- free_thread_info(ti);
89010+ gr_free_thread_info(tsk, ti);
89011 free_tsk:
89012 free_task_struct(tsk);
89013 return NULL;
89014 }
89015
89016 #ifdef CONFIG_MMU
89017-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89018+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
89019+{
89020+ struct vm_area_struct *tmp;
89021+ unsigned long charge;
89022+ struct file *file;
89023+ int retval;
89024+
89025+ charge = 0;
89026+ if (mpnt->vm_flags & VM_ACCOUNT) {
89027+ unsigned long len = vma_pages(mpnt);
89028+
89029+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89030+ goto fail_nomem;
89031+ charge = len;
89032+ }
89033+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89034+ if (!tmp)
89035+ goto fail_nomem;
89036+ *tmp = *mpnt;
89037+ tmp->vm_mm = mm;
89038+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
89039+ retval = vma_dup_policy(mpnt, tmp);
89040+ if (retval)
89041+ goto fail_nomem_policy;
89042+ if (anon_vma_fork(tmp, mpnt))
89043+ goto fail_nomem_anon_vma_fork;
89044+ tmp->vm_flags &= ~VM_LOCKED;
89045+ tmp->vm_next = tmp->vm_prev = NULL;
89046+ tmp->vm_mirror = NULL;
89047+ file = tmp->vm_file;
89048+ if (file) {
89049+ struct inode *inode = file_inode(file);
89050+ struct address_space *mapping = file->f_mapping;
89051+
89052+ get_file(file);
89053+ if (tmp->vm_flags & VM_DENYWRITE)
89054+ atomic_dec(&inode->i_writecount);
89055+ i_mmap_lock_write(mapping);
89056+ if (tmp->vm_flags & VM_SHARED)
89057+ atomic_inc(&mapping->i_mmap_writable);
89058+ flush_dcache_mmap_lock(mapping);
89059+ /* insert tmp into the share list, just after mpnt */
89060+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89061+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
89062+ else
89063+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
89064+ flush_dcache_mmap_unlock(mapping);
89065+ i_mmap_unlock_write(mapping);
89066+ }
89067+
89068+ /*
89069+ * Clear hugetlb-related page reserves for children. This only
89070+ * affects MAP_PRIVATE mappings. Faults generated by the child
89071+ * are not guaranteed to succeed, even if read-only
89072+ */
89073+ if (is_vm_hugetlb_page(tmp))
89074+ reset_vma_resv_huge_pages(tmp);
89075+
89076+ return tmp;
89077+
89078+fail_nomem_anon_vma_fork:
89079+ mpol_put(vma_policy(tmp));
89080+fail_nomem_policy:
89081+ kmem_cache_free(vm_area_cachep, tmp);
89082+fail_nomem:
89083+ vm_unacct_memory(charge);
89084+ return NULL;
89085+}
89086+
89087+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89088 {
89089 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
89090 struct rb_node **rb_link, *rb_parent;
89091 int retval;
89092- unsigned long charge;
89093
89094 uprobe_start_dup_mmap();
89095 down_write(&oldmm->mmap_sem);
89096@@ -397,55 +515,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89097
89098 prev = NULL;
89099 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
89100- struct file *file;
89101-
89102 if (mpnt->vm_flags & VM_DONTCOPY) {
89103 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
89104 -vma_pages(mpnt));
89105 continue;
89106 }
89107- charge = 0;
89108- if (mpnt->vm_flags & VM_ACCOUNT) {
89109- unsigned long len = vma_pages(mpnt);
89110-
89111- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89112- goto fail_nomem;
89113- charge = len;
89114- }
89115- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89116- if (!tmp)
89117- goto fail_nomem;
89118- *tmp = *mpnt;
89119- INIT_LIST_HEAD(&tmp->anon_vma_chain);
89120- retval = vma_dup_policy(mpnt, tmp);
89121- if (retval)
89122- goto fail_nomem_policy;
89123- tmp->vm_mm = mm;
89124- if (anon_vma_fork(tmp, mpnt))
89125- goto fail_nomem_anon_vma_fork;
89126- tmp->vm_flags &= ~VM_LOCKED;
89127- tmp->vm_next = tmp->vm_prev = NULL;
89128- file = tmp->vm_file;
89129- if (file) {
89130- struct inode *inode = file_inode(file);
89131- struct address_space *mapping = file->f_mapping;
89132-
89133- get_file(file);
89134- if (tmp->vm_flags & VM_DENYWRITE)
89135- atomic_dec(&inode->i_writecount);
89136- i_mmap_lock_write(mapping);
89137- if (tmp->vm_flags & VM_SHARED)
89138- atomic_inc(&mapping->i_mmap_writable);
89139- flush_dcache_mmap_lock(mapping);
89140- /* insert tmp into the share list, just after mpnt */
89141- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89142- vma_nonlinear_insert(tmp,
89143- &mapping->i_mmap_nonlinear);
89144- else
89145- vma_interval_tree_insert_after(tmp, mpnt,
89146- &mapping->i_mmap);
89147- flush_dcache_mmap_unlock(mapping);
89148- i_mmap_unlock_write(mapping);
89149+ tmp = dup_vma(mm, oldmm, mpnt);
89150+ if (!tmp) {
89151+ retval = -ENOMEM;
89152+ goto out;
89153 }
89154
89155 /*
89156@@ -477,6 +555,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89157 if (retval)
89158 goto out;
89159 }
89160+
89161+#ifdef CONFIG_PAX_SEGMEXEC
89162+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
89163+ struct vm_area_struct *mpnt_m;
89164+
89165+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
89166+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
89167+
89168+ if (!mpnt->vm_mirror)
89169+ continue;
89170+
89171+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
89172+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
89173+ mpnt->vm_mirror = mpnt_m;
89174+ } else {
89175+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
89176+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
89177+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
89178+ mpnt->vm_mirror->vm_mirror = mpnt;
89179+ }
89180+ }
89181+ BUG_ON(mpnt_m);
89182+ }
89183+#endif
89184+
89185 /* a new mm has just been created */
89186 arch_dup_mmap(oldmm, mm);
89187 retval = 0;
89188@@ -486,14 +589,6 @@ out:
89189 up_write(&oldmm->mmap_sem);
89190 uprobe_end_dup_mmap();
89191 return retval;
89192-fail_nomem_anon_vma_fork:
89193- mpol_put(vma_policy(tmp));
89194-fail_nomem_policy:
89195- kmem_cache_free(vm_area_cachep, tmp);
89196-fail_nomem:
89197- retval = -ENOMEM;
89198- vm_unacct_memory(charge);
89199- goto out;
89200 }
89201
89202 static inline int mm_alloc_pgd(struct mm_struct *mm)
89203@@ -734,8 +829,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
89204 return ERR_PTR(err);
89205
89206 mm = get_task_mm(task);
89207- if (mm && mm != current->mm &&
89208- !ptrace_may_access(task, mode)) {
89209+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
89210+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
89211 mmput(mm);
89212 mm = ERR_PTR(-EACCES);
89213 }
89214@@ -938,13 +1033,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
89215 spin_unlock(&fs->lock);
89216 return -EAGAIN;
89217 }
89218- fs->users++;
89219+ atomic_inc(&fs->users);
89220 spin_unlock(&fs->lock);
89221 return 0;
89222 }
89223 tsk->fs = copy_fs_struct(fs);
89224 if (!tsk->fs)
89225 return -ENOMEM;
89226+ /* Carry through gr_chroot_dentry and is_chrooted instead of
89227+ recomputing them here; they were already copied when the
89228+ task struct was duplicated. This keeps pivot_root from being
89229+ treated as a chroot.
89230+ */
89231+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
89232+
89233 return 0;
89234 }
89235
89236@@ -1182,7 +1284,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
89237 * parts of the process environment (as per the clone
89238 * flags). The actual kick-off is left to the caller.
89239 */
89240-static struct task_struct *copy_process(unsigned long clone_flags,
89241+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
89242 unsigned long stack_start,
89243 unsigned long stack_size,
89244 int __user *child_tidptr,
89245@@ -1253,6 +1355,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89246 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
89247 #endif
89248 retval = -EAGAIN;
89249+
89250+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
89251+
89252 if (atomic_read(&p->real_cred->user->processes) >=
89253 task_rlimit(p, RLIMIT_NPROC)) {
89254 if (p->real_cred->user != INIT_USER &&
89255@@ -1502,6 +1607,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89256 goto bad_fork_free_pid;
89257 }
89258
89259+ /* synchronizes with gr_set_acls()
89260+ we need to call this past the point of no return for fork()
89261+ */
89262+ gr_copy_label(p);
89263+
89264 if (likely(p->pid)) {
89265 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
89266
89267@@ -1592,6 +1702,8 @@ bad_fork_cleanup_count:
89268 bad_fork_free:
89269 free_task(p);
89270 fork_out:
89271+ gr_log_forkfail(retval);
89272+
89273 return ERR_PTR(retval);
89274 }
89275
89276@@ -1653,6 +1765,7 @@ long do_fork(unsigned long clone_flags,
89277
89278 p = copy_process(clone_flags, stack_start, stack_size,
89279 child_tidptr, NULL, trace);
89280+ add_latent_entropy();
89281 /*
89282 * Do this prior waking up the new thread - the thread pointer
89283 * might get invalid after that point, if the thread exits quickly.
89284@@ -1669,6 +1782,8 @@ long do_fork(unsigned long clone_flags,
89285 if (clone_flags & CLONE_PARENT_SETTID)
89286 put_user(nr, parent_tidptr);
89287
89288+ gr_handle_brute_check();
89289+
89290 if (clone_flags & CLONE_VFORK) {
89291 p->vfork_done = &vfork;
89292 init_completion(&vfork);
89293@@ -1787,7 +1902,7 @@ void __init proc_caches_init(void)
89294 mm_cachep = kmem_cache_create("mm_struct",
89295 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
89296 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
89297- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
89298+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
89299 mmap_init();
89300 nsproxy_cache_init();
89301 }
89302@@ -1827,7 +1942,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
89303 return 0;
89304
89305 /* don't need lock here; in the worst case we'll do useless copy */
89306- if (fs->users == 1)
89307+ if (atomic_read(&fs->users) == 1)
89308 return 0;
89309
89310 *new_fsp = copy_fs_struct(fs);
89311@@ -1939,7 +2054,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
89312 fs = current->fs;
89313 spin_lock(&fs->lock);
89314 current->fs = new_fs;
89315- if (--fs->users)
89316+ gr_set_chroot_entries(current, &current->fs->root);
89317+ if (atomic_dec_return(&fs->users))
89318 new_fs = NULL;
89319 else
89320 new_fs = fs;
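
The kernel/fork.c diff bundles several changes: dup_mmap() is refactored so the per-VMA copying moves into a new dup_vma() helper (also used to maintain the SEGMEXEC mirror links), fs_struct's users count becomes an atomic_t, and, most substantially, GRKERNSEC_KSTACKOVERFLOW re-homes kernel thread stacks. The pages still come from the lowmem allocator (kept in tsk->lowmem_stack for zone accounting and anything that needs the linear mapping), but the stack actually used is a vmap() alias of those pages; VM_IOREMAP is borrowed purely to get THREAD_SIZE-aligned virtual placement, and because vmalloc space interleaves guard pages, an overrun faults instead of silently corrupting a neighboring allocation. A userspace sketch of the guard-page effect with mmap/mprotect (the effect, not the kernel mechanism):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define STACK_SIZE (4 * 4096)

int main(void)
{
    /* reserve guard + stack in one mapping */
    unsigned char *base = mmap(NULL, 4096 + STACK_SIZE,
                               PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) { perror("mmap"); return 1; }

    /* make the lowest page inaccessible: overruns fault here */
    if (mprotect(base, 4096, PROT_NONE)) { perror("mprotect"); return 1; }

    unsigned char *stack = base + 4096;
    memset(stack, 0, STACK_SIZE);    /* fine: inside the stack   */
    puts("stack usable, guard armed");
    /* stack[-1] = 0;  <- would SIGSEGV on the guard page */
    return 0;
}
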
89321diff --git a/kernel/futex.c b/kernel/futex.c
89322index 63678b5..512f9af 100644
89323--- a/kernel/futex.c
89324+++ b/kernel/futex.c
89325@@ -201,7 +201,7 @@ struct futex_pi_state {
89326 atomic_t refcount;
89327
89328 union futex_key key;
89329-};
89330+} __randomize_layout;
89331
89332 /**
89333 * struct futex_q - The hashed futex queue entry, one per waiting task
89334@@ -235,7 +235,7 @@ struct futex_q {
89335 struct rt_mutex_waiter *rt_waiter;
89336 union futex_key *requeue_pi_key;
89337 u32 bitset;
89338-};
89339+} __randomize_layout;
89340
89341 static const struct futex_q futex_q_init = {
89342 /* list gets initialized in queue_me()*/
89343@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
89344 struct page *page, *page_head;
89345 int err, ro = 0;
89346
89347+#ifdef CONFIG_PAX_SEGMEXEC
89348+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
89349+ return -EFAULT;
89350+#endif
89351+
89352 /*
89353 * The futex address must be "naturally" aligned.
89354 */
89355@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
89356
89357 static int get_futex_value_locked(u32 *dest, u32 __user *from)
89358 {
89359- int ret;
89360+ unsigned long ret;
89361
89362 pagefault_disable();
89363 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
89364@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
89365 {
89366 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
89367 u32 curval;
89368+ mm_segment_t oldfs;
89369
89370 /*
89371 * This will fail and we want it. Some arch implementations do
89372@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
89373 * implementation, the non-functional ones will return
89374 * -ENOSYS.
89375 */
89376+ oldfs = get_fs();
89377+ set_fs(USER_DS);
89378 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
89379 futex_cmpxchg_enabled = 1;
89380+ set_fs(oldfs);
89381 #endif
89382 }
89383
89384diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
89385index 55c8c93..9ba7ad6 100644
89386--- a/kernel/futex_compat.c
89387+++ b/kernel/futex_compat.c
89388@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
89389 return 0;
89390 }
89391
89392-static void __user *futex_uaddr(struct robust_list __user *entry,
89393+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
89394 compat_long_t futex_offset)
89395 {
89396 compat_uptr_t base = ptr_to_compat(entry);
89397diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
89398index b358a80..fc25240 100644
89399--- a/kernel/gcov/base.c
89400+++ b/kernel/gcov/base.c
89401@@ -114,11 +114,6 @@ void gcov_enable_events(void)
89402 }
89403
89404 #ifdef CONFIG_MODULES
89405-static inline int within(void *addr, void *start, unsigned long size)
89406-{
89407- return ((addr >= start) && (addr < start + size));
89408-}
89409-
89410 /* Update list and generate events when modules are unloaded. */
89411 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89412 void *data)
89413@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89414
89415 /* Remove entries located in module from linked list. */
89416 while ((info = gcov_info_next(info))) {
89417- if (within(info, mod->module_core, mod->core_size)) {
89418+ if (within_module_core_rw((unsigned long)info, mod)) {
89419 gcov_info_unlink(prev, info);
89420 if (gcov_events_enabled)
89421 gcov_event(GCOV_REMOVE, info);
89422diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
89423index 8069237..fe712d0 100644
89424--- a/kernel/irq/manage.c
89425+++ b/kernel/irq/manage.c
89426@@ -871,7 +871,7 @@ static int irq_thread(void *data)
89427
89428 action_ret = handler_fn(desc, action);
89429 if (action_ret == IRQ_HANDLED)
89430- atomic_inc(&desc->threads_handled);
89431+ atomic_inc_unchecked(&desc->threads_handled);
89432
89433 wake_threads_waitq(desc);
89434 }
89435diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
89436index e2514b0..de3dfe0 100644
89437--- a/kernel/irq/spurious.c
89438+++ b/kernel/irq/spurious.c
89439@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
89440 * count. We just care about the count being
89441 * different than the one we saw before.
89442 */
89443- handled = atomic_read(&desc->threads_handled);
89444+ handled = atomic_read_unchecked(&desc->threads_handled);
89445 handled |= SPURIOUS_DEFERRED;
89446 if (handled != desc->threads_handled_last) {
89447 action_ret = IRQ_HANDLED;
89448diff --git a/kernel/jump_label.c b/kernel/jump_label.c
89449index 9019f15..9a3c42e 100644
89450--- a/kernel/jump_label.c
89451+++ b/kernel/jump_label.c
89452@@ -14,6 +14,7 @@
89453 #include <linux/err.h>
89454 #include <linux/static_key.h>
89455 #include <linux/jump_label_ratelimit.h>
89456+#include <linux/mm.h>
89457
89458 #ifdef HAVE_JUMP_LABEL
89459
89460@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
89461
89462 size = (((unsigned long)stop - (unsigned long)start)
89463 / sizeof(struct jump_entry));
89464+ pax_open_kernel();
89465 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
89466+ pax_close_kernel();
89467 }
89468
89469 static void jump_label_update(struct static_key *key, int enable);
89470@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
89471 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
89472 struct jump_entry *iter;
89473
89474+ pax_open_kernel();
89475 for (iter = iter_start; iter < iter_stop; iter++) {
89476 if (within_module_init(iter->code, mod))
89477 iter->code = 0;
89478 }
89479+ pax_close_kernel();
89480 }
89481
89482 static int
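
Under PaX KERNEXEC the jump-entry tables live in read-only kernel memory, so sorting them in place at boot, or zeroing entries that pointed into a module's freed init section, must be bracketed with pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (on x86, for example, by toggling CR0.WP). A userspace analogue of the open/mutate/close discipline using mprotect (the helper names mirror the kernel's; the implementation here is only illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

static int *table;

static void pax_open_kernel(void)  { mprotect(table, 4096, PROT_READ | PROT_WRITE); }
static void pax_close_kernel(void) { mprotect(table, 4096, PROT_READ); }

static int cmp(const void *a, const void *b)
{
    return *(const int *)a - *(const int *)b;
}

int main(void)
{
    table = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (table == MAP_FAILED) { perror("mmap"); return 1; }

    int init[] = { 3, 1, 4, 1 };
    for (int i = 0; i < 4; i++)
        table[i] = init[i];
    pax_close_kernel();                /* table now behaves like rodata */

    pax_open_kernel();                 /* lift write protection...      */
    qsort(table, 4, sizeof(int), cmp); /* ...mutate in place...         */
    pax_close_kernel();                /* ...and re-arm it              */

    for (int i = 0; i < 4; i++)
        printf("%d ", table[i]);
    puts("");
    return 0;
}
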
89483diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
89484index 5c5987f..bc502b0 100644
89485--- a/kernel/kallsyms.c
89486+++ b/kernel/kallsyms.c
89487@@ -11,6 +11,9 @@
89488 * Changed the compression method from stem compression to "table lookup"
89489 * compression (see scripts/kallsyms.c for a more complete description)
89490 */
89491+#ifdef CONFIG_GRKERNSEC_HIDESYM
89492+#define __INCLUDED_BY_HIDESYM 1
89493+#endif
89494 #include <linux/kallsyms.h>
89495 #include <linux/module.h>
89496 #include <linux/init.h>
89497@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
89498
89499 static inline int is_kernel_inittext(unsigned long addr)
89500 {
89501+ if (system_state != SYSTEM_BOOTING)
89502+ return 0;
89503+
89504 if (addr >= (unsigned long)_sinittext
89505 && addr <= (unsigned long)_einittext)
89506 return 1;
89507 return 0;
89508 }
89509
89510+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89511+#ifdef CONFIG_MODULES
89512+static inline int is_module_text(unsigned long addr)
89513+{
89514+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
89515+ return 1;
89516+
89517+ addr = ktla_ktva(addr);
89518+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
89519+}
89520+#else
89521+static inline int is_module_text(unsigned long addr)
89522+{
89523+ return 0;
89524+}
89525+#endif
89526+#endif
89527+
89528 static inline int is_kernel_text(unsigned long addr)
89529 {
89530 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
89531@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
89532
89533 static inline int is_kernel(unsigned long addr)
89534 {
89535+
89536+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89537+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
89538+ return 1;
89539+
89540+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
89541+#else
89542 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
89543+#endif
89544+
89545 return 1;
89546 return in_gate_area_no_mm(addr);
89547 }
89548
89549 static int is_ksym_addr(unsigned long addr)
89550 {
89551+
89552+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89553+ if (is_module_text(addr))
89554+ return 0;
89555+#endif
89556+
89557 if (all_var)
89558 return is_kernel(addr);
89559
89560@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
89561
89562 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
89563 {
89564- iter->name[0] = '\0';
89565 iter->nameoff = get_symbol_offset(new_pos);
89566 iter->pos = new_pos;
89567 }
89568@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
89569 {
89570 struct kallsym_iter *iter = m->private;
89571
89572+#ifdef CONFIG_GRKERNSEC_HIDESYM
89573+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
89574+ return 0;
89575+#endif
89576+
89577 /* Some debugging symbols have no name. Ignore them. */
89578 if (!iter->name[0])
89579 return 0;
89580@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
89581 */
89582 type = iter->exported ? toupper(iter->type) :
89583 tolower(iter->type);
89584+
89585 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
89586 type, iter->name, iter->module_name);
89587 } else
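
Three related kallsyms changes: after boot, is_kernel_inittext() always fails, so addresses in the freed init section can no longer be resolved to symbols; with KERNEXEC on x86-32, module-space addresses are excluded from is_ksym_addr(); and under GRKERNSEC_HIDESYM, s_show() prints nothing at all for non-root readers, while the %p to %pK switch lets kptr_restrict censor the addresses that are shown. A userspace check for the visible effect (hedged: the output depends on kptr_restrict and the caller's privileges):

#include <stdio.h>
#include <string.h>

int main(void)
{
    FILE *f = fopen("/proc/kallsyms", "r");
    char line[256];

    if (!f) { perror("kallsyms"); return 1; }
    if (!fgets(line, sizeof(line), f)) {
        puts("no entries visible (HIDESYM hides them from non-root)");
        fclose(f);
        return 0;
    }
    fclose(f);

    if (strncmp(line, "0000000000000000", 16) == 0 ||
        strncmp(line, "00000000", 8) == 0)
        puts("addresses are censored (%pK with kptr_restrict)");
    else
        printf("first symbol: %s", line);
    return 0;
}
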
89588diff --git a/kernel/kcmp.c b/kernel/kcmp.c
89589index 0aa69ea..a7fcafb 100644
89590--- a/kernel/kcmp.c
89591+++ b/kernel/kcmp.c
89592@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
89593 struct task_struct *task1, *task2;
89594 int ret;
89595
89596+#ifdef CONFIG_GRKERNSEC
89597+ return -ENOSYS;
89598+#endif
89599+
89600 rcu_read_lock();
89601
89602 /*
89603diff --git a/kernel/kexec.c b/kernel/kexec.c
89604index 9a8a01a..3c35dd6 100644
89605--- a/kernel/kexec.c
89606+++ b/kernel/kexec.c
89607@@ -1349,7 +1349,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
89608 compat_ulong_t, flags)
89609 {
89610 struct compat_kexec_segment in;
89611- struct kexec_segment out, __user *ksegments;
89612+ struct kexec_segment out;
89613+ struct kexec_segment __user *ksegments;
89614 unsigned long i, result;
89615
89616 /* Don't allow clients that don't understand the native
89617diff --git a/kernel/kmod.c b/kernel/kmod.c
89618index 2777f40..6cf5e70 100644
89619--- a/kernel/kmod.c
89620+++ b/kernel/kmod.c
89621@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
89622 kfree(info->argv);
89623 }
89624
89625-static int call_modprobe(char *module_name, int wait)
89626+static int call_modprobe(char *module_name, char *module_param, int wait)
89627 {
89628 struct subprocess_info *info;
89629 static char *envp[] = {
89630@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
89631 NULL
89632 };
89633
89634- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
89635+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
89636 if (!argv)
89637 goto out;
89638
89639@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
89640 argv[1] = "-q";
89641 argv[2] = "--";
89642 argv[3] = module_name; /* check free_modprobe_argv() */
89643- argv[4] = NULL;
89644+ argv[4] = module_param;
89645+ argv[5] = NULL;
89646
89647 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
89648 NULL, free_modprobe_argv, NULL);
89649@@ -122,9 +123,8 @@ out:
89650 * If module auto-loading support is disabled then this function
89651 * becomes a no-operation.
89652 */
89653-int __request_module(bool wait, const char *fmt, ...)
89654+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
89655 {
89656- va_list args;
89657 char module_name[MODULE_NAME_LEN];
89658 unsigned int max_modprobes;
89659 int ret;
89660@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
89661 if (!modprobe_path[0])
89662 return 0;
89663
89664- va_start(args, fmt);
89665- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
89666- va_end(args);
89667+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
89668 if (ret >= MODULE_NAME_LEN)
89669 return -ENAMETOOLONG;
89670
89671@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
89672 if (ret)
89673 return ret;
89674
89675+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89676+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89677+ /* hack to work around consolekit/udisks stupidity */
89678+ read_lock(&tasklist_lock);
89679+ if (!strcmp(current->comm, "mount") &&
89680+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
89681+ read_unlock(&tasklist_lock);
89682+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
89683+ return -EPERM;
89684+ }
89685+ read_unlock(&tasklist_lock);
89686+ }
89687+#endif
89688+
89689 /* If modprobe needs a service that is in a module, we get a recursive
89690 * loop. Limit the number of running kmod threads to max_threads/2 or
89691 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
89692@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
89693
89694 trace_module_request(module_name, wait, _RET_IP_);
89695
89696- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89697+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89698
89699 atomic_dec(&kmod_concurrent);
89700 return ret;
89701 }
89702+
89703+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
89704+{
89705+ va_list args;
89706+ int ret;
89707+
89708+ va_start(args, fmt);
89709+ ret = ____request_module(wait, module_param, fmt, args);
89710+ va_end(args);
89711+
89712+ return ret;
89713+}
89714+
89715+int __request_module(bool wait, const char *fmt, ...)
89716+{
89717+ va_list args;
89718+ int ret;
89719+
89720+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89721+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89722+ char module_param[MODULE_NAME_LEN];
89723+
89724+ memset(module_param, 0, sizeof(module_param));
89725+
89726+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
89727+
89728+ va_start(args, fmt);
89729+ ret = ____request_module(wait, module_param, fmt, args);
89730+ va_end(args);
89731+
89732+ return ret;
89733+ }
89734+#endif
89735+
89736+ va_start(args, fmt);
89737+ ret = ____request_module(wait, NULL, fmt, args);
89738+ va_end(args);
89739+
89740+ return ret;
89741+}
89742+
89743 EXPORT_SYMBOL(__request_module);
89744 #endif /* CONFIG_MODULES */
89745
89746 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
89747 {
89748+#ifdef CONFIG_GRKERNSEC
89749+ kfree(info->path);
89750+ info->path = info->origpath;
89751+#endif
89752 if (info->cleanup)
89753 (*info->cleanup)(info);
89754 kfree(info);
89755@@ -232,6 +289,20 @@ static int ____call_usermodehelper(void *data)
89756 */
89757 set_user_nice(current, 0);
89758
89759+#ifdef CONFIG_GRKERNSEC
89760+ /* this is race-free as far as userland is concerned: the path
89761+ to be used was copied out before this point and we are now
89762+ operating on that copy
89763+ */
89764+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
89765+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
89766+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
89767+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
89768+ retval = -EPERM;
89769+ goto out;
89770+ }
89771+#endif
89772+
89773 retval = -ENOMEM;
89774 new = prepare_kernel_cred(current);
89775 if (!new)
89776@@ -254,8 +325,8 @@ static int ____call_usermodehelper(void *data)
89777 commit_creds(new);
89778
89779 retval = do_execve(getname_kernel(sub_info->path),
89780- (const char __user *const __user *)sub_info->argv,
89781- (const char __user *const __user *)sub_info->envp);
89782+ (const char __user *const __force_user *)sub_info->argv,
89783+ (const char __user *const __force_user *)sub_info->envp);
89784 out:
89785 sub_info->retval = retval;
89786 /* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
89787@@ -288,7 +359,7 @@ static int wait_for_helper(void *data)
89788 *
89789 * Thus the __user pointer cast is valid here.
89790 */
89791- sys_wait4(pid, (int __user *)&ret, 0, NULL);
89792+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
89793
89794 /*
89795 * If ret is 0, either ____call_usermodehelper failed and the
89796@@ -510,7 +581,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
89797 goto out;
89798
89799 INIT_WORK(&sub_info->work, __call_usermodehelper);
89800+#ifdef CONFIG_GRKERNSEC
89801+ sub_info->origpath = path;
89802+ sub_info->path = kstrdup(path, gfp_mask);
89803+#else
89804 sub_info->path = path;
89805+#endif
89806 sub_info->argv = argv;
89807 sub_info->envp = envp;
89808
89809@@ -612,7 +688,7 @@ EXPORT_SYMBOL(call_usermodehelper);
89810 static int proc_cap_handler(struct ctl_table *table, int write,
89811 void __user *buffer, size_t *lenp, loff_t *ppos)
89812 {
89813- struct ctl_table t;
89814+ ctl_table_no_const t;
89815 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
89816 kernel_cap_t new_cap;
89817 int err, i;
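
Two independent hardenings in kernel/kmod.c. First, MODHARDEN: __request_module() is split so a module_param string can be threaded through to modprobe's argv; for non-root requesters it becomes "grsec_modharden_normal<uid>_", letting policy distinguish (and refuse) user-triggered auto-loads, and a special case rejects fs-module auto-loads by mount running under udisks even for root. Second, every usermode helper is vetted: the path is duplicated at setup time, so userland cannot rewrite it between check and exec, and ____call_usermodehelper() refuses anything outside /sbin, /lib, /lib64, /usr/lib, or the apport binary, or containing "..". That whitelist, restated as a standalone predicate (mirrors the strings in the hunk):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool helper_path_allowed(const char *path)
{
    if (strstr(path, ".."))          /* no traversal tricks */
        return false;
    return strncmp(path, "/sbin/", 6) == 0 ||
           strncmp(path, "/usr/lib/", 9) == 0 ||
           strncmp(path, "/lib/", 5) == 0 ||
           strncmp(path, "/lib64/", 7) == 0 ||
           strcmp(path, "/usr/share/apport/apport") == 0;
}

int main(void)
{
    const char *tests[] = {
        "/sbin/modprobe", "/tmp/evil", "/sbin/../tmp/evil",
        "/usr/share/apport/apport",
    };
    for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
        printf("%-26s %s\n", tests[i],
               helper_path_allowed(tests[i]) ? "allowed" : "denied");
    return 0;
}
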
89818diff --git a/kernel/kprobes.c b/kernel/kprobes.c
89819index ee61992..62142b1 100644
89820--- a/kernel/kprobes.c
89821+++ b/kernel/kprobes.c
89822@@ -31,6 +31,9 @@
89823 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
89824 * <prasanna@in.ibm.com> added function-return probes.
89825 */
89826+#ifdef CONFIG_GRKERNSEC_HIDESYM
89827+#define __INCLUDED_BY_HIDESYM 1
89828+#endif
89829 #include <linux/kprobes.h>
89830 #include <linux/hash.h>
89831 #include <linux/init.h>
89832@@ -122,12 +125,12 @@ enum kprobe_slot_state {
89833
89834 static void *alloc_insn_page(void)
89835 {
89836- return module_alloc(PAGE_SIZE);
89837+ return module_alloc_exec(PAGE_SIZE);
89838 }
89839
89840 static void free_insn_page(void *page)
89841 {
89842- module_memfree(page);
89843+ module_memfree_exec(page);
89844 }
89845
89846 struct kprobe_insn_cache kprobe_insn_slots = {
89847@@ -2191,11 +2194,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
89848 kprobe_type = "k";
89849
89850 if (sym)
89851- seq_printf(pi, "%p %s %s+0x%x %s ",
89852+ seq_printf(pi, "%pK %s %s+0x%x %s ",
89853 p->addr, kprobe_type, sym, offset,
89854 (modname ? modname : " "));
89855 else
89856- seq_printf(pi, "%p %s %p ",
89857+ seq_printf(pi, "%pK %s %pK ",
89858 p->addr, kprobe_type, p->addr);
89859
89860 if (!pp)
89861diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
89862index 6683cce..daf8999 100644
89863--- a/kernel/ksysfs.c
89864+++ b/kernel/ksysfs.c
89865@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
89866 {
89867 if (count+1 > UEVENT_HELPER_PATH_LEN)
89868 return -ENOENT;
89869+ if (!capable(CAP_SYS_ADMIN))
89870+ return -EPERM;
89871 memcpy(uevent_helper, buf, count);
89872 uevent_helper[count] = '\0';
89873 if (count && uevent_helper[count-1] == '\n')
89874@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
89875 return count;
89876 }
89877
89878-static struct bin_attribute notes_attr = {
89879+static bin_attribute_no_const notes_attr __read_only = {
89880 .attr = {
89881 .name = "notes",
89882 .mode = S_IRUGO,
89883diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
89884index 88d0d44..e9ce0ee 100644
89885--- a/kernel/locking/lockdep.c
89886+++ b/kernel/locking/lockdep.c
89887@@ -599,6 +599,10 @@ static int static_obj(void *obj)
89888 end = (unsigned long) &_end,
89889 addr = (unsigned long) obj;
89890
89891+#ifdef CONFIG_PAX_KERNEXEC
89892+ start = ktla_ktva(start);
89893+#endif
89894+
89895 /*
89896 * static variable?
89897 */
89898@@ -740,6 +744,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
89899 if (!static_obj(lock->key)) {
89900 debug_locks_off();
89901 printk("INFO: trying to register non-static key.\n");
89902+ printk("lock:%pS key:%pS.\n", lock, lock->key);
89903 printk("the code is fine but needs lockdep annotation.\n");
89904 printk("turning off the locking correctness validator.\n");
89905 dump_stack();
89906@@ -3081,7 +3086,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
89907 if (!class)
89908 return 0;
89909 }
89910- atomic_inc((atomic_t *)&class->ops);
89911+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
89912 if (very_verbose(class)) {
89913 printk("\nacquire class [%p] %s", class->key, class->name);
89914 if (class->name_version > 1)
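
static_obj() decides whether a lock key sits inside the kernel image by comparing its address against the image bounds; under PAX_KERNEXEC the text is aliased at a shifted virtual address, so the patch translates the start bound with ktla_ktva() before comparing. A standalone sketch of doing the comparison in one consistent address space, with made-up addresses and a hypothetical translate() in place of ktla_ktva() (the sketch translates both bounds for symmetry):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up layout: the image spans [lin_start, lin_end) in a "linear"
 * space, but objects are addressed through an alias OFFSET higher. */
#define ALIAS_OFFSET 0x40000000u
static uintptr_t lin_start = 0x00100000;
static uintptr_t lin_end   = 0x00900000;

/* Hypothetical ktla_ktva(): linear address -> aliased virtual one. */
static uintptr_t translate(uintptr_t lin) { return lin + ALIAS_OFFSET; }

/* Decide whether obj (an aliased virtual address) is a static object,
 * translating the bounds into the same address space first. */
static bool static_obj(uintptr_t obj)
{
	uintptr_t start = translate(lin_start);
	uintptr_t end   = translate(lin_end);
	return obj >= start && obj < end;
}

int main(void)
{
	printf("%d\n", static_obj(translate(0x00200000)));	/* 1 */
	printf("%d\n", static_obj(0x00200000));			/* 0: untranslated */
	return 0;
}
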
89915diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
89916index ef43ac4..2720dfa 100644
89917--- a/kernel/locking/lockdep_proc.c
89918+++ b/kernel/locking/lockdep_proc.c
89919@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
89920 return 0;
89921 }
89922
89923- seq_printf(m, "%p", class->key);
89924+ seq_printf(m, "%pK", class->key);
89925 #ifdef CONFIG_DEBUG_LOCKDEP
89926 seq_printf(m, " OPS:%8ld", class->ops);
89927 #endif
89928@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
89929
89930 list_for_each_entry(entry, &class->locks_after, entry) {
89931 if (entry->distance == 1) {
89932- seq_printf(m, " -> [%p] ", entry->class->key);
89933+ seq_printf(m, " -> [%pK] ", entry->class->key);
89934 print_name(m, entry->class);
89935 seq_puts(m, "\n");
89936 }
89937@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
89938 if (!class->key)
89939 continue;
89940
89941- seq_printf(m, "[%p] ", class->key);
89942+ seq_printf(m, "[%pK] ", class->key);
89943 print_name(m, class);
89944 seq_puts(m, "\n");
89945 }
89946@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
89947 if (!i)
89948 seq_line(m, '-', 40-namelen, namelen);
89949
89950- snprintf(ip, sizeof(ip), "[<%p>]",
89951+ snprintf(ip, sizeof(ip), "[<%pK>]",
89952 (void *)class->contention_point[i]);
89953 seq_printf(m, "%40s %14lu %29s %pS\n",
89954 name, stats->contention_point[i],
89955@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
89956 if (!i)
89957 seq_line(m, '-', 40-namelen, namelen);
89958
89959- snprintf(ip, sizeof(ip), "[<%p>]",
89960+ snprintf(ip, sizeof(ip), "[<%pK>]",
89961 (void *)class->contending_point[i]);
89962 seq_printf(m, "%40s %14lu %29s %pS\n",
89963 name, stats->contending_point[i],
89964diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
89965index 9887a90..0cd2b1d 100644
89966--- a/kernel/locking/mcs_spinlock.c
89967+++ b/kernel/locking/mcs_spinlock.c
89968@@ -100,7 +100,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
89969
89970 prev = decode_cpu(old);
89971 node->prev = prev;
89972- ACCESS_ONCE(prev->next) = node;
89973+ ACCESS_ONCE_RW(prev->next) = node;
89974
89975 /*
89976 * Normally @prev is untouchable after the above store; because at that
89977@@ -172,8 +172,8 @@ unqueue:
89978 * it will wait in Step-A.
89979 */
89980
89981- ACCESS_ONCE(next->prev) = prev;
89982- ACCESS_ONCE(prev->next) = next;
89983+ ACCESS_ONCE_RW(next->prev) = prev;
89984+ ACCESS_ONCE_RW(prev->next) = next;
89985
89986 return false;
89987 }
89988@@ -195,13 +195,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
89989 node = this_cpu_ptr(&osq_node);
89990 next = xchg(&node->next, NULL);
89991 if (next) {
89992- ACCESS_ONCE(next->locked) = 1;
89993+ ACCESS_ONCE_RW(next->locked) = 1;
89994 return;
89995 }
89996
89997 next = osq_wait_next(lock, node, NULL);
89998 if (next)
89999- ACCESS_ONCE(next->locked) = 1;
90000+ ACCESS_ONCE_RW(next->locked) = 1;
90001 }
90002
90003 #endif
90004diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
90005index 4d60986..5d351c1 100644
90006--- a/kernel/locking/mcs_spinlock.h
90007+++ b/kernel/locking/mcs_spinlock.h
90008@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
90009 */
90010 return;
90011 }
90012- ACCESS_ONCE(prev->next) = node;
90013+ ACCESS_ONCE_RW(prev->next) = node;
90014
90015 /* Wait until the lock holder passes the lock down. */
90016 arch_mcs_spin_lock_contended(&node->locked);
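
These hunks only swap ACCESS_ONCE for grsecurity's write-capable ACCESS_ONCE_RW wrapper on stores into the MCS/OSQ node links; the queueing discipline itself is unchanged. For orientation, a compact userspace MCS lock using C11 atomics showing the prev->next handoff those stores implement (a self-contained sketch, not the kernel's osq code):

#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_int locked;		/* 0 = spinning, 1 = granted */
};

struct mcs_lock {
	_Atomic(struct mcs_node *) tail;
};

static void mcs_lock_acquire(struct mcs_lock *l, struct mcs_node *me)
{
	struct mcs_node *prev;

	atomic_store(&me->next, NULL);
	atomic_store(&me->locked, 0);

	/* Swing the tail to ourselves; the old tail is our predecessor. */
	prev = atomic_exchange(&l->tail, me);
	if (!prev)
		return;			/* queue was empty: lock acquired */

	/* The ACCESS_ONCE_RW(prev->next) = node store in the patch. */
	atomic_store(&prev->next, me);
	while (!atomic_load(&me->locked))
		;			/* spin until the predecessor hands off */
}

static void mcs_lock_release(struct mcs_lock *l, struct mcs_node *me)
{
	struct mcs_node *next = atomic_load(&me->next);

	if (!next) {
		/* No visible successor: try to reset the tail. */
		struct mcs_node *expected = me;
		if (atomic_compare_exchange_strong(&l->tail, &expected, NULL))
			return;
		/* A successor is linking in; wait for next to appear. */
		while (!(next = atomic_load(&me->next)))
			;
	}
	atomic_store(&next->locked, 1);	/* ACCESS_ONCE_RW(next->locked) = 1 */
}

int main(void)
{
	struct mcs_lock l = { .tail = NULL };
	struct mcs_node n;
	mcs_lock_acquire(&l, &n);
	mcs_lock_release(&l, &n);
	return 0;
}
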
90017diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
90018index 3ef3736..9c951fa 100644
90019--- a/kernel/locking/mutex-debug.c
90020+++ b/kernel/locking/mutex-debug.c
90021@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
90022 }
90023
90024 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90025- struct thread_info *ti)
90026+ struct task_struct *task)
90027 {
90028 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
90029
90030 /* Mark the current thread as blocked on the lock: */
90031- ti->task->blocked_on = waiter;
90032+ task->blocked_on = waiter;
90033 }
90034
90035 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90036- struct thread_info *ti)
90037+ struct task_struct *task)
90038 {
90039 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
90040- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
90041- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
90042- ti->task->blocked_on = NULL;
90043+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
90044+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
90045+ task->blocked_on = NULL;
90046
90047 list_del_init(&waiter->list);
90048 waiter->task = NULL;
90049diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
90050index 0799fd3..d06ae3b 100644
90051--- a/kernel/locking/mutex-debug.h
90052+++ b/kernel/locking/mutex-debug.h
90053@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
90054 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
90055 extern void debug_mutex_add_waiter(struct mutex *lock,
90056 struct mutex_waiter *waiter,
90057- struct thread_info *ti);
90058+ struct task_struct *task);
90059 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90060- struct thread_info *ti);
90061+ struct task_struct *task);
90062 extern void debug_mutex_unlock(struct mutex *lock);
90063 extern void debug_mutex_init(struct mutex *lock, const char *name,
90064 struct lock_class_key *key);
90065diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
90066index 4541951..39fe90a 100644
90067--- a/kernel/locking/mutex.c
90068+++ b/kernel/locking/mutex.c
90069@@ -524,7 +524,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90070 goto skip_wait;
90071
90072 debug_mutex_lock_common(lock, &waiter);
90073- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
90074+ debug_mutex_add_waiter(lock, &waiter, task);
90075
90076 /* add waiting tasks to the end of the waitqueue (FIFO): */
90077 list_add_tail(&waiter.list, &lock->wait_list);
90078@@ -569,7 +569,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90079 schedule_preempt_disabled();
90080 spin_lock_mutex(&lock->wait_lock, flags);
90081 }
90082- mutex_remove_waiter(lock, &waiter, current_thread_info());
90083+ mutex_remove_waiter(lock, &waiter, task);
90084 /* set it to 0 if there are no waiters left: */
90085 if (likely(list_empty(&lock->wait_list)))
90086 atomic_set(&lock->count, 0);
90087@@ -606,7 +606,7 @@ skip_wait:
90088 return 0;
90089
90090 err:
90091- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
90092+ mutex_remove_waiter(lock, &waiter, task);
90093 spin_unlock_mutex(&lock->wait_lock, flags);
90094 debug_mutex_free_waiter(&waiter);
90095 mutex_release(&lock->dep_map, 1, ip);
90096diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
90097index 1d96dd0..994ff19 100644
90098--- a/kernel/locking/rtmutex-tester.c
90099+++ b/kernel/locking/rtmutex-tester.c
90100@@ -22,7 +22,7 @@
90101 #define MAX_RT_TEST_MUTEXES 8
90102
90103 static spinlock_t rttest_lock;
90104-static atomic_t rttest_event;
90105+static atomic_unchecked_t rttest_event;
90106
90107 struct test_thread_data {
90108 int opcode;
90109@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90110
90111 case RTTEST_LOCKCONT:
90112 td->mutexes[td->opdata] = 1;
90113- td->event = atomic_add_return(1, &rttest_event);
90114+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90115 return 0;
90116
90117 case RTTEST_RESET:
90118@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90119 return 0;
90120
90121 case RTTEST_RESETEVENT:
90122- atomic_set(&rttest_event, 0);
90123+ atomic_set_unchecked(&rttest_event, 0);
90124 return 0;
90125
90126 default:
90127@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90128 return ret;
90129
90130 td->mutexes[id] = 1;
90131- td->event = atomic_add_return(1, &rttest_event);
90132+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90133 rt_mutex_lock(&mutexes[id]);
90134- td->event = atomic_add_return(1, &rttest_event);
90135+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90136 td->mutexes[id] = 4;
90137 return 0;
90138
90139@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90140 return ret;
90141
90142 td->mutexes[id] = 1;
90143- td->event = atomic_add_return(1, &rttest_event);
90144+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90145 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
90146- td->event = atomic_add_return(1, &rttest_event);
90147+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90148 td->mutexes[id] = ret ? 0 : 4;
90149 return ret ? -EINTR : 0;
90150
90151@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90152 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
90153 return ret;
90154
90155- td->event = atomic_add_return(1, &rttest_event);
90156+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90157 rt_mutex_unlock(&mutexes[id]);
90158- td->event = atomic_add_return(1, &rttest_event);
90159+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90160 td->mutexes[id] = 0;
90161 return 0;
90162
90163@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90164 break;
90165
90166 td->mutexes[dat] = 2;
90167- td->event = atomic_add_return(1, &rttest_event);
90168+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90169 break;
90170
90171 default:
90172@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90173 return;
90174
90175 td->mutexes[dat] = 3;
90176- td->event = atomic_add_return(1, &rttest_event);
90177+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90178 break;
90179
90180 case RTTEST_LOCKNOWAIT:
90181@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90182 return;
90183
90184 td->mutexes[dat] = 1;
90185- td->event = atomic_add_return(1, &rttest_event);
90186+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90187 return;
90188
90189 default:
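
rttest_event is a pure event sequence counter, so the patch moves it to atomic_unchecked_t, opting it out of PAX_REFCOUNT's overflow trap. A single-threaded userspace sketch of that distinction, using the GCC/Clang builtin __builtin_add_overflow as a stand-in for the trap logic (function names are illustrative, not the kernel API):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Checked add: trap on signed overflow, the PAX_REFCOUNT behaviour. */
static int add_return_checked(int v, int *counter)
{
	int result;
	if (__builtin_add_overflow(*counter, v, &result)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	*counter = result;
	return result;
}

/* Unchecked add: plain wrap-around, fine for statistics and sequence
 * counters where overflow is harmless. Unsigned math avoids UB. */
static int add_return_unchecked(int v, int *counter)
{
	*counter = (int)((unsigned)*counter + (unsigned)v);
	return *counter;
}

int main(void)
{
	int seq = INT_MAX - 1;
	printf("%d\n", add_return_unchecked(1, &seq));	/* INT_MAX */
	printf("%d\n", add_return_checked(1, &seq));	/* aborts */
	return 0;
}
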
90190diff --git a/kernel/module.c b/kernel/module.c
90191index d856e96..b82225c 100644
90192--- a/kernel/module.c
90193+++ b/kernel/module.c
90194@@ -59,6 +59,7 @@
90195 #include <linux/jump_label.h>
90196 #include <linux/pfn.h>
90197 #include <linux/bsearch.h>
90198+#include <linux/grsecurity.h>
90199 #include <uapi/linux/module.h>
90200 #include "module-internal.h"
90201
90202@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
90203
90204 /* Bounds of module allocation, for speeding __module_address.
90205 * Protected by module_mutex. */
90206-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
90207+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
90208+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
90209
90210 int register_module_notifier(struct notifier_block *nb)
90211 {
90212@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90213 return true;
90214
90215 list_for_each_entry_rcu(mod, &modules, list) {
90216- struct symsearch arr[] = {
90217+ struct symsearch modarr[] = {
90218 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
90219 NOT_GPL_ONLY, false },
90220 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
90221@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90222 if (mod->state == MODULE_STATE_UNFORMED)
90223 continue;
90224
90225- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
90226+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
90227 return true;
90228 }
90229 return false;
90230@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
90231 if (!pcpusec->sh_size)
90232 return 0;
90233
90234- if (align > PAGE_SIZE) {
90235+ if (align-1 >= PAGE_SIZE) {
90236 pr_warn("%s: per-cpu alignment %li > %li\n",
90237 mod->name, align, PAGE_SIZE);
90238 align = PAGE_SIZE;
90239@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
90240 static ssize_t show_coresize(struct module_attribute *mattr,
90241 struct module_kobject *mk, char *buffer)
90242 {
90243- return sprintf(buffer, "%u\n", mk->mod->core_size);
90244+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
90245 }
90246
90247 static struct module_attribute modinfo_coresize =
90248@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
90249 static ssize_t show_initsize(struct module_attribute *mattr,
90250 struct module_kobject *mk, char *buffer)
90251 {
90252- return sprintf(buffer, "%u\n", mk->mod->init_size);
90253+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
90254 }
90255
90256 static struct module_attribute modinfo_initsize =
90257@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
90258 goto bad_version;
90259 }
90260
90261+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90262+ /*
90263+	 * avoid potentially printing gibberish on attempted load
90264+ * of a module randomized with a different seed
90265+ */
90266+ pr_warn("no symbol version for %s\n", symname);
90267+#else
90268 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
90269+#endif
90270 return 0;
90271
90272 bad_version:
90273+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90274+ /*
90275+	 * avoid potentially printing gibberish on attempted load
90276+ * of a module randomized with a different seed
90277+ */
90278+ pr_warn("attempted module disagrees about version of symbol %s\n",
90279+ symname);
90280+#else
90281 pr_warn("%s: disagrees about version of symbol %s\n",
90282 mod->name, symname);
90283+#endif
90284 return 0;
90285 }
90286
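
With GRKERNSEC_RANDSTRUCT the layout of struct module is scrambled per build, so dereferencing mod->name in a module built with a different seed can read arbitrary bytes; the patch therefore drops the name from these warnings. The offset shift is easy to visualize with two illustrative structs (not the real struct module):

#include <stddef.h>
#include <stdio.h>

/* Same logical fields, two "seeds": the field order differs, so the
 * offset of `name` differs between builds. */
struct module_seed_a { long state; char name[56]; void *core; };
struct module_seed_b { void *core; long state; char name[56]; };

int main(void)
{
	printf("seed A: name at offset %zu\n",
	       offsetof(struct module_seed_a, name));
	printf("seed B: name at offset %zu\n",
	       offsetof(struct module_seed_b, name));
	/* Code compiled against seed A reading a seed-B object would
	 * interpret unrelated bytes as the name string. */
	return 0;
}
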
90287@@ -1275,7 +1294,7 @@ resolve_symbol_wait(struct module *mod,
90288 */
90289 #ifdef CONFIG_SYSFS
90290
90291-#ifdef CONFIG_KALLSYMS
90292+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
90293 static inline bool sect_empty(const Elf_Shdr *sect)
90294 {
90295 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
90296@@ -1413,7 +1432,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
90297 {
90298 unsigned int notes, loaded, i;
90299 struct module_notes_attrs *notes_attrs;
90300- struct bin_attribute *nattr;
90301+ bin_attribute_no_const *nattr;
90302
90303 /* failed to create section attributes, so can't create notes */
90304 if (!mod->sect_attrs)
90305@@ -1525,7 +1544,7 @@ static void del_usage_links(struct module *mod)
90306 static int module_add_modinfo_attrs(struct module *mod)
90307 {
90308 struct module_attribute *attr;
90309- struct module_attribute *temp_attr;
90310+ module_attribute_no_const *temp_attr;
90311 int error = 0;
90312 int i;
90313
90314@@ -1735,21 +1754,21 @@ static void set_section_ro_nx(void *base,
90315
90316 static void unset_module_core_ro_nx(struct module *mod)
90317 {
90318- set_page_attributes(mod->module_core + mod->core_text_size,
90319- mod->module_core + mod->core_size,
90320+ set_page_attributes(mod->module_core_rw,
90321+ mod->module_core_rw + mod->core_size_rw,
90322 set_memory_x);
90323- set_page_attributes(mod->module_core,
90324- mod->module_core + mod->core_ro_size,
90325+ set_page_attributes(mod->module_core_rx,
90326+ mod->module_core_rx + mod->core_size_rx,
90327 set_memory_rw);
90328 }
90329
90330 static void unset_module_init_ro_nx(struct module *mod)
90331 {
90332- set_page_attributes(mod->module_init + mod->init_text_size,
90333- mod->module_init + mod->init_size,
90334+ set_page_attributes(mod->module_init_rw,
90335+ mod->module_init_rw + mod->init_size_rw,
90336 set_memory_x);
90337- set_page_attributes(mod->module_init,
90338- mod->module_init + mod->init_ro_size,
90339+ set_page_attributes(mod->module_init_rx,
90340+ mod->module_init_rx + mod->init_size_rx,
90341 set_memory_rw);
90342 }
90343
90344@@ -1762,14 +1781,14 @@ void set_all_modules_text_rw(void)
90345 list_for_each_entry_rcu(mod, &modules, list) {
90346 if (mod->state == MODULE_STATE_UNFORMED)
90347 continue;
90348- if ((mod->module_core) && (mod->core_text_size)) {
90349- set_page_attributes(mod->module_core,
90350- mod->module_core + mod->core_text_size,
90351+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90352+ set_page_attributes(mod->module_core_rx,
90353+ mod->module_core_rx + mod->core_size_rx,
90354 set_memory_rw);
90355 }
90356- if ((mod->module_init) && (mod->init_text_size)) {
90357- set_page_attributes(mod->module_init,
90358- mod->module_init + mod->init_text_size,
90359+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90360+ set_page_attributes(mod->module_init_rx,
90361+ mod->module_init_rx + mod->init_size_rx,
90362 set_memory_rw);
90363 }
90364 }
90365@@ -1785,14 +1804,14 @@ void set_all_modules_text_ro(void)
90366 list_for_each_entry_rcu(mod, &modules, list) {
90367 if (mod->state == MODULE_STATE_UNFORMED)
90368 continue;
90369- if ((mod->module_core) && (mod->core_text_size)) {
90370- set_page_attributes(mod->module_core,
90371- mod->module_core + mod->core_text_size,
90372+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90373+ set_page_attributes(mod->module_core_rx,
90374+ mod->module_core_rx + mod->core_size_rx,
90375 set_memory_ro);
90376 }
90377- if ((mod->module_init) && (mod->init_text_size)) {
90378- set_page_attributes(mod->module_init,
90379- mod->module_init + mod->init_text_size,
90380+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90381+ set_page_attributes(mod->module_init_rx,
90382+ mod->module_init_rx + mod->init_size_rx,
90383 set_memory_ro);
90384 }
90385 }
90386@@ -1801,7 +1820,15 @@ void set_all_modules_text_ro(void)
90387 #else
90388 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
90389 static void unset_module_core_ro_nx(struct module *mod) { }
90390-static void unset_module_init_ro_nx(struct module *mod) { }
90391+static void unset_module_init_ro_nx(struct module *mod)
90392+{
90393+
90394+#ifdef CONFIG_PAX_KERNEXEC
90395+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90396+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90397+#endif
90398+
90399+}
90400 #endif
90401
90402 void __weak module_memfree(void *module_region)
90403@@ -1855,16 +1882,19 @@ static void free_module(struct module *mod)
90404 /* This may be NULL, but that's OK */
90405 unset_module_init_ro_nx(mod);
90406 module_arch_freeing_init(mod);
90407- module_memfree(mod->module_init);
90408+ module_memfree(mod->module_init_rw);
90409+ module_memfree_exec(mod->module_init_rx);
90410 kfree(mod->args);
90411 percpu_modfree(mod);
90412
90413 /* Free lock-classes: */
90414- lockdep_free_key_range(mod->module_core, mod->core_size);
90415+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
90416+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
90417
90418 /* Finally, free the core (containing the module structure) */
90419 unset_module_core_ro_nx(mod);
90420- module_memfree(mod->module_core);
90421+ module_memfree_exec(mod->module_core_rx);
90422+ module_memfree(mod->module_core_rw);
90423
90424 #ifdef CONFIG_MPU
90425 update_protections(current->mm);
90426@@ -1933,9 +1963,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90427 int ret = 0;
90428 const struct kernel_symbol *ksym;
90429
90430+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90431+ int is_fs_load = 0;
90432+ int register_filesystem_found = 0;
90433+ char *p;
90434+
90435+ p = strstr(mod->args, "grsec_modharden_fs");
90436+ if (p) {
90437+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
90438+ /* copy \0 as well */
90439+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
90440+ is_fs_load = 1;
90441+ }
90442+#endif
90443+
90444 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
90445 const char *name = info->strtab + sym[i].st_name;
90446
90447+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90448+ /* it's a real shame this will never get ripped and copied
90449+ upstream! ;(
90450+ */
90451+ if (is_fs_load && !strcmp(name, "register_filesystem"))
90452+ register_filesystem_found = 1;
90453+#endif
90454+
90455 switch (sym[i].st_shndx) {
90456 case SHN_COMMON:
90457 /* Ignore common symbols */
90458@@ -1960,7 +2012,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90459 ksym = resolve_symbol_wait(mod, info, name);
90460 /* Ok if resolved. */
90461 if (ksym && !IS_ERR(ksym)) {
90462+ pax_open_kernel();
90463 sym[i].st_value = ksym->value;
90464+ pax_close_kernel();
90465 break;
90466 }
90467
90468@@ -1979,11 +2033,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90469 secbase = (unsigned long)mod_percpu(mod);
90470 else
90471 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
90472+ pax_open_kernel();
90473 sym[i].st_value += secbase;
90474+ pax_close_kernel();
90475 break;
90476 }
90477 }
90478
90479+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90480+ if (is_fs_load && !register_filesystem_found) {
90481+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
90482+ ret = -EPERM;
90483+ }
90484+#endif
90485+
90486 return ret;
90487 }
90488
90489@@ -2067,22 +2130,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
90490 || s->sh_entsize != ~0UL
90491 || strstarts(sname, ".init"))
90492 continue;
90493- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
90494+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90495+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
90496+ else
90497+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
90498 pr_debug("\t%s\n", sname);
90499 }
90500- switch (m) {
90501- case 0: /* executable */
90502- mod->core_size = debug_align(mod->core_size);
90503- mod->core_text_size = mod->core_size;
90504- break;
90505- case 1: /* RO: text and ro-data */
90506- mod->core_size = debug_align(mod->core_size);
90507- mod->core_ro_size = mod->core_size;
90508- break;
90509- case 3: /* whole core */
90510- mod->core_size = debug_align(mod->core_size);
90511- break;
90512- }
90513 }
90514
90515 pr_debug("Init section allocation order:\n");
90516@@ -2096,23 +2149,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
90517 || s->sh_entsize != ~0UL
90518 || !strstarts(sname, ".init"))
90519 continue;
90520- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
90521- | INIT_OFFSET_MASK);
90522+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90523+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
90524+ else
90525+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
90526+ s->sh_entsize |= INIT_OFFSET_MASK;
90527 pr_debug("\t%s\n", sname);
90528 }
90529- switch (m) {
90530- case 0: /* executable */
90531- mod->init_size = debug_align(mod->init_size);
90532- mod->init_text_size = mod->init_size;
90533- break;
90534- case 1: /* RO: text and ro-data */
90535- mod->init_size = debug_align(mod->init_size);
90536- mod->init_ro_size = mod->init_size;
90537- break;
90538- case 3: /* whole init */
90539- mod->init_size = debug_align(mod->init_size);
90540- break;
90541- }
90542 }
90543 }
90544
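
layout_sections() now buckets each allocatable section by writability instead of the old per-pass size rollup: anything SHF_WRITE (or not SHF_ALLOC) accrues to the RW image, everything else to RX. The classification over synthetic section flags (flag constants mirror the ELF spec; the section table is invented):

#include <stdio.h>

#define SHF_WRITE     0x1
#define SHF_ALLOC     0x2
#define SHF_EXECINSTR 0x4

struct section { const char *name; unsigned flags; unsigned long size; };

int main(void)
{
	struct section secs[] = {
		{ ".text",   SHF_ALLOC | SHF_EXECINSTR, 0x3000 },
		{ ".rodata", SHF_ALLOC,                 0x0800 },
		{ ".data",   SHF_ALLOC | SHF_WRITE,     0x1000 },
		{ ".bss",    SHF_ALLOC | SHF_WRITE,     0x0400 },
	};
	unsigned long size_rw = 0, size_rx = 0;

	for (unsigned i = 0; i < sizeof(secs) / sizeof(secs[0]); i++) {
		/* The patch's test: writable (or non-alloc) sections go
		 * to the RW image, read-only and executable ones to RX. */
		if ((secs[i].flags & SHF_WRITE) || !(secs[i].flags & SHF_ALLOC))
			size_rw += secs[i].size;
		else
			size_rx += secs[i].size;
	}
	printf("rw=0x%lx rx=0x%lx\n", size_rw, size_rx);
	return 0;
}
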
90545@@ -2285,7 +2328,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90546
90547 /* Put symbol section at end of init part of module. */
90548 symsect->sh_flags |= SHF_ALLOC;
90549- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
90550+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
90551 info->index.sym) | INIT_OFFSET_MASK;
90552 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
90553
90554@@ -2302,13 +2345,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90555 }
90556
90557 /* Append room for core symbols at end of core part. */
90558- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
90559- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
90560- mod->core_size += strtab_size;
90561+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
90562+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
90563+ mod->core_size_rx += strtab_size;
90564
90565 /* Put string table section at end of init part of module. */
90566 strsect->sh_flags |= SHF_ALLOC;
90567- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
90568+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
90569 info->index.str) | INIT_OFFSET_MASK;
90570 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
90571 }
90572@@ -2326,12 +2369,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90573 /* Make sure we get permanent strtab: don't use info->strtab. */
90574 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
90575
90576+ pax_open_kernel();
90577+
90578 /* Set types up while we still have access to sections. */
90579 for (i = 0; i < mod->num_symtab; i++)
90580 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
90581
90582- mod->core_symtab = dst = mod->module_core + info->symoffs;
90583- mod->core_strtab = s = mod->module_core + info->stroffs;
90584+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
90585+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
90586 src = mod->symtab;
90587 for (ndst = i = 0; i < mod->num_symtab; i++) {
90588 if (i == 0 ||
90589@@ -2343,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90590 }
90591 }
90592 mod->core_num_syms = ndst;
90593+
90594+ pax_close_kernel();
90595 }
90596 #else
90597 static inline void layout_symtab(struct module *mod, struct load_info *info)
90598@@ -2376,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
90599 return vmalloc_exec(size);
90600 }
90601
90602-static void *module_alloc_update_bounds(unsigned long size)
90603+static void *module_alloc_update_bounds_rw(unsigned long size)
90604 {
90605 void *ret = module_alloc(size);
90606
90607 if (ret) {
90608 mutex_lock(&module_mutex);
90609 /* Update module bounds. */
90610- if ((unsigned long)ret < module_addr_min)
90611- module_addr_min = (unsigned long)ret;
90612- if ((unsigned long)ret + size > module_addr_max)
90613- module_addr_max = (unsigned long)ret + size;
90614+ if ((unsigned long)ret < module_addr_min_rw)
90615+ module_addr_min_rw = (unsigned long)ret;
90616+ if ((unsigned long)ret + size > module_addr_max_rw)
90617+ module_addr_max_rw = (unsigned long)ret + size;
90618+ mutex_unlock(&module_mutex);
90619+ }
90620+ return ret;
90621+}
90622+
90623+static void *module_alloc_update_bounds_rx(unsigned long size)
90624+{
90625+ void *ret = module_alloc_exec(size);
90626+
90627+ if (ret) {
90628+ mutex_lock(&module_mutex);
90629+ /* Update module bounds. */
90630+ if ((unsigned long)ret < module_addr_min_rx)
90631+ module_addr_min_rx = (unsigned long)ret;
90632+ if ((unsigned long)ret + size > module_addr_max_rx)
90633+ module_addr_max_rx = (unsigned long)ret + size;
90634 mutex_unlock(&module_mutex);
90635 }
90636 return ret;
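
Both allocators keep running [min, max) bounds so __module_address() can reject most addresses without walking the module list; the patch simply maintains one pair per region type. The bookkeeping in isolation (hypothetical names, malloc as a stand-in, locking reduced to a comment):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uintptr_t addr_min = UINTPTR_MAX;
static uintptr_t addr_max = 0;

/* Allocate and widen the tracked bounds; the kernel does this under
 * module_mutex. */
static void *alloc_update_bounds(size_t size)
{
	void *ret = malloc(size);	/* stand-in for module_alloc() */

	if (ret) {
		uintptr_t a = (uintptr_t)ret;
		if (a < addr_min)
			addr_min = a;
		if (a + size > addr_max)
			addr_max = a + size;
	}
	return ret;
}

/* Fast pre-filter: outside the bounds means "not a module address". */
static int maybe_module_address(uintptr_t addr)
{
	return addr >= addr_min && addr < addr_max;
}

int main(void)
{
	void *p = alloc_update_bounds(64);
	printf("%d %d\n", maybe_module_address((uintptr_t)p),
	       maybe_module_address(0));
	free(p);
	return 0;
}
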
90637@@ -2640,7 +2703,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90638 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
90639
90640 if (info->index.sym == 0) {
90641+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90642+ /*
90643+	 * avoid potentially printing gibberish on attempted load
90644+ * of a module randomized with a different seed
90645+ */
90646+ pr_warn("module has no symbols (stripped?)\n");
90647+#else
90648 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
90649+#endif
90650 return ERR_PTR(-ENOEXEC);
90651 }
90652
90653@@ -2656,8 +2727,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90654 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90655 {
90656 const char *modmagic = get_modinfo(info, "vermagic");
90657+ const char *license = get_modinfo(info, "license");
90658 int err;
90659
90660+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
90661+ if (!license || !license_is_gpl_compatible(license))
90662+ return -ENOEXEC;
90663+#endif
90664+
90665 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
90666 modmagic = NULL;
90667
90668@@ -2682,7 +2759,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90669 }
90670
90671 /* Set up license info based on the info section */
90672- set_license(mod, get_modinfo(info, "license"));
90673+ set_license(mod, license);
90674
90675 return 0;
90676 }
90677@@ -2776,7 +2853,7 @@ static int move_module(struct module *mod, struct load_info *info)
90678 void *ptr;
90679
90680 /* Do the allocs. */
90681- ptr = module_alloc_update_bounds(mod->core_size);
90682+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
90683 /*
90684 * The pointer to this block is stored in the module structure
90685 * which is inside the block. Just mark it as not being a
90686@@ -2786,11 +2863,11 @@ static int move_module(struct module *mod, struct load_info *info)
90687 if (!ptr)
90688 return -ENOMEM;
90689
90690- memset(ptr, 0, mod->core_size);
90691- mod->module_core = ptr;
90692+ memset(ptr, 0, mod->core_size_rw);
90693+ mod->module_core_rw = ptr;
90694
90695- if (mod->init_size) {
90696- ptr = module_alloc_update_bounds(mod->init_size);
90697+ if (mod->init_size_rw) {
90698+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
90699 /*
90700 * The pointer to this block is stored in the module structure
90701 * which is inside the block. This block doesn't need to be
90702@@ -2799,13 +2876,45 @@ static int move_module(struct module *mod, struct load_info *info)
90703 */
90704 kmemleak_ignore(ptr);
90705 if (!ptr) {
90706- module_memfree(mod->module_core);
90707+ module_memfree(mod->module_core_rw);
90708 return -ENOMEM;
90709 }
90710- memset(ptr, 0, mod->init_size);
90711- mod->module_init = ptr;
90712+ memset(ptr, 0, mod->init_size_rw);
90713+ mod->module_init_rw = ptr;
90714 } else
90715- mod->module_init = NULL;
90716+ mod->module_init_rw = NULL;
90717+
90718+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
90719+ kmemleak_not_leak(ptr);
90720+ if (!ptr) {
90721+ if (mod->module_init_rw)
90722+ module_memfree(mod->module_init_rw);
90723+ module_memfree(mod->module_core_rw);
90724+ return -ENOMEM;
90725+ }
90726+
90727+ pax_open_kernel();
90728+ memset(ptr, 0, mod->core_size_rx);
90729+ pax_close_kernel();
90730+ mod->module_core_rx = ptr;
90731+
90732+ if (mod->init_size_rx) {
90733+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
90734+ kmemleak_ignore(ptr);
90735+ if (!ptr && mod->init_size_rx) {
90736+ module_memfree_exec(mod->module_core_rx);
90737+ if (mod->module_init_rw)
90738+ module_memfree(mod->module_init_rw);
90739+ module_memfree(mod->module_core_rw);
90740+ return -ENOMEM;
90741+ }
90742+
90743+ pax_open_kernel();
90744+ memset(ptr, 0, mod->init_size_rx);
90745+ pax_close_kernel();
90746+ mod->module_init_rx = ptr;
90747+ } else
90748+ mod->module_init_rx = NULL;
90749
90750 /* Transfer each section which specifies SHF_ALLOC */
90751 pr_debug("final section addresses:\n");
90752@@ -2816,16 +2925,45 @@ static int move_module(struct module *mod, struct load_info *info)
90753 if (!(shdr->sh_flags & SHF_ALLOC))
90754 continue;
90755
90756- if (shdr->sh_entsize & INIT_OFFSET_MASK)
90757- dest = mod->module_init
90758- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90759- else
90760- dest = mod->module_core + shdr->sh_entsize;
90761+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
90762+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90763+ dest = mod->module_init_rw
90764+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90765+ else
90766+ dest = mod->module_init_rx
90767+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90768+ } else {
90769+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90770+ dest = mod->module_core_rw + shdr->sh_entsize;
90771+ else
90772+ dest = mod->module_core_rx + shdr->sh_entsize;
90773+ }
90774+
90775+ if (shdr->sh_type != SHT_NOBITS) {
90776+
90777+#ifdef CONFIG_PAX_KERNEXEC
90778+#ifdef CONFIG_X86_64
90779+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
90780+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
90781+#endif
90782+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
90783+ pax_open_kernel();
90784+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
90785+ pax_close_kernel();
90786+ } else
90787+#endif
90788
90789- if (shdr->sh_type != SHT_NOBITS)
90790 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
90791+ }
90792 /* Update sh_addr to point to copy in image. */
90793- shdr->sh_addr = (unsigned long)dest;
90794+
90795+#ifdef CONFIG_PAX_KERNEXEC
90796+ if (shdr->sh_flags & SHF_EXECINSTR)
90797+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
90798+ else
90799+#endif
90800+
90801+ shdr->sh_addr = (unsigned long)dest;
90802 pr_debug("\t0x%lx %s\n",
90803 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
90804 }
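
Sections destined for the RX image are copied inside a pax_open_kernel()/pax_close_kernel() pair, which briefly lifts write protection on otherwise read-only kernel memory. mprotect() gives a rough userspace analogue of that open-write-close discipline (a sketch only; the kernel primitive toggles processor-level write protection, not page protections like this, and W^X policies on some systems may refuse the executable mapping):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	/* An "RX image": readable/executable, not writable. */
	unsigned char *image = mmap(NULL, pagesz, PROT_READ | PROT_EXEC,
				    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	unsigned char payload[4] = { 0xc3, 0x90, 0x90, 0x90 };

	if (image == MAP_FAILED)
		return 1;

	mprotect(image, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
	memcpy(image, payload, sizeof(payload));	 /* the section copy */
	mprotect(image, pagesz, PROT_READ | PROT_EXEC);	 /* pax_close_kernel() */

	printf("first byte: 0x%02x\n", image[0]);
	munmap(image, pagesz);
	return 0;
}
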
90805@@ -2882,12 +3020,12 @@ static void flush_module_icache(const struct module *mod)
90806 * Do it before processing of module parameters, so the module
90807 * can provide parameter accessor functions of its own.
90808 */
90809- if (mod->module_init)
90810- flush_icache_range((unsigned long)mod->module_init,
90811- (unsigned long)mod->module_init
90812- + mod->init_size);
90813- flush_icache_range((unsigned long)mod->module_core,
90814- (unsigned long)mod->module_core + mod->core_size);
90815+ if (mod->module_init_rx)
90816+ flush_icache_range((unsigned long)mod->module_init_rx,
90817+ (unsigned long)mod->module_init_rx
90818+ + mod->init_size_rx);
90819+ flush_icache_range((unsigned long)mod->module_core_rx,
90820+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
90821
90822 set_fs(old_fs);
90823 }
90824@@ -2945,8 +3083,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
90825 {
90826 percpu_modfree(mod);
90827 module_arch_freeing_init(mod);
90828- module_memfree(mod->module_init);
90829- module_memfree(mod->module_core);
90830+ module_memfree_exec(mod->module_init_rx);
90831+ module_memfree_exec(mod->module_core_rx);
90832+ module_memfree(mod->module_init_rw);
90833+ module_memfree(mod->module_core_rw);
90834 }
90835
90836 int __weak module_finalize(const Elf_Ehdr *hdr,
90837@@ -2959,7 +3099,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
90838 static int post_relocation(struct module *mod, const struct load_info *info)
90839 {
90840 /* Sort exception table now relocations are done. */
90841+ pax_open_kernel();
90842 sort_extable(mod->extable, mod->extable + mod->num_exentries);
90843+ pax_close_kernel();
90844
90845 /* Copy relocated percpu area over. */
90846 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
90847@@ -3001,13 +3143,15 @@ static void do_mod_ctors(struct module *mod)
90848 /* For freeing module_init on success, in case kallsyms traversing */
90849 struct mod_initfree {
90850 struct rcu_head rcu;
90851- void *module_init;
90852+ void *module_init_rw;
90853+ void *module_init_rx;
90854 };
90855
90856 static void do_free_init(struct rcu_head *head)
90857 {
90858 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
90859- module_memfree(m->module_init);
90860+ module_memfree(m->module_init_rw);
90861+ module_memfree_exec(m->module_init_rx);
90862 kfree(m);
90863 }
90864
90865@@ -3022,7 +3166,8 @@ static int do_init_module(struct module *mod)
90866 ret = -ENOMEM;
90867 goto fail;
90868 }
90869- freeinit->module_init = mod->module_init;
90870+ freeinit->module_init_rw = mod->module_init_rw;
90871+ freeinit->module_init_rx = mod->module_init_rx;
90872
90873 /*
90874 * We want to find out whether @mod uses async during init. Clear
90875@@ -3081,10 +3226,10 @@ static int do_init_module(struct module *mod)
90876 #endif
90877 unset_module_init_ro_nx(mod);
90878 module_arch_freeing_init(mod);
90879- mod->module_init = NULL;
90880- mod->init_size = 0;
90881- mod->init_ro_size = 0;
90882- mod->init_text_size = 0;
90883+ mod->module_init_rw = NULL;
90884+ mod->module_init_rx = NULL;
90885+ mod->init_size_rw = 0;
90886+ mod->init_size_rx = 0;
90887 /*
90888 * We want to free module_init, but be aware that kallsyms may be
90889 * walking this with preempt disabled. In all the failure paths,
90890@@ -3198,16 +3343,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
90891 module_bug_finalize(info->hdr, info->sechdrs, mod);
90892
90893 /* Set RO and NX regions for core */
90894- set_section_ro_nx(mod->module_core,
90895- mod->core_text_size,
90896- mod->core_ro_size,
90897- mod->core_size);
90898+ set_section_ro_nx(mod->module_core_rx,
90899+ mod->core_size_rx,
90900+ mod->core_size_rx,
90901+ mod->core_size_rx);
90902
90903 /* Set RO and NX regions for init */
90904- set_section_ro_nx(mod->module_init,
90905- mod->init_text_size,
90906- mod->init_ro_size,
90907- mod->init_size);
90908+ set_section_ro_nx(mod->module_init_rx,
90909+ mod->init_size_rx,
90910+ mod->init_size_rx,
90911+ mod->init_size_rx);
90912
90913 /* Mark state as coming so strong_try_module_get() ignores us,
90914 * but kallsyms etc. can see us. */
90915@@ -3291,9 +3436,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
90916 if (err)
90917 goto free_unload;
90918
90919+ /* Now copy in args */
90920+ mod->args = strndup_user(uargs, ~0UL >> 1);
90921+ if (IS_ERR(mod->args)) {
90922+ err = PTR_ERR(mod->args);
90923+ goto free_unload;
90924+ }
90925+
90926 /* Set up MODINFO_ATTR fields */
90927 setup_modinfo(mod, info);
90928
90929+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90930+ {
90931+ char *p, *p2;
90932+
90933+ if (strstr(mod->args, "grsec_modharden_netdev")) {
90934+			printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
90935+ err = -EPERM;
90936+ goto free_modinfo;
90937+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
90938+ p += sizeof("grsec_modharden_normal") - 1;
90939+ p2 = strstr(p, "_");
90940+ if (p2) {
90941+ *p2 = '\0';
90942+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
90943+ *p2 = '_';
90944+ }
90945+ err = -EPERM;
90946+ goto free_modinfo;
90947+ }
90948+ }
90949+#endif
90950+
90951 /* Fix up syms, so that st_value is a pointer to location. */
90952 err = simplify_symbols(mod, info);
90953 if (err < 0)
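
The MODHARDEN block scans the module's argument string for markers that the auto-load path appended, extracting the requesting uid that follows grsec_modharden_normal up to the next underscore. The string surgery in isolation (standalone sketch; the marker format is taken from the hunk above):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char args[] = "foo=1 grsec_modharden_normal1000_ bar=2";
	char *p = strstr(args, "grsec_modharden_normal");

	if (p) {
		char *uid = p + sizeof("grsec_modharden_normal") - 1;
		char *end = strstr(uid, "_");
		if (end) {
			*end = '\0';	/* temporarily terminate the uid */
			printf("denied auto-load requested by uid %.9s\n", uid);
			*end = '_';	/* restore, as the patch does */
		}
	}
	return 0;
}
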
90954@@ -3309,13 +3483,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
90955
90956 flush_module_icache(mod);
90957
90958- /* Now copy in args */
90959- mod->args = strndup_user(uargs, ~0UL >> 1);
90960- if (IS_ERR(mod->args)) {
90961- err = PTR_ERR(mod->args);
90962- goto free_arch_cleanup;
90963- }
90964-
90965 dynamic_debug_setup(info->debug, info->num_debug);
90966
90967 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
90968@@ -3363,11 +3530,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
90969 ddebug_cleanup:
90970 dynamic_debug_remove(info->debug);
90971 synchronize_sched();
90972- kfree(mod->args);
90973- free_arch_cleanup:
90974 module_arch_cleanup(mod);
90975 free_modinfo:
90976 free_modinfo(mod);
90977+ kfree(mod->args);
90978 free_unload:
90979 module_unload_free(mod);
90980 unlink_mod:
90981@@ -3454,10 +3620,16 @@ static const char *get_ksymbol(struct module *mod,
90982 unsigned long nextval;
90983
90984 /* At worse, next value is at end of module */
90985- if (within_module_init(addr, mod))
90986- nextval = (unsigned long)mod->module_init+mod->init_text_size;
90987+ if (within_module_init_rx(addr, mod))
90988+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
90989+ else if (within_module_init_rw(addr, mod))
90990+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
90991+ else if (within_module_core_rx(addr, mod))
90992+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
90993+ else if (within_module_core_rw(addr, mod))
90994+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
90995 else
90996- nextval = (unsigned long)mod->module_core+mod->core_text_size;
90997+ return NULL;
90998
90999 /* Scan for closest preceding symbol, and next symbol. (ELF
91000 starts real symbols at 1). */
91001@@ -3705,7 +3877,7 @@ static int m_show(struct seq_file *m, void *p)
91002 return 0;
91003
91004 seq_printf(m, "%s %u",
91005- mod->name, mod->init_size + mod->core_size);
91006+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
91007 print_unload_info(m, mod);
91008
91009 /* Informative for users. */
91010@@ -3714,7 +3886,7 @@ static int m_show(struct seq_file *m, void *p)
91011 mod->state == MODULE_STATE_COMING ? "Loading" :
91012 "Live");
91013 /* Used by oprofile and other similar tools. */
91014- seq_printf(m, " 0x%pK", mod->module_core);
91015+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
91016
91017 /* Taints info */
91018 if (mod->taints)
91019@@ -3750,7 +3922,17 @@ static const struct file_operations proc_modules_operations = {
91020
91021 static int __init proc_modules_init(void)
91022 {
91023+#ifndef CONFIG_GRKERNSEC_HIDESYM
91024+#ifdef CONFIG_GRKERNSEC_PROC_USER
91025+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91026+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
91027+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
91028+#else
91029 proc_create("modules", 0, NULL, &proc_modules_operations);
91030+#endif
91031+#else
91032+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91033+#endif
91034 return 0;
91035 }
91036 module_init(proc_modules_init);
91037@@ -3811,7 +3993,8 @@ struct module *__module_address(unsigned long addr)
91038 {
91039 struct module *mod;
91040
91041- if (addr < module_addr_min || addr > module_addr_max)
91042+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
91043+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
91044 return NULL;
91045
91046 list_for_each_entry_rcu(mod, &modules, list) {
91047@@ -3852,11 +4035,20 @@ bool is_module_text_address(unsigned long addr)
91048 */
91049 struct module *__module_text_address(unsigned long addr)
91050 {
91051- struct module *mod = __module_address(addr);
91052+ struct module *mod;
91053+
91054+#ifdef CONFIG_X86_32
91055+ addr = ktla_ktva(addr);
91056+#endif
91057+
91058+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
91059+ return NULL;
91060+
91061+ mod = __module_address(addr);
91062+
91063 if (mod) {
91064 /* Make sure it's within the text section. */
91065- if (!within(addr, mod->module_init, mod->init_text_size)
91066- && !within(addr, mod->module_core, mod->core_text_size))
91067+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
91068 mod = NULL;
91069 }
91070 return mod;
91071diff --git a/kernel/notifier.c b/kernel/notifier.c
91072index 4803da6..1c5eea6 100644
91073--- a/kernel/notifier.c
91074+++ b/kernel/notifier.c
91075@@ -5,6 +5,7 @@
91076 #include <linux/rcupdate.h>
91077 #include <linux/vmalloc.h>
91078 #include <linux/reboot.h>
91079+#include <linux/mm.h>
91080
91081 /*
91082 * Notifier list for kernel code which wants to be called
91083@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
91084 while ((*nl) != NULL) {
91085 if (n->priority > (*nl)->priority)
91086 break;
91087- nl = &((*nl)->next);
91088+ nl = (struct notifier_block **)&((*nl)->next);
91089 }
91090- n->next = *nl;
91091+ pax_open_kernel();
91092+ *(const void **)&n->next = *nl;
91093 rcu_assign_pointer(*nl, n);
91094+ pax_close_kernel();
91095 return 0;
91096 }
91097
91098@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
91099 return 0;
91100 if (n->priority > (*nl)->priority)
91101 break;
91102- nl = &((*nl)->next);
91103+ nl = (struct notifier_block **)&((*nl)->next);
91104 }
91105- n->next = *nl;
91106+ pax_open_kernel();
91107+ *(const void **)&n->next = *nl;
91108 rcu_assign_pointer(*nl, n);
91109+ pax_close_kernel();
91110 return 0;
91111 }
91112
91113@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
91114 {
91115 while ((*nl) != NULL) {
91116 if ((*nl) == n) {
91117+ pax_open_kernel();
91118 rcu_assign_pointer(*nl, n->next);
91119+ pax_close_kernel();
91120 return 0;
91121 }
91122- nl = &((*nl)->next);
91123+ nl = (struct notifier_block **)&((*nl)->next);
91124 }
91125 return -ENOENT;
91126 }
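
Aside from the pax_open_kernel() bracketing (the chains may live in read-only memory under KERNEXEC), these are ordinary sorted-insert operations on a singly linked list keyed by priority. The core walk, runnable on its own (plain pointers, no RCU):

#include <stddef.h>
#include <stdio.h>

struct notifier_block {
	int priority;
	struct notifier_block *next;
};

/* Insert n so the chain stays sorted by descending priority,
 * mirroring notifier_chain_register()'s walk. */
static void chain_register(struct notifier_block **nl,
			   struct notifier_block *n)
{
	while (*nl != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &(*nl)->next;
	}
	n->next = *nl;
	*nl = n;			/* rcu_assign_pointer() in the kernel */
}

int main(void)
{
	struct notifier_block *head = NULL;
	struct notifier_block a = { .priority = 0 }, b = { .priority = 10 };

	chain_register(&head, &a);
	chain_register(&head, &b);	/* higher priority lands first */
	for (struct notifier_block *p = head; p; p = p->next)
		printf("%d\n", p->priority);
	return 0;
}
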
91127diff --git a/kernel/padata.c b/kernel/padata.c
91128index 161402f..598814c 100644
91129--- a/kernel/padata.c
91130+++ b/kernel/padata.c
91131@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
91132 * seq_nr mod. number of cpus in use.
91133 */
91134
91135- seq_nr = atomic_inc_return(&pd->seq_nr);
91136+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
91137 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
91138
91139 return padata_index_to_cpu(pd, cpu_index);
91140@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
91141 padata_init_pqueues(pd);
91142 padata_init_squeues(pd);
91143 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
91144- atomic_set(&pd->seq_nr, -1);
91145+ atomic_set_unchecked(&pd->seq_nr, -1);
91146 atomic_set(&pd->reorder_objects, 0);
91147 atomic_set(&pd->refcnt, 0);
91148 pd->pinst = pinst;
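
padata spreads objects across workers by taking the sequence number modulo the number of usable CPUs; switching the counter to the unchecked variant does not change that mapping. The round-robin selection on its own (a hypothetical fixed CPU list in place of the kernel cpumask):

#include <stdio.h>

int main(void)
{
	int cpus[] = { 0, 2, 5 };	/* stand-in for pd->cpumask.pcpu */
	int weight = sizeof(cpus) / sizeof(cpus[0]);

	for (unsigned seq_nr = 0; seq_nr < 7; seq_nr++) {
		/* padata_cpu_hash(): seq_nr mod number of CPUs in use. */
		int cpu_index = seq_nr % weight;
		printf("seq %u -> cpu %d\n", seq_nr, cpus[cpu_index]);
	}
	return 0;
}
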
91149diff --git a/kernel/panic.c b/kernel/panic.c
91150index 4d8d6f9..97b9b9c 100644
91151--- a/kernel/panic.c
91152+++ b/kernel/panic.c
91153@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
91154 /*
91155 * Stop ourself in panic -- architecture code may override this
91156 */
91157-void __weak panic_smp_self_stop(void)
91158+void __weak __noreturn panic_smp_self_stop(void)
91159 {
91160 while (1)
91161 cpu_relax();
91162@@ -423,7 +423,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
91163 disable_trace_on_warning();
91164
91165 pr_warn("------------[ cut here ]------------\n");
91166- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
91167+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
91168 raw_smp_processor_id(), current->pid, file, line, caller);
91169
91170 if (args)
91171@@ -488,7 +488,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
91172 */
91173 __visible void __stack_chk_fail(void)
91174 {
91175- panic("stack-protector: Kernel stack is corrupted in: %p\n",
91176+ dump_stack();
91177+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
91178 __builtin_return_address(0));
91179 }
91180 EXPORT_SYMBOL(__stack_chk_fail);
91181diff --git a/kernel/pid.c b/kernel/pid.c
91182index cd36a5e..11f185d 100644
91183--- a/kernel/pid.c
91184+++ b/kernel/pid.c
91185@@ -33,6 +33,7 @@
91186 #include <linux/rculist.h>
91187 #include <linux/bootmem.h>
91188 #include <linux/hash.h>
91189+#include <linux/security.h>
91190 #include <linux/pid_namespace.h>
91191 #include <linux/init_task.h>
91192 #include <linux/syscalls.h>
91193@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
91194
91195 int pid_max = PID_MAX_DEFAULT;
91196
91197-#define RESERVED_PIDS 300
91198+#define RESERVED_PIDS 500
91199
91200 int pid_max_min = RESERVED_PIDS + 1;
91201 int pid_max_max = PID_MAX_LIMIT;
91202@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
91203 */
91204 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
91205 {
91206+ struct task_struct *task;
91207+
91208 rcu_lockdep_assert(rcu_read_lock_held(),
91209 "find_task_by_pid_ns() needs rcu_read_lock()"
91210 " protection");
91211- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91212+
91213+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91214+
91215+ if (gr_pid_is_chrooted(task))
91216+ return NULL;
91217+
91218+ return task;
91219 }
91220
91221 struct task_struct *find_task_by_vpid(pid_t vnr)
91222@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
91223 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
91224 }
91225
91226+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
91227+{
91228+ rcu_lockdep_assert(rcu_read_lock_held(),
91229+ "find_task_by_pid_ns() needs rcu_read_lock()"
91230+ " protection");
91231+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
91232+}
91233+
91234 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
91235 {
91236 struct pid *pid;
91237diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
91238index a65ba13..f600dbb 100644
91239--- a/kernel/pid_namespace.c
91240+++ b/kernel/pid_namespace.c
91241@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
91242 void __user *buffer, size_t *lenp, loff_t *ppos)
91243 {
91244 struct pid_namespace *pid_ns = task_active_pid_ns(current);
91245- struct ctl_table tmp = *table;
91246+ ctl_table_no_const tmp = *table;
91247
91248 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
91249 return -EPERM;
91250diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
91251index 48b28d3..c63ccaf 100644
91252--- a/kernel/power/Kconfig
91253+++ b/kernel/power/Kconfig
91254@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
91255 config HIBERNATION
91256 bool "Hibernation (aka 'suspend to disk')"
91257 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
91258+ depends on !GRKERNSEC_KMEM
91259+ depends on !PAX_MEMORY_SANITIZE
91260 select HIBERNATE_CALLBACKS
91261 select LZO_COMPRESS
91262 select LZO_DECOMPRESS
91263diff --git a/kernel/power/process.c b/kernel/power/process.c
91264index 5a6ec86..3a8c884 100644
91265--- a/kernel/power/process.c
91266+++ b/kernel/power/process.c
91267@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
91268 unsigned int elapsed_msecs;
91269 bool wakeup = false;
91270 int sleep_usecs = USEC_PER_MSEC;
91271+ bool timedout = false;
91272
91273 do_gettimeofday(&start);
91274
91275@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
91276
91277 while (true) {
91278 todo = 0;
91279+ if (time_after(jiffies, end_time))
91280+ timedout = true;
91281 read_lock(&tasklist_lock);
91282 for_each_process_thread(g, p) {
91283 if (p == current || !freeze_task(p))
91284 continue;
91285
91286- if (!freezer_should_skip(p))
91287+ if (!freezer_should_skip(p)) {
91288 todo++;
91289+ if (timedout) {
91290+ printk(KERN_ERR "Task refusing to freeze:\n");
91291+ sched_show_task(p);
91292+ }
91293+ }
91294 }
91295 read_unlock(&tasklist_lock);
91296
91297@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
91298 todo += wq_busy;
91299 }
91300
91301- if (!todo || time_after(jiffies, end_time))
91302+ if (!todo || timedout)
91303 break;
91304
91305 if (pm_wakeup_pending()) {
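
The reworked loop latches the timeout into a flag before the scan, so the final pass still runs and can name every task that refused to freeze instead of bailing out silently afterwards. A reduced model of that control flow (the task list and freeze test are invented):

#include <stdbool.h>
#include <stdio.h>

struct task { const char *name; bool frozen; };

int main(void)
{
	struct task tasks[] = { { "worker", true }, { "stuck-io", false } };
	int iterations = 0, todo;
	bool timedout = false;

	while (true) {
		todo = 0;
		if (++iterations > 3)		/* time_after(jiffies, end_time) */
			timedout = true;
		for (unsigned i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++) {
			if (tasks[i].frozen)
				continue;
			todo++;
			if (timedout)		/* last pass: report the holdout */
				printf("Task refusing to freeze: %s\n",
				       tasks[i].name);
		}
		if (!todo || timedout)
			break;
	}
	return 0;
}
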
91306diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
91307index cbd69d8..2ca4a8b 100644
91308--- a/kernel/printk/console_cmdline.h
91309+++ b/kernel/printk/console_cmdline.h
91310@@ -3,7 +3,7 @@
91311
91312 struct console_cmdline
91313 {
91314- char name[8]; /* Name of the driver */
91315+ char name[16]; /* Name of the driver */
91316 int index; /* Minor dev. to use */
91317 char *options; /* Options for the driver */
91318 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
91319diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
91320index fae29e3..7df1786 100644
91321--- a/kernel/printk/printk.c
91322+++ b/kernel/printk/printk.c
91323@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
91324 if (from_file && type != SYSLOG_ACTION_OPEN)
91325 return 0;
91326
91327+#ifdef CONFIG_GRKERNSEC_DMESG
91328+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
91329+ return -EPERM;
91330+#endif
91331+
91332 if (syslog_action_restricted(type)) {
91333 if (capable(CAP_SYSLOG))
91334 return 0;
91335@@ -2464,6 +2469,7 @@ void register_console(struct console *newcon)
91336 for (i = 0, c = console_cmdline;
91337 i < MAX_CMDLINECONSOLES && c->name[0];
91338 i++, c++) {
91339+ BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
91340 if (strcmp(c->name, newcon->name) != 0)
91341 continue;
91342 if (newcon->index >= 0 &&
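
Widening console_cmdline.name from 8 to 16 bytes only stays safe if it continues to match struct console's name field, so the patch pins the two together with a BUILD_BUG_ON in register_console(). C11's _Static_assert expresses the same compile-time guard (the field sizes here are illustrative):

#include <stddef.h>

struct console_cmdline { char name[16]; int index; };
struct console          { char name[16]; int flags; };

/* Fails to compile the moment either buffer is resized alone,
 * exactly what BUILD_BUG_ON enforces in register_console(). */
_Static_assert(sizeof(((struct console_cmdline *)0)->name) ==
	       sizeof(((struct console *)0)->name),
	       "console name buffers out of sync");

int main(void) { return 0; }
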
91343diff --git a/kernel/profile.c b/kernel/profile.c
91344index 54bf5ba..df6e0a2 100644
91345--- a/kernel/profile.c
91346+++ b/kernel/profile.c
91347@@ -37,7 +37,7 @@ struct profile_hit {
91348 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
91349 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
91350
91351-static atomic_t *prof_buffer;
91352+static atomic_unchecked_t *prof_buffer;
91353 static unsigned long prof_len, prof_shift;
91354
91355 int prof_on __read_mostly;
91356@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
91357 hits[i].pc = 0;
91358 continue;
91359 }
91360- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91361+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91362 hits[i].hits = hits[i].pc = 0;
91363 }
91364 }
91365@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91366 * Add the current hit(s) and flush the write-queue out
91367 * to the global buffer:
91368 */
91369- atomic_add(nr_hits, &prof_buffer[pc]);
91370+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
91371 for (i = 0; i < NR_PROFILE_HIT; ++i) {
91372- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91373+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91374 hits[i].pc = hits[i].hits = 0;
91375 }
91376 out:
91377@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91378 {
91379 unsigned long pc;
91380 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
91381- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91382+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91383 }
91384 #endif /* !CONFIG_SMP */
91385
91386@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
91387 return -EFAULT;
91388 buf++; p++; count--; read++;
91389 }
91390- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
91391+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
91392 if (copy_to_user(buf, (void *)pnt, count))
91393 return -EFAULT;
91394 read += count;
91395@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
91396 }
91397 #endif
91398 profile_discard_flip_buffers();
91399- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
91400+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
91401 return count;
91402 }
91403
91404diff --git a/kernel/ptrace.c b/kernel/ptrace.c
91405index 1eb9d90..d40d21e 100644
91406--- a/kernel/ptrace.c
91407+++ b/kernel/ptrace.c
91408@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
91409 if (seize)
91410 flags |= PT_SEIZED;
91411 rcu_read_lock();
91412- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91413+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91414 flags |= PT_PTRACE_CAP;
91415 rcu_read_unlock();
91416 task->ptrace = flags;
91417@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
91418 break;
91419 return -EIO;
91420 }
91421- if (copy_to_user(dst, buf, retval))
91422+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
91423 return -EFAULT;
91424 copied += retval;
91425 src += retval;
91426@@ -783,7 +783,7 @@ int ptrace_request(struct task_struct *child, long request,
91427 bool seized = child->ptrace & PT_SEIZED;
91428 int ret = -EIO;
91429 siginfo_t siginfo, *si;
91430- void __user *datavp = (void __user *) data;
91431+ void __user *datavp = (__force void __user *) data;
91432 unsigned long __user *datalp = datavp;
91433 unsigned long flags;
91434
91435@@ -1029,14 +1029,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
91436 goto out;
91437 }
91438
91439+ if (gr_handle_ptrace(child, request)) {
91440+ ret = -EPERM;
91441+ goto out_put_task_struct;
91442+ }
91443+
91444 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91445 ret = ptrace_attach(child, request, addr, data);
91446 /*
91447 * Some architectures need to do book-keeping after
91448 * a ptrace attach.
91449 */
91450- if (!ret)
91451+ if (!ret) {
91452 arch_ptrace_attach(child);
91453+ gr_audit_ptrace(child);
91454+ }
91455 goto out_put_task_struct;
91456 }
91457
91458@@ -1064,7 +1071,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
91459 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
91460 if (copied != sizeof(tmp))
91461 return -EIO;
91462- return put_user(tmp, (unsigned long __user *)data);
91463+ return put_user(tmp, (__force unsigned long __user *)data);
91464 }
91465
91466 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
91467@@ -1158,7 +1165,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
91468 }
91469
91470 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91471- compat_long_t, addr, compat_long_t, data)
91472+ compat_ulong_t, addr, compat_ulong_t, data)
91473 {
91474 struct task_struct *child;
91475 long ret;
91476@@ -1174,14 +1181,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91477 goto out;
91478 }
91479
91480+ if (gr_handle_ptrace(child, request)) {
91481+ ret = -EPERM;
91482+ goto out_put_task_struct;
91483+ }
91484+
91485 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91486 ret = ptrace_attach(child, request, addr, data);
91487 /*
91488 * Some architectures need to do book-keeping after
91489 * a ptrace attach.
91490 */
91491- if (!ret)
91492+ if (!ret) {
91493 arch_ptrace_attach(child);
91494+ gr_audit_ptrace(child);
91495+ }
91496 goto out_put_task_struct;
91497 }
91498
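Three independent hardenings sit in the ptrace.c hunk. ns_capable_nolog() performs the same CAP_SYS_PTRACE check as before, but without generating a grsecurity audit/learning entry for this purely internal probe. gr_handle_ptrace() and gr_audit_ptrace() wire the RBAC policy and audit paths into both the native and compat syscalls. And ptrace_readdata() gains an explicit upper bound on retval before copy_to_user(); a userspace model of that pattern (hypothetical names, with memcpy() standing in for copy_to_user()):

	#include <errno.h>
	#include <string.h>

	/* Re-check a length computed elsewhere against the destination's real
	 * size before copying anything; mirrors the added
	 * retval > sizeof(buf) test. */
	static int copy_bounded(char *dst, const char *buf, size_t buf_size,
				size_t retval)
	{
		if (retval > buf_size)
			return -EFAULT;
		memcpy(dst, buf, retval);
		return 0;
	}

The compat_ptrace signature change (compat_long_t to compat_ulong_t for addr and data) is a correctness fix rather than a hardening: the native syscall takes unsigned values, and the signed compat types could sign-extend addresses on the way through.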
91499diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
91500index 4d559ba..053da37 100644
91501--- a/kernel/rcu/rcutorture.c
91502+++ b/kernel/rcu/rcutorture.c
91503@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91504 rcu_torture_count) = { 0 };
91505 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91506 rcu_torture_batch) = { 0 };
91507-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91508-static atomic_t n_rcu_torture_alloc;
91509-static atomic_t n_rcu_torture_alloc_fail;
91510-static atomic_t n_rcu_torture_free;
91511-static atomic_t n_rcu_torture_mberror;
91512-static atomic_t n_rcu_torture_error;
91513+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91514+static atomic_unchecked_t n_rcu_torture_alloc;
91515+static atomic_unchecked_t n_rcu_torture_alloc_fail;
91516+static atomic_unchecked_t n_rcu_torture_free;
91517+static atomic_unchecked_t n_rcu_torture_mberror;
91518+static atomic_unchecked_t n_rcu_torture_error;
91519 static long n_rcu_torture_barrier_error;
91520 static long n_rcu_torture_boost_ktrerror;
91521 static long n_rcu_torture_boost_rterror;
91522@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
91523 static long n_rcu_torture_timers;
91524 static long n_barrier_attempts;
91525 static long n_barrier_successes;
91526-static atomic_long_t n_cbfloods;
91527+static atomic_long_unchecked_t n_cbfloods;
91528 static struct list_head rcu_torture_removed;
91529
91530 static int rcu_torture_writer_state;
91531@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
91532
91533 spin_lock_bh(&rcu_torture_lock);
91534 if (list_empty(&rcu_torture_freelist)) {
91535- atomic_inc(&n_rcu_torture_alloc_fail);
91536+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
91537 spin_unlock_bh(&rcu_torture_lock);
91538 return NULL;
91539 }
91540- atomic_inc(&n_rcu_torture_alloc);
91541+ atomic_inc_unchecked(&n_rcu_torture_alloc);
91542 p = rcu_torture_freelist.next;
91543 list_del_init(p);
91544 spin_unlock_bh(&rcu_torture_lock);
91545@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
91546 static void
91547 rcu_torture_free(struct rcu_torture *p)
91548 {
91549- atomic_inc(&n_rcu_torture_free);
91550+ atomic_inc_unchecked(&n_rcu_torture_free);
91551 spin_lock_bh(&rcu_torture_lock);
91552 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
91553 spin_unlock_bh(&rcu_torture_lock);
91554@@ -312,7 +312,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
91555 i = rp->rtort_pipe_count;
91556 if (i > RCU_TORTURE_PIPE_LEN)
91557 i = RCU_TORTURE_PIPE_LEN;
91558- atomic_inc(&rcu_torture_wcount[i]);
91559+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91560 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
91561 rp->rtort_mbtest = 0;
91562 return true;
91563@@ -799,7 +799,7 @@ rcu_torture_cbflood(void *arg)
91564 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
91565 do {
91566 schedule_timeout_interruptible(cbflood_inter_holdoff);
91567- atomic_long_inc(&n_cbfloods);
91568+ atomic_long_inc_unchecked(&n_cbfloods);
91569 WARN_ON(signal_pending(current));
91570 for (i = 0; i < cbflood_n_burst; i++) {
91571 for (j = 0; j < cbflood_n_per_burst; j++) {
91572@@ -918,7 +918,7 @@ rcu_torture_writer(void *arg)
91573 i = old_rp->rtort_pipe_count;
91574 if (i > RCU_TORTURE_PIPE_LEN)
91575 i = RCU_TORTURE_PIPE_LEN;
91576- atomic_inc(&rcu_torture_wcount[i]);
91577+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91578 old_rp->rtort_pipe_count++;
91579 switch (synctype[torture_random(&rand) % nsynctypes]) {
91580 case RTWS_DEF_FREE:
91581@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
91582 return;
91583 }
91584 if (p->rtort_mbtest == 0)
91585- atomic_inc(&n_rcu_torture_mberror);
91586+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91587 spin_lock(&rand_lock);
91588 cur_ops->read_delay(&rand);
91589 n_rcu_torture_timers++;
91590@@ -1106,7 +1106,7 @@ rcu_torture_reader(void *arg)
91591 continue;
91592 }
91593 if (p->rtort_mbtest == 0)
91594- atomic_inc(&n_rcu_torture_mberror);
91595+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91596 cur_ops->read_delay(&rand);
91597 preempt_disable();
91598 pipe_count = p->rtort_pipe_count;
91599@@ -1173,11 +1173,11 @@ rcu_torture_stats_print(void)
91600 rcu_torture_current,
91601 rcu_torture_current_version,
91602 list_empty(&rcu_torture_freelist),
91603- atomic_read(&n_rcu_torture_alloc),
91604- atomic_read(&n_rcu_torture_alloc_fail),
91605- atomic_read(&n_rcu_torture_free));
91606+ atomic_read_unchecked(&n_rcu_torture_alloc),
91607+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
91608+ atomic_read_unchecked(&n_rcu_torture_free));
91609 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
91610- atomic_read(&n_rcu_torture_mberror),
91611+ atomic_read_unchecked(&n_rcu_torture_mberror),
91612 n_rcu_torture_boost_ktrerror,
91613 n_rcu_torture_boost_rterror);
91614 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
91615@@ -1189,17 +1189,17 @@ rcu_torture_stats_print(void)
91616 n_barrier_successes,
91617 n_barrier_attempts,
91618 n_rcu_torture_barrier_error);
91619- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
91620+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
91621
91622 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91623- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
91624+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
91625 n_rcu_torture_barrier_error != 0 ||
91626 n_rcu_torture_boost_ktrerror != 0 ||
91627 n_rcu_torture_boost_rterror != 0 ||
91628 n_rcu_torture_boost_failure != 0 ||
91629 i > 1) {
91630 pr_cont("%s", "!!! ");
91631- atomic_inc(&n_rcu_torture_error);
91632+ atomic_inc_unchecked(&n_rcu_torture_error);
91633 WARN_ON_ONCE(1);
91634 }
91635 pr_cont("Reader Pipe: ");
91636@@ -1216,7 +1216,7 @@ rcu_torture_stats_print(void)
91637 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91638 pr_cont("Free-Block Circulation: ");
91639 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91640- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
91641+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
91642 }
91643 pr_cont("\n");
91644
91645@@ -1560,7 +1560,7 @@ rcu_torture_cleanup(void)
91646
91647 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
91648
91649- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91650+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91651 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
91652 else if (torture_onoff_failures())
91653 rcu_torture_print_module_parms(cur_ops,
91654@@ -1685,18 +1685,18 @@ rcu_torture_init(void)
91655
91656 rcu_torture_current = NULL;
91657 rcu_torture_current_version = 0;
91658- atomic_set(&n_rcu_torture_alloc, 0);
91659- atomic_set(&n_rcu_torture_alloc_fail, 0);
91660- atomic_set(&n_rcu_torture_free, 0);
91661- atomic_set(&n_rcu_torture_mberror, 0);
91662- atomic_set(&n_rcu_torture_error, 0);
91663+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
91664+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
91665+ atomic_set_unchecked(&n_rcu_torture_free, 0);
91666+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
91667+ atomic_set_unchecked(&n_rcu_torture_error, 0);
91668 n_rcu_torture_barrier_error = 0;
91669 n_rcu_torture_boost_ktrerror = 0;
91670 n_rcu_torture_boost_rterror = 0;
91671 n_rcu_torture_boost_failure = 0;
91672 n_rcu_torture_boosts = 0;
91673 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
91674- atomic_set(&rcu_torture_wcount[i], 0);
91675+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
91676 for_each_possible_cpu(cpu) {
91677 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91678 per_cpu(rcu_torture_count, cpu)[i] = 0;
91679diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
91680index 0db5649..e6ec167 100644
91681--- a/kernel/rcu/tiny.c
91682+++ b/kernel/rcu/tiny.c
91683@@ -42,7 +42,7 @@
91684 /* Forward declarations for tiny_plugin.h. */
91685 struct rcu_ctrlblk;
91686 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
91687-static void rcu_process_callbacks(struct softirq_action *unused);
91688+static void rcu_process_callbacks(void);
91689 static void __call_rcu(struct rcu_head *head,
91690 void (*func)(struct rcu_head *rcu),
91691 struct rcu_ctrlblk *rcp);
91692@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
91693 false));
91694 }
91695
91696-static void rcu_process_callbacks(struct softirq_action *unused)
91697+static __latent_entropy void rcu_process_callbacks(void)
91698 {
91699 __rcu_process_callbacks(&rcu_sched_ctrlblk);
91700 __rcu_process_callbacks(&rcu_bh_ctrlblk);
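Every softirq handler in the patch gets the same two-part treatment shown here for tiny RCU: the unused struct softirq_action * parameter is dropped, tightening the handler type to void (*)(void), and the definition is tagged __latent_entropy so the latent_entropy gcc plugin can fold some entropy into its pool on each invocation. Assumed shape of the attribute (it compiles away when the plugin is not built):

	#ifdef LATENT_ENTROPY_PLUGIN
	#define __latent_entropy __attribute__((latent_entropy))
	#else
	#define __latent_entropy
	#endif

	static __latent_entropy void rcu_process_callbacks(void);

Note that the forward declaration in the first hunk only changes the signature; the attribute goes on the definition, which is where the plugin instruments.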
91701diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
91702index 858c565..7efd915 100644
91703--- a/kernel/rcu/tiny_plugin.h
91704+++ b/kernel/rcu/tiny_plugin.h
91705@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
91706 dump_stack();
91707 }
91708 if (*rcp->curtail && ULONG_CMP_GE(j, js))
91709- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
91710+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
91711 3 * rcu_jiffies_till_stall_check() + 3;
91712 else if (ULONG_CMP_GE(j, js))
91713- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91714+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91715 }
91716
91717 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
91718 {
91719 rcp->ticks_this_gp = 0;
91720 rcp->gp_start = jiffies;
91721- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91722+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91723 }
91724
91725 static void check_cpu_stalls(void)
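The tiny_plugin.h hunk is the first of many ACCESS_ONCE_RW() conversions in the RCU code. In this tree the plain ACCESS_ONCE() is made to yield a const lvalue, so it can only be used for reads, and every legitimate write through it has to be converted explicitly. Plausible definitions (assumed; the real ones live in the compiler headers elsewhere in this patch):

	#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))	/* reads only */
	#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))		/* writable   */

With that split, a stray ACCESS_ONCE(p) = v; becomes a compile error, which is the point: writes to shared state must be visibly opted in.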
91726diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
91727index 7680fc2..b8e9161 100644
91728--- a/kernel/rcu/tree.c
91729+++ b/kernel/rcu/tree.c
91730@@ -261,7 +261,7 @@ static void rcu_momentary_dyntick_idle(void)
91731 */
91732 rdtp = this_cpu_ptr(&rcu_dynticks);
91733 smp_mb__before_atomic(); /* Earlier stuff before QS. */
91734- atomic_add(2, &rdtp->dynticks); /* QS. */
91735+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
91736 smp_mb__after_atomic(); /* Later stuff after QS. */
91737 break;
91738 }
91739@@ -521,9 +521,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
91740 rcu_prepare_for_idle();
91741 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91742 smp_mb__before_atomic(); /* See above. */
91743- atomic_inc(&rdtp->dynticks);
91744+ atomic_inc_unchecked(&rdtp->dynticks);
91745 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
91746- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91747+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91748 rcu_dynticks_task_enter();
91749
91750 /*
91751@@ -644,10 +644,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
91752
91753 rcu_dynticks_task_exit();
91754 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
91755- atomic_inc(&rdtp->dynticks);
91756+ atomic_inc_unchecked(&rdtp->dynticks);
91757 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91758 smp_mb__after_atomic(); /* See above. */
91759- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91760+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91761 rcu_cleanup_after_idle();
91762 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
91763 if (!user && !is_idle_task(current)) {
91764@@ -768,14 +768,14 @@ void rcu_nmi_enter(void)
91765 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
91766
91767 if (rdtp->dynticks_nmi_nesting == 0 &&
91768- (atomic_read(&rdtp->dynticks) & 0x1))
91769+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
91770 return;
91771 rdtp->dynticks_nmi_nesting++;
91772 smp_mb__before_atomic(); /* Force delay from prior write. */
91773- atomic_inc(&rdtp->dynticks);
91774+ atomic_inc_unchecked(&rdtp->dynticks);
91775 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91776 smp_mb__after_atomic(); /* See above. */
91777- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91778+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91779 }
91780
91781 /**
91782@@ -794,9 +794,9 @@ void rcu_nmi_exit(void)
91783 return;
91784 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91785 smp_mb__before_atomic(); /* See above. */
91786- atomic_inc(&rdtp->dynticks);
91787+ atomic_inc_unchecked(&rdtp->dynticks);
91788 smp_mb__after_atomic(); /* Force delay to next write. */
91789- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91790+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91791 }
91792
91793 /**
91794@@ -809,7 +809,7 @@ void rcu_nmi_exit(void)
91795 */
91796 bool notrace __rcu_is_watching(void)
91797 {
91798- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91799+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91800 }
91801
91802 /**
91803@@ -892,7 +892,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
91804 static int dyntick_save_progress_counter(struct rcu_data *rdp,
91805 bool *isidle, unsigned long *maxj)
91806 {
91807- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
91808+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91809 rcu_sysidle_check_cpu(rdp, isidle, maxj);
91810 if ((rdp->dynticks_snap & 0x1) == 0) {
91811 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
91812@@ -921,7 +921,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91813 int *rcrmp;
91814 unsigned int snap;
91815
91816- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
91817+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91818 snap = (unsigned int)rdp->dynticks_snap;
91819
91820 /*
91821@@ -984,10 +984,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91822 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
91823 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
91824 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
91825- ACCESS_ONCE(rdp->cond_resched_completed) =
91826+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
91827 ACCESS_ONCE(rdp->mynode->completed);
91828 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
91829- ACCESS_ONCE(*rcrmp) =
91830+ ACCESS_ONCE_RW(*rcrmp) =
91831 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
91832 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
91833 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
91834@@ -1009,7 +1009,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
91835 rsp->gp_start = j;
91836 smp_wmb(); /* Record start time before stall time. */
91837 j1 = rcu_jiffies_till_stall_check();
91838- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
91839+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
91840 rsp->jiffies_resched = j + j1 / 2;
91841 }
91842
91843@@ -1050,7 +1050,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
91844 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91845 return;
91846 }
91847- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
91848+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
91849 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91850
91851 /*
91852@@ -1127,7 +1127,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
91853
91854 raw_spin_lock_irqsave(&rnp->lock, flags);
91855 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
91856- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
91857+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
91858 3 * rcu_jiffies_till_stall_check() + 3;
91859 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91860
91861@@ -1211,7 +1211,7 @@ void rcu_cpu_stall_reset(void)
91862 struct rcu_state *rsp;
91863
91864 for_each_rcu_flavor(rsp)
91865- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
91866+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
91867 }
91868
91869 /*
91870@@ -1597,7 +1597,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
91871 raw_spin_unlock_irq(&rnp->lock);
91872 return 0;
91873 }
91874- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
91875+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
91876
91877 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
91878 /*
91879@@ -1638,9 +1638,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
91880 rdp = this_cpu_ptr(rsp->rda);
91881 rcu_preempt_check_blocked_tasks(rnp);
91882 rnp->qsmask = rnp->qsmaskinit;
91883- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
91884+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
91885 WARN_ON_ONCE(rnp->completed != rsp->completed);
91886- ACCESS_ONCE(rnp->completed) = rsp->completed;
91887+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
91888 if (rnp == rdp->mynode)
91889 (void)__note_gp_changes(rsp, rnp, rdp);
91890 rcu_preempt_boost_start_gp(rnp);
91891@@ -1685,7 +1685,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
91892 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
91893 raw_spin_lock_irq(&rnp->lock);
91894 smp_mb__after_unlock_lock();
91895- ACCESS_ONCE(rsp->gp_flags) =
91896+ ACCESS_ONCE_RW(rsp->gp_flags) =
91897 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
91898 raw_spin_unlock_irq(&rnp->lock);
91899 }
91900@@ -1731,7 +1731,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
91901 rcu_for_each_node_breadth_first(rsp, rnp) {
91902 raw_spin_lock_irq(&rnp->lock);
91903 smp_mb__after_unlock_lock();
91904- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
91905+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
91906 rdp = this_cpu_ptr(rsp->rda);
91907 if (rnp == rdp->mynode)
91908 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
91909@@ -1746,14 +1746,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
91910 rcu_nocb_gp_set(rnp, nocb);
91911
91912 /* Declare grace period done. */
91913- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
91914+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
91915 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
91916 rsp->fqs_state = RCU_GP_IDLE;
91917 rdp = this_cpu_ptr(rsp->rda);
91918 /* Advance CBs to reduce false positives below. */
91919 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
91920 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
91921- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91922+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91923 trace_rcu_grace_period(rsp->name,
91924 ACCESS_ONCE(rsp->gpnum),
91925 TPS("newreq"));
91926@@ -1878,7 +1878,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
91927 */
91928 return false;
91929 }
91930- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91931+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91932 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
91933 TPS("newreq"));
91934
91935@@ -2099,7 +2099,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
91936 rsp->qlen += rdp->qlen;
91937 rdp->n_cbs_orphaned += rdp->qlen;
91938 rdp->qlen_lazy = 0;
91939- ACCESS_ONCE(rdp->qlen) = 0;
91940+ ACCESS_ONCE_RW(rdp->qlen) = 0;
91941 }
91942
91943 /*
91944@@ -2344,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
91945 }
91946 smp_mb(); /* List handling before counting for rcu_barrier(). */
91947 rdp->qlen_lazy -= count_lazy;
91948- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
91949+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
91950 rdp->n_cbs_invoked += count;
91951
91952 /* Reinstate batch limit if we have worked down the excess. */
91953@@ -2507,7 +2507,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
91954 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
91955 return; /* Someone beat us to it. */
91956 }
91957- ACCESS_ONCE(rsp->gp_flags) =
91958+ ACCESS_ONCE_RW(rsp->gp_flags) =
91959 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
91960 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
91961 rcu_gp_kthread_wake(rsp);
91962@@ -2553,7 +2553,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
91963 /*
91964 * Do RCU core processing for the current CPU.
91965 */
91966-static void rcu_process_callbacks(struct softirq_action *unused)
91967+static void rcu_process_callbacks(void)
91968 {
91969 struct rcu_state *rsp;
91970
91971@@ -2665,7 +2665,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
91972 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
91973 if (debug_rcu_head_queue(head)) {
91974 /* Probable double call_rcu(), so leak the callback. */
91975- ACCESS_ONCE(head->func) = rcu_leak_callback;
91976+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
91977 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
91978 return;
91979 }
91980@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
91981 local_irq_restore(flags);
91982 return;
91983 }
91984- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
91985+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
91986 if (lazy)
91987 rdp->qlen_lazy++;
91988 else
91989@@ -2966,11 +2966,11 @@ void synchronize_sched_expedited(void)
91990 * counter wrap on a 32-bit system. Quite a few more CPUs would of
91991 * course be required on a 64-bit system.
91992 */
91993- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
91994+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
91995 (ulong)atomic_long_read(&rsp->expedited_done) +
91996 ULONG_MAX / 8)) {
91997 synchronize_sched();
91998- atomic_long_inc(&rsp->expedited_wrap);
91999+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
92000 return;
92001 }
92002
92003@@ -2978,12 +2978,12 @@ void synchronize_sched_expedited(void)
92004 * Take a ticket. Note that atomic_inc_return() implies a
92005 * full memory barrier.
92006 */
92007- snap = atomic_long_inc_return(&rsp->expedited_start);
92008+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
92009 firstsnap = snap;
92010 if (!try_get_online_cpus()) {
92011 /* CPU hotplug operation in flight, fall back to normal GP. */
92012 wait_rcu_gp(call_rcu_sched);
92013- atomic_long_inc(&rsp->expedited_normal);
92014+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92015 return;
92016 }
92017 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
92018@@ -2996,7 +2996,7 @@ void synchronize_sched_expedited(void)
92019 for_each_cpu(cpu, cm) {
92020 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
92021
92022- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
92023+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
92024 cpumask_clear_cpu(cpu, cm);
92025 }
92026 if (cpumask_weight(cm) == 0)
92027@@ -3011,14 +3011,14 @@ void synchronize_sched_expedited(void)
92028 synchronize_sched_expedited_cpu_stop,
92029 NULL) == -EAGAIN) {
92030 put_online_cpus();
92031- atomic_long_inc(&rsp->expedited_tryfail);
92032+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
92033
92034 /* Check to see if someone else did our work for us. */
92035 s = atomic_long_read(&rsp->expedited_done);
92036 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92037 /* ensure test happens before caller kfree */
92038 smp_mb__before_atomic(); /* ^^^ */
92039- atomic_long_inc(&rsp->expedited_workdone1);
92040+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
92041 free_cpumask_var(cm);
92042 return;
92043 }
92044@@ -3028,7 +3028,7 @@ void synchronize_sched_expedited(void)
92045 udelay(trycount * num_online_cpus());
92046 } else {
92047 wait_rcu_gp(call_rcu_sched);
92048- atomic_long_inc(&rsp->expedited_normal);
92049+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92050 free_cpumask_var(cm);
92051 return;
92052 }
92053@@ -3038,7 +3038,7 @@ void synchronize_sched_expedited(void)
92054 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92055 /* ensure test happens before caller kfree */
92056 smp_mb__before_atomic(); /* ^^^ */
92057- atomic_long_inc(&rsp->expedited_workdone2);
92058+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
92059 free_cpumask_var(cm);
92060 return;
92061 }
92062@@ -3053,14 +3053,14 @@ void synchronize_sched_expedited(void)
92063 if (!try_get_online_cpus()) {
92064 /* CPU hotplug operation in flight, use normal GP. */
92065 wait_rcu_gp(call_rcu_sched);
92066- atomic_long_inc(&rsp->expedited_normal);
92067+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92068 free_cpumask_var(cm);
92069 return;
92070 }
92071- snap = atomic_long_read(&rsp->expedited_start);
92072+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
92073 smp_mb(); /* ensure read is before try_stop_cpus(). */
92074 }
92075- atomic_long_inc(&rsp->expedited_stoppedcpus);
92076+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
92077
92078 all_cpus_idle:
92079 free_cpumask_var(cm);
92080@@ -3072,16 +3072,16 @@ all_cpus_idle:
92081 * than we did already did their update.
92082 */
92083 do {
92084- atomic_long_inc(&rsp->expedited_done_tries);
92085+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
92086 s = atomic_long_read(&rsp->expedited_done);
92087 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
92088 /* ensure test happens before caller kfree */
92089 smp_mb__before_atomic(); /* ^^^ */
92090- atomic_long_inc(&rsp->expedited_done_lost);
92091+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
92092 break;
92093 }
92094 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
92095- atomic_long_inc(&rsp->expedited_done_exit);
92096+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
92097
92098 put_online_cpus();
92099 }
92100@@ -3287,7 +3287,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92101 * ACCESS_ONCE() to prevent the compiler from speculating
92102 * the increment to precede the early-exit check.
92103 */
92104- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92105+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92106 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
92107 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
92108 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
92109@@ -3342,7 +3342,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92110
92111 /* Increment ->n_barrier_done to prevent duplicate work. */
92112 smp_mb(); /* Keep increment after above mechanism. */
92113- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92114+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92115 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
92116 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
92117 smp_mb(); /* Keep increment before caller's subsequent code. */
92118@@ -3387,10 +3387,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
92119 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
92120 init_callback_list(rdp);
92121 rdp->qlen_lazy = 0;
92122- ACCESS_ONCE(rdp->qlen) = 0;
92123+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92124 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
92125 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
92126- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
92127+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
92128 rdp->cpu = cpu;
92129 rdp->rsp = rsp;
92130 rcu_boot_init_nocb_percpu_data(rdp);
92131@@ -3423,8 +3423,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
92132 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
92133 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
92134 rcu_sysidle_init_percpu_data(rdp->dynticks);
92135- atomic_set(&rdp->dynticks->dynticks,
92136- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
92137+ atomic_set_unchecked(&rdp->dynticks->dynticks,
92138+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
92139 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
92140
92141 /* Add CPU to rcu_node bitmasks. */
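The dynticks conversions in tree.c preserve the existing protocol untouched: the counter increments on every transition, so an odd value means the CPU is inside a non-idle (or NMI) section and an even value means idle, and the counter is expected to wrap over time, which is exactly why it can become atomic_unchecked_t. The parity assertions survive with only the accessor renamed:

	#include <stdbool.h>

	/* Model of the invariant the WARN_ON_ONCE() lines keep asserting. */
	static bool in_nonidle_section(unsigned int dynticks_snap)
	{
		return dynticks_snap & 0x1;	/* odd means not idle */
	}

atomic_add_return_unchecked(0, ...) in the snapshot paths is the same full-barrier read it always was, merely routed through the uninstrumented variant.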
92142diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
92143index 8e7b184..9c55768 100644
92144--- a/kernel/rcu/tree.h
92145+++ b/kernel/rcu/tree.h
92146@@ -87,11 +87,11 @@ struct rcu_dynticks {
92147 long long dynticks_nesting; /* Track irq/process nesting level. */
92148 /* Process level is worth LLONG_MAX/2. */
92149 int dynticks_nmi_nesting; /* Track NMI nesting level. */
92150- atomic_t dynticks; /* Even value for idle, else odd. */
92151+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
92152 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
92153 long long dynticks_idle_nesting;
92154 /* irq/process nesting level from idle. */
92155- atomic_t dynticks_idle; /* Even value for idle, else odd. */
92156+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
92157 /* "Idle" excludes userspace execution. */
92158 unsigned long dynticks_idle_jiffies;
92159 /* End of last non-NMI non-idle period. */
92160@@ -466,17 +466,17 @@ struct rcu_state {
92161 /* _rcu_barrier(). */
92162 /* End of fields guarded by barrier_mutex. */
92163
92164- atomic_long_t expedited_start; /* Starting ticket. */
92165- atomic_long_t expedited_done; /* Done ticket. */
92166- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
92167- atomic_long_t expedited_tryfail; /* # acquisition failures. */
92168- atomic_long_t expedited_workdone1; /* # done by others #1. */
92169- atomic_long_t expedited_workdone2; /* # done by others #2. */
92170- atomic_long_t expedited_normal; /* # fallbacks to normal. */
92171- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
92172- atomic_long_t expedited_done_tries; /* # tries to update _done. */
92173- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
92174- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
92175+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
92176+ atomic_long_t expedited_done; /* Done ticket. */
92177+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
92178+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
92179+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
92180+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
92181+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
92182+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
92183+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
92184+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
92185+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
92186
92187 unsigned long jiffies_force_qs; /* Time at which to invoke */
92188 /* force_quiescent_state(). */
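One counter in this block deliberately stays checked: expedited_done remains a plain atomic_long_t, presumably because it is only ever advanced through atomic_long_cmpxchg(), which the overflow instrumentation has no reason to flag, while all the inc-style tickets and statistics become unchecked. Wrapping tickets are safe in the first place because comparisons go through the modular helper (quoted from rcupdate.h):

	#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

The subtraction is done in unsigned long, so the comparison stays correct across a wrap as long as the two tickets are within half the counter range of each other.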
92189diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
92190index 3ec85cb..3687925 100644
92191--- a/kernel/rcu/tree_plugin.h
92192+++ b/kernel/rcu/tree_plugin.h
92193@@ -709,7 +709,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
92194 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
92195 {
92196 return !rcu_preempted_readers_exp(rnp) &&
92197- ACCESS_ONCE(rnp->expmask) == 0;
92198+ ACCESS_ONCE_RW(rnp->expmask) == 0;
92199 }
92200
92201 /*
92202@@ -870,7 +870,7 @@ void synchronize_rcu_expedited(void)
92203
92204 /* Clean up and exit. */
92205 smp_mb(); /* ensure expedited GP seen before counter increment. */
92206- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
92207+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
92208 sync_rcu_preempt_exp_count + 1;
92209 unlock_mb_ret:
92210 mutex_unlock(&sync_rcu_preempt_exp_mutex);
92211@@ -1426,7 +1426,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
92212 free_cpumask_var(cm);
92213 }
92214
92215-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
92216+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
92217 .store = &rcu_cpu_kthread_task,
92218 .thread_should_run = rcu_cpu_kthread_should_run,
92219 .thread_fn = rcu_cpu_kthread,
92220@@ -1900,7 +1900,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
92221 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
92222 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
92223 cpu, ticks_value, ticks_title,
92224- atomic_read(&rdtp->dynticks) & 0xfff,
92225+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
92226 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
92227 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
92228 fast_no_hz);
92229@@ -2044,7 +2044,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
92230 return;
92231 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
92232 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
92233- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
92234+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
92235 wake_up(&rdp_leader->nocb_wq);
92236 }
92237 }
92238@@ -2096,7 +2096,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
92239
92240 /* Enqueue the callback on the nocb list and update counts. */
92241 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
92242- ACCESS_ONCE(*old_rhpp) = rhp;
92243+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
92244 atomic_long_add(rhcount, &rdp->nocb_q_count);
92245 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
92246 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
92247@@ -2286,7 +2286,7 @@ wait_again:
92248 continue; /* No CBs here, try next follower. */
92249
92250 /* Move callbacks to wait-for-GP list, which is empty. */
92251- ACCESS_ONCE(rdp->nocb_head) = NULL;
92252+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
92253 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
92254 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
92255 rdp->nocb_gp_count_lazy =
92256@@ -2413,7 +2413,7 @@ static int rcu_nocb_kthread(void *arg)
92257 list = ACCESS_ONCE(rdp->nocb_follower_head);
92258 BUG_ON(!list);
92259 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
92260- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
92261+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
92262 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
92263 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
92264 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
92265@@ -2443,8 +2443,8 @@ static int rcu_nocb_kthread(void *arg)
92266 list = next;
92267 }
92268 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
92269- ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92270- ACCESS_ONCE(rdp->nocb_p_count_lazy) =
92271+ ACCESS_ONCE_RW(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92272+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) =
92273 rdp->nocb_p_count_lazy - cl;
92274 rdp->n_nocbs_invoked += c;
92275 }
92276@@ -2465,7 +2465,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
92277 if (!rcu_nocb_need_deferred_wakeup(rdp))
92278 return;
92279 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
92280- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92281+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92282 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
92283 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
92284 }
92285@@ -2588,7 +2588,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
92286 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
92287 "rcuo%c/%d", rsp->abbr, cpu);
92288 BUG_ON(IS_ERR(t));
92289- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
92290+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
92291 }
92292
92293 /*
92294@@ -2793,11 +2793,11 @@ static void rcu_sysidle_enter(int irq)
92295
92296 /* Record start of fully idle period. */
92297 j = jiffies;
92298- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
92299+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
92300 smp_mb__before_atomic();
92301- atomic_inc(&rdtp->dynticks_idle);
92302+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92303 smp_mb__after_atomic();
92304- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
92305+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
92306 }
92307
92308 /*
92309@@ -2868,9 +2868,9 @@ static void rcu_sysidle_exit(int irq)
92310
92311 /* Record end of idle period. */
92312 smp_mb__before_atomic();
92313- atomic_inc(&rdtp->dynticks_idle);
92314+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92315 smp_mb__after_atomic();
92316- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
92317+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
92318
92319 /*
92320 * If we are the timekeeping CPU, we are permitted to be non-idle
92321@@ -2915,7 +2915,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
92322 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
92323
92324 /* Pick up current idle and NMI-nesting counter and check. */
92325- cur = atomic_read(&rdtp->dynticks_idle);
92326+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
92327 if (cur & 0x1) {
92328 *isidle = false; /* We are not idle! */
92329 return;
92330@@ -2964,7 +2964,7 @@ static void rcu_sysidle(unsigned long j)
92331 case RCU_SYSIDLE_NOT:
92332
92333 /* First time all are idle, so note a short idle period. */
92334- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92335+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92336 break;
92337
92338 case RCU_SYSIDLE_SHORT:
92339@@ -3002,7 +3002,7 @@ static void rcu_sysidle_cancel(void)
92340 {
92341 smp_mb();
92342 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
92343- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
92344+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
92345 }
92346
92347 /*
92348@@ -3054,7 +3054,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
92349 smp_mb(); /* grace period precedes setting inuse. */
92350
92351 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
92352- ACCESS_ONCE(rshp->inuse) = 0;
92353+ ACCESS_ONCE_RW(rshp->inuse) = 0;
92354 }
92355
92356 /*
92357@@ -3207,7 +3207,7 @@ static void rcu_bind_gp_kthread(void)
92358 static void rcu_dynticks_task_enter(void)
92359 {
92360 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92361- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
92362+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
92363 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92364 }
92365
92366@@ -3215,6 +3215,6 @@ static void rcu_dynticks_task_enter(void)
92367 static void rcu_dynticks_task_exit(void)
92368 {
92369 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92370- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
92371+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
92372 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92373 }
92374diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
92375index 5cdc62e..cc52e88 100644
92376--- a/kernel/rcu/tree_trace.c
92377+++ b/kernel/rcu/tree_trace.c
92378@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
92379 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
92380 rdp->passed_quiesce, rdp->qs_pending);
92381 seq_printf(m, " dt=%d/%llx/%d df=%lu",
92382- atomic_read(&rdp->dynticks->dynticks),
92383+ atomic_read_unchecked(&rdp->dynticks->dynticks),
92384 rdp->dynticks->dynticks_nesting,
92385 rdp->dynticks->dynticks_nmi_nesting,
92386 rdp->dynticks_fqs);
92387@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
92388 struct rcu_state *rsp = (struct rcu_state *)m->private;
92389
92390 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
92391- atomic_long_read(&rsp->expedited_start),
92392+ atomic_long_read_unchecked(&rsp->expedited_start),
92393 atomic_long_read(&rsp->expedited_done),
92394- atomic_long_read(&rsp->expedited_wrap),
92395- atomic_long_read(&rsp->expedited_tryfail),
92396- atomic_long_read(&rsp->expedited_workdone1),
92397- atomic_long_read(&rsp->expedited_workdone2),
92398- atomic_long_read(&rsp->expedited_normal),
92399- atomic_long_read(&rsp->expedited_stoppedcpus),
92400- atomic_long_read(&rsp->expedited_done_tries),
92401- atomic_long_read(&rsp->expedited_done_lost),
92402- atomic_long_read(&rsp->expedited_done_exit));
92403+ atomic_long_read_unchecked(&rsp->expedited_wrap),
92404+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
92405+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
92406+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
92407+ atomic_long_read_unchecked(&rsp->expedited_normal),
92408+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
92409+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
92410+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
92411+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
92412 return 0;
92413 }
92414
92415diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
92416index e0d31a3..f4dafe3 100644
92417--- a/kernel/rcu/update.c
92418+++ b/kernel/rcu/update.c
92419@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
92420 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
92421 */
92422 if (till_stall_check < 3) {
92423- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
92424+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
92425 till_stall_check = 3;
92426 } else if (till_stall_check > 300) {
92427- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
92428+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
92429 till_stall_check = 300;
92430 }
92431 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
92432@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
92433 !ACCESS_ONCE(t->on_rq) ||
92434 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
92435 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
92436- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
92437+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
92438 list_del_init(&t->rcu_tasks_holdout_list);
92439 put_task_struct(t);
92440 return;
92441@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
92442 !is_idle_task(t)) {
92443 get_task_struct(t);
92444 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
92445- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
92446+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
92447 list_add(&t->rcu_tasks_holdout_list,
92448 &rcu_tasks_holdouts);
92449 }
92450@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
92451 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
92452 BUG_ON(IS_ERR(t));
92453 smp_mb(); /* Ensure others see full kthread. */
92454- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
92455+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
92456 mutex_unlock(&rcu_tasks_kthread_mutex);
92457 }
92458
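With CONFIG_GRKERNSEC_PROC_ADD, /proc/ioports and /proc/iomem stop being world-readable: a mode of 0 in proc_create() means the procfs default of 0444, whereas the grsecurity options restrict the files to root alone, or to root plus a configured group, hiding the machine's physical address layout from unprivileged users. The two restricted modes the #ifdef ladder selects:

	#include <sys/stat.h>

	static const unsigned int mode_proc_user      = S_IRUSR;		/* 0400 */
	static const unsigned int mode_proc_usergroup = S_IRUSR | S_IRGRP;	/* 0440 */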
92459diff --git a/kernel/resource.c b/kernel/resource.c
92460index 0bcebff..e7cd5b2 100644
92461--- a/kernel/resource.c
92462+++ b/kernel/resource.c
92463@@ -161,8 +161,18 @@ static const struct file_operations proc_iomem_operations = {
92464
92465 static int __init ioresources_init(void)
92466 {
92467+#ifdef CONFIG_GRKERNSEC_PROC_ADD
92468+#ifdef CONFIG_GRKERNSEC_PROC_USER
92469+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
92470+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
92471+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92472+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
92473+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
92474+#endif
92475+#else
92476 proc_create("ioports", 0, NULL, &proc_ioports_operations);
92477 proc_create("iomem", 0, NULL, &proc_iomem_operations);
92478+#endif
92479 return 0;
92480 }
92481 __initcall(ioresources_init);
92482diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
92483index 8a2e230..6020954 100644
92484--- a/kernel/sched/auto_group.c
92485+++ b/kernel/sched/auto_group.c
92486@@ -11,7 +11,7 @@
92487
92488 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
92489 static struct autogroup autogroup_default;
92490-static atomic_t autogroup_seq_nr;
92491+static atomic_unchecked_t autogroup_seq_nr;
92492
92493 void __init autogroup_init(struct task_struct *init_task)
92494 {
92495@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
92496
92497 kref_init(&ag->kref);
92498 init_rwsem(&ag->lock);
92499- ag->id = atomic_inc_return(&autogroup_seq_nr);
92500+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
92501 ag->tg = tg;
92502 #ifdef CONFIG_RT_GROUP_SCHED
92503 /*
92504diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
92505index 607f852..486bc87 100644
92506--- a/kernel/sched/completion.c
92507+++ b/kernel/sched/completion.c
92508@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
92509 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92510 * or number of jiffies left till timeout) if completed.
92511 */
92512-long __sched
92513+long __sched __intentional_overflow(-1)
92514 wait_for_completion_interruptible_timeout(struct completion *x,
92515 unsigned long timeout)
92516 {
92517@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
92518 *
92519 * Return: -ERESTARTSYS if interrupted, 0 if completed.
92520 */
92521-int __sched wait_for_completion_killable(struct completion *x)
92522+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
92523 {
92524 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
92525 if (t == -ERESTARTSYS)
92526@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
92527 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92528 * or number of jiffies left till timeout) if completed.
92529 */
92530-long __sched
92531+long __sched __intentional_overflow(-1)
92532 wait_for_completion_killable_timeout(struct completion *x,
92533 unsigned long timeout)
92534 {
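These wait_for_completion_*() wrappers legitimately return negative values such as -ERESTARTSYS through types the size_overflow gcc plugin would otherwise instrument; __intentional_overflow(-1) marks the return value as intentionally signed so the plugin stays quiet. Assumed expansion (a no-op when the plugin is not built in):

	#ifdef SIZE_OVERFLOW_PLUGIN
	#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
	#else
	#define __intentional_overflow(...)
	#endif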
92535diff --git a/kernel/sched/core.c b/kernel/sched/core.c
92536index 5eab11d..537f3b6 100644
92537--- a/kernel/sched/core.c
92538+++ b/kernel/sched/core.c
92539@@ -1897,7 +1897,7 @@ void set_numabalancing_state(bool enabled)
92540 int sysctl_numa_balancing(struct ctl_table *table, int write,
92541 void __user *buffer, size_t *lenp, loff_t *ppos)
92542 {
92543- struct ctl_table t;
92544+ ctl_table_no_const t;
92545 int err;
92546 int state = numabalancing_enabled;
92547
92548@@ -2347,8 +2347,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
92549 next->active_mm = oldmm;
92550 atomic_inc(&oldmm->mm_count);
92551 enter_lazy_tlb(oldmm, next);
92552- } else
92553+ } else {
92554 switch_mm(oldmm, mm, next);
92555+ populate_stack();
92556+ }
92557
92558 if (!prev->mm) {
92559 prev->active_mm = NULL;
92560@@ -3147,6 +3149,8 @@ int can_nice(const struct task_struct *p, const int nice)
92561 /* convert nice value [19,-20] to rlimit style value [1,40] */
92562 int nice_rlim = nice_to_rlimit(nice);
92563
92564+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
92565+
92566 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
92567 capable(CAP_SYS_NICE));
92568 }
92569@@ -3173,7 +3177,8 @@ SYSCALL_DEFINE1(nice, int, increment)
92570 nice = task_nice(current) + increment;
92571
92572 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
92573- if (increment < 0 && !can_nice(current, nice))
92574+ if (increment < 0 && (!can_nice(current, nice) ||
92575+ gr_handle_chroot_nice()))
92576 return -EPERM;
92577
92578 retval = security_task_setnice(current, nice);
92579@@ -3468,6 +3473,7 @@ recheck:
92580 if (policy != p->policy && !rlim_rtprio)
92581 return -EPERM;
92582
92583+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
92584 /* can't increase priority */
92585 if (attr->sched_priority > p->rt_priority &&
92586 attr->sched_priority > rlim_rtprio)
92587@@ -4968,6 +4974,7 @@ void idle_task_exit(void)
92588
92589 if (mm != &init_mm) {
92590 switch_mm(mm, &init_mm, current);
92591+ populate_stack();
92592 finish_arch_post_lock_switch();
92593 }
92594 mmdrop(mm);
92595@@ -5063,7 +5070,7 @@ static void migrate_tasks(unsigned int dead_cpu)
92596
92597 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
92598
92599-static struct ctl_table sd_ctl_dir[] = {
92600+static ctl_table_no_const sd_ctl_dir[] __read_only = {
92601 {
92602 .procname = "sched_domain",
92603 .mode = 0555,
92604@@ -5080,17 +5087,17 @@ static struct ctl_table sd_ctl_root[] = {
92605 {}
92606 };
92607
92608-static struct ctl_table *sd_alloc_ctl_entry(int n)
92609+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
92610 {
92611- struct ctl_table *entry =
92612+ ctl_table_no_const *entry =
92613 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
92614
92615 return entry;
92616 }
92617
92618-static void sd_free_ctl_entry(struct ctl_table **tablep)
92619+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
92620 {
92621- struct ctl_table *entry;
92622+ ctl_table_no_const *entry;
92623
92624 /*
92625 * In the intermediate directories, both the child directory and
92626@@ -5098,22 +5105,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
92627 * will always be set. In the lowest directory the names are
92628 * static strings and all have proc handlers.
92629 */
92630- for (entry = *tablep; entry->mode; entry++) {
92631- if (entry->child)
92632- sd_free_ctl_entry(&entry->child);
92633+ for (entry = tablep; entry->mode; entry++) {
92634+ if (entry->child) {
92635+ sd_free_ctl_entry(entry->child);
92636+ pax_open_kernel();
92637+ entry->child = NULL;
92638+ pax_close_kernel();
92639+ }
92640 if (entry->proc_handler == NULL)
92641 kfree(entry->procname);
92642 }
92643
92644- kfree(*tablep);
92645- *tablep = NULL;
92646+ kfree(tablep);
92647 }
92648
92649 static int min_load_idx = 0;
92650 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
92651
92652 static void
92653-set_table_entry(struct ctl_table *entry,
92654+set_table_entry(ctl_table_no_const *entry,
92655 const char *procname, void *data, int maxlen,
92656 umode_t mode, proc_handler *proc_handler,
92657 bool load_idx)
92658@@ -5133,7 +5143,7 @@ set_table_entry(struct ctl_table *entry,
92659 static struct ctl_table *
92660 sd_alloc_ctl_domain_table(struct sched_domain *sd)
92661 {
92662- struct ctl_table *table = sd_alloc_ctl_entry(14);
92663+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
92664
92665 if (table == NULL)
92666 return NULL;
92667@@ -5171,9 +5181,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
92668 return table;
92669 }
92670
92671-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
92672+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
92673 {
92674- struct ctl_table *entry, *table;
92675+ ctl_table_no_const *entry, *table;
92676 struct sched_domain *sd;
92677 int domain_num = 0, i;
92678 char buf[32];
92679@@ -5200,11 +5210,13 @@ static struct ctl_table_header *sd_sysctl_header;
92680 static void register_sched_domain_sysctl(void)
92681 {
92682 int i, cpu_num = num_possible_cpus();
92683- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
92684+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
92685 char buf[32];
92686
92687 WARN_ON(sd_ctl_dir[0].child);
92688+ pax_open_kernel();
92689 sd_ctl_dir[0].child = entry;
92690+ pax_close_kernel();
92691
92692 if (entry == NULL)
92693 return;
92694@@ -5227,8 +5239,12 @@ static void unregister_sched_domain_sysctl(void)
92695 if (sd_sysctl_header)
92696 unregister_sysctl_table(sd_sysctl_header);
92697 sd_sysctl_header = NULL;
92698- if (sd_ctl_dir[0].child)
92699- sd_free_ctl_entry(&sd_ctl_dir[0].child);
92700+ if (sd_ctl_dir[0].child) {
92701+ sd_free_ctl_entry(sd_ctl_dir[0].child);
92702+ pax_open_kernel();
92703+ sd_ctl_dir[0].child = NULL;
92704+ pax_close_kernel();
92705+ }
92706 }
92707 #else
92708 static void register_sched_domain_sysctl(void)
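The sd_ctl_* rework in core.c combines three patterns that recur across the whole patch. First, sysctl tables that really are assembled at runtime switch to ctl_table_no_const, since the constify plugin treats the plain struct as read-only. Second, sd_ctl_dir itself is placed __read_only, so it lives in write-protected memory. Third, the rare sanctioned writes to such data are bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift the write protection (on x86 this is believed to toggle the CR0.WP bit; the mechanism is defined elsewhere in the patch). The resulting write idiom:

	pax_open_kernel();		/* lift write protection    */
	sd_ctl_dir[0].child = entry;	/* the one sanctioned store */
	pax_close_kernel();		/* re-arm it immediately    */

This also explains the sd_free_ctl_entry() signature change: it now takes the table pointer by value, and clearing the caller's read-only slot happens explicitly under the open/close pair instead of through a writable struct ctl_table **. The populate_stack() calls added after switch_mm() support PaX's address-space handling and are unrelated to the sysctl changes.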
92709diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
92710index fe331fc..29d620e 100644
92711--- a/kernel/sched/fair.c
92712+++ b/kernel/sched/fair.c
92713@@ -2089,7 +2089,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
92714
92715 static void reset_ptenuma_scan(struct task_struct *p)
92716 {
92717- ACCESS_ONCE(p->mm->numa_scan_seq)++;
92718+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
92719 p->mm->numa_scan_offset = 0;
92720 }
92721
92722@@ -7651,7 +7651,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
92723 * run_rebalance_domains is triggered when needed from the scheduler tick.
92724 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
92725 */
92726-static void run_rebalance_domains(struct softirq_action *h)
92727+static __latent_entropy void run_rebalance_domains(void)
92728 {
92729 struct rq *this_rq = this_rq();
92730 enum cpu_idle_type idle = this_rq->idle_balance ?
92731diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
92732index 9a2a45c..bb91ace 100644
92733--- a/kernel/sched/sched.h
92734+++ b/kernel/sched/sched.h
92735@@ -1182,7 +1182,7 @@ struct sched_class {
92736 #ifdef CONFIG_FAIR_GROUP_SCHED
92737 void (*task_move_group) (struct task_struct *p, int on_rq);
92738 #endif
92739-};
92740+} __do_const;
92741
92742 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
92743 {
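struct sched_class contains nothing but function pointers, which makes any writable instance a convenient target for control-flow hijacking; __do_const tells the constify gcc plugin to force every instance const, so the scheduler ops tables end up in .rodata. Assumed expansion:

	#ifdef CONSTIFY_PLUGIN
	#define __do_const __attribute__((do_const))
	#else
	#define __do_const
	#endif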
92744diff --git a/kernel/seccomp.c b/kernel/seccomp.c
92745index 4ef9687..4f44028 100644
92746--- a/kernel/seccomp.c
92747+++ b/kernel/seccomp.c
92748@@ -629,7 +629,9 @@ static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd)
92749
92750 switch (action) {
92751 case SECCOMP_RET_ERRNO:
92752- /* Set the low-order 16-bits as a errno. */
92753+ /* Set low-order bits as an errno, capped at MAX_ERRNO. */
92754+ if (data > MAX_ERRNO)
92755+ data = MAX_ERRNO;
92756 syscall_set_return_value(current, task_pt_regs(current),
92757 -data, 0);
92758 goto skip;
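The SECCOMP_RET_ERRNO clamp mirrors the equivalent upstream hardening: kernel error returns are encoded as -1 through -MAX_ERRNO (4095), so a filter returning a larger data value could otherwise plant an arbitrary-looking word in the syscall return slot. A self-contained model:

	#include <stdio.h>

	#define MAX_ERRNO 4095	/* kernel convention: -1..-4095 are errnos */

	static long seccomp_errno_ret(unsigned int data)
	{
		if (data > MAX_ERRNO)
			data = MAX_ERRNO;
		return -(long)data;
	}

	int main(void)
	{
		printf("%ld %ld\n", seccomp_errno_ret(22), seccomp_errno_ret(70000));
		/* prints -22 -4095: oversized values can no longer escape */
		return 0;
	}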
92759diff --git a/kernel/signal.c b/kernel/signal.c
92760index 16a30529..25ad033 100644
92761--- a/kernel/signal.c
92762+++ b/kernel/signal.c
92763@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
92764
92765 int print_fatal_signals __read_mostly;
92766
92767-static void __user *sig_handler(struct task_struct *t, int sig)
92768+static __sighandler_t sig_handler(struct task_struct *t, int sig)
92769 {
92770 return t->sighand->action[sig - 1].sa.sa_handler;
92771 }
92772
92773-static int sig_handler_ignored(void __user *handler, int sig)
92774+static int sig_handler_ignored(__sighandler_t handler, int sig)
92775 {
92776 /* Is it explicitly or implicitly ignored? */
92777 return handler == SIG_IGN ||
92778@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
92779
92780 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
92781 {
92782- void __user *handler;
92783+ __sighandler_t handler;
92784
92785 handler = sig_handler(t, sig);
92786
92787@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
92788 atomic_inc(&user->sigpending);
92789 rcu_read_unlock();
92790
92791+ if (!override_rlimit)
92792+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
92793+
92794 if (override_rlimit ||
92795 atomic_read(&user->sigpending) <=
92796 task_rlimit(t, RLIMIT_SIGPENDING)) {
92797@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
92798
92799 int unhandled_signal(struct task_struct *tsk, int sig)
92800 {
92801- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
92802+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
92803 if (is_global_init(tsk))
92804 return 1;
92805 if (handler != SIG_IGN && handler != SIG_DFL)
92806@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
92807 }
92808 }
92809
92810+ /* allow glibc communication via tgkill to other threads in our
92811+ thread group */
92812+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
92813+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
92814+ && gr_handle_signal(t, sig))
92815+ return -EPERM;
92816+
92817 return security_task_kill(t, info, sig, 0);
92818 }
92819
92820@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92821 return send_signal(sig, info, p, 1);
92822 }
92823
92824-static int
92825+int
92826 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92827 {
92828 return send_signal(sig, info, t, 0);
92829@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92830 unsigned long int flags;
92831 int ret, blocked, ignored;
92832 struct k_sigaction *action;
92833+ int is_unhandled = 0;
92834
92835 spin_lock_irqsave(&t->sighand->siglock, flags);
92836 action = &t->sighand->action[sig-1];
92837@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92838 }
92839 if (action->sa.sa_handler == SIG_DFL)
92840 t->signal->flags &= ~SIGNAL_UNKILLABLE;
92841+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
92842+ is_unhandled = 1;
92843 ret = specific_send_sig_info(sig, info, t);
92844 spin_unlock_irqrestore(&t->sighand->siglock, flags);
92845
92846+ /* only deal with unhandled signals; Java etc. trigger SIGSEGV during
92847+ normal operation */
92848+ if (is_unhandled) {
92849+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
92850+ gr_handle_crash(t, sig);
92851+ }
92852+
92853 return ret;
92854 }
92855
92856@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92857 ret = check_kill_permission(sig, info, p);
92858 rcu_read_unlock();
92859
92860- if (!ret && sig)
92861+ if (!ret && sig) {
92862 ret = do_send_sig_info(sig, info, p, true);
92863+ if (!ret)
92864+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
92865+ }
92866
92867 return ret;
92868 }
92869@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
92870 int error = -ESRCH;
92871
92872 rcu_read_lock();
92873- p = find_task_by_vpid(pid);
92874+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
92875+ /* allow glibc communication via tgkill to other threads in our
92876+ thread group */
92877+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
92878+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
92879+ p = find_task_by_vpid_unrestricted(pid);
92880+ else
92881+#endif
92882+ p = find_task_by_vpid(pid);
92883 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
92884 error = check_kill_permission(sig, info, p);
92885 /*
92886@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
92887 }
92888 seg = get_fs();
92889 set_fs(KERNEL_DS);
92890- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
92891- (stack_t __force __user *) &uoss,
92892+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
92893+ (stack_t __force_user *) &uoss,
92894 compat_user_stack_pointer());
92895 set_fs(seg);
92896 if (ret >= 0 && uoss_ptr) {
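
The SIGRTMIN+1 carve-outs in the hunks above exist because glibc broadcasts its internal SIGSETXID signal (the kernel-side SIGRTMIN+1, i.e. 33) via tgkill to every thread when one thread calls setuid(), so the grsec signal policy must not block exactly that same-thread-group pattern. A small predicate modelling the whitelist; this is a userspace sketch, and the constant is the kernel's SIGRTMIN, not glibc's remapped one:

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

#define KERNEL_SIGRTMIN 32            /* kernel-side value */
#define SI_TKILL_CODE   (-6)          /* SI_TKILL in the kernel ABI */

/* exempt only glibc's setxid broadcast: SI_TKILL info, signal
 * SIGRTMIN+1, target thread group equal to the sender's own pid */
static bool glibc_setxid_exempt(int si_code, int sig,
                                pid_t target_tgid, pid_t sender_pid)
{
        return si_code == SI_TKILL_CODE &&
               sig == KERNEL_SIGRTMIN + 1 &&
               target_tgid == sender_pid;
}

int main(void)
{
        printf("%d\n", glibc_setxid_exempt(SI_TKILL_CODE, 33, 1234, 1234)); /* 1 */
        printf("%d\n", glibc_setxid_exempt(SI_TKILL_CODE, 33, 1234, 4321)); /* 0 */
        return 0;
}
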
92897diff --git a/kernel/smpboot.c b/kernel/smpboot.c
92898index 40190f2..8861d40 100644
92899--- a/kernel/smpboot.c
92900+++ b/kernel/smpboot.c
92901@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
92902 }
92903 smpboot_unpark_thread(plug_thread, cpu);
92904 }
92905- list_add(&plug_thread->list, &hotplug_threads);
92906+ pax_list_add(&plug_thread->list, &hotplug_threads);
92907 out:
92908 mutex_unlock(&smpboot_threads_lock);
92909 put_online_cpus();
92910@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
92911 {
92912 get_online_cpus();
92913 mutex_lock(&smpboot_threads_lock);
92914- list_del(&plug_thread->list);
92915+ pax_list_del(&plug_thread->list);
92916 smpboot_destroy_threads(plug_thread);
92917 mutex_unlock(&smpboot_threads_lock);
92918 put_online_cpus();
92919diff --git a/kernel/softirq.c b/kernel/softirq.c
92920index c497fcd..e8f90a9 100644
92921--- a/kernel/softirq.c
92922+++ b/kernel/softirq.c
92923@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
92924 EXPORT_SYMBOL(irq_stat);
92925 #endif
92926
92927-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
92928+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
92929
92930 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
92931
92932@@ -266,7 +266,7 @@ restart:
92933 kstat_incr_softirqs_this_cpu(vec_nr);
92934
92935 trace_softirq_entry(vec_nr);
92936- h->action(h);
92937+ h->action();
92938 trace_softirq_exit(vec_nr);
92939 if (unlikely(prev_count != preempt_count())) {
92940 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
92941@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
92942 or_softirq_pending(1UL << nr);
92943 }
92944
92945-void open_softirq(int nr, void (*action)(struct softirq_action *))
92946+void __init open_softirq(int nr, void (*action)(void))
92947 {
92948 softirq_vec[nr].action = action;
92949 }
92950@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
92951 }
92952 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
92953
92954-static void tasklet_action(struct softirq_action *a)
92955+static void tasklet_action(void)
92956 {
92957 struct tasklet_struct *list;
92958
92959@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
92960 }
92961 }
92962
92963-static void tasklet_hi_action(struct softirq_action *a)
92964+static __latent_entropy void tasklet_hi_action(void)
92965 {
92966 struct tasklet_struct *list;
92967
92968@@ -745,7 +745,7 @@ static struct notifier_block cpu_nfb = {
92969 .notifier_call = cpu_callback
92970 };
92971
92972-static struct smp_hotplug_thread softirq_threads = {
92973+static struct smp_hotplug_thread softirq_threads __read_only = {
92974 .store = &ksoftirqd,
92975 .thread_should_run = ksoftirqd_should_run,
92976 .thread_fn = run_ksoftirqd,
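
The softirq hunks above implement one idea: the vector is filled once at boot (open_softirq becomes __init), then lives in read-only, page-aligned memory, and the handlers drop the never-used struct softirq_action argument so nothing ever needs to write the table again. A userspace sketch of sealing a function-pointer table after registration, with mmap/mprotect standing in for __read_only plus KERNEXEC:

#include <stdio.h>
#include <sys/mman.h>

#define NR_SOFTIRQS 10

typedef void (*softirq_fn)(void);
static softirq_fn *softirq_vec;       /* one page, sealed after init */

static void timer_softirq(void) { puts("timer softirq ran"); }

static void open_softirq(int nr, softirq_fn action)
{
        softirq_vec[nr] = action;
}

int main(void)
{
        softirq_vec = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (softirq_vec == MAP_FAILED)
                return 1;

        open_softirq(1, timer_softirq);             /* "__init" phase */
        mprotect(softirq_vec, 4096, PROT_READ);     /* seal the table */

        softirq_vec[1]();              /* dispatch still works */
        /* softirq_vec[1] = NULL;      -- would now SIGSEGV */
        return 0;
}
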
92977diff --git a/kernel/sys.c b/kernel/sys.c
92978index ea9c881..2194af5 100644
92979--- a/kernel/sys.c
92980+++ b/kernel/sys.c
92981@@ -154,6 +154,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
92982 error = -EACCES;
92983 goto out;
92984 }
92985+
92986+ if (gr_handle_chroot_setpriority(p, niceval)) {
92987+ error = -EACCES;
92988+ goto out;
92989+ }
92990+
92991 no_nice = security_task_setnice(p, niceval);
92992 if (no_nice) {
92993 error = no_nice;
92994@@ -359,6 +365,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
92995 goto error;
92996 }
92997
92998+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
92999+ goto error;
93000+
93001+ if (!gid_eq(new->gid, old->gid)) {
93002+ /* make sure we generate a learn log for what will
93003+ end up being a role transition after a full-learning
93004+ policy is generated.
93005+ CAP_SETGID is required to perform a transition, and
93006+ we may not have logged a CAP_SETGID check above, e.g.
93007+ in the case where new rgid = old egid
93008+ */
93009+ gr_learn_cap(current, new, CAP_SETGID);
93010+ }
93011+
93012 if (rgid != (gid_t) -1 ||
93013 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
93014 new->sgid = new->egid;
93015@@ -394,6 +414,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
93016 old = current_cred();
93017
93018 retval = -EPERM;
93019+
93020+ if (gr_check_group_change(kgid, kgid, kgid))
93021+ goto error;
93022+
93023 if (ns_capable(old->user_ns, CAP_SETGID))
93024 new->gid = new->egid = new->sgid = new->fsgid = kgid;
93025 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
93026@@ -411,7 +435,7 @@ error:
93027 /*
93028 * change the user struct in a credentials set to match the new UID
93029 */
93030-static int set_user(struct cred *new)
93031+int set_user(struct cred *new)
93032 {
93033 struct user_struct *new_user;
93034
93035@@ -491,7 +515,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
93036 goto error;
93037 }
93038
93039+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
93040+ goto error;
93041+
93042 if (!uid_eq(new->uid, old->uid)) {
93043+ /* make sure we generate a learn log for what will
93044+ end up being a role transition after a full-learning
93045+ policy is generated.
93046+ CAP_SETUID is required to perform a transition, and
93047+ we may not have logged a CAP_SETUID check above, e.g.
93048+ in the case where new ruid = old euid
93049+ */
93050+ gr_learn_cap(current, new, CAP_SETUID);
93051 retval = set_user(new);
93052 if (retval < 0)
93053 goto error;
93054@@ -541,6 +576,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
93055 old = current_cred();
93056
93057 retval = -EPERM;
93058+
93059+ if (gr_check_crash_uid(kuid))
93060+ goto error;
93061+ if (gr_check_user_change(kuid, kuid, kuid))
93062+ goto error;
93063+
93064 if (ns_capable(old->user_ns, CAP_SETUID)) {
93065 new->suid = new->uid = kuid;
93066 if (!uid_eq(kuid, old->uid)) {
93067@@ -610,6 +651,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
93068 goto error;
93069 }
93070
93071+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
93072+ goto error;
93073+
93074 if (ruid != (uid_t) -1) {
93075 new->uid = kruid;
93076 if (!uid_eq(kruid, old->uid)) {
93077@@ -694,6 +738,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
93078 goto error;
93079 }
93080
93081+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
93082+ goto error;
93083+
93084 if (rgid != (gid_t) -1)
93085 new->gid = krgid;
93086 if (egid != (gid_t) -1)
93087@@ -758,12 +805,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
93088 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
93089 ns_capable(old->user_ns, CAP_SETUID)) {
93090 if (!uid_eq(kuid, old->fsuid)) {
93091+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
93092+ goto error;
93093+
93094 new->fsuid = kuid;
93095 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
93096 goto change_okay;
93097 }
93098 }
93099
93100+error:
93101 abort_creds(new);
93102 return old_fsuid;
93103
93104@@ -796,12 +847,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
93105 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
93106 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
93107 ns_capable(old->user_ns, CAP_SETGID)) {
93108+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
93109+ goto error;
93110+
93111 if (!gid_eq(kgid, old->fsgid)) {
93112 new->fsgid = kgid;
93113 goto change_okay;
93114 }
93115 }
93116
93117+error:
93118 abort_creds(new);
93119 return old_fsgid;
93120
93121@@ -1178,19 +1233,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
93122 return -EFAULT;
93123
93124 down_read(&uts_sem);
93125- error = __copy_to_user(&name->sysname, &utsname()->sysname,
93126+ error = __copy_to_user(name->sysname, &utsname()->sysname,
93127 __OLD_UTS_LEN);
93128 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
93129- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
93130+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
93131 __OLD_UTS_LEN);
93132 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
93133- error |= __copy_to_user(&name->release, &utsname()->release,
93134+ error |= __copy_to_user(name->release, &utsname()->release,
93135 __OLD_UTS_LEN);
93136 error |= __put_user(0, name->release + __OLD_UTS_LEN);
93137- error |= __copy_to_user(&name->version, &utsname()->version,
93138+ error |= __copy_to_user(name->version, &utsname()->version,
93139 __OLD_UTS_LEN);
93140 error |= __put_user(0, name->version + __OLD_UTS_LEN);
93141- error |= __copy_to_user(&name->machine, &utsname()->machine,
93142+ error |= __copy_to_user(name->machine, &utsname()->machine,
93143 __OLD_UTS_LEN);
93144 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
93145 up_read(&uts_sem);
93146@@ -1391,6 +1446,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
93147 */
93148 new_rlim->rlim_cur = 1;
93149 }
93150+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
93151+ is changed to a lower value. Since tasks can be created by the same
93152+ user in between this limit change and an execve by this task, force
93153+ a recheck only for this task by setting PF_NPROC_EXCEEDED.
93154+ */
93155+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
93156+ tsk->flags |= PF_NPROC_EXCEEDED;
93157 }
93158 if (!retval) {
93159 if (old_rlim)
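
The PF_NPROC_EXCEEDED hunk above closes a time-of-check gap: the process count is normally validated only at fork time, so lowering RLIMIT_NPROC afterwards would go unenforced until the next fork. A toy model of the recheck-at-execve flow; the flag name mirrors the kernel, but the bookkeeping is deliberately simplified:

#include <stdbool.h>
#include <stdio.h>

#define PF_NPROC_EXCEEDED 0x1000

struct task { unsigned flags; long nproc; long rlim_nproc; };

static void do_prlimit_nproc(struct task *t, long new_limit)
{
        t->rlim_nproc = new_limit;
        t->flags |= PF_NPROC_EXCEEDED;   /* force a recheck at execve */
}

static bool do_execve(struct task *t)
{
        if ((t->flags & PF_NPROC_EXCEEDED) && t->nproc > t->rlim_nproc) {
                puts("execve: -EAGAIN (over RLIMIT_NPROC)");
                return false;
        }
        t->flags &= ~PF_NPROC_EXCEEDED;
        return true;
}

int main(void)
{
        struct task t = { .nproc = 8, .rlim_nproc = 16 };
        do_prlimit_nproc(&t, 4);     /* limit lowered below current usage */
        do_execve(&t);               /* now refused */
        return 0;
}
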
93160diff --git a/kernel/sysctl.c b/kernel/sysctl.c
93161index 137c7f6..eab3b1a 100644
93162--- a/kernel/sysctl.c
93163+++ b/kernel/sysctl.c
93164@@ -94,7 +94,6 @@
93165
93166
93167 #if defined(CONFIG_SYSCTL)
93168-
93169 /* External variables not in a header file. */
93170 extern int max_threads;
93171 extern int suid_dumpable;
93172@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
93173
93174 /* Constants used for minimum and maximum */
93175 #ifdef CONFIG_LOCKUP_DETECTOR
93176-static int sixty = 60;
93177+static int sixty __read_only = 60;
93178 #endif
93179
93180-static int __maybe_unused neg_one = -1;
93181+static int __maybe_unused neg_one __read_only = -1;
93182
93183-static int zero;
93184-static int __maybe_unused one = 1;
93185-static int __maybe_unused two = 2;
93186-static int __maybe_unused four = 4;
93187-static unsigned long one_ul = 1;
93188-static int one_hundred = 100;
93189+static int zero __read_only = 0;
93190+static int __maybe_unused one __read_only = 1;
93191+static int __maybe_unused two __read_only = 2;
93192+static int __maybe_unused three __read_only = 3;
93193+static int __maybe_unused four __read_only = 4;
93194+static unsigned long one_ul __read_only = 1;
93195+static int one_hundred __read_only = 100;
93196 #ifdef CONFIG_PRINTK
93197-static int ten_thousand = 10000;
93198+static int ten_thousand __read_only = 10000;
93199 #endif
93200
93201 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
93202@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
93203 void __user *buffer, size_t *lenp, loff_t *ppos);
93204 #endif
93205
93206-#ifdef CONFIG_PRINTK
93207 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93208 void __user *buffer, size_t *lenp, loff_t *ppos);
93209-#endif
93210
93211 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
93212 void __user *buffer, size_t *lenp, loff_t *ppos);
93213@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
93214
93215 #endif
93216
93217+extern struct ctl_table grsecurity_table[];
93218+
93219 static struct ctl_table kern_table[];
93220 static struct ctl_table vm_table[];
93221 static struct ctl_table fs_table[];
93222@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
93223 int sysctl_legacy_va_layout;
93224 #endif
93225
93226+#ifdef CONFIG_PAX_SOFTMODE
93227+static struct ctl_table pax_table[] = {
93228+ {
93229+ .procname = "softmode",
93230+ .data = &pax_softmode,
93231+ .maxlen = sizeof(unsigned int),
93232+ .mode = 0600,
93233+ .proc_handler = &proc_dointvec,
93234+ },
93235+
93236+ { }
93237+};
93238+#endif
93239+
93240 /* The default sysctl tables: */
93241
93242 static struct ctl_table sysctl_base_table[] = {
93243@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
93244 #endif
93245
93246 static struct ctl_table kern_table[] = {
93247+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
93248+ {
93249+ .procname = "grsecurity",
93250+ .mode = 0500,
93251+ .child = grsecurity_table,
93252+ },
93253+#endif
93254+
93255+#ifdef CONFIG_PAX_SOFTMODE
93256+ {
93257+ .procname = "pax",
93258+ .mode = 0500,
93259+ .child = pax_table,
93260+ },
93261+#endif
93262+
93263 {
93264 .procname = "sched_child_runs_first",
93265 .data = &sysctl_sched_child_runs_first,
93266@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
93267 .data = &modprobe_path,
93268 .maxlen = KMOD_PATH_LEN,
93269 .mode = 0644,
93270- .proc_handler = proc_dostring,
93271+ .proc_handler = proc_dostring_modpriv,
93272 },
93273 {
93274 .procname = "modules_disabled",
93275@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
93276 .extra1 = &zero,
93277 .extra2 = &one,
93278 },
93279+#endif
93280 {
93281 .procname = "kptr_restrict",
93282 .data = &kptr_restrict,
93283 .maxlen = sizeof(int),
93284 .mode = 0644,
93285 .proc_handler = proc_dointvec_minmax_sysadmin,
93286+#ifdef CONFIG_GRKERNSEC_HIDESYM
93287+ .extra1 = &two,
93288+#else
93289 .extra1 = &zero,
93290+#endif
93291 .extra2 = &two,
93292 },
93293-#endif
93294 {
93295 .procname = "ngroups_max",
93296 .data = &ngroups_max,
93297@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
93298 */
93299 {
93300 .procname = "perf_event_paranoid",
93301- .data = &sysctl_perf_event_paranoid,
93302- .maxlen = sizeof(sysctl_perf_event_paranoid),
93303+ .data = &sysctl_perf_event_legitimately_concerned,
93304+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
93305 .mode = 0644,
93306- .proc_handler = proc_dointvec,
93307+ /* go ahead, be a hero */
93308+ .proc_handler = proc_dointvec_minmax_sysadmin,
93309+ .extra1 = &neg_one,
93310+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
93311+ .extra2 = &three,
93312+#else
93313+ .extra2 = &two,
93314+#endif
93315 },
93316 {
93317 .procname = "perf_event_mlock_kb",
93318@@ -1343,6 +1384,13 @@ static struct ctl_table vm_table[] = {
93319 .proc_handler = proc_dointvec_minmax,
93320 .extra1 = &zero,
93321 },
93322+ {
93323+ .procname = "heap_stack_gap",
93324+ .data = &sysctl_heap_stack_gap,
93325+ .maxlen = sizeof(sysctl_heap_stack_gap),
93326+ .mode = 0644,
93327+ .proc_handler = proc_doulongvec_minmax,
93328+ },
93329 #else
93330 {
93331 .procname = "nr_trim_pages",
93332@@ -1825,6 +1873,16 @@ int proc_dostring(struct ctl_table *table, int write,
93333 (char __user *)buffer, lenp, ppos);
93334 }
93335
93336+int proc_dostring_modpriv(struct ctl_table *table, int write,
93337+ void __user *buffer, size_t *lenp, loff_t *ppos)
93338+{
93339+ if (write && !capable(CAP_SYS_MODULE))
93340+ return -EPERM;
93341+
93342+ return _proc_do_string(table->data, table->maxlen, write,
93343+ buffer, lenp, ppos);
93344+}
93345+
93346 static size_t proc_skip_spaces(char **buf)
93347 {
93348 size_t ret;
93349@@ -1930,6 +1988,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
93350 len = strlen(tmp);
93351 if (len > *size)
93352 len = *size;
93353+ if (len > sizeof(tmp))
93354+ len = sizeof(tmp);
93355 if (copy_to_user(*buf, tmp, len))
93356 return -EFAULT;
93357 *size -= len;
93358@@ -2107,7 +2167,7 @@ int proc_dointvec(struct ctl_table *table, int write,
93359 static int proc_taint(struct ctl_table *table, int write,
93360 void __user *buffer, size_t *lenp, loff_t *ppos)
93361 {
93362- struct ctl_table t;
93363+ ctl_table_no_const t;
93364 unsigned long tmptaint = get_taint();
93365 int err;
93366
93367@@ -2135,7 +2195,6 @@ static int proc_taint(struct ctl_table *table, int write,
93368 return err;
93369 }
93370
93371-#ifdef CONFIG_PRINTK
93372 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93373 void __user *buffer, size_t *lenp, loff_t *ppos)
93374 {
93375@@ -2144,7 +2203,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93376
93377 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
93378 }
93379-#endif
93380
93381 struct do_proc_dointvec_minmax_conv_param {
93382 int *min;
93383@@ -2704,6 +2762,12 @@ int proc_dostring(struct ctl_table *table, int write,
93384 return -ENOSYS;
93385 }
93386
93387+int proc_dostring_modpriv(struct ctl_table *table, int write,
93388+ void __user *buffer, size_t *lenp, loff_t *ppos)
93389+{
93390+ return -ENOSYS;
93391+}
93392+
93393 int proc_dointvec(struct ctl_table *table, int write,
93394 void __user *buffer, size_t *lenp, loff_t *ppos)
93395 {
93396@@ -2760,5 +2824,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
93397 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
93398 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
93399 EXPORT_SYMBOL(proc_dostring);
93400+EXPORT_SYMBOL(proc_dostring_modpriv);
93401 EXPORT_SYMBOL(proc_doulongvec_minmax);
93402 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
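
Two themes run through the sysctl.c changes above: modprobe_path becomes writable only with CAP_SYS_MODULE (proc_dostring_modpriv), and the min/max clamp constants gain __read_only so a kernel write primitive cannot widen ranges such as kptr_restrict's. A userspace sketch of the second idea, with const standing in for __read_only:

#include <stdio.h>

/* with __read_only these live on a read-only page; const is the
 * closest userspace analogue */
static const int zero = 0, two = 2;

static int dointvec_minmax(int *val, int newval, const int *min, const int *max)
{
        if (newval < *min || newval > *max)
                return -22;            /* -EINVAL */
        *val = newval;
        return 0;
}

int main(void)
{
        int kptr_restrict = 1;
        printf("set 2 -> %d\n", dointvec_minmax(&kptr_restrict, 2, &zero, &two));
        printf("set 9 -> %d\n", dointvec_minmax(&kptr_restrict, 9, &zero, &two));
        printf("kptr_restrict=%d\n", kptr_restrict);
        return 0;
}
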
93403diff --git a/kernel/taskstats.c b/kernel/taskstats.c
93404index 670fff8..a247812 100644
93405--- a/kernel/taskstats.c
93406+++ b/kernel/taskstats.c
93407@@ -28,9 +28,12 @@
93408 #include <linux/fs.h>
93409 #include <linux/file.h>
93410 #include <linux/pid_namespace.h>
93411+#include <linux/grsecurity.h>
93412 #include <net/genetlink.h>
93413 #include <linux/atomic.h>
93414
93415+extern int gr_is_taskstats_denied(int pid);
93416+
93417 /*
93418 * Maximum length of a cpumask that can be specified in
93419 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
93420@@ -576,6 +579,9 @@ err:
93421
93422 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
93423 {
93424+ if (gr_is_taskstats_denied(current->pid))
93425+ return -EACCES;
93426+
93427 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
93428 return cmd_attr_register_cpumask(info);
93429 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
93430diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
93431index a7077d3..dd48a49 100644
93432--- a/kernel/time/alarmtimer.c
93433+++ b/kernel/time/alarmtimer.c
93434@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
93435 struct platform_device *pdev;
93436 int error = 0;
93437 int i;
93438- struct k_clock alarm_clock = {
93439+ static struct k_clock alarm_clock = {
93440 .clock_getres = alarm_clock_getres,
93441 .clock_get = alarm_clock_get,
93442 .timer_create = alarm_timer_create,
93443diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
93444index d8c724c..6b331a4 100644
93445--- a/kernel/time/hrtimer.c
93446+++ b/kernel/time/hrtimer.c
93447@@ -1399,7 +1399,7 @@ void hrtimer_peek_ahead_timers(void)
93448 local_irq_restore(flags);
93449 }
93450
93451-static void run_hrtimer_softirq(struct softirq_action *h)
93452+static __latent_entropy void run_hrtimer_softirq(void)
93453 {
93454 hrtimer_peek_ahead_timers();
93455 }
93456diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
93457index a16b678..8c5bd9d 100644
93458--- a/kernel/time/posix-cpu-timers.c
93459+++ b/kernel/time/posix-cpu-timers.c
93460@@ -1450,14 +1450,14 @@ struct k_clock clock_posix_cpu = {
93461
93462 static __init int init_posix_cpu_timers(void)
93463 {
93464- struct k_clock process = {
93465+ static struct k_clock process = {
93466 .clock_getres = process_cpu_clock_getres,
93467 .clock_get = process_cpu_clock_get,
93468 .timer_create = process_cpu_timer_create,
93469 .nsleep = process_cpu_nsleep,
93470 .nsleep_restart = process_cpu_nsleep_restart,
93471 };
93472- struct k_clock thread = {
93473+ static struct k_clock thread = {
93474 .clock_getres = thread_cpu_clock_getres,
93475 .clock_get = thread_cpu_clock_get,
93476 .timer_create = thread_cpu_timer_create,
93477diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
93478index 31ea01f..7fc61ef 100644
93479--- a/kernel/time/posix-timers.c
93480+++ b/kernel/time/posix-timers.c
93481@@ -43,6 +43,7 @@
93482 #include <linux/hash.h>
93483 #include <linux/posix-clock.h>
93484 #include <linux/posix-timers.h>
93485+#include <linux/grsecurity.h>
93486 #include <linux/syscalls.h>
93487 #include <linux/wait.h>
93488 #include <linux/workqueue.h>
93489@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
93490 * which we beg off on and pass to do_sys_settimeofday().
93491 */
93492
93493-static struct k_clock posix_clocks[MAX_CLOCKS];
93494+static struct k_clock *posix_clocks[MAX_CLOCKS];
93495
93496 /*
93497 * These ones are defined below.
93498@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
93499 */
93500 static __init int init_posix_timers(void)
93501 {
93502- struct k_clock clock_realtime = {
93503+ static struct k_clock clock_realtime = {
93504 .clock_getres = hrtimer_get_res,
93505 .clock_get = posix_clock_realtime_get,
93506 .clock_set = posix_clock_realtime_set,
93507@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
93508 .timer_get = common_timer_get,
93509 .timer_del = common_timer_del,
93510 };
93511- struct k_clock clock_monotonic = {
93512+ static struct k_clock clock_monotonic = {
93513 .clock_getres = hrtimer_get_res,
93514 .clock_get = posix_ktime_get_ts,
93515 .nsleep = common_nsleep,
93516@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
93517 .timer_get = common_timer_get,
93518 .timer_del = common_timer_del,
93519 };
93520- struct k_clock clock_monotonic_raw = {
93521+ static struct k_clock clock_monotonic_raw = {
93522 .clock_getres = hrtimer_get_res,
93523 .clock_get = posix_get_monotonic_raw,
93524 };
93525- struct k_clock clock_realtime_coarse = {
93526+ static struct k_clock clock_realtime_coarse = {
93527 .clock_getres = posix_get_coarse_res,
93528 .clock_get = posix_get_realtime_coarse,
93529 };
93530- struct k_clock clock_monotonic_coarse = {
93531+ static struct k_clock clock_monotonic_coarse = {
93532 .clock_getres = posix_get_coarse_res,
93533 .clock_get = posix_get_monotonic_coarse,
93534 };
93535- struct k_clock clock_tai = {
93536+ static struct k_clock clock_tai = {
93537 .clock_getres = hrtimer_get_res,
93538 .clock_get = posix_get_tai,
93539 .nsleep = common_nsleep,
93540@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
93541 .timer_get = common_timer_get,
93542 .timer_del = common_timer_del,
93543 };
93544- struct k_clock clock_boottime = {
93545+ static struct k_clock clock_boottime = {
93546 .clock_getres = hrtimer_get_res,
93547 .clock_get = posix_get_boottime,
93548 .nsleep = common_nsleep,
93549@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
93550 return;
93551 }
93552
93553- posix_clocks[clock_id] = *new_clock;
93554+ posix_clocks[clock_id] = new_clock;
93555 }
93556 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
93557
93558@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
93559 return (id & CLOCKFD_MASK) == CLOCKFD ?
93560 &clock_posix_dynamic : &clock_posix_cpu;
93561
93562- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
93563+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
93564 return NULL;
93565- return &posix_clocks[id];
93566+ return posix_clocks[id];
93567 }
93568
93569 static int common_timer_create(struct k_itimer *new_timer)
93570@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
93571 struct k_clock *kc = clockid_to_kclock(which_clock);
93572 struct k_itimer *new_timer;
93573 int error, new_timer_id;
93574- sigevent_t event;
93575+ sigevent_t event = { };
93576 int it_id_set = IT_ID_NOT_SET;
93577
93578 if (!kc)
93579@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93580 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93581 return -EFAULT;
93582
93583+ /* only the CLOCK_REALTIME clock can be set; all other clocks
93584+ have their clock_set fptr set to a nosettime dummy function.
93585+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
93586+ call common_clock_set, which calls do_sys_settimeofday, which
93587+ we hook.
93588+ */
93589+
93590 return kc->clock_set(which_clock, &new_tp);
93591 }
93592
93593diff --git a/kernel/time/time.c b/kernel/time/time.c
93594index 2c85b77..6530536 100644
93595--- a/kernel/time/time.c
93596+++ b/kernel/time/time.c
93597@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
93598 return error;
93599
93600 if (tz) {
93601+ /* we log in do_settimeofday, called below, so don't log twice
93602+ */
93603+ if (!tv)
93604+ gr_log_timechange();
93605+
93606 sys_tz = *tz;
93607 update_vsyscall_tz();
93608 if (firsttime) {
93609diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
93610index 6a93185..288c331 100644
93611--- a/kernel/time/timekeeping.c
93612+++ b/kernel/time/timekeeping.c
93613@@ -15,6 +15,7 @@
93614 #include <linux/init.h>
93615 #include <linux/mm.h>
93616 #include <linux/sched.h>
93617+#include <linux/grsecurity.h>
93618 #include <linux/syscore_ops.h>
93619 #include <linux/clocksource.h>
93620 #include <linux/jiffies.h>
93621@@ -775,6 +776,8 @@ int do_settimeofday64(const struct timespec64 *ts)
93622 if (!timespec64_valid_strict(ts))
93623 return -EINVAL;
93624
93625+ gr_log_timechange();
93626+
93627 raw_spin_lock_irqsave(&timekeeper_lock, flags);
93628 write_seqcount_begin(&tk_core.seq);
93629
93630diff --git a/kernel/time/timer.c b/kernel/time/timer.c
93631index 2d3f5c5..7ed7dc5 100644
93632--- a/kernel/time/timer.c
93633+++ b/kernel/time/timer.c
93634@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
93635 /*
93636 * This function runs timers and the timer-tq in bottom half context.
93637 */
93638-static void run_timer_softirq(struct softirq_action *h)
93639+static __latent_entropy void run_timer_softirq(void)
93640 {
93641 struct tvec_base *base = __this_cpu_read(tvec_bases);
93642
93643@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
93644 *
93645 * In all cases the return value is guaranteed to be non-negative.
93646 */
93647-signed long __sched schedule_timeout(signed long timeout)
93648+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
93649 {
93650 struct timer_list timer;
93651 unsigned long expire;
93652diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
93653index 61ed862..3b52c65 100644
93654--- a/kernel/time/timer_list.c
93655+++ b/kernel/time/timer_list.c
93656@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
93657
93658 static void print_name_offset(struct seq_file *m, void *sym)
93659 {
93660+#ifdef CONFIG_GRKERNSEC_HIDESYM
93661+ SEQ_printf(m, "<%p>", NULL);
93662+#else
93663 char symname[KSYM_NAME_LEN];
93664
93665 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
93666 SEQ_printf(m, "<%pK>", sym);
93667 else
93668 SEQ_printf(m, "%s", symname);
93669+#endif
93670 }
93671
93672 static void
93673@@ -119,7 +123,11 @@ next_one:
93674 static void
93675 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
93676 {
93677+#ifdef CONFIG_GRKERNSEC_HIDESYM
93678+ SEQ_printf(m, " .base: %p\n", NULL);
93679+#else
93680 SEQ_printf(m, " .base: %pK\n", base);
93681+#endif
93682 SEQ_printf(m, " .index: %d\n",
93683 base->index);
93684 SEQ_printf(m, " .resolution: %Lu nsecs\n",
93685@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
93686 {
93687 struct proc_dir_entry *pe;
93688
93689+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93690+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
93691+#else
93692 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
93693+#endif
93694 if (!pe)
93695 return -ENOMEM;
93696 return 0;
93697diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
93698index 1fb08f2..ca4bb1e 100644
93699--- a/kernel/time/timer_stats.c
93700+++ b/kernel/time/timer_stats.c
93701@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
93702 static unsigned long nr_entries;
93703 static struct entry entries[MAX_ENTRIES];
93704
93705-static atomic_t overflow_count;
93706+static atomic_unchecked_t overflow_count;
93707
93708 /*
93709 * The entries are in a hash-table, for fast lookup:
93710@@ -140,7 +140,7 @@ static void reset_entries(void)
93711 nr_entries = 0;
93712 memset(entries, 0, sizeof(entries));
93713 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
93714- atomic_set(&overflow_count, 0);
93715+ atomic_set_unchecked(&overflow_count, 0);
93716 }
93717
93718 static struct entry *alloc_entry(void)
93719@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93720 if (likely(entry))
93721 entry->count++;
93722 else
93723- atomic_inc(&overflow_count);
93724+ atomic_inc_unchecked(&overflow_count);
93725
93726 out_unlock:
93727 raw_spin_unlock_irqrestore(lock, flags);
93728@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93729
93730 static void print_name_offset(struct seq_file *m, unsigned long addr)
93731 {
93732+#ifdef CONFIG_GRKERNSEC_HIDESYM
93733+ seq_printf(m, "<%p>", NULL);
93734+#else
93735 char symname[KSYM_NAME_LEN];
93736
93737 if (lookup_symbol_name(addr, symname) < 0)
93738- seq_printf(m, "<%p>", (void *)addr);
93739+ seq_printf(m, "<%pK>", (void *)addr);
93740 else
93741 seq_printf(m, "%s", symname);
93742+#endif
93743 }
93744
93745 static int tstats_show(struct seq_file *m, void *v)
93746@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
93747
93748 seq_puts(m, "Timer Stats Version: v0.3\n");
93749 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
93750- if (atomic_read(&overflow_count))
93751- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
93752+ if (atomic_read_unchecked(&overflow_count))
93753+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
93754 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
93755
93756 for (i = 0; i < nr_entries; i++) {
93757@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
93758 {
93759 struct proc_dir_entry *pe;
93760
93761+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93762+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
93763+#else
93764 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
93765+#endif
93766 if (!pe)
93767 return -ENOMEM;
93768 return 0;
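
Both timer_list.c and timer_stats.c above get the same GRKERNSEC_HIDESYM treatment: the /proc output replaces the kernel address with a NULL placeholder (or falls back to %pK, which already censors for unprivileged readers), and the files' modes drop to 0400/0600 under PROC_ADD. A sketch of the print path, with a plain bool standing in for the config option:

#include <stdbool.h>
#include <stdio.h>

static bool hidesym = true;           /* CONFIG_GRKERNSEC_HIDESYM analogue */

static void print_name_offset(const void *sym)
{
        if (hidesym)
                printf("<%p>", (void *)NULL);   /* leak nothing */
        else
                printf("<%p>", sym);            /* %pK-style fallback */
}

int main(void)
{
        int dummy;
        print_name_offset(&dummy);    /* prints "<(nil)>" when hiding */
        putchar('\n');
        return 0;
}
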
93769diff --git a/kernel/torture.c b/kernel/torture.c
93770index dd70993..0bf694b 100644
93771--- a/kernel/torture.c
93772+++ b/kernel/torture.c
93773@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
93774 mutex_lock(&fullstop_mutex);
93775 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
93776 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
93777- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
93778+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
93779 } else {
93780 pr_warn("Concurrent rmmod and shutdown illegal!\n");
93781 }
93782@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
93783 if (!torture_must_stop()) {
93784 if (stutter > 1) {
93785 schedule_timeout_interruptible(stutter - 1);
93786- ACCESS_ONCE(stutter_pause_test) = 2;
93787+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
93788 }
93789 schedule_timeout_interruptible(1);
93790- ACCESS_ONCE(stutter_pause_test) = 1;
93791+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
93792 }
93793 if (!torture_must_stop())
93794 schedule_timeout_interruptible(stutter);
93795- ACCESS_ONCE(stutter_pause_test) = 0;
93796+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
93797 torture_shutdown_absorb("torture_stutter");
93798 } while (!torture_must_stop());
93799 torture_kthread_stopping("torture_stutter");
93800@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
93801 schedule_timeout_uninterruptible(10);
93802 return true;
93803 }
93804- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
93805+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
93806 mutex_unlock(&fullstop_mutex);
93807 torture_shutdown_cleanup();
93808 torture_shuffle_cleanup();
93809diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
93810index 483cecf..ac46091 100644
93811--- a/kernel/trace/blktrace.c
93812+++ b/kernel/trace/blktrace.c
93813@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
93814 struct blk_trace *bt = filp->private_data;
93815 char buf[16];
93816
93817- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
93818+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
93819
93820 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
93821 }
93822@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
93823 return 1;
93824
93825 bt = buf->chan->private_data;
93826- atomic_inc(&bt->dropped);
93827+ atomic_inc_unchecked(&bt->dropped);
93828 return 0;
93829 }
93830
93831@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
93832
93833 bt->dir = dir;
93834 bt->dev = dev;
93835- atomic_set(&bt->dropped, 0);
93836+ atomic_set_unchecked(&bt->dropped, 0);
93837 INIT_LIST_HEAD(&bt->running_list);
93838
93839 ret = -EIO;
93840diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
93841index 224e768..8303c84 100644
93842--- a/kernel/trace/ftrace.c
93843+++ b/kernel/trace/ftrace.c
93844@@ -2372,12 +2372,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
93845 if (unlikely(ftrace_disabled))
93846 return 0;
93847
93848+ ret = ftrace_arch_code_modify_prepare();
93849+ FTRACE_WARN_ON(ret);
93850+ if (ret)
93851+ return 0;
93852+
93853 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
93854+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
93855 if (ret) {
93856 ftrace_bug(ret, rec);
93857- return 0;
93858 }
93859- return 1;
93860+ return ret ? 0 : 1;
93861 }
93862
93863 /*
93864@@ -4754,8 +4759,10 @@ static int ftrace_process_locs(struct module *mod,
93865 if (!count)
93866 return 0;
93867
93868+ pax_open_kernel();
93869 sort(start, count, sizeof(*start),
93870 ftrace_cmp_ips, ftrace_swap_ips);
93871+ pax_close_kernel();
93872
93873 start_pg = ftrace_allocate_pages(count);
93874 if (!start_pg)
93875@@ -5633,7 +5640,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
93876
93877 if (t->ret_stack == NULL) {
93878 atomic_set(&t->tracing_graph_pause, 0);
93879- atomic_set(&t->trace_overrun, 0);
93880+ atomic_set_unchecked(&t->trace_overrun, 0);
93881 t->curr_ret_stack = -1;
93882 /* Make sure the tasks see the -1 first: */
93883 smp_wmb();
93884@@ -5856,7 +5863,7 @@ static void
93885 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
93886 {
93887 atomic_set(&t->tracing_graph_pause, 0);
93888- atomic_set(&t->trace_overrun, 0);
93889+ atomic_set_unchecked(&t->trace_overrun, 0);
93890 t->ftrace_timestamp = 0;
93891 /* make curr_ret_stack visible before we add the ret_stack */
93892 smp_wmb();
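
ftrace_code_disable() above gains the ftrace_arch_code_modify_prepare()/post_process() bracket that other text-patching paths already use: on PaX kernels the prepare hook is what opens kernel text for writing, so calling ftrace_make_nop() without it would fault, and failures of either step are now warned about rather than ignored. The hedged shape of the fixed path, as a userspace-compilable model:

#include <stdio.h>

static int prepared;

static int ftrace_arch_code_modify_prepare(void)      { prepared = 1; return 0; }
static int ftrace_arch_code_modify_post_process(void) { prepared = 0; return 0; }

static int ftrace_make_nop(void)
{
        return prepared ? 0 : -1;     /* writing sealed text would fault */
}

int main(void)
{
        int ret;

        if (ftrace_arch_code_modify_prepare())
                return 0;                  /* report and bail, don't patch */
        ret = ftrace_make_nop();
        ftrace_arch_code_modify_post_process();
        printf("patch %s\n", ret ? "failed" : "succeeded");
        return 0;
}
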
93893diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
93894index d2e151c..b68c835 100644
93895--- a/kernel/trace/ring_buffer.c
93896+++ b/kernel/trace/ring_buffer.c
93897@@ -350,9 +350,9 @@ struct buffer_data_page {
93898 */
93899 struct buffer_page {
93900 struct list_head list; /* list of buffer pages */
93901- local_t write; /* index for next write */
93902+ local_unchecked_t write; /* index for next write */
93903 unsigned read; /* index for next read */
93904- local_t entries; /* entries on this page */
93905+ local_unchecked_t entries; /* entries on this page */
93906 unsigned long real_end; /* real end of data */
93907 struct buffer_data_page *page; /* Actual data page */
93908 };
93909@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
93910 unsigned long last_overrun;
93911 local_t entries_bytes;
93912 local_t entries;
93913- local_t overrun;
93914- local_t commit_overrun;
93915+ local_unchecked_t overrun;
93916+ local_unchecked_t commit_overrun;
93917 local_t dropped_events;
93918 local_t committing;
93919 local_t commits;
93920@@ -1047,8 +1047,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
93921 *
93922 * We add a counter to the write field to denote this.
93923 */
93924- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
93925- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
93926+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
93927+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
93928
93929 /*
93930 * Just make sure we have seen our old_write and synchronize
93931@@ -1076,8 +1076,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
93932 * cmpxchg to only update if an interrupt did not already
93933 * do it for us. If the cmpxchg fails, we don't care.
93934 */
93935- (void)local_cmpxchg(&next_page->write, old_write, val);
93936- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
93937+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
93938+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
93939
93940 /*
93941 * No need to worry about races with clearing out the commit.
93942@@ -1445,12 +1445,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
93943
93944 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
93945 {
93946- return local_read(&bpage->entries) & RB_WRITE_MASK;
93947+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
93948 }
93949
93950 static inline unsigned long rb_page_write(struct buffer_page *bpage)
93951 {
93952- return local_read(&bpage->write) & RB_WRITE_MASK;
93953+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
93954 }
93955
93956 static int
93957@@ -1545,7 +1545,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
93958 * bytes consumed in ring buffer from here.
93959 * Increment overrun to account for the lost events.
93960 */
93961- local_add(page_entries, &cpu_buffer->overrun);
93962+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
93963 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
93964 }
93965
93966@@ -2107,7 +2107,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
93967 * it is our responsibility to update
93968 * the counters.
93969 */
93970- local_add(entries, &cpu_buffer->overrun);
93971+ local_add_unchecked(entries, &cpu_buffer->overrun);
93972 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
93973
93974 /*
93975@@ -2257,7 +2257,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
93976 if (tail == BUF_PAGE_SIZE)
93977 tail_page->real_end = 0;
93978
93979- local_sub(length, &tail_page->write);
93980+ local_sub_unchecked(length, &tail_page->write);
93981 return;
93982 }
93983
93984@@ -2292,7 +2292,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
93985 rb_event_set_padding(event);
93986
93987 /* Set the write back to the previous setting */
93988- local_sub(length, &tail_page->write);
93989+ local_sub_unchecked(length, &tail_page->write);
93990 return;
93991 }
93992
93993@@ -2304,7 +2304,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
93994
93995 /* Set write to end of buffer */
93996 length = (tail + length) - BUF_PAGE_SIZE;
93997- local_sub(length, &tail_page->write);
93998+ local_sub_unchecked(length, &tail_page->write);
93999 }
94000
94001 /*
94002@@ -2330,7 +2330,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94003 * about it.
94004 */
94005 if (unlikely(next_page == commit_page)) {
94006- local_inc(&cpu_buffer->commit_overrun);
94007+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94008 goto out_reset;
94009 }
94010
94011@@ -2386,7 +2386,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94012 cpu_buffer->tail_page) &&
94013 (cpu_buffer->commit_page ==
94014 cpu_buffer->reader_page))) {
94015- local_inc(&cpu_buffer->commit_overrun);
94016+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94017 goto out_reset;
94018 }
94019 }
94020@@ -2434,7 +2434,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94021 length += RB_LEN_TIME_EXTEND;
94022
94023 tail_page = cpu_buffer->tail_page;
94024- write = local_add_return(length, &tail_page->write);
94025+ write = local_add_return_unchecked(length, &tail_page->write);
94026
94027 /* set write to only the index of the write */
94028 write &= RB_WRITE_MASK;
94029@@ -2458,7 +2458,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94030 kmemcheck_annotate_bitfield(event, bitfield);
94031 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
94032
94033- local_inc(&tail_page->entries);
94034+ local_inc_unchecked(&tail_page->entries);
94035
94036 /*
94037 * If this is the first commit on the page, then update
94038@@ -2491,7 +2491,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94039
94040 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
94041 unsigned long write_mask =
94042- local_read(&bpage->write) & ~RB_WRITE_MASK;
94043+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
94044 unsigned long event_length = rb_event_length(event);
94045 /*
94046 * This is on the tail page. It is possible that
94047@@ -2501,7 +2501,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94048 */
94049 old_index += write_mask;
94050 new_index += write_mask;
94051- index = local_cmpxchg(&bpage->write, old_index, new_index);
94052+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
94053 if (index == old_index) {
94054 /* update counters */
94055 local_sub(event_length, &cpu_buffer->entries_bytes);
94056@@ -2904,7 +2904,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94057
94058 /* Do the likely case first */
94059 if (likely(bpage->page == (void *)addr)) {
94060- local_dec(&bpage->entries);
94061+ local_dec_unchecked(&bpage->entries);
94062 return;
94063 }
94064
94065@@ -2916,7 +2916,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94066 start = bpage;
94067 do {
94068 if (bpage->page == (void *)addr) {
94069- local_dec(&bpage->entries);
94070+ local_dec_unchecked(&bpage->entries);
94071 return;
94072 }
94073 rb_inc_page(cpu_buffer, &bpage);
94074@@ -3200,7 +3200,7 @@ static inline unsigned long
94075 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
94076 {
94077 return local_read(&cpu_buffer->entries) -
94078- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
94079+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
94080 }
94081
94082 /**
94083@@ -3289,7 +3289,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
94084 return 0;
94085
94086 cpu_buffer = buffer->buffers[cpu];
94087- ret = local_read(&cpu_buffer->overrun);
94088+ ret = local_read_unchecked(&cpu_buffer->overrun);
94089
94090 return ret;
94091 }
94092@@ -3312,7 +3312,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
94093 return 0;
94094
94095 cpu_buffer = buffer->buffers[cpu];
94096- ret = local_read(&cpu_buffer->commit_overrun);
94097+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
94098
94099 return ret;
94100 }
94101@@ -3397,7 +3397,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
94102 /* if you care about this being correct, lock the buffer */
94103 for_each_buffer_cpu(buffer, cpu) {
94104 cpu_buffer = buffer->buffers[cpu];
94105- overruns += local_read(&cpu_buffer->overrun);
94106+ overruns += local_read_unchecked(&cpu_buffer->overrun);
94107 }
94108
94109 return overruns;
94110@@ -3568,8 +3568,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94111 /*
94112 * Reset the reader page to size zero.
94113 */
94114- local_set(&cpu_buffer->reader_page->write, 0);
94115- local_set(&cpu_buffer->reader_page->entries, 0);
94116+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94117+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94118 local_set(&cpu_buffer->reader_page->page->commit, 0);
94119 cpu_buffer->reader_page->real_end = 0;
94120
94121@@ -3603,7 +3603,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94122 * want to compare with the last_overrun.
94123 */
94124 smp_mb();
94125- overwrite = local_read(&(cpu_buffer->overrun));
94126+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
94127
94128 /*
94129 * Here's the tricky part.
94130@@ -4175,8 +4175,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94131
94132 cpu_buffer->head_page
94133 = list_entry(cpu_buffer->pages, struct buffer_page, list);
94134- local_set(&cpu_buffer->head_page->write, 0);
94135- local_set(&cpu_buffer->head_page->entries, 0);
94136+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
94137+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
94138 local_set(&cpu_buffer->head_page->page->commit, 0);
94139
94140 cpu_buffer->head_page->read = 0;
94141@@ -4186,14 +4186,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94142
94143 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
94144 INIT_LIST_HEAD(&cpu_buffer->new_pages);
94145- local_set(&cpu_buffer->reader_page->write, 0);
94146- local_set(&cpu_buffer->reader_page->entries, 0);
94147+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94148+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94149 local_set(&cpu_buffer->reader_page->page->commit, 0);
94150 cpu_buffer->reader_page->read = 0;
94151
94152 local_set(&cpu_buffer->entries_bytes, 0);
94153- local_set(&cpu_buffer->overrun, 0);
94154- local_set(&cpu_buffer->commit_overrun, 0);
94155+ local_set_unchecked(&cpu_buffer->overrun, 0);
94156+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
94157 local_set(&cpu_buffer->dropped_events, 0);
94158 local_set(&cpu_buffer->entries, 0);
94159 local_set(&cpu_buffer->committing, 0);
94160@@ -4598,8 +4598,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
94161 rb_init_page(bpage);
94162 bpage = reader->page;
94163 reader->page = *data_page;
94164- local_set(&reader->write, 0);
94165- local_set(&reader->entries, 0);
94166+ local_set_unchecked(&reader->write, 0);
94167+ local_set_unchecked(&reader->entries, 0);
94168 reader->read = 0;
94169 *data_page = bpage;
94170
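
The long run of *_unchecked conversions above is the PaX REFCOUNT opt-out: under that feature, plain atomic_t/local_t increments detect signed overflow and refuse to wrap, which stops refcount bugs from cycling to zero and triggering use-after-free; pure statistics counters like overrun and dropped_events may legitimately wrap, so they switch to the unchecked variants. A userspace sketch of the distinction (the saturation check here is an explicit stand-in for what PaX does with overflow traps):

#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)
{
        if (v->counter == INT_MAX) {    /* saturate instead of wrapping */
                fprintf(stderr, "refcount overflow detected\n");
                return;
        }
        __sync_fetch_and_add(&v->counter, 1);
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __sync_fetch_and_add(&v->counter, 1);  /* wrap is acceptable */
}

int main(void)
{
        atomic_t ref = { INT_MAX };
        atomic_unchecked_t stat = { INT_MAX };
        atomic_inc(&ref);            /* refuses to wrap */
        atomic_inc_unchecked(&stat); /* wraps, by design */
        printf("ref=%d stat=%d\n", ref.counter, stat.counter);
        return 0;
}
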
94171diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
94172index 361a827..6a319a3 100644
94173--- a/kernel/trace/trace.c
94174+++ b/kernel/trace/trace.c
94175@@ -3499,7 +3499,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
94176 return 0;
94177 }
94178
94179-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
94180+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
94181 {
94182 /* do nothing if flag is already set */
94183 if (!!(trace_flags & mask) == !!enabled)
94184diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
94185index 8de48ba..3e5b4fa 100644
94186--- a/kernel/trace/trace.h
94187+++ b/kernel/trace/trace.h
94188@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
94189 void trace_printk_init_buffers(void);
94190 void trace_printk_start_comm(void);
94191 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
94192-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
94193+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
94194
94195 /*
94196 * Normal trace_printk() and friends allocates special buffers
94197diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
94198index 57b67b1..66082a9 100644
94199--- a/kernel/trace/trace_clock.c
94200+++ b/kernel/trace/trace_clock.c
94201@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
94202 return now;
94203 }
94204
94205-static atomic64_t trace_counter;
94206+static atomic64_unchecked_t trace_counter;
94207
94208 /*
94209 * trace_clock_counter(): simply an atomic counter.
94210@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
94211 */
94212 u64 notrace trace_clock_counter(void)
94213 {
94214- return atomic64_add_return(1, &trace_counter);
94215+ return atomic64_inc_return_unchecked(&trace_counter);
94216 }
94217diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
94218index b03a0ea..2df3168 100644
94219--- a/kernel/trace/trace_events.c
94220+++ b/kernel/trace/trace_events.c
94221@@ -1755,7 +1755,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
94222 return 0;
94223 }
94224
94225-struct ftrace_module_file_ops;
94226 static void __add_event_to_tracers(struct ftrace_event_call *call);
94227
94228 /* Add an additional event_call dynamically */
94229diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
94230index ba47600..d0e47fa 100644
94231--- a/kernel/trace/trace_functions_graph.c
94232+++ b/kernel/trace/trace_functions_graph.c
94233@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
94234
94235 /* The return trace stack is full */
94236 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
94237- atomic_inc(&current->trace_overrun);
94238+ atomic_inc_unchecked(&current->trace_overrun);
94239 return -EBUSY;
94240 }
94241
94242@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
94243 *ret = current->ret_stack[index].ret;
94244 trace->func = current->ret_stack[index].func;
94245 trace->calltime = current->ret_stack[index].calltime;
94246- trace->overrun = atomic_read(&current->trace_overrun);
94247+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
94248 trace->depth = index;
94249 }
94250
94251diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
94252index 7a9ba62..2e0e4a1 100644
94253--- a/kernel/trace/trace_mmiotrace.c
94254+++ b/kernel/trace/trace_mmiotrace.c
94255@@ -24,7 +24,7 @@ struct header_iter {
94256 static struct trace_array *mmio_trace_array;
94257 static bool overrun_detected;
94258 static unsigned long prev_overruns;
94259-static atomic_t dropped_count;
94260+static atomic_unchecked_t dropped_count;
94261
94262 static void mmio_reset_data(struct trace_array *tr)
94263 {
94264@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
94265
94266 static unsigned long count_overruns(struct trace_iterator *iter)
94267 {
94268- unsigned long cnt = atomic_xchg(&dropped_count, 0);
94269+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
94270 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
94271
94272 if (over > prev_overruns)
94273@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
94274 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
94275 sizeof(*entry), 0, pc);
94276 if (!event) {
94277- atomic_inc(&dropped_count);
94278+ atomic_inc_unchecked(&dropped_count);
94279 return;
94280 }
94281 entry = ring_buffer_event_data(event);
94282@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
94283 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
94284 sizeof(*entry), 0, pc);
94285 if (!event) {
94286- atomic_inc(&dropped_count);
94287+ atomic_inc_unchecked(&dropped_count);
94288 return;
94289 }
94290 entry = ring_buffer_event_data(event);
94291diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
94292index b77b9a6..82f19bd 100644
94293--- a/kernel/trace/trace_output.c
94294+++ b/kernel/trace/trace_output.c
94295@@ -707,14 +707,16 @@ int register_ftrace_event(struct trace_event *event)
94296 goto out;
94297 }
94298
94299+ pax_open_kernel();
94300 if (event->funcs->trace == NULL)
94301- event->funcs->trace = trace_nop_print;
94302+ *(void **)&event->funcs->trace = trace_nop_print;
94303 if (event->funcs->raw == NULL)
94304- event->funcs->raw = trace_nop_print;
94305+ *(void **)&event->funcs->raw = trace_nop_print;
94306 if (event->funcs->hex == NULL)
94307- event->funcs->hex = trace_nop_print;
94308+ *(void **)&event->funcs->hex = trace_nop_print;
94309 if (event->funcs->binary == NULL)
94310- event->funcs->binary = trace_nop_print;
94311+ *(void **)&event->funcs->binary = trace_nop_print;
94312+ pax_close_kernel();
94313
94314 key = event->type & (EVENT_HASHSIZE - 1);
94315
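
The register_ftrace_event() hunk above shows the standard PaX pattern for late writes into constified ops: pax_open_kernel() temporarily lifts write protection, the store goes through a *(void **)& cast to defeat the const qualifier the plugin added, and pax_close_kernel() reseals the page. Sketched in userspace terms with mprotect playing both roles:

#include <stdio.h>
#include <sys/mman.h>

struct trace_event_funcs { void (*trace)(void); };

static void trace_nop_print(void) { puts("nop print"); }

int main(void)
{
        /* the ops object gets its own page so it can be sealed */
        struct trace_event_funcs *funcs =
                mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (funcs == MAP_FAILED)
                return 1;
        mprotect(funcs, 4096, PROT_READ);         /* normally read-only */

        /* pax_open_kernel() analogue: briefly writable for fixup */
        mprotect(funcs, 4096, PROT_READ | PROT_WRITE);
        if (funcs->trace == NULL)                 /* mmap zero-fills */
                *(void **)&funcs->trace = (void *)trace_nop_print;
        mprotect(funcs, 4096, PROT_READ);         /* pax_close_kernel() */

        funcs->trace();
        return 0;
}
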
94316diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
94317index f8b45d8..70ff6c8 100644
94318--- a/kernel/trace/trace_seq.c
94319+++ b/kernel/trace/trace_seq.c
94320@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
94321 return 0;
94322 }
94323
94324- seq_buf_path(&s->seq, path, "\n");
94325+ seq_buf_path(&s->seq, path, "\n\\");
94326
94327 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
94328 s->seq.len = save_len;
94329diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
94330index 16eddb3..758b308 100644
94331--- a/kernel/trace/trace_stack.c
94332+++ b/kernel/trace/trace_stack.c
94333@@ -90,7 +90,7 @@ check_stack(unsigned long ip, unsigned long *stack)
94334 return;
94335
94336 /* we do not handle interrupt stacks yet */
94337- if (!object_is_on_stack(stack))
94338+ if (!object_starts_on_stack(stack))
94339 return;
94340
94341 local_irq_save(flags);
94342diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
94343index c6ee36f..78513f3 100644
94344--- a/kernel/trace/trace_syscalls.c
94345+++ b/kernel/trace/trace_syscalls.c
94346@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
94347 int num;
94348
94349 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94350+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94351+ return -EINVAL;
94352
94353 mutex_lock(&syscall_trace_lock);
94354 if (!sys_perf_refcount_enter)
94355@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
94356 int num;
94357
94358 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94359+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94360+ return;
94361
94362 mutex_lock(&syscall_trace_lock);
94363 sys_perf_refcount_enter--;
94364@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
94365 int num;
94366
94367 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94368+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94369+ return -EINVAL;
94370
94371 mutex_lock(&syscall_trace_lock);
94372 if (!sys_perf_refcount_exit)
94373@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
94374 int num;
94375
94376 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94377+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94378+ return;
94379
94380 mutex_lock(&syscall_trace_lock);
94381 sys_perf_refcount_exit--;
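
All four perf syscall-tracing hooks gain the same guard: syscall_nr comes from per-event metadata and is used directly as an index into fixed-size enable bitmaps and reference counters, so a negative or out-of-range value is rejected before any bit operation. The pattern, reduced to its core (helper name hypothetical):

/* Hypothetical reduction of the added check: never use an index taken
 * from metadata against a NR_syscalls-sized bitmap without validating
 * it first. */
static int perf_syscall_enable_checked(int num, unsigned long *enabled)
{
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -EINVAL;	/* corrupt or hostile metadata */
	set_bit(num, enabled);
	return 0;
}
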
94382diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
94383index 4109f83..fe1f830 100644
94384--- a/kernel/user_namespace.c
94385+++ b/kernel/user_namespace.c
94386@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
94387 !kgid_has_mapping(parent_ns, group))
94388 return -EPERM;
94389
94390+#ifdef CONFIG_GRKERNSEC
94391+ /*
94392+ * This doesn't really inspire confidence:
94393+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
94394+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
94395+ * Increases kernel attack surface in areas developers
94396+ * previously cared little about ("low importance due
94397+ * to requiring 'root' capability")
94398+ * To be removed when this code receives *proper* review
94399+ */
94400+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
94401+ !capable(CAP_SETGID))
94402+ return -EPERM;
94403+#endif
94404+
94405 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
94406 if (!ns)
94407 return -ENOMEM;
94408@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
94409 if (atomic_read(&current->mm->mm_users) > 1)
94410 return -EINVAL;
94411
94412- if (current->fs->users != 1)
94413+ if (atomic_read(&current->fs->users) != 1)
94414 return -EINVAL;
94415
94416 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
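
Two independent hardening changes here: under CONFIG_GRKERNSEC, create_user_ns() refuses unprivileged user namespace creation outright (the linked lkml threads document the concern), and userns_install() reads fs->users through atomic_read() because the patch converts that field to an atomic_t elsewhere. The grsec gate amounts to:

/* Sketch of the policy: only a task holding all three capabilities
 * may create a user namespace when GRKERNSEC is enabled. */
static bool grsec_may_create_user_ns(void)
{
	return capable(CAP_SYS_ADMIN) &&
	       capable(CAP_SETUID) &&
	       capable(CAP_SETGID);
}
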
94417diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
94418index c8eac43..4b5f08f 100644
94419--- a/kernel/utsname_sysctl.c
94420+++ b/kernel/utsname_sysctl.c
94421@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
94422 static int proc_do_uts_string(struct ctl_table *table, int write,
94423 void __user *buffer, size_t *lenp, loff_t *ppos)
94424 {
94425- struct ctl_table uts_table;
94426+ ctl_table_no_const uts_table;
94427 int r;
94428 memcpy(&uts_table, table, sizeof(uts_table));
94429 uts_table.data = get_uts(table, write);
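
proc_do_uts_string() must point table->data at a writable buffer, but the constify plugin turns struct ctl_table instances read-only; ctl_table_no_const (added by the sysctl.h hunk elsewhere in this patch) gives a stack copy that may be modified. The same pattern reappears in mm/hugetlb.c below. A sketch of the idiom, assuming the typedef matches the rest of the patch:

/* Assumed definition, per the sysctl.h hunk elsewhere in this patch:
 * the pre-constify layout of struct ctl_table. */
typedef struct ctl_table __no_const ctl_table_no_const;

static int do_uts_string_sketch(struct ctl_table *table, int write,
				void __user *buf, size_t *lenp, loff_t *ppos)
{
	ctl_table_no_const tmp = *table;	/* writable stack copy */

	tmp.data = get_uts(table, write);	/* safe: never touches *table */
	return proc_dostring(&tmp, write, buf, lenp, ppos);
}
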
94430diff --git a/kernel/watchdog.c b/kernel/watchdog.c
94431index 70bf118..4be3c37 100644
94432--- a/kernel/watchdog.c
94433+++ b/kernel/watchdog.c
94434@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
94435 static void watchdog_nmi_disable(unsigned int cpu) { return; }
94436 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
94437
94438-static struct smp_hotplug_thread watchdog_threads = {
94439+static struct smp_hotplug_thread watchdog_threads __read_only = {
94440 .store = &softlockup_watchdog,
94441 .thread_should_run = watchdog_should_run,
94442 .thread_fn = watchdog,
94443diff --git a/kernel/workqueue.c b/kernel/workqueue.c
94444index beeeac9..65cbfb3 100644
94445--- a/kernel/workqueue.c
94446+++ b/kernel/workqueue.c
94447@@ -4517,7 +4517,7 @@ static void rebind_workers(struct worker_pool *pool)
94448 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
94449 worker_flags |= WORKER_REBOUND;
94450 worker_flags &= ~WORKER_UNBOUND;
94451- ACCESS_ONCE(worker->flags) = worker_flags;
94452+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
94453 }
94454
94455 spin_unlock_irq(&pool->lock);
94456diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
94457index 5f2ce61..85a0b1b 100644
94458--- a/lib/Kconfig.debug
94459+++ b/lib/Kconfig.debug
94460@@ -910,7 +910,7 @@ config DEBUG_MUTEXES
94461
94462 config DEBUG_WW_MUTEX_SLOWPATH
94463 bool "Wait/wound mutex debugging: Slowpath testing"
94464- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94465+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94466 select DEBUG_LOCK_ALLOC
94467 select DEBUG_SPINLOCK
94468 select DEBUG_MUTEXES
94469@@ -927,7 +927,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
94470
94471 config DEBUG_LOCK_ALLOC
94472 bool "Lock debugging: detect incorrect freeing of live locks"
94473- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94474+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94475 select DEBUG_SPINLOCK
94476 select DEBUG_MUTEXES
94477 select LOCKDEP
94478@@ -941,7 +941,7 @@ config DEBUG_LOCK_ALLOC
94479
94480 config PROVE_LOCKING
94481 bool "Lock debugging: prove locking correctness"
94482- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94483+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94484 select LOCKDEP
94485 select DEBUG_SPINLOCK
94486 select DEBUG_MUTEXES
94487@@ -992,7 +992,7 @@ config LOCKDEP
94488
94489 config LOCK_STAT
94490 bool "Lock usage statistics"
94491- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94492+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94493 select LOCKDEP
94494 select DEBUG_SPINLOCK
94495 select DEBUG_MUTEXES
94496@@ -1453,6 +1453,7 @@ config LATENCYTOP
94497 depends on DEBUG_KERNEL
94498 depends on STACKTRACE_SUPPORT
94499 depends on PROC_FS
94500+ depends on !GRKERNSEC_HIDESYM
94501 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
94502 select KALLSYMS
94503 select KALLSYMS_ALL
94504@@ -1469,7 +1470,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94505 config DEBUG_STRICT_USER_COPY_CHECKS
94506 bool "Strict user copy size checks"
94507 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94508- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
94509+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
94510 help
94511 Enabling this option turns a certain set of sanity checks for user
94512 copy operations into compile time failures.
94513@@ -1597,7 +1598,7 @@ endmenu # runtime tests
94514
94515 config PROVIDE_OHCI1394_DMA_INIT
94516 bool "Remote debugging over FireWire early on boot"
94517- depends on PCI && X86
94518+ depends on PCI && X86 && !GRKERNSEC
94519 help
94520 If you want to debug problems which hang or crash the kernel early
94521 on boot and the crashing machine has a FireWire port, you can use
94522diff --git a/lib/Makefile b/lib/Makefile
94523index 3c3b30b..ca29102 100644
94524--- a/lib/Makefile
94525+++ b/lib/Makefile
94526@@ -55,7 +55,7 @@ obj-$(CONFIG_BTREE) += btree.o
94527 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
94528 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
94529 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
94530-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
94531+obj-y += list_debug.o
94532 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
94533
94534 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
94535diff --git a/lib/average.c b/lib/average.c
94536index 114d1be..ab0350c 100644
94537--- a/lib/average.c
94538+++ b/lib/average.c
94539@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
94540 {
94541 unsigned long internal = ACCESS_ONCE(avg->internal);
94542
94543- ACCESS_ONCE(avg->internal) = internal ?
94544+ ACCESS_ONCE_RW(avg->internal) = internal ?
94545 (((internal << avg->weight) - internal) +
94546 (val << avg->factor)) >> avg->weight :
94547 (val << avg->factor);
94548diff --git a/lib/bitmap.c b/lib/bitmap.c
94549index 324ea9e..46b1ae2 100644
94550--- a/lib/bitmap.c
94551+++ b/lib/bitmap.c
94552@@ -271,7 +271,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
94553 }
94554 EXPORT_SYMBOL(__bitmap_subset);
94555
94556-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94557+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94558 {
94559 unsigned int k, lim = bits/BITS_PER_LONG;
94560 int w = 0;
94561@@ -437,7 +437,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
94562 {
94563 int c, old_c, totaldigits, ndigits, nchunks, nbits;
94564 u32 chunk;
94565- const char __user __force *ubuf = (const char __user __force *)buf;
94566+ const char __user *ubuf = (const char __force_user *)buf;
94567
94568 bitmap_zero(maskp, nmaskbits);
94569
94570@@ -522,7 +522,7 @@ int bitmap_parse_user(const char __user *ubuf,
94571 {
94572 if (!access_ok(VERIFY_READ, ubuf, ulen))
94573 return -EFAULT;
94574- return __bitmap_parse((const char __force *)ubuf,
94575+ return __bitmap_parse((const char __force_kernel *)ubuf,
94576 ulen, 1, maskp, nmaskbits);
94577
94578 }
94579@@ -640,7 +640,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
94580 {
94581 unsigned a, b;
94582 int c, old_c, totaldigits;
94583- const char __user __force *ubuf = (const char __user __force *)buf;
94584+ const char __user *ubuf = (const char __force_user *)buf;
94585 int exp_digit, in_range;
94586
94587 totaldigits = c = 0;
94588@@ -735,7 +735,7 @@ int bitmap_parselist_user(const char __user *ubuf,
94589 {
94590 if (!access_ok(VERIFY_READ, ubuf, ulen))
94591 return -EFAULT;
94592- return __bitmap_parselist((const char __force *)ubuf,
94593+ return __bitmap_parselist((const char __force_kernel *)ubuf,
94594 ulen, 1, maskp, nmaskbits);
94595 }
94596 EXPORT_SYMBOL(bitmap_parselist_user);
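
__bitmap_parse() and __bitmap_parselist() deliberately reuse one parser for both kernel and user buffers; the casts that move a pointer between sparse address spaces are rewritten to grsec's helpers so the checker sees one explicit, consistent annotation. Assuming the compiler.h hunk of this patch, the helpers reduce to:

/* Assumed definitions (compiler.h elsewhere in this patch): visible
 * only to sparse, which tracks the __user/__kernel address spaces. */
#ifdef __CHECKER__
# define __force_user	__force __user
# define __force_kernel	__force __kernel
#else
# define __force_user
# define __force_kernel
#endif
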
94597diff --git a/lib/bug.c b/lib/bug.c
94598index 0c3bd95..5a615a1 100644
94599--- a/lib/bug.c
94600+++ b/lib/bug.c
94601@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
94602 return BUG_TRAP_TYPE_NONE;
94603
94604 bug = find_bug(bugaddr);
94605+ if (!bug)
94606+ return BUG_TRAP_TYPE_NONE;
94607
94608 file = NULL;
94609 line = 0;
94610diff --git a/lib/debugobjects.c b/lib/debugobjects.c
94611index 547f7f9..a6d4ba0 100644
94612--- a/lib/debugobjects.c
94613+++ b/lib/debugobjects.c
94614@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
94615 if (limit > 4)
94616 return;
94617
94618- is_on_stack = object_is_on_stack(addr);
94619+ is_on_stack = object_starts_on_stack(addr);
94620 if (is_on_stack == onstack)
94621 return;
94622
94623diff --git a/lib/div64.c b/lib/div64.c
94624index 4382ad7..08aa558 100644
94625--- a/lib/div64.c
94626+++ b/lib/div64.c
94627@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
94628 EXPORT_SYMBOL(__div64_32);
94629
94630 #ifndef div_s64_rem
94631-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94632+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94633 {
94634 u64 quotient;
94635
94636@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
94637 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
94638 */
94639 #ifndef div64_u64
94640-u64 div64_u64(u64 dividend, u64 divisor)
94641+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
94642 {
94643 u32 high = divisor >> 32;
94644 u64 quot;
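
div_s64_rem() and div64_u64() legitimately wrap during their internal long division, so they are annotated with __intentional_overflow(-1), which (as used throughout this patch) tells the size_overflow gcc plugin to leave the function uninstrumented. Without the plugin the attribute must vanish; an assumed shape of the definition:

/* Assumed definition: the attribute is consumed only by the
 * size_overflow gcc plugin; plain compilers see an empty macro. */
#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)
#endif
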
94645diff --git a/lib/dma-debug.c b/lib/dma-debug.c
94646index 9722bd2..0d826f4 100644
94647--- a/lib/dma-debug.c
94648+++ b/lib/dma-debug.c
94649@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
94650
94651 void dma_debug_add_bus(struct bus_type *bus)
94652 {
94653- struct notifier_block *nb;
94654+ notifier_block_no_const *nb;
94655
94656 if (dma_debug_disabled())
94657 return;
94658@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
94659
94660 static void check_for_stack(struct device *dev, void *addr)
94661 {
94662- if (object_is_on_stack(addr))
94663+ if (object_starts_on_stack(addr))
94664 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
94665 "stack [addr=%p]\n", addr);
94666 }
94667diff --git a/lib/inflate.c b/lib/inflate.c
94668index 013a761..c28f3fc 100644
94669--- a/lib/inflate.c
94670+++ b/lib/inflate.c
94671@@ -269,7 +269,7 @@ static void free(void *where)
94672 malloc_ptr = free_mem_ptr;
94673 }
94674 #else
94675-#define malloc(a) kmalloc(a, GFP_KERNEL)
94676+#define malloc(a) kmalloc((a), GFP_KERNEL)
94677 #define free(a) kfree(a)
94678 #endif
94679
94680diff --git a/lib/ioremap.c b/lib/ioremap.c
94681index 0c9216c..863bd89 100644
94682--- a/lib/ioremap.c
94683+++ b/lib/ioremap.c
94684@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
94685 unsigned long next;
94686
94687 phys_addr -= addr;
94688- pmd = pmd_alloc(&init_mm, pud, addr);
94689+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
94690 if (!pmd)
94691 return -ENOMEM;
94692 do {
94693@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
94694 unsigned long next;
94695
94696 phys_addr -= addr;
94697- pud = pud_alloc(&init_mm, pgd, addr);
94698+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
94699 if (!pud)
94700 return -ENOMEM;
94701 do {
94702diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
94703index bd2bea9..6b3c95e 100644
94704--- a/lib/is_single_threaded.c
94705+++ b/lib/is_single_threaded.c
94706@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
94707 struct task_struct *p, *t;
94708 bool ret;
94709
94710+ if (!mm)
94711+ return true;
94712+
94713 if (atomic_read(&task->signal->live) != 1)
94714 return false;
94715
94716diff --git a/lib/kobject.c b/lib/kobject.c
94717index 03d4ab3..46f6374 100644
94718--- a/lib/kobject.c
94719+++ b/lib/kobject.c
94720@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
94721
94722
94723 static DEFINE_SPINLOCK(kobj_ns_type_lock);
94724-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
94725+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
94726
94727-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94728+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94729 {
94730 enum kobj_ns_type type = ops->type;
94731 int error;
94732diff --git a/lib/list_debug.c b/lib/list_debug.c
94733index c24c2f7..f0296f4 100644
94734--- a/lib/list_debug.c
94735+++ b/lib/list_debug.c
94736@@ -11,7 +11,9 @@
94737 #include <linux/bug.h>
94738 #include <linux/kernel.h>
94739 #include <linux/rculist.h>
94740+#include <linux/mm.h>
94741
94742+#ifdef CONFIG_DEBUG_LIST
94743 /*
94744 * Insert a new entry between two known consecutive entries.
94745 *
94746@@ -19,21 +21,40 @@
94747 * the prev/next entries already!
94748 */
94749
94750+static bool __list_add_debug(struct list_head *new,
94751+ struct list_head *prev,
94752+ struct list_head *next)
94753+{
94754+ if (unlikely(next->prev != prev)) {
94755+ printk(KERN_ERR "list_add corruption. next->prev should be "
94756+ "prev (%p), but was %p. (next=%p).\n",
94757+ prev, next->prev, next);
94758+ BUG();
94759+ return false;
94760+ }
94761+ if (unlikely(prev->next != next)) {
94762+ printk(KERN_ERR "list_add corruption. prev->next should be "
94763+ "next (%p), but was %p. (prev=%p).\n",
94764+ next, prev->next, prev);
94765+ BUG();
94766+ return false;
94767+ }
94768+ if (unlikely(new == prev || new == next)) {
94769+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
94770+ new, prev, next);
94771+ BUG();
94772+ return false;
94773+ }
94774+ return true;
94775+}
94776+
94777 void __list_add(struct list_head *new,
94778- struct list_head *prev,
94779- struct list_head *next)
94780+ struct list_head *prev,
94781+ struct list_head *next)
94782 {
94783- WARN(next->prev != prev,
94784- "list_add corruption. next->prev should be "
94785- "prev (%p), but was %p. (next=%p).\n",
94786- prev, next->prev, next);
94787- WARN(prev->next != next,
94788- "list_add corruption. prev->next should be "
94789- "next (%p), but was %p. (prev=%p).\n",
94790- next, prev->next, prev);
94791- WARN(new == prev || new == next,
94792- "list_add double add: new=%p, prev=%p, next=%p.\n",
94793- new, prev, next);
94794+ if (!__list_add_debug(new, prev, next))
94795+ return;
94796+
94797 next->prev = new;
94798 new->next = next;
94799 new->prev = prev;
94800@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
94801 }
94802 EXPORT_SYMBOL(__list_add);
94803
94804-void __list_del_entry(struct list_head *entry)
94805+static bool __list_del_entry_debug(struct list_head *entry)
94806 {
94807 struct list_head *prev, *next;
94808
94809 prev = entry->prev;
94810 next = entry->next;
94811
94812- if (WARN(next == LIST_POISON1,
94813- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94814- entry, LIST_POISON1) ||
94815- WARN(prev == LIST_POISON2,
94816- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94817- entry, LIST_POISON2) ||
94818- WARN(prev->next != entry,
94819- "list_del corruption. prev->next should be %p, "
94820- "but was %p\n", entry, prev->next) ||
94821- WARN(next->prev != entry,
94822- "list_del corruption. next->prev should be %p, "
94823- "but was %p\n", entry, next->prev))
94824+ if (unlikely(next == LIST_POISON1)) {
94825+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94826+ entry, LIST_POISON1);
94827+ BUG();
94828+ return false;
94829+ }
94830+ if (unlikely(prev == LIST_POISON2)) {
94831+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94832+ entry, LIST_POISON2);
94833+ BUG();
94834+ return false;
94835+ }
94836+ if (unlikely(entry->prev->next != entry)) {
94837+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
94838+ "but was %p\n", entry, prev->next);
94839+ BUG();
94840+ return false;
94841+ }
94842+ if (unlikely(entry->next->prev != entry)) {
94843+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
94844+ "but was %p\n", entry, next->prev);
94845+ BUG();
94846+ return false;
94847+ }
94848+ return true;
94849+}
94850+
94851+void __list_del_entry(struct list_head *entry)
94852+{
94853+ if (!__list_del_entry_debug(entry))
94854 return;
94855
94856- __list_del(prev, next);
94857+ __list_del(entry->prev, entry->next);
94858 }
94859 EXPORT_SYMBOL(__list_del_entry);
94860
94861@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
94862 void __list_add_rcu(struct list_head *new,
94863 struct list_head *prev, struct list_head *next)
94864 {
94865- WARN(next->prev != prev,
94866- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
94867- prev, next->prev, next);
94868- WARN(prev->next != next,
94869- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
94870- next, prev->next, prev);
94871+ if (!__list_add_debug(new, prev, next))
94872+ return;
94873+
94874 new->next = next;
94875 new->prev = prev;
94876 rcu_assign_pointer(list_next_rcu(prev), new);
94877 next->prev = new;
94878 }
94879 EXPORT_SYMBOL(__list_add_rcu);
94880+#endif
94881+
94882+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
94883+{
94884+#ifdef CONFIG_DEBUG_LIST
94885+ if (!__list_add_debug(new, prev, next))
94886+ return;
94887+#endif
94888+
94889+ pax_open_kernel();
94890+ next->prev = new;
94891+ new->next = next;
94892+ new->prev = prev;
94893+ prev->next = new;
94894+ pax_close_kernel();
94895+}
94896+EXPORT_SYMBOL(__pax_list_add);
94897+
94898+void pax_list_del(struct list_head *entry)
94899+{
94900+#ifdef CONFIG_DEBUG_LIST
94901+ if (!__list_del_entry_debug(entry))
94902+ return;
94903+#endif
94904+
94905+ pax_open_kernel();
94906+ __list_del(entry->prev, entry->next);
94907+ entry->next = LIST_POISON1;
94908+ entry->prev = LIST_POISON2;
94909+ pax_close_kernel();
94910+}
94911+EXPORT_SYMBOL(pax_list_del);
94912+
94913+void pax_list_del_init(struct list_head *entry)
94914+{
94915+ pax_open_kernel();
94916+ __list_del(entry->prev, entry->next);
94917+ INIT_LIST_HEAD(entry);
94918+ pax_close_kernel();
94919+}
94920+EXPORT_SYMBOL(pax_list_del_init);
94921+
94922+void __pax_list_add_rcu(struct list_head *new,
94923+ struct list_head *prev, struct list_head *next)
94924+{
94925+#ifdef CONFIG_DEBUG_LIST
94926+ if (!__list_add_debug(new, prev, next))
94927+ return;
94928+#endif
94929+
94930+ pax_open_kernel();
94931+ new->next = next;
94932+ new->prev = prev;
94933+ rcu_assign_pointer(list_next_rcu(prev), new);
94934+ next->prev = new;
94935+ pax_close_kernel();
94936+}
94937+EXPORT_SYMBOL(__pax_list_add_rcu);
94938+
94939+void pax_list_del_rcu(struct list_head *entry)
94940+{
94941+#ifdef CONFIG_DEBUG_LIST
94942+ if (!__list_del_entry_debug(entry))
94943+ return;
94944+#endif
94945+
94946+ pax_open_kernel();
94947+ __list_del(entry->prev, entry->next);
94948+ entry->next = LIST_POISON1;
94949+ entry->prev = LIST_POISON2;
94950+ pax_close_kernel();
94951+}
94952+EXPORT_SYMBOL(pax_list_del_rcu);
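
Beyond hardening the DEBUG_LIST checks from WARN to BUG (list_debug.o is now built unconditionally, per the lib/Makefile hunk above), the file grows pax_list_* helpers for list nodes embedded in objects the CONSTIFY plugin placed in read-only memory; every pointer store is wrapped in pax_open_kernel()/pax_close_kernel(). A usage sketch, assuming the pax_list_add() front-end that the list.h hunk of this patch provides:

/* Usage sketch: "ro_notifier" stands for any node whose containing
 * object is constified; a plain list_add() would fault on the
 * read-only pages. */
static LIST_HEAD(chain);

static void register_ro_notifier(struct list_head *ro_notifier)
{
	pax_list_add(ro_notifier, &chain);	/* stores done with WP lifted */
}
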
94953diff --git a/lib/lockref.c b/lib/lockref.c
94954index d2233de..fa1a2f6 100644
94955--- a/lib/lockref.c
94956+++ b/lib/lockref.c
94957@@ -48,13 +48,13 @@
94958 void lockref_get(struct lockref *lockref)
94959 {
94960 CMPXCHG_LOOP(
94961- new.count++;
94962+ __lockref_inc(&new);
94963 ,
94964 return;
94965 );
94966
94967 spin_lock(&lockref->lock);
94968- lockref->count++;
94969+ __lockref_inc(lockref);
94970 spin_unlock(&lockref->lock);
94971 }
94972 EXPORT_SYMBOL(lockref_get);
94973@@ -69,7 +69,7 @@ int lockref_get_not_zero(struct lockref *lockref)
94974 int retval;
94975
94976 CMPXCHG_LOOP(
94977- new.count++;
94978+ __lockref_inc(&new);
94979 if (!old.count)
94980 return 0;
94981 ,
94982@@ -79,7 +79,7 @@ int lockref_get_not_zero(struct lockref *lockref)
94983 spin_lock(&lockref->lock);
94984 retval = 0;
94985 if (lockref->count) {
94986- lockref->count++;
94987+ __lockref_inc(lockref);
94988 retval = 1;
94989 }
94990 spin_unlock(&lockref->lock);
94991@@ -96,7 +96,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
94992 int lockref_get_or_lock(struct lockref *lockref)
94993 {
94994 CMPXCHG_LOOP(
94995- new.count++;
94996+ __lockref_inc(&new);
94997 if (!old.count)
94998 break;
94999 ,
95000@@ -106,7 +106,7 @@ int lockref_get_or_lock(struct lockref *lockref)
95001 spin_lock(&lockref->lock);
95002 if (!lockref->count)
95003 return 0;
95004- lockref->count++;
95005+ __lockref_inc(lockref);
95006 spin_unlock(&lockref->lock);
95007 return 1;
95008 }
95009@@ -120,7 +120,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
95010 int lockref_put_or_lock(struct lockref *lockref)
95011 {
95012 CMPXCHG_LOOP(
95013- new.count--;
95014+ __lockref_dec(&new);
95015 if (old.count <= 1)
95016 break;
95017 ,
95018@@ -130,7 +130,7 @@ int lockref_put_or_lock(struct lockref *lockref)
95019 spin_lock(&lockref->lock);
95020 if (lockref->count <= 1)
95021 return 0;
95022- lockref->count--;
95023+ __lockref_dec(lockref);
95024 spin_unlock(&lockref->lock);
95025 return 1;
95026 }
95027@@ -157,7 +157,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95028 int retval;
95029
95030 CMPXCHG_LOOP(
95031- new.count++;
95032+ __lockref_inc(&new);
95033 if ((int)old.count < 0)
95034 return 0;
95035 ,
95036@@ -167,7 +167,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95037 spin_lock(&lockref->lock);
95038 retval = 0;
95039 if ((int) lockref->count >= 0) {
95040- lockref->count++;
95041+ __lockref_inc(lockref);
95042 retval = 1;
95043 }
95044 spin_unlock(&lockref->lock);
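
Every raw increment and decrement of the lockref count, both inside the CMPXCHG_LOOP fast path and under the spinlock fallback, now goes through __lockref_inc()/__lockref_dec(); elsewhere in the patch the count field becomes an atomic_t, so the REFCOUNT overflow trap covers dentry reference counts too. The helpers are assumed to be thin wrappers:

/* Assumed definitions, matching the lockref.h hunk elsewhere in this
 * patch (count is an atomic_t there): */
#define __lockref_inc(l)	atomic_inc(&(l)->count)
#define __lockref_dec(l)	atomic_dec(&(l)->count)
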
95045diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
95046index 6111bcb..02e816b 100644
95047--- a/lib/percpu-refcount.c
95048+++ b/lib/percpu-refcount.c
95049@@ -31,7 +31,7 @@
95050 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
95051 */
95052
95053-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
95054+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
95055
95056 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
95057
95058diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95059index 3291a8e..346a91e 100644
95060--- a/lib/radix-tree.c
95061+++ b/lib/radix-tree.c
95062@@ -67,7 +67,7 @@ struct radix_tree_preload {
95063 int nr;
95064 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
95065 };
95066-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95067+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95068
95069 static inline void *ptr_to_indirect(void *ptr)
95070 {
95071diff --git a/lib/random32.c b/lib/random32.c
95072index 0bee183..526f12f 100644
95073--- a/lib/random32.c
95074+++ b/lib/random32.c
95075@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
95076 }
95077 #endif
95078
95079-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
95080+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
95081
95082 /**
95083 * prandom_u32_state - seeded pseudo-random number generator.
95084diff --git a/lib/rbtree.c b/lib/rbtree.c
95085index c16c81a..4dcbda1 100644
95086--- a/lib/rbtree.c
95087+++ b/lib/rbtree.c
95088@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
95089 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
95090
95091 static const struct rb_augment_callbacks dummy_callbacks = {
95092- dummy_propagate, dummy_copy, dummy_rotate
95093+ .propagate = dummy_propagate,
95094+ .copy = dummy_copy,
95095+ .rotate = dummy_rotate
95096 };
95097
95098 void rb_insert_color(struct rb_node *node, struct rb_root *root)
95099diff --git a/lib/show_mem.c b/lib/show_mem.c
95100index 7de89f4..00d70b7 100644
95101--- a/lib/show_mem.c
95102+++ b/lib/show_mem.c
95103@@ -50,6 +50,6 @@ void show_mem(unsigned int filter)
95104 quicklist_total_size());
95105 #endif
95106 #ifdef CONFIG_MEMORY_FAILURE
95107- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
95108+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
95109 #endif
95110 }
95111diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
95112index bb2b201..46abaf9 100644
95113--- a/lib/strncpy_from_user.c
95114+++ b/lib/strncpy_from_user.c
95115@@ -21,7 +21,7 @@
95116 */
95117 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
95118 {
95119- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95120+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95121 long res = 0;
95122
95123 /*
95124diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
95125index a28df52..3d55877 100644
95126--- a/lib/strnlen_user.c
95127+++ b/lib/strnlen_user.c
95128@@ -26,7 +26,7 @@
95129 */
95130 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
95131 {
95132- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95133+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95134 long align, res = 0;
95135 unsigned long c;
95136
95137diff --git a/lib/swiotlb.c b/lib/swiotlb.c
95138index 4abda07..b9d3765 100644
95139--- a/lib/swiotlb.c
95140+++ b/lib/swiotlb.c
95141@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
95142
95143 void
95144 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
95145- dma_addr_t dev_addr)
95146+ dma_addr_t dev_addr, struct dma_attrs *attrs)
95147 {
95148 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
95149
95150diff --git a/lib/usercopy.c b/lib/usercopy.c
95151index 4f5b1dd..7cab418 100644
95152--- a/lib/usercopy.c
95153+++ b/lib/usercopy.c
95154@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
95155 WARN(1, "Buffer overflow detected!\n");
95156 }
95157 EXPORT_SYMBOL(copy_from_user_overflow);
95158+
95159+void copy_to_user_overflow(void)
95160+{
95161+ WARN(1, "Buffer overflow detected!\n");
95162+}
95163+EXPORT_SYMBOL(copy_to_user_overflow);
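
copy_to_user_overflow() is the missing twin of copy_from_user_overflow(): when the strict user-copy checks can prove a copy exceeds the known size of the source or destination object, the copy is diverted to one of these WARN-emitting stubs instead of proceeding. An illustrative, hypothetical trigger:

/* Illustrative only: with the size checks enabled, a copy the
 * compiler can prove oversized is diverted to the overflow stub
 * instead of reading past "small". */
static int leak_sketch(void __user *ubuf)
{
	char small[8] = "";

	return copy_to_user(ubuf, small, 64) ? -EFAULT : 0;	/* 64 > 8: flagged */
}
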
95164diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95165index ec337f6..8484eb2 100644
95166--- a/lib/vsprintf.c
95167+++ b/lib/vsprintf.c
95168@@ -16,6 +16,9 @@
95169 * - scnprintf and vscnprintf
95170 */
95171
95172+#ifdef CONFIG_GRKERNSEC_HIDESYM
95173+#define __INCLUDED_BY_HIDESYM 1
95174+#endif
95175 #include <stdarg.h>
95176 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
95177 #include <linux/types.h>
95178@@ -625,7 +628,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
95179 #ifdef CONFIG_KALLSYMS
95180 if (*fmt == 'B')
95181 sprint_backtrace(sym, value);
95182- else if (*fmt != 'f' && *fmt != 's')
95183+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
95184 sprint_symbol(sym, value);
95185 else
95186 sprint_symbol_no_offset(sym, value);
95187@@ -1240,7 +1243,11 @@ char *address_val(char *buf, char *end, const void *addr,
95188 return number(buf, end, num, spec);
95189 }
95190
95191+#ifdef CONFIG_GRKERNSEC_HIDESYM
95192+int kptr_restrict __read_mostly = 2;
95193+#else
95194 int kptr_restrict __read_mostly;
95195+#endif
95196
95197 /*
95198 * Show a '%p' thing. A kernel extension is that the '%p' is followed
95199@@ -1251,8 +1258,10 @@ int kptr_restrict __read_mostly;
95200 *
95201 * - 'F' For symbolic function descriptor pointers with offset
95202 * - 'f' For simple symbolic function names without offset
95203+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
95204 * - 'S' For symbolic direct pointers with offset
95205 * - 's' For symbolic direct pointers without offset
95206+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
95207 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
95208 * - 'B' For backtraced symbolic direct pointers with offset
95209 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
95210@@ -1331,12 +1340,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95211
95212 if (!ptr && *fmt != 'K') {
95213 /*
95214- * Print (null) with the same width as a pointer so it makes
95215+ * Print (nil) with the same width as a pointer so it makes
95216 * tabular output look nice.
95217 */
95218 if (spec.field_width == -1)
95219 spec.field_width = default_width;
95220- return string(buf, end, "(null)", spec);
95221+ return string(buf, end, "(nil)", spec);
95222 }
95223
95224 switch (*fmt) {
95225@@ -1346,6 +1355,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95226 /* Fallthrough */
95227 case 'S':
95228 case 's':
95229+#ifdef CONFIG_GRKERNSEC_HIDESYM
95230+ break;
95231+#else
95232+ return symbol_string(buf, end, ptr, spec, fmt);
95233+#endif
95234+ case 'X':
95235+ ptr = dereference_function_descriptor(ptr);
95236+ case 'A':
95237 case 'B':
95238 return symbol_string(buf, end, ptr, spec, fmt);
95239 case 'R':
95240@@ -1403,6 +1420,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95241 va_end(va);
95242 return buf;
95243 }
95244+ case 'P':
95245+ break;
95246 case 'K':
95247 /*
95248 * %pK cannot be used in IRQ context because its test
95249@@ -1460,6 +1479,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95250 ((const struct file *)ptr)->f_path.dentry,
95251 spec, fmt);
95252 }
95253+
95254+#ifdef CONFIG_GRKERNSEC_HIDESYM
95255+ /* 'P' = approved pointers to copy to userland,
95256+ as in the /proc/kallsyms case, where we display nothing
95257+ for non-root users and the real contents for root users.
95258+ 'X' = approved simple symbols.
95259+ Also ignore 'K' pointers, since we force their NULLing for
95260+ non-root users above.
95261+ */
95262+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
95263+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
95264+ dump_stack();
95265+ ptr = NULL;
95266+ }
95267+#endif
95268+
95269 spec.flags |= SMALL;
95270 if (spec.field_width == -1) {
95271 spec.field_width = default_width;
95272@@ -2160,11 +2195,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95273 typeof(type) value; \
95274 if (sizeof(type) == 8) { \
95275 args = PTR_ALIGN(args, sizeof(u32)); \
95276- *(u32 *)&value = *(u32 *)args; \
95277- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
95278+ *(u32 *)&value = *(const u32 *)args; \
95279+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
95280 } else { \
95281 args = PTR_ALIGN(args, sizeof(type)); \
95282- value = *(typeof(type) *)args; \
95283+ value = *(const typeof(type) *)args; \
95284 } \
95285 args += sizeof(type); \
95286 value; \
95287@@ -2227,7 +2262,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95288 case FORMAT_TYPE_STR: {
95289 const char *str_arg = args;
95290 args += strlen(str_arg) + 1;
95291- str = string(str, end, (char *)str_arg, spec);
95292+ str = string(str, end, str_arg, spec);
95293 break;
95294 }
95295
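
The vsprintf changes add three format extensions under GRKERNSEC_HIDESYM: '%pX' (symbol without offset) and '%pA' (symbol with offset) are pre-approved for output, '%pP' marks a raw pointer as intentionally user-visible, and kptr_restrict defaults to 2. Any other kernel pointer (above TASK_SIZE) that pointer() finds headed into a usercopy-exposed buffer is nulled and logged as an infoleak. Callers opt in explicitly, as the kmemleak hunk below does; a usage sketch:

/* Usage sketch: emit an approved pointer and an approved symbol so
 * the output survives HIDESYM instead of being nulled. */
static void report_sketch(void *ptr)
{
	/* '%pP': approved raw pointer; '%pA': approved symbol+offset */
	pr_info("object at [<%pP>] allocated from %pA\n", ptr, ptr);
}
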
95296diff --git a/localversion-grsec b/localversion-grsec
95297new file mode 100644
95298index 0000000..7cd6065
95299--- /dev/null
95300+++ b/localversion-grsec
95301@@ -0,0 +1 @@
95302+-grsec
95303diff --git a/mm/Kconfig b/mm/Kconfig
95304index 1d1ae6b..0f05885 100644
95305--- a/mm/Kconfig
95306+++ b/mm/Kconfig
95307@@ -341,10 +341,11 @@ config KSM
95308 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
95309
95310 config DEFAULT_MMAP_MIN_ADDR
95311- int "Low address space to protect from user allocation"
95312+ int "Low address space to protect from user allocation"
95313 depends on MMU
95314- default 4096
95315- help
95316+ default 32768 if ALPHA || ARM || PARISC || SPARC32
95317+ default 65536
95318+ help
95319 This is the portion of low virtual memory which should be protected
95320 from userspace allocation. Keeping a user from writing to low pages
95321 can help reduce the impact of kernel NULL pointer bugs.
95322@@ -375,7 +376,7 @@ config MEMORY_FAILURE
95323
95324 config HWPOISON_INJECT
95325 tristate "HWPoison pages injector"
95326- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
95327+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
95328 select PROC_PAGE_MONITOR
95329
95330 config NOMMU_INITIAL_TRIM_EXCESS
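
Two mm/Kconfig changes: DEFAULT_MMAP_MIN_ADDR rises from 4 KiB to 64 KiB (32 KiB on ALPHA/ARM/PARISC/SPARC32), widening the unmappable low window that defeats NULL-pointer-dereference exploits mapping page zero, and HWPOISON_INJECT is barred under GRKERNSEC. The floor is enforced at mmap time roughly as follows (a sketch; the real check is cap_mmap_addr() in security/commoncap.c):

/* Sketch of the enforcement point: mapping below the floor needs
 * CAP_SYS_RAWIO; everything else is rejected. */
static int mmap_floor_check(unsigned long addr)
{
	if (addr < dac_mmap_min_addr && !capable(CAP_SYS_RAWIO))
		return -EPERM;
	return 0;
}
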
95331diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95332index 0ae0df5..82ac56b 100644
95333--- a/mm/backing-dev.c
95334+++ b/mm/backing-dev.c
95335@@ -12,7 +12,7 @@
95336 #include <linux/device.h>
95337 #include <trace/events/writeback.h>
95338
95339-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
95340+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
95341
95342 struct backing_dev_info default_backing_dev_info = {
95343 .name = "default",
95344@@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
95345 return err;
95346
95347 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
95348- atomic_long_inc_return(&bdi_seq));
95349+ atomic_long_inc_return_unchecked(&bdi_seq));
95350 if (err) {
95351 bdi_destroy(bdi);
95352 return err;
95353diff --git a/mm/filemap.c b/mm/filemap.c
95354index 673e458..7192013 100644
95355--- a/mm/filemap.c
95356+++ b/mm/filemap.c
95357@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95358 struct address_space *mapping = file->f_mapping;
95359
95360 if (!mapping->a_ops->readpage)
95361- return -ENOEXEC;
95362+ return -ENODEV;
95363 file_accessed(file);
95364 vma->vm_ops = &generic_file_vm_ops;
95365 return 0;
95366@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95367 *pos = i_size_read(inode);
95368
95369 if (limit != RLIM_INFINITY) {
95370+ gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
95371 if (*pos >= limit) {
95372 send_sig(SIGXFSZ, current, 0);
95373 return -EFBIG;
95374diff --git a/mm/fremap.c b/mm/fremap.c
95375index 2805d71..8b56e7d 100644
95376--- a/mm/fremap.c
95377+++ b/mm/fremap.c
95378@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95379 retry:
95380 vma = find_vma(mm, start);
95381
95382+#ifdef CONFIG_PAX_SEGMEXEC
95383+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95384+ goto out;
95385+#endif
95386+
95387 /*
95388 * Make sure the vma is shared, that it supports prefaulting,
95389 * and that the remapped range is valid and fully within
95390diff --git a/mm/gup.c b/mm/gup.c
95391index 9b2afbf..647297c 100644
95392--- a/mm/gup.c
95393+++ b/mm/gup.c
95394@@ -274,11 +274,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
95395 unsigned int fault_flags = 0;
95396 int ret;
95397
95398- /* For mlock, just skip the stack guard page. */
95399- if ((*flags & FOLL_MLOCK) &&
95400- (stack_guard_page_start(vma, address) ||
95401- stack_guard_page_end(vma, address + PAGE_SIZE)))
95402- return -ENOENT;
95403 if (*flags & FOLL_WRITE)
95404 fault_flags |= FAULT_FLAG_WRITE;
95405 if (nonblocking)
95406@@ -444,14 +439,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95407 if (!(gup_flags & FOLL_FORCE))
95408 gup_flags |= FOLL_NUMA;
95409
95410- do {
95411+ while (nr_pages) {
95412 struct page *page;
95413 unsigned int foll_flags = gup_flags;
95414 unsigned int page_increm;
95415
95416 /* first iteration or cross vma bound */
95417 if (!vma || start >= vma->vm_end) {
95418- vma = find_extend_vma(mm, start);
95419+ vma = find_vma(mm, start);
95420 if (!vma && in_gate_area(mm, start)) {
95421 int ret;
95422 ret = get_gate_page(mm, start & PAGE_MASK,
95423@@ -463,7 +458,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95424 goto next_page;
95425 }
95426
95427- if (!vma || check_vma_flags(vma, gup_flags))
95428+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
95429 return i ? : -EFAULT;
95430 if (is_vm_hugetlb_page(vma)) {
95431 i = follow_hugetlb_page(mm, vma, pages, vmas,
95432@@ -518,7 +513,7 @@ next_page:
95433 i += page_increm;
95434 start += page_increm * PAGE_SIZE;
95435 nr_pages -= page_increm;
95436- } while (nr_pages);
95437+ }
95438 return i;
95439 }
95440 EXPORT_SYMBOL(__get_user_pages);
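
The gup changes remove implicit stack extension from get_user_pages(): the loop calls find_vma() instead of find_extend_vma() and treats any address below vma->vm_start as a hard -EFAULT, which in turn makes the removed FOLL_MLOCK guard-page skip unnecessary. Reduced to the essential in-loop check:

/* Sketch of the tightened lookup: an address in the gap below a
 * stack vma no longer grows the stack as a side effect of gup. */
vma = find_vma(mm, start);
if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
	return i ? i : -EFAULT;
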
95441diff --git a/mm/highmem.c b/mm/highmem.c
95442index 123bcd3..0de52ba 100644
95443--- a/mm/highmem.c
95444+++ b/mm/highmem.c
95445@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
95446 * So no dangers, even with speculative execution.
95447 */
95448 page = pte_page(pkmap_page_table[i]);
95449+ pax_open_kernel();
95450 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
95451-
95452+ pax_close_kernel();
95453 set_page_address(page, NULL);
95454 need_flush = 1;
95455 }
95456@@ -259,9 +260,11 @@ start:
95457 }
95458 }
95459 vaddr = PKMAP_ADDR(last_pkmap_nr);
95460+
95461+ pax_open_kernel();
95462 set_pte_at(&init_mm, vaddr,
95463 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95464-
95465+ pax_close_kernel();
95466 pkmap_count[last_pkmap_nr] = 1;
95467 set_page_address(page, (void *)vaddr);
95468
95469diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95470index c49586f..41e5fd9 100644
95471--- a/mm/hugetlb.c
95472+++ b/mm/hugetlb.c
95473@@ -2258,6 +2258,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95474 struct ctl_table *table, int write,
95475 void __user *buffer, size_t *length, loff_t *ppos)
95476 {
95477+ ctl_table_no_const t;
95478 struct hstate *h = &default_hstate;
95479 unsigned long tmp = h->max_huge_pages;
95480 int ret;
95481@@ -2265,9 +2266,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95482 if (!hugepages_supported())
95483 return -ENOTSUPP;
95484
95485- table->data = &tmp;
95486- table->maxlen = sizeof(unsigned long);
95487- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95488+ t = *table;
95489+ t.data = &tmp;
95490+ t.maxlen = sizeof(unsigned long);
95491+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
95492 if (ret)
95493 goto out;
95494
95495@@ -2302,6 +2304,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95496 struct hstate *h = &default_hstate;
95497 unsigned long tmp;
95498 int ret;
95499+ ctl_table_no_const hugetlb_table;
95500
95501 if (!hugepages_supported())
95502 return -ENOTSUPP;
95503@@ -2311,9 +2314,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95504 if (write && hstate_is_gigantic(h))
95505 return -EINVAL;
95506
95507- table->data = &tmp;
95508- table->maxlen = sizeof(unsigned long);
95509- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95510+ hugetlb_table = *table;
95511+ hugetlb_table.data = &tmp;
95512+ hugetlb_table.maxlen = sizeof(unsigned long);
95513+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
95514 if (ret)
95515 goto out;
95516
95517@@ -2797,6 +2801,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95518 i_mmap_unlock_write(mapping);
95519 }
95520
95521+#ifdef CONFIG_PAX_SEGMEXEC
95522+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95523+{
95524+ struct mm_struct *mm = vma->vm_mm;
95525+ struct vm_area_struct *vma_m;
95526+ unsigned long address_m;
95527+ pte_t *ptep_m;
95528+
95529+ vma_m = pax_find_mirror_vma(vma);
95530+ if (!vma_m)
95531+ return;
95532+
95533+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95534+ address_m = address + SEGMEXEC_TASK_SIZE;
95535+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95536+ get_page(page_m);
95537+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
95538+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95539+}
95540+#endif
95541+
95542 /*
95543 * Hugetlb_cow() should be called with page lock of the original hugepage held.
95544 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
95545@@ -2909,6 +2934,11 @@ retry_avoidcopy:
95546 make_huge_pte(vma, new_page, 1));
95547 page_remove_rmap(old_page);
95548 hugepage_add_new_anon_rmap(new_page, vma, address);
95549+
95550+#ifdef CONFIG_PAX_SEGMEXEC
95551+ pax_mirror_huge_pte(vma, address, new_page);
95552+#endif
95553+
95554 /* Make the old page be freed below */
95555 new_page = old_page;
95556 }
95557@@ -3069,6 +3099,10 @@ retry:
95558 && (vma->vm_flags & VM_SHARED)));
95559 set_huge_pte_at(mm, address, ptep, new_pte);
95560
95561+#ifdef CONFIG_PAX_SEGMEXEC
95562+ pax_mirror_huge_pte(vma, address, page);
95563+#endif
95564+
95565 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95566 /* Optimization, do the COW without a second fault */
95567 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
95568@@ -3135,6 +3169,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95569 struct hstate *h = hstate_vma(vma);
95570 struct address_space *mapping;
95571
95572+#ifdef CONFIG_PAX_SEGMEXEC
95573+ struct vm_area_struct *vma_m;
95574+#endif
95575+
95576 address &= huge_page_mask(h);
95577
95578 ptep = huge_pte_offset(mm, address);
95579@@ -3148,6 +3186,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95580 VM_FAULT_SET_HINDEX(hstate_index(h));
95581 }
95582
95583+#ifdef CONFIG_PAX_SEGMEXEC
95584+ vma_m = pax_find_mirror_vma(vma);
95585+ if (vma_m) {
95586+ unsigned long address_m;
95587+
95588+ if (vma->vm_start > vma_m->vm_start) {
95589+ address_m = address;
95590+ address -= SEGMEXEC_TASK_SIZE;
95591+ vma = vma_m;
95592+ h = hstate_vma(vma);
95593+ } else
95594+ address_m = address + SEGMEXEC_TASK_SIZE;
95595+
95596+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
95597+ return VM_FAULT_OOM;
95598+ address_m &= HPAGE_MASK;
95599+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
95600+ }
95601+#endif
95602+
95603 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
95604 if (!ptep)
95605 return VM_FAULT_OOM;
95606diff --git a/mm/internal.h b/mm/internal.h
95607index efad241..57ae4ca 100644
95608--- a/mm/internal.h
95609+++ b/mm/internal.h
95610@@ -134,6 +134,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
95611
95612 extern int __isolate_free_page(struct page *page, unsigned int order);
95613 extern void __free_pages_bootmem(struct page *page, unsigned int order);
95614+extern void free_compound_page(struct page *page);
95615 extern void prep_compound_page(struct page *page, unsigned long order);
95616 #ifdef CONFIG_MEMORY_FAILURE
95617 extern bool is_free_buddy_page(struct page *page);
95618@@ -387,7 +388,7 @@ extern u32 hwpoison_filter_enable;
95619
95620 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
95621 unsigned long, unsigned long,
95622- unsigned long, unsigned long);
95623+ unsigned long, unsigned long) __intentional_overflow(-1);
95624
95625 extern void set_pageblock_order(void);
95626 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
95627diff --git a/mm/kmemleak.c b/mm/kmemleak.c
95628index 3cda50c..032ba634 100644
95629--- a/mm/kmemleak.c
95630+++ b/mm/kmemleak.c
95631@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
95632
95633 for (i = 0; i < object->trace_len; i++) {
95634 void *ptr = (void *)object->trace[i];
95635- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
95636+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
95637 }
95638 }
95639
95640@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
95641 return -ENOMEM;
95642 }
95643
95644- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
95645+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
95646 &kmemleak_fops);
95647 if (!dentry)
95648 pr_warning("Failed to create the debugfs kmemleak file\n");
95649diff --git a/mm/maccess.c b/mm/maccess.c
95650index d53adf9..03a24bf 100644
95651--- a/mm/maccess.c
95652+++ b/mm/maccess.c
95653@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
95654 set_fs(KERNEL_DS);
95655 pagefault_disable();
95656 ret = __copy_from_user_inatomic(dst,
95657- (__force const void __user *)src, size);
95658+ (const void __force_user *)src, size);
95659 pagefault_enable();
95660 set_fs(old_fs);
95661
95662@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
95663
95664 set_fs(KERNEL_DS);
95665 pagefault_disable();
95666- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
95667+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
95668 pagefault_enable();
95669 set_fs(old_fs);
95670
95671diff --git a/mm/madvise.c b/mm/madvise.c
95672index a271adc..831d82f 100644
95673--- a/mm/madvise.c
95674+++ b/mm/madvise.c
95675@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
95676 pgoff_t pgoff;
95677 unsigned long new_flags = vma->vm_flags;
95678
95679+#ifdef CONFIG_PAX_SEGMEXEC
95680+ struct vm_area_struct *vma_m;
95681+#endif
95682+
95683 switch (behavior) {
95684 case MADV_NORMAL:
95685 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
95686@@ -126,6 +130,13 @@ success:
95687 /*
95688 * vm_flags is protected by the mmap_sem held in write mode.
95689 */
95690+
95691+#ifdef CONFIG_PAX_SEGMEXEC
95692+ vma_m = pax_find_mirror_vma(vma);
95693+ if (vma_m)
95694+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
95695+#endif
95696+
95697 vma->vm_flags = new_flags;
95698
95699 out:
95700@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95701 struct vm_area_struct **prev,
95702 unsigned long start, unsigned long end)
95703 {
95704+
95705+#ifdef CONFIG_PAX_SEGMEXEC
95706+ struct vm_area_struct *vma_m;
95707+#endif
95708+
95709 *prev = vma;
95710 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
95711 return -EINVAL;
95712@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95713 zap_page_range(vma, start, end - start, &details);
95714 } else
95715 zap_page_range(vma, start, end - start, NULL);
95716+
95717+#ifdef CONFIG_PAX_SEGMEXEC
95718+ vma_m = pax_find_mirror_vma(vma);
95719+ if (vma_m) {
95720+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
95721+ struct zap_details details = {
95722+ .nonlinear_vma = vma_m,
95723+ .last_index = ULONG_MAX,
95724+ };
95725+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
95726+ } else
95727+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
95728+ }
95729+#endif
95730+
95731 return 0;
95732 }
95733
95734@@ -488,6 +519,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
95735 if (end < start)
95736 return error;
95737
95738+#ifdef CONFIG_PAX_SEGMEXEC
95739+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
95740+ if (end > SEGMEXEC_TASK_SIZE)
95741+ return error;
95742+ } else
95743+#endif
95744+
95745+ if (end > TASK_SIZE)
95746+ return error;
95747+
95748 error = 0;
95749 if (end == start)
95750 return error;
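
madvise_behavior() and madvise_dontneed() gain the mirroring boilerplate that recurs throughout the mm/ hunks: under PAX_SEGMEXEC the address space is split at SEGMEXEC_TASK_SIZE, executable mappings get a shadow vma in the upper half, and every flag change or zap on the lower vma must be replayed on the mirror. The SYSCALL_DEFINE3 hunk additionally range-checks the request against the shrunken task size. The recurring shape, with a hypothetical helper standing in for the per-call work:

/* Sketch of the recurring SEGMEXEC pattern; apply_same_change() is
 * hypothetical, standing in for the flag update, zap, or pte write
 * that the surrounding code performs on the primary vma. */
static void segmexec_replay(struct vm_area_struct *vma, unsigned long addr)
{
	struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);

	if (vma_m)
		apply_same_change(vma_m, addr + SEGMEXEC_TASK_SIZE);
}
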
95751diff --git a/mm/memory-failure.c b/mm/memory-failure.c
95752index feb803b..d382029 100644
95753--- a/mm/memory-failure.c
95754+++ b/mm/memory-failure.c
95755@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
95756
95757 int sysctl_memory_failure_recovery __read_mostly = 1;
95758
95759-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95760+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95761
95762 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
95763
95764@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
95765 pfn, t->comm, t->pid);
95766 si.si_signo = SIGBUS;
95767 si.si_errno = 0;
95768- si.si_addr = (void *)addr;
95769+ si.si_addr = (void __user *)addr;
95770 #ifdef __ARCH_SI_TRAPNO
95771 si.si_trapno = trapno;
95772 #endif
95773@@ -786,7 +786,7 @@ static struct page_state {
95774 unsigned long res;
95775 char *msg;
95776 int (*action)(struct page *p, unsigned long pfn);
95777-} error_states[] = {
95778+} __do_const error_states[] = {
95779 { reserved, reserved, "reserved kernel", me_kernel },
95780 /*
95781 * free pages are specially detected outside this table:
95782@@ -1094,7 +1094,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95783 nr_pages = 1 << compound_order(hpage);
95784 else /* normal page or thp */
95785 nr_pages = 1;
95786- atomic_long_add(nr_pages, &num_poisoned_pages);
95787+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
95788
95789 /*
95790 * We need/can do nothing about count=0 pages.
95791@@ -1123,7 +1123,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95792 if (PageHWPoison(hpage)) {
95793 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
95794 || (p != hpage && TestSetPageHWPoison(hpage))) {
95795- atomic_long_sub(nr_pages, &num_poisoned_pages);
95796+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95797 unlock_page(hpage);
95798 return 0;
95799 }
95800@@ -1191,14 +1191,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95801 */
95802 if (!PageHWPoison(p)) {
95803 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
95804- atomic_long_sub(nr_pages, &num_poisoned_pages);
95805+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95806 put_page(hpage);
95807 res = 0;
95808 goto out;
95809 }
95810 if (hwpoison_filter(p)) {
95811 if (TestClearPageHWPoison(p))
95812- atomic_long_sub(nr_pages, &num_poisoned_pages);
95813+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95814 unlock_page(hpage);
95815 put_page(hpage);
95816 return 0;
95817@@ -1428,7 +1428,7 @@ int unpoison_memory(unsigned long pfn)
95818 return 0;
95819 }
95820 if (TestClearPageHWPoison(p))
95821- atomic_long_dec(&num_poisoned_pages);
95822+ atomic_long_dec_unchecked(&num_poisoned_pages);
95823 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
95824 return 0;
95825 }
95826@@ -1442,7 +1442,7 @@ int unpoison_memory(unsigned long pfn)
95827 */
95828 if (TestClearPageHWPoison(page)) {
95829 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
95830- atomic_long_sub(nr_pages, &num_poisoned_pages);
95831+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95832 freeit = 1;
95833 if (PageHuge(page))
95834 clear_page_hwpoison_huge_page(page);
95835@@ -1567,11 +1567,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
95836 if (PageHuge(page)) {
95837 set_page_hwpoison_huge_page(hpage);
95838 dequeue_hwpoisoned_huge_page(hpage);
95839- atomic_long_add(1 << compound_order(hpage),
95840+ atomic_long_add_unchecked(1 << compound_order(hpage),
95841 &num_poisoned_pages);
95842 } else {
95843 SetPageHWPoison(page);
95844- atomic_long_inc(&num_poisoned_pages);
95845+ atomic_long_inc_unchecked(&num_poisoned_pages);
95846 }
95847 }
95848 return ret;
95849@@ -1610,7 +1610,7 @@ static int __soft_offline_page(struct page *page, int flags)
95850 put_page(page);
95851 pr_info("soft_offline: %#lx: invalidated\n", pfn);
95852 SetPageHWPoison(page);
95853- atomic_long_inc(&num_poisoned_pages);
95854+ atomic_long_inc_unchecked(&num_poisoned_pages);
95855 return 0;
95856 }
95857
95858@@ -1661,7 +1661,7 @@ static int __soft_offline_page(struct page *page, int flags)
95859 if (!is_free_buddy_page(page))
95860 pr_info("soft offline: %#lx: page leaked\n",
95861 pfn);
95862- atomic_long_inc(&num_poisoned_pages);
95863+ atomic_long_inc_unchecked(&num_poisoned_pages);
95864 }
95865 } else {
95866 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
95867@@ -1731,11 +1731,11 @@ int soft_offline_page(struct page *page, int flags)
95868 if (PageHuge(page)) {
95869 set_page_hwpoison_huge_page(hpage);
95870 dequeue_hwpoisoned_huge_page(hpage);
95871- atomic_long_add(1 << compound_order(hpage),
95872+ atomic_long_add_unchecked(1 << compound_order(hpage),
95873 &num_poisoned_pages);
95874 } else {
95875 SetPageHWPoison(page);
95876- atomic_long_inc(&num_poisoned_pages);
95877+ atomic_long_inc_unchecked(&num_poisoned_pages);
95878 }
95879 }
95880 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
95881diff --git a/mm/memory.c b/mm/memory.c
95882index 2c3536c..e800104 100644
95883--- a/mm/memory.c
95884+++ b/mm/memory.c
95885@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
95886 free_pte_range(tlb, pmd, addr);
95887 } while (pmd++, addr = next, addr != end);
95888
95889+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
95890 start &= PUD_MASK;
95891 if (start < floor)
95892 return;
95893@@ -428,6 +429,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
95894 pmd = pmd_offset(pud, start);
95895 pud_clear(pud);
95896 pmd_free_tlb(tlb, pmd, start);
95897+#endif
95898+
95899 }
95900
95901 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95902@@ -447,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95903 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
95904 } while (pud++, addr = next, addr != end);
95905
95906+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
95907 start &= PGDIR_MASK;
95908 if (start < floor)
95909 return;
95910@@ -461,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95911 pud = pud_offset(pgd, start);
95912 pgd_clear(pgd);
95913 pud_free_tlb(tlb, pud, start);
95914+#endif
95915+
95916 }
95917
95918 /*
95919@@ -690,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
95920 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
95921 */
95922 if (vma->vm_ops)
95923- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
95924+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
95925 vma->vm_ops->fault);
95926 if (vma->vm_file)
95927- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
95928+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
95929 vma->vm_file->f_op->mmap);
95930 dump_stack();
95931 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
95932@@ -1488,6 +1494,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
95933 page_add_file_rmap(page);
95934 set_pte_at(mm, addr, pte, mk_pte(page, prot));
95935
95936+#ifdef CONFIG_PAX_SEGMEXEC
95937+ pax_mirror_file_pte(vma, addr, page, ptl);
95938+#endif
95939+
95940 retval = 0;
95941 pte_unmap_unlock(pte, ptl);
95942 return retval;
95943@@ -1532,9 +1542,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
95944 if (!page_count(page))
95945 return -EINVAL;
95946 if (!(vma->vm_flags & VM_MIXEDMAP)) {
95947+
95948+#ifdef CONFIG_PAX_SEGMEXEC
95949+ struct vm_area_struct *vma_m;
95950+#endif
95951+
95952 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
95953 BUG_ON(vma->vm_flags & VM_PFNMAP);
95954 vma->vm_flags |= VM_MIXEDMAP;
95955+
95956+#ifdef CONFIG_PAX_SEGMEXEC
95957+ vma_m = pax_find_mirror_vma(vma);
95958+ if (vma_m)
95959+ vma_m->vm_flags |= VM_MIXEDMAP;
95960+#endif
95961+
95962 }
95963 return insert_page(vma, addr, page, vma->vm_page_prot);
95964 }
95965@@ -1617,6 +1639,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
95966 unsigned long pfn)
95967 {
95968 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
95969+ BUG_ON(vma->vm_mirror);
95970
95971 if (addr < vma->vm_start || addr >= vma->vm_end)
95972 return -EFAULT;
95973@@ -1864,7 +1887,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
95974
95975 BUG_ON(pud_huge(*pud));
95976
95977- pmd = pmd_alloc(mm, pud, addr);
95978+ pmd = (mm == &init_mm) ?
95979+ pmd_alloc_kernel(mm, pud, addr) :
95980+ pmd_alloc(mm, pud, addr);
95981 if (!pmd)
95982 return -ENOMEM;
95983 do {
95984@@ -1884,7 +1909,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
95985 unsigned long next;
95986 int err;
95987
95988- pud = pud_alloc(mm, pgd, addr);
95989+ pud = (mm == &init_mm) ?
95990+ pud_alloc_kernel(mm, pgd, addr) :
95991+ pud_alloc(mm, pgd, addr);
95992 if (!pud)
95993 return -ENOMEM;
95994 do {
95995@@ -2006,6 +2033,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
95996 return ret;
95997 }
95998
95999+#ifdef CONFIG_PAX_SEGMEXEC
96000+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96001+{
96002+ struct mm_struct *mm = vma->vm_mm;
96003+ spinlock_t *ptl;
96004+ pte_t *pte, entry;
96005+
96006+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96007+ entry = *pte;
96008+ if (!pte_present(entry)) {
96009+ if (!pte_none(entry)) {
96010+ BUG_ON(pte_file(entry));
96011+ free_swap_and_cache(pte_to_swp_entry(entry));
96012+ pte_clear_not_present_full(mm, address, pte, 0);
96013+ }
96014+ } else {
96015+ struct page *page;
96016+
96017+ flush_cache_page(vma, address, pte_pfn(entry));
96018+ entry = ptep_clear_flush(vma, address, pte);
96019+ BUG_ON(pte_dirty(entry));
96020+ page = vm_normal_page(vma, address, entry);
96021+ if (page) {
96022+ update_hiwater_rss(mm);
96023+ if (PageAnon(page))
96024+ dec_mm_counter_fast(mm, MM_ANONPAGES);
96025+ else
96026+ dec_mm_counter_fast(mm, MM_FILEPAGES);
96027+ page_remove_rmap(page);
96028+ page_cache_release(page);
96029+ }
96030+ }
96031+ pte_unmap_unlock(pte, ptl);
96032+}
96033+
96034+/* PaX: if vma is mirrored, synchronize the mirror's PTE
96035+ *
96036+ * the ptl of the lower mapped page is held on entry and is not released on exit
96037+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
96038+ */
96039+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96040+{
96041+ struct mm_struct *mm = vma->vm_mm;
96042+ unsigned long address_m;
96043+ spinlock_t *ptl_m;
96044+ struct vm_area_struct *vma_m;
96045+ pmd_t *pmd_m;
96046+ pte_t *pte_m, entry_m;
96047+
96048+ BUG_ON(!page_m || !PageAnon(page_m));
96049+
96050+ vma_m = pax_find_mirror_vma(vma);
96051+ if (!vma_m)
96052+ return;
96053+
96054+ BUG_ON(!PageLocked(page_m));
96055+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96056+ address_m = address + SEGMEXEC_TASK_SIZE;
96057+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96058+ pte_m = pte_offset_map(pmd_m, address_m);
96059+ ptl_m = pte_lockptr(mm, pmd_m);
96060+ if (ptl != ptl_m) {
96061+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96062+ if (!pte_none(*pte_m))
96063+ goto out;
96064+ }
96065+
96066+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96067+ page_cache_get(page_m);
96068+ page_add_anon_rmap(page_m, vma_m, address_m);
96069+ inc_mm_counter_fast(mm, MM_ANONPAGES);
96070+ set_pte_at(mm, address_m, pte_m, entry_m);
96071+ update_mmu_cache(vma_m, address_m, pte_m);
96072+out:
96073+ if (ptl != ptl_m)
96074+ spin_unlock(ptl_m);
96075+ pte_unmap(pte_m);
96076+ unlock_page(page_m);
96077+}
96078+
96079+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96080+{
96081+ struct mm_struct *mm = vma->vm_mm;
96082+ unsigned long address_m;
96083+ spinlock_t *ptl_m;
96084+ struct vm_area_struct *vma_m;
96085+ pmd_t *pmd_m;
96086+ pte_t *pte_m, entry_m;
96087+
96088+ BUG_ON(!page_m || PageAnon(page_m));
96089+
96090+ vma_m = pax_find_mirror_vma(vma);
96091+ if (!vma_m)
96092+ return;
96093+
96094+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96095+ address_m = address + SEGMEXEC_TASK_SIZE;
96096+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96097+ pte_m = pte_offset_map(pmd_m, address_m);
96098+ ptl_m = pte_lockptr(mm, pmd_m);
96099+ if (ptl != ptl_m) {
96100+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96101+ if (!pte_none(*pte_m))
96102+ goto out;
96103+ }
96104+
96105+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96106+ page_cache_get(page_m);
96107+ page_add_file_rmap(page_m);
96108+ inc_mm_counter_fast(mm, MM_FILEPAGES);
96109+ set_pte_at(mm, address_m, pte_m, entry_m);
96110+ update_mmu_cache(vma_m, address_m, pte_m);
96111+out:
96112+ if (ptl != ptl_m)
96113+ spin_unlock(ptl_m);
96114+ pte_unmap(pte_m);
96115+}
96116+
96117+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96118+{
96119+ struct mm_struct *mm = vma->vm_mm;
96120+ unsigned long address_m;
96121+ spinlock_t *ptl_m;
96122+ struct vm_area_struct *vma_m;
96123+ pmd_t *pmd_m;
96124+ pte_t *pte_m, entry_m;
96125+
96126+ vma_m = pax_find_mirror_vma(vma);
96127+ if (!vma_m)
96128+ return;
96129+
96130+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96131+ address_m = address + SEGMEXEC_TASK_SIZE;
96132+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96133+ pte_m = pte_offset_map(pmd_m, address_m);
96134+ ptl_m = pte_lockptr(mm, pmd_m);
96135+ if (ptl != ptl_m) {
96136+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96137+ if (!pte_none(*pte_m))
96138+ goto out;
96139+ }
96140+
96141+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96142+ set_pte_at(mm, address_m, pte_m, entry_m);
96143+out:
96144+ if (ptl != ptl_m)
96145+ spin_unlock(ptl_m);
96146+ pte_unmap(pte_m);
96147+}
96148+
96149+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96150+{
96151+ struct page *page_m;
96152+ pte_t entry;
96153+
96154+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96155+ goto out;
96156+
96157+ entry = *pte;
96158+ page_m = vm_normal_page(vma, address, entry);
96159+ if (!page_m)
96160+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96161+ else if (PageAnon(page_m)) {
96162+ if (pax_find_mirror_vma(vma)) {
96163+ pte_unmap_unlock(pte, ptl);
96164+ lock_page(page_m);
96165+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96166+ if (pte_same(entry, *pte))
96167+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96168+ else
96169+ unlock_page(page_m);
96170+ }
96171+ } else
96172+ pax_mirror_file_pte(vma, address, page_m, ptl);
96173+
96174+out:
96175+ pte_unmap_unlock(pte, ptl);
96176+}
96177+#endif
96178+
96179 /*
96180 * This routine handles present pages, when users try to write
96181 * to a shared page. It is done by copying the page to a new address
96182@@ -2212,6 +2419,12 @@ gotten:
96183 */
96184 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96185 if (likely(pte_same(*page_table, orig_pte))) {
96186+
96187+#ifdef CONFIG_PAX_SEGMEXEC
96188+ if (pax_find_mirror_vma(vma))
96189+ BUG_ON(!trylock_page(new_page));
96190+#endif
96191+
96192 if (old_page) {
96193 if (!PageAnon(old_page)) {
96194 dec_mm_counter_fast(mm, MM_FILEPAGES);
96195@@ -2265,6 +2478,10 @@ gotten:
96196 page_remove_rmap(old_page);
96197 }
96198
96199+#ifdef CONFIG_PAX_SEGMEXEC
96200+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96201+#endif
96202+
96203 /* Free the old page.. */
96204 new_page = old_page;
96205 ret |= VM_FAULT_WRITE;
96206@@ -2539,6 +2756,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96207 swap_free(entry);
96208 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
96209 try_to_free_swap(page);
96210+
96211+#ifdef CONFIG_PAX_SEGMEXEC
96212+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
96213+#endif
96214+
96215 unlock_page(page);
96216 if (page != swapcache) {
96217 /*
96218@@ -2562,6 +2784,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96219
96220 /* No need to invalidate - it was non-present before */
96221 update_mmu_cache(vma, address, page_table);
96222+
96223+#ifdef CONFIG_PAX_SEGMEXEC
96224+ pax_mirror_anon_pte(vma, address, page, ptl);
96225+#endif
96226+
96227 unlock:
96228 pte_unmap_unlock(page_table, ptl);
96229 out:
96230@@ -2581,40 +2808,6 @@ out_release:
96231 }
96232
96233 /*
96234- * This is like a special single-page "expand_{down|up}wards()",
96235- * except we must first make sure that 'address{-|+}PAGE_SIZE'
96236- * doesn't hit another vma.
96237- */
96238-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
96239-{
96240- address &= PAGE_MASK;
96241- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
96242- struct vm_area_struct *prev = vma->vm_prev;
96243-
96244- /*
96245- * Is there a mapping abutting this one below?
96246- *
96247- * That's only ok if it's the same stack mapping
96248- * that has gotten split..
96249- */
96250- if (prev && prev->vm_end == address)
96251- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
96252-
96253- return expand_downwards(vma, address - PAGE_SIZE);
96254- }
96255- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
96256- struct vm_area_struct *next = vma->vm_next;
96257-
96258- /* As VM_GROWSDOWN but s/below/above/ */
96259- if (next && next->vm_start == address + PAGE_SIZE)
96260- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
96261-
96262- return expand_upwards(vma, address + PAGE_SIZE);
96263- }
96264- return 0;
96265-}
96266-
96267-/*
96268 * We enter with non-exclusive mmap_sem (to exclude vma changes,
96269 * but allow concurrent faults), and pte mapped but not yet locked.
96270 * We return with mmap_sem still held, but pte unmapped and unlocked.
96271@@ -2624,27 +2817,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96272 unsigned int flags)
96273 {
96274 struct mem_cgroup *memcg;
96275- struct page *page;
96276+ struct page *page = NULL;
96277 spinlock_t *ptl;
96278 pte_t entry;
96279
96280- pte_unmap(page_table);
96281-
96282- /* Check if we need to add a guard page to the stack */
96283- if (check_stack_guard_page(vma, address) < 0)
96284- return VM_FAULT_SIGSEGV;
96285-
96286- /* Use the zero-page for reads */
96287 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
96288 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
96289 vma->vm_page_prot));
96290- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96291+ ptl = pte_lockptr(mm, pmd);
96292+ spin_lock(ptl);
96293 if (!pte_none(*page_table))
96294 goto unlock;
96295 goto setpte;
96296 }
96297
96298 /* Allocate our own private page. */
96299+ pte_unmap(page_table);
96300+
96301 if (unlikely(anon_vma_prepare(vma)))
96302 goto oom;
96303 page = alloc_zeroed_user_highpage_movable(vma, address);
96304@@ -2668,6 +2857,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96305 if (!pte_none(*page_table))
96306 goto release;
96307
96308+#ifdef CONFIG_PAX_SEGMEXEC
96309+ if (pax_find_mirror_vma(vma))
96310+ BUG_ON(!trylock_page(page));
96311+#endif
96312+
96313 inc_mm_counter_fast(mm, MM_ANONPAGES);
96314 page_add_new_anon_rmap(page, vma, address);
96315 mem_cgroup_commit_charge(page, memcg, false);
96316@@ -2677,6 +2871,12 @@ setpte:
96317
96318 /* No need to invalidate - it was non-present before */
96319 update_mmu_cache(vma, address, page_table);
96320+
96321+#ifdef CONFIG_PAX_SEGMEXEC
96322+ if (page)
96323+ pax_mirror_anon_pte(vma, address, page, ptl);
96324+#endif
96325+
96326 unlock:
96327 pte_unmap_unlock(page_table, ptl);
96328 return 0;
96329@@ -2907,6 +3107,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96330 return ret;
96331 }
96332 do_set_pte(vma, address, fault_page, pte, false, false);
96333+
96334+#ifdef CONFIG_PAX_SEGMEXEC
96335+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96336+#endif
96337+
96338 unlock_page(fault_page);
96339 unlock_out:
96340 pte_unmap_unlock(pte, ptl);
96341@@ -2949,7 +3154,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96342 page_cache_release(fault_page);
96343 goto uncharge_out;
96344 }
96345+
96346+#ifdef CONFIG_PAX_SEGMEXEC
96347+ if (pax_find_mirror_vma(vma))
96348+ BUG_ON(!trylock_page(new_page));
96349+#endif
96350+
96351 do_set_pte(vma, address, new_page, pte, true, true);
96352+
96353+#ifdef CONFIG_PAX_SEGMEXEC
96354+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96355+#endif
96356+
96357 mem_cgroup_commit_charge(new_page, memcg, false);
96358 lru_cache_add_active_or_unevictable(new_page, vma);
96359 pte_unmap_unlock(pte, ptl);
96360@@ -2999,6 +3215,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96361 return ret;
96362 }
96363 do_set_pte(vma, address, fault_page, pte, true, false);
96364+
96365+#ifdef CONFIG_PAX_SEGMEXEC
96366+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96367+#endif
96368+
96369 pte_unmap_unlock(pte, ptl);
96370
96371 if (set_page_dirty(fault_page))
96372@@ -3255,6 +3476,12 @@ static int handle_pte_fault(struct mm_struct *mm,
96373 if (flags & FAULT_FLAG_WRITE)
96374 flush_tlb_fix_spurious_fault(vma, address);
96375 }
96376+
96377+#ifdef CONFIG_PAX_SEGMEXEC
96378+ pax_mirror_pte(vma, address, pte, pmd, ptl);
96379+ return 0;
96380+#endif
96381+
96382 unlock:
96383 pte_unmap_unlock(pte, ptl);
96384 return 0;
96385@@ -3274,9 +3501,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96386 pmd_t *pmd;
96387 pte_t *pte;
96388
96389+#ifdef CONFIG_PAX_SEGMEXEC
96390+ struct vm_area_struct *vma_m;
96391+#endif
96392+
96393 if (unlikely(is_vm_hugetlb_page(vma)))
96394 return hugetlb_fault(mm, vma, address, flags);
96395
96396+#ifdef CONFIG_PAX_SEGMEXEC
96397+ vma_m = pax_find_mirror_vma(vma);
96398+ if (vma_m) {
96399+ unsigned long address_m;
96400+ pgd_t *pgd_m;
96401+ pud_t *pud_m;
96402+ pmd_t *pmd_m;
96403+
96404+ if (vma->vm_start > vma_m->vm_start) {
96405+ address_m = address;
96406+ address -= SEGMEXEC_TASK_SIZE;
96407+ vma = vma_m;
96408+ } else
96409+ address_m = address + SEGMEXEC_TASK_SIZE;
96410+
96411+ pgd_m = pgd_offset(mm, address_m);
96412+ pud_m = pud_alloc(mm, pgd_m, address_m);
96413+ if (!pud_m)
96414+ return VM_FAULT_OOM;
96415+ pmd_m = pmd_alloc(mm, pud_m, address_m);
96416+ if (!pmd_m)
96417+ return VM_FAULT_OOM;
96418+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
96419+ return VM_FAULT_OOM;
96420+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
96421+ }
96422+#endif
96423+
96424 pgd = pgd_offset(mm, address);
96425 pud = pud_alloc(mm, pgd, address);
96426 if (!pud)
96427@@ -3411,6 +3670,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96428 spin_unlock(&mm->page_table_lock);
96429 return 0;
96430 }
96431+
96432+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96433+{
96434+ pud_t *new = pud_alloc_one(mm, address);
96435+ if (!new)
96436+ return -ENOMEM;
96437+
96438+ smp_wmb(); /* See comment in __pte_alloc */
96439+
96440+ spin_lock(&mm->page_table_lock);
96441+ if (pgd_present(*pgd)) /* Another has populated it */
96442+ pud_free(mm, new);
96443+ else
96444+ pgd_populate_kernel(mm, pgd, new);
96445+ spin_unlock(&mm->page_table_lock);
96446+ return 0;
96447+}
96448 #endif /* __PAGETABLE_PUD_FOLDED */
96449
96450 #ifndef __PAGETABLE_PMD_FOLDED
96451@@ -3441,6 +3717,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
96452 spin_unlock(&mm->page_table_lock);
96453 return 0;
96454 }
96455+
96456+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
96457+{
96458+ pmd_t *new = pmd_alloc_one(mm, address);
96459+ if (!new)
96460+ return -ENOMEM;
96461+
96462+ smp_wmb(); /* See comment in __pte_alloc */
96463+
96464+ spin_lock(&mm->page_table_lock);
96465+#ifndef __ARCH_HAS_4LEVEL_HACK
96466+ if (pud_present(*pud)) /* Another has populated it */
96467+ pmd_free(mm, new);
96468+ else
96469+ pud_populate_kernel(mm, pud, new);
96470+#else
96471+ if (pgd_present(*pud)) /* Another has populated it */
96472+ pmd_free(mm, new);
96473+ else
96474+ pgd_populate_kernel(mm, pud, new);
96475+#endif /* __ARCH_HAS_4LEVEL_HACK */
96476+ spin_unlock(&mm->page_table_lock);
96477+ return 0;
96478+}
96479 #endif /* __PAGETABLE_PMD_FOLDED */
96480
96481 static int __follow_pte(struct mm_struct *mm, unsigned long address,
96482@@ -3550,8 +3850,8 @@ out:
96483 return ret;
96484 }
96485
96486-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96487- void *buf, int len, int write)
96488+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96489+ void *buf, size_t len, int write)
96490 {
96491 resource_size_t phys_addr;
96492 unsigned long prot = 0;
96493@@ -3577,8 +3877,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
96494 * Access another process' address space as given in mm. If non-NULL, use the
96495 * given task for page fault accounting.
96496 */
96497-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96498- unsigned long addr, void *buf, int len, int write)
96499+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96500+ unsigned long addr, void *buf, size_t len, int write)
96501 {
96502 struct vm_area_struct *vma;
96503 void *old_buf = buf;
96504@@ -3586,7 +3886,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96505 down_read(&mm->mmap_sem);
96506 /* ignore errors, just check how much was successfully transferred */
96507 while (len) {
96508- int bytes, ret, offset;
96509+ ssize_t bytes, ret, offset;
96510 void *maddr;
96511 struct page *page = NULL;
96512
96513@@ -3647,8 +3947,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96514 *
96515 * The caller must hold a reference on @mm.
96516 */
96517-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96518- void *buf, int len, int write)
96519+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
96520+ void *buf, size_t len, int write)
96521 {
96522 return __access_remote_vm(NULL, mm, addr, buf, len, write);
96523 }
96524@@ -3658,11 +3958,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96525 * Source/target buffer must be kernel space,
96526 * Do not walk the page table directly, use get_user_pages
96527 */
96528-int access_process_vm(struct task_struct *tsk, unsigned long addr,
96529- void *buf, int len, int write)
96530+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
96531+ void *buf, size_t len, int write)
96532 {
96533 struct mm_struct *mm;
96534- int ret;
96535+ ssize_t ret;
96536
96537 mm = get_task_mm(tsk);
96538 if (!mm)
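
The pax_mirror_*_pte() helpers added above keep two virtual views of the same physical page in sync: under SEGMEXEC the i386 userland is split in half (SEGMEXEC_TASK_SIZE is 1.5 GB), every executable mapping in the lower half gets a mirror exactly SEGMEXEC_TASK_SIZE higher, and the CS segment limit routes instruction fetches to the upper copy only. A rough user-space analogy of such mirroring, using only Linux APIs (memfd_create() needs glibc 2.27+); the kernel code above does the equivalent at PTE granularity, under the page-table locks:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("mirror-demo", 0);

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	/* "data" view: read/write, like the lower SEGMEXEC half */
	char *data = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	/* "mirror" view of the very same page, like the upper half */
	char *mirror = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);

	if (data == MAP_FAILED || mirror == MAP_FAILED)
		return 1;

	strcpy(data, "written through the lower view");
	printf("mirror sees: %s\n", mirror);	/* one physical page */
	return 0;
}
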
96539diff --git a/mm/mempolicy.c b/mm/mempolicy.c
96540index 0e0961b..c9143b9 100644
96541--- a/mm/mempolicy.c
96542+++ b/mm/mempolicy.c
96543@@ -744,6 +744,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96544 unsigned long vmstart;
96545 unsigned long vmend;
96546
96547+#ifdef CONFIG_PAX_SEGMEXEC
96548+ struct vm_area_struct *vma_m;
96549+#endif
96550+
96551 vma = find_vma(mm, start);
96552 if (!vma || vma->vm_start > start)
96553 return -EFAULT;
96554@@ -787,6 +791,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96555 err = vma_replace_policy(vma, new_pol);
96556 if (err)
96557 goto out;
96558+
96559+#ifdef CONFIG_PAX_SEGMEXEC
96560+ vma_m = pax_find_mirror_vma(vma);
96561+ if (vma_m) {
96562+ err = vma_replace_policy(vma_m, new_pol);
96563+ if (err)
96564+ goto out;
96565+ }
96566+#endif
96567+
96568 }
96569
96570 out:
96571@@ -1201,6 +1215,17 @@ static long do_mbind(unsigned long start, unsigned long len,
96572
96573 if (end < start)
96574 return -EINVAL;
96575+
96576+#ifdef CONFIG_PAX_SEGMEXEC
96577+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
96578+ if (end > SEGMEXEC_TASK_SIZE)
96579+ return -EINVAL;
96580+ } else
96581+#endif
96582+
96583+ if (end > TASK_SIZE)
96584+ return -EINVAL;
96585+
96586 if (end == start)
96587 return 0;
96588
96589@@ -1426,8 +1451,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96590 */
96591 tcred = __task_cred(task);
96592 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96593- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96594- !capable(CAP_SYS_NICE)) {
96595+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96596 rcu_read_unlock();
96597 err = -EPERM;
96598 goto out_put;
96599@@ -1458,6 +1482,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96600 goto out;
96601 }
96602
96603+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96604+ if (mm != current->mm &&
96605+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96606+ mmput(mm);
96607+ err = -EPERM;
96608+ goto out;
96609+ }
96610+#endif
96611+
96612 err = do_migrate_pages(mm, old, new,
96613 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
96614
96615diff --git a/mm/migrate.c b/mm/migrate.c
96616index 344cdf6..07399500 100644
96617--- a/mm/migrate.c
96618+++ b/mm/migrate.c
96619@@ -1503,8 +1503,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96620 */
96621 tcred = __task_cred(task);
96622 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96623- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96624- !capable(CAP_SYS_NICE)) {
96625+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96626 rcu_read_unlock();
96627 err = -EPERM;
96628 goto out;
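
Both of the credential hunks above (mm/mempolicy.c and mm/migrate.c) drop the cred->uid vs. tcred->uid comparison, so a merely matching real uid no longer grants access to another process's pages by itself. The surviving rule, restated as a standalone predicate (the helper name and parameters are illustrative, not kernel identifiers):

#include <stdbool.h>
#include <sys/types.h>

/* may the caller migrate/move pages of the target task? */
static bool may_touch_task(uid_t uid, uid_t euid,	/* caller */
			   uid_t tuid, uid_t tsuid,	/* target */
			   bool cap_sys_nice)
{
	return euid == tsuid || euid == tuid ||
	       uid == tsuid || cap_sys_nice;
}
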
96629diff --git a/mm/mlock.c b/mm/mlock.c
96630index 73cf098..ab547c7 100644
96631--- a/mm/mlock.c
96632+++ b/mm/mlock.c
96633@@ -14,6 +14,7 @@
96634 #include <linux/pagevec.h>
96635 #include <linux/mempolicy.h>
96636 #include <linux/syscalls.h>
96637+#include <linux/security.h>
96638 #include <linux/sched.h>
96639 #include <linux/export.h>
96640 #include <linux/rmap.h>
96641@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
96642 {
96643 unsigned long nstart, end, tmp;
96644 struct vm_area_struct * vma, * prev;
96645- int error;
96646+ int error = 0;
96647
96648 VM_BUG_ON(start & ~PAGE_MASK);
96649 VM_BUG_ON(len != PAGE_ALIGN(len));
96650@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
96651 return -EINVAL;
96652 if (end == start)
96653 return 0;
96654+ if (end > TASK_SIZE)
96655+ return -EINVAL;
96656+
96657 vma = find_vma(current->mm, start);
96658 if (!vma || vma->vm_start > start)
96659 return -ENOMEM;
96660@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
96661 for (nstart = start ; ; ) {
96662 vm_flags_t newflags;
96663
96664+#ifdef CONFIG_PAX_SEGMEXEC
96665+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96666+ break;
96667+#endif
96668+
96669 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
96670
96671 newflags = vma->vm_flags & ~VM_LOCKED;
96672@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
96673 locked += current->mm->locked_vm;
96674
96675 /* check against resource limits */
96676+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
96677 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
96678 error = do_mlock(start, len, 1);
96679
96680@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
96681 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
96682 vm_flags_t newflags;
96683
96684+#ifdef CONFIG_PAX_SEGMEXEC
96685+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96686+ break;
96687+#endif
96688+
96689 newflags = vma->vm_flags & ~VM_LOCKED;
96690 if (flags & MCL_CURRENT)
96691 newflags |= VM_LOCKED;
96692@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
96693 lock_limit >>= PAGE_SHIFT;
96694
96695 ret = -ENOMEM;
96696+
96697+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
96698+
96699 down_write(&current->mm->mmap_sem);
96700-
96701 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
96702 capable(CAP_IPC_LOCK))
96703 ret = do_mlockall(flags);
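
The gr_learn_resource() calls added to the mlock paths feed grsecurity's RBAC learning mode: rather than enforcing anything, they report the largest value of a resource (RLIMIT_MEMLOCK here) a subject was ever seen requesting, so a policy with tight limits can later be generated from observed behaviour. A toy sketch of that record-the-high-water-mark pattern (the real hook routes through the grsecurity ACL code, not a flat array):

#include <stdio.h>
#include <sys/resource.h>	/* RLIMIT_MEMLOCK, RLIM_NLIMITS */

static unsigned long learned_max[RLIM_NLIMITS];

/* observe, never deny */
static void learn_resource(int res, unsigned long wanted)
{
	if (wanted > learned_max[res])
		learned_max[res] = wanted;
}

int main(void)
{
	learn_resource(RLIMIT_MEMLOCK, 1UL << 20);
	learn_resource(RLIMIT_MEMLOCK, 1UL << 16);	/* smaller: ignored */
	printf("suggested RLIMIT_MEMLOCK: %lu bytes\n",
	       learned_max[RLIMIT_MEMLOCK]);
	return 0;
}
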
96704diff --git a/mm/mmap.c b/mm/mmap.c
96705index 7f684d5..bb9333f 100644
96706--- a/mm/mmap.c
96707+++ b/mm/mmap.c
96708@@ -41,6 +41,7 @@
96709 #include <linux/notifier.h>
96710 #include <linux/memory.h>
96711 #include <linux/printk.h>
96712+#include <linux/random.h>
96713
96714 #include <asm/uaccess.h>
96715 #include <asm/cacheflush.h>
96716@@ -57,6 +58,16 @@
96717 #define arch_rebalance_pgtables(addr, len) (addr)
96718 #endif
96719
96720+static inline void verify_mm_writelocked(struct mm_struct *mm)
96721+{
96722+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
96723+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
96724+ up_read(&mm->mmap_sem);
96725+ BUG();
96726+ }
96727+#endif
96728+}
96729+
96730 static void unmap_region(struct mm_struct *mm,
96731 struct vm_area_struct *vma, struct vm_area_struct *prev,
96732 unsigned long start, unsigned long end);
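
verify_mm_writelocked() above relies on a small rwsem trick: if down_read_trylock() succeeds, then no writer can be holding mmap_sem, so the caller has violated the locking contract and the kernel BUG()s. The same assertion can be built on any reader-writer lock; with POSIX rwlocks, for example (compile with -pthread):

#include <assert.h>
#include <pthread.h>

/* a successful read trylock proves nobody holds the write side,
 * the caller included -- the same logic as verify_mm_writelocked() */
static void assert_write_locked(pthread_rwlock_t *l)
{
	if (pthread_rwlock_tryrdlock(l) == 0) {
		pthread_rwlock_unlock(l);
		assert(!"caller must hold the write lock");
	}
}

int main(void)
{
	pthread_rwlock_t l = PTHREAD_RWLOCK_INITIALIZER;

	pthread_rwlock_wrlock(&l);
	assert_write_locked(&l);	/* passes: we are the writer */
	pthread_rwlock_unlock(&l);
	return 0;
}

Like the kernel version, this catches the common bug (the caller forgot the lock) but cannot distinguish "I hold the write lock" from "someone else does".
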
96733@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
96734 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
96735 *
96736 */
96737-pgprot_t protection_map[16] = {
96738+pgprot_t protection_map[16] __read_only = {
96739 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
96740 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
96741 };
96742
96743-pgprot_t vm_get_page_prot(unsigned long vm_flags)
96744+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
96745 {
96746- return __pgprot(pgprot_val(protection_map[vm_flags &
96747+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
96748 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
96749 pgprot_val(arch_vm_get_page_prot(vm_flags)));
96750+
96751+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96752+ if (!(__supported_pte_mask & _PAGE_NX) &&
96753+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
96754+ (vm_flags & (VM_READ | VM_WRITE)))
96755+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
96756+#endif
96757+
96758+ return prot;
96759 }
96760 EXPORT_SYMBOL(vm_get_page_prot);
96761
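
protection_map[] is indexed by the four low vm_flags permission bits (entries 0-7 are the private __P combinations, 8-15 the shared __S ones); marking it __read_only takes it out of reach of runtime tampering, and the PAGEEXEC fixup strips the effective execute permission on non-NX i386 when a vma is VM_PAGEEXEC without VM_EXEC. The index computation by itself, using the kernel's real bit values:

#include <stdio.h>

#define VM_READ		0x00000001UL
#define VM_WRITE	0x00000002UL
#define VM_EXEC		0x00000004UL
#define VM_SHARED	0x00000008UL

int main(void)
{
	unsigned long vm_flags = VM_READ | VM_WRITE | VM_SHARED;

	/* same lookup key as vm_get_page_prot() above */
	unsigned long idx = vm_flags &
			    (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED);
	printf("protection_map index: %lu\n", idx);	/* 11 -> __S011 */
	return 0;
}
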
96762@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
96763 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
96764 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
96765 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
96766+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
96767 /*
96768 * Make sure vm_committed_as in one cacheline and not cacheline shared with
96769 * other variables. It can be updated by several CPUs frequently.
96770@@ -152,7 +173,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
96771 */
96772 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
96773 {
96774- unsigned long free, allowed, reserve;
96775+ long free, allowed, reserve;
96776
96777 VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
96778 -(s64)vm_committed_as_batch * num_online_cpus(),
96779@@ -220,7 +241,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
96780 */
96781 if (mm) {
96782 reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
96783- allowed -= min(mm->total_vm / 32, reserve);
96784+ allowed -= min_t(long, mm->total_vm / 32, reserve);
96785 }
96786
96787 if (percpu_counter_read_positive(&vm_committed_as) < allowed)
96788@@ -274,6 +295,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
96789 struct vm_area_struct *next = vma->vm_next;
96790
96791 might_sleep();
96792+ BUG_ON(vma->vm_mirror);
96793 if (vma->vm_ops && vma->vm_ops->close)
96794 vma->vm_ops->close(vma);
96795 if (vma->vm_file)
96796@@ -287,6 +309,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
96797
96798 SYSCALL_DEFINE1(brk, unsigned long, brk)
96799 {
96800+ unsigned long rlim;
96801 unsigned long retval;
96802 unsigned long newbrk, oldbrk;
96803 struct mm_struct *mm = current->mm;
96804@@ -317,7 +340,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
96805 * segment grow beyond its set limit the in case where the limit is
96806 * not page aligned -Ram Gupta
96807 */
96808- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
96809+ rlim = rlimit(RLIMIT_DATA);
96810+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96811+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
96812+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
96813+ rlim = 4096 * PAGE_SIZE;
96814+#endif
96815+ if (check_data_rlimit(rlim, brk, mm->start_brk,
96816 mm->end_data, mm->start_data))
96817 goto out;
96818
96819@@ -978,6 +1007,12 @@ static int
96820 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
96821 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96822 {
96823+
96824+#ifdef CONFIG_PAX_SEGMEXEC
96825+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
96826+ return 0;
96827+#endif
96828+
96829 if (is_mergeable_vma(vma, file, vm_flags) &&
96830 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96831 if (vma->vm_pgoff == vm_pgoff)
96832@@ -997,6 +1032,12 @@ static int
96833 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96834 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96835 {
96836+
96837+#ifdef CONFIG_PAX_SEGMEXEC
96838+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
96839+ return 0;
96840+#endif
96841+
96842 if (is_mergeable_vma(vma, file, vm_flags) &&
96843 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96844 pgoff_t vm_pglen;
96845@@ -1046,6 +1087,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96846 struct vm_area_struct *area, *next;
96847 int err;
96848
96849+#ifdef CONFIG_PAX_SEGMEXEC
96850+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
96851+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
96852+
96853+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
96854+#endif
96855+
96856 /*
96857 * We later require that vma->vm_flags == vm_flags,
96858 * so this tests vma->vm_flags & VM_SPECIAL, too.
96859@@ -1061,6 +1109,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96860 if (next && next->vm_end == end) /* cases 6, 7, 8 */
96861 next = next->vm_next;
96862
96863+#ifdef CONFIG_PAX_SEGMEXEC
96864+ if (prev)
96865+ prev_m = pax_find_mirror_vma(prev);
96866+ if (area)
96867+ area_m = pax_find_mirror_vma(area);
96868+ if (next)
96869+ next_m = pax_find_mirror_vma(next);
96870+#endif
96871+
96872 /*
96873 * Can it merge with the predecessor?
96874 */
96875@@ -1080,9 +1137,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96876 /* cases 1, 6 */
96877 err = vma_adjust(prev, prev->vm_start,
96878 next->vm_end, prev->vm_pgoff, NULL);
96879- } else /* cases 2, 5, 7 */
96880+
96881+#ifdef CONFIG_PAX_SEGMEXEC
96882+ if (!err && prev_m)
96883+ err = vma_adjust(prev_m, prev_m->vm_start,
96884+ next_m->vm_end, prev_m->vm_pgoff, NULL);
96885+#endif
96886+
96887+ } else { /* cases 2, 5, 7 */
96888 err = vma_adjust(prev, prev->vm_start,
96889 end, prev->vm_pgoff, NULL);
96890+
96891+#ifdef CONFIG_PAX_SEGMEXEC
96892+ if (!err && prev_m)
96893+ err = vma_adjust(prev_m, prev_m->vm_start,
96894+ end_m, prev_m->vm_pgoff, NULL);
96895+#endif
96896+
96897+ }
96898 if (err)
96899 return NULL;
96900 khugepaged_enter_vma_merge(prev, vm_flags);
96901@@ -1096,12 +1168,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96902 mpol_equal(policy, vma_policy(next)) &&
96903 can_vma_merge_before(next, vm_flags,
96904 anon_vma, file, pgoff+pglen)) {
96905- if (prev && addr < prev->vm_end) /* case 4 */
96906+ if (prev && addr < prev->vm_end) { /* case 4 */
96907 err = vma_adjust(prev, prev->vm_start,
96908 addr, prev->vm_pgoff, NULL);
96909- else /* cases 3, 8 */
96910+
96911+#ifdef CONFIG_PAX_SEGMEXEC
96912+ if (!err && prev_m)
96913+ err = vma_adjust(prev_m, prev_m->vm_start,
96914+ addr_m, prev_m->vm_pgoff, NULL);
96915+#endif
96916+
96917+ } else { /* cases 3, 8 */
96918 err = vma_adjust(area, addr, next->vm_end,
96919 next->vm_pgoff - pglen, NULL);
96920+
96921+#ifdef CONFIG_PAX_SEGMEXEC
96922+ if (!err && area_m)
96923+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
96924+ next_m->vm_pgoff - pglen, NULL);
96925+#endif
96926+
96927+ }
96928 if (err)
96929 return NULL;
96930 khugepaged_enter_vma_merge(area, vm_flags);
96931@@ -1210,8 +1297,10 @@ none:
96932 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
96933 struct file *file, long pages)
96934 {
96935- const unsigned long stack_flags
96936- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
96937+
96938+#ifdef CONFIG_PAX_RANDMMAP
96939+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
96940+#endif
96941
96942 mm->total_vm += pages;
96943
96944@@ -1219,7 +1308,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
96945 mm->shared_vm += pages;
96946 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
96947 mm->exec_vm += pages;
96948- } else if (flags & stack_flags)
96949+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
96950 mm->stack_vm += pages;
96951 }
96952 #endif /* CONFIG_PROC_FS */
96953@@ -1249,6 +1338,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
96954 locked += mm->locked_vm;
96955 lock_limit = rlimit(RLIMIT_MEMLOCK);
96956 lock_limit >>= PAGE_SHIFT;
96957+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
96958 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
96959 return -EAGAIN;
96960 }
96961@@ -1275,7 +1365,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96962 * (the exception is when the underlying filesystem is noexec
96963 * mounted, in which case we dont add PROT_EXEC.)
96964 */
96965- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
96966+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
96967 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
96968 prot |= PROT_EXEC;
96969
96970@@ -1301,7 +1391,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96971 /* Obtain the address to map to. we verify (or select) it and ensure
96972 * that it represents a valid section of the address space.
96973 */
96974- addr = get_unmapped_area(file, addr, len, pgoff, flags);
96975+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
96976 if (addr & ~PAGE_MASK)
96977 return addr;
96978
96979@@ -1312,6 +1402,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96980 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
96981 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
96982
96983+#ifdef CONFIG_PAX_MPROTECT
96984+ if (mm->pax_flags & MF_PAX_MPROTECT) {
96985+
96986+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
96987+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
96988+ mm->binfmt->handle_mmap)
96989+ mm->binfmt->handle_mmap(file);
96990+#endif
96991+
96992+#ifndef CONFIG_PAX_MPROTECT_COMPAT
96993+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
96994+ gr_log_rwxmmap(file);
96995+
96996+#ifdef CONFIG_PAX_EMUPLT
96997+ vm_flags &= ~VM_EXEC;
96998+#else
96999+ return -EPERM;
97000+#endif
97001+
97002+ }
97003+
97004+ if (!(vm_flags & VM_EXEC))
97005+ vm_flags &= ~VM_MAYEXEC;
97006+#else
97007+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97008+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97009+#endif
97010+ else
97011+ vm_flags &= ~VM_MAYWRITE;
97012+ }
97013+#endif
97014+
97015+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97016+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97017+ vm_flags &= ~VM_PAGEEXEC;
97018+#endif
97019+
97020 if (flags & MAP_LOCKED)
97021 if (!can_do_mlock())
97022 return -EPERM;
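
The PAX_MPROTECT block above enforces W^X at map time: a request for simultaneously writable and executable memory is logged and rejected with -EPERM (or, with EMUPLT, silently loses VM_EXEC), and clearing VM_MAYEXEC/VM_MAYWRITE guarantees a later mprotect() cannot re-add the bit that was withheld. The policy is directly observable from user space:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* refused (or stripped of PROT_EXEC) under PAX_MPROTECT,
	 * granted on a stock kernel */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	printf("RWX mapping: %s\n", p == MAP_FAILED ? "denied" : "granted");
	return 0;
}
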
97023@@ -1399,6 +1526,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97024 vm_flags |= VM_NORESERVE;
97025 }
97026
97027+ if (!gr_acl_handle_mmap(file, prot))
97028+ return -EACCES;
97029+
97030 addr = mmap_region(file, addr, len, vm_flags, pgoff);
97031 if (!IS_ERR_VALUE(addr) &&
97032 ((vm_flags & VM_LOCKED) ||
97033@@ -1492,7 +1622,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
97034 vm_flags_t vm_flags = vma->vm_flags;
97035
97036 /* If it was private or non-writable, the write bit is already clear */
97037- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
97038+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
97039 return 0;
97040
97041 /* The backer wishes to know when pages are first written to? */
97042@@ -1543,7 +1673,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97043 struct rb_node **rb_link, *rb_parent;
97044 unsigned long charged = 0;
97045
97046+#ifdef CONFIG_PAX_SEGMEXEC
97047+ struct vm_area_struct *vma_m = NULL;
97048+#endif
97049+
97050+ /*
97051+ * mm->mmap_sem is required to protect against another thread
97052+ * changing the mappings in case we sleep.
97053+ */
97054+ verify_mm_writelocked(mm);
97055+
97056 /* Check against address space limit. */
97057+
97058+#ifdef CONFIG_PAX_RANDMMAP
97059+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97060+#endif
97061+
97062 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
97063 unsigned long nr_pages;
97064
97065@@ -1562,11 +1707,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97066
97067 /* Clear old maps */
97068 error = -ENOMEM;
97069-munmap_back:
97070 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97071 if (do_munmap(mm, addr, len))
97072 return -ENOMEM;
97073- goto munmap_back;
97074+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97075 }
97076
97077 /*
97078@@ -1597,6 +1741,16 @@ munmap_back:
97079 goto unacct_error;
97080 }
97081
97082+#ifdef CONFIG_PAX_SEGMEXEC
97083+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97084+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97085+ if (!vma_m) {
97086+ error = -ENOMEM;
97087+ goto free_vma;
97088+ }
97089+ }
97090+#endif
97091+
97092 vma->vm_mm = mm;
97093 vma->vm_start = addr;
97094 vma->vm_end = addr + len;
97095@@ -1627,6 +1781,13 @@ munmap_back:
97096 if (error)
97097 goto unmap_and_free_vma;
97098
97099+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97100+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97101+ vma->vm_flags |= VM_PAGEEXEC;
97102+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97103+ }
97104+#endif
97105+
97106 /* Can addr have changed??
97107 *
97108 * Answer: Yes, several device drivers can do it in their
97109@@ -1645,6 +1806,12 @@ munmap_back:
97110 }
97111
97112 vma_link(mm, vma, prev, rb_link, rb_parent);
97113+
97114+#ifdef CONFIG_PAX_SEGMEXEC
97115+ if (vma_m)
97116+ BUG_ON(pax_mirror_vma(vma_m, vma));
97117+#endif
97118+
97119 /* Once vma denies write, undo our temporary denial count */
97120 if (file) {
97121 if (vm_flags & VM_SHARED)
97122@@ -1657,6 +1824,7 @@ out:
97123 perf_event_mmap(vma);
97124
97125 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97126+ track_exec_limit(mm, addr, addr + len, vm_flags);
97127 if (vm_flags & VM_LOCKED) {
97128 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
97129 vma == get_gate_vma(current->mm)))
97130@@ -1694,6 +1862,12 @@ allow_write_and_free_vma:
97131 if (vm_flags & VM_DENYWRITE)
97132 allow_write_access(file);
97133 free_vma:
97134+
97135+#ifdef CONFIG_PAX_SEGMEXEC
97136+ if (vma_m)
97137+ kmem_cache_free(vm_area_cachep, vma_m);
97138+#endif
97139+
97140 kmem_cache_free(vm_area_cachep, vma);
97141 unacct_error:
97142 if (charged)
97143@@ -1701,7 +1875,63 @@ unacct_error:
97144 return error;
97145 }
97146
97147-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97148+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
97149+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
97150+{
97151+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
97152+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
97153+
97154+ return 0;
97155+}
97156+#endif
97157+
97158+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
97159+{
97160+ if (!vma) {
97161+#ifdef CONFIG_STACK_GROWSUP
97162+ if (addr > sysctl_heap_stack_gap)
97163+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97164+ else
97165+ vma = find_vma(current->mm, 0);
97166+ if (vma && (vma->vm_flags & VM_GROWSUP))
97167+ return false;
97168+#endif
97169+ return true;
97170+ }
97171+
97172+ if (addr + len > vma->vm_start)
97173+ return false;
97174+
97175+ if (vma->vm_flags & VM_GROWSDOWN)
97176+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97177+#ifdef CONFIG_STACK_GROWSUP
97178+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97179+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
97180+#endif
97181+ else if (offset)
97182+ return offset <= vma->vm_start - addr - len;
97183+
97184+ return true;
97185+}
97186+
97187+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
97188+{
97189+ if (vma->vm_start < len)
97190+ return -ENOMEM;
97191+
97192+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
97193+ if (offset <= vma->vm_start - len)
97194+ return vma->vm_start - len - offset;
97195+ else
97196+ return -ENOMEM;
97197+ }
97198+
97199+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
97200+ return vma->vm_start - len - sysctl_heap_stack_gap;
97201+ return -ENOMEM;
97202+}
97203+
97204+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
97205 {
97206 /*
97207 * We implement the search by looking for an rbtree node that
97208@@ -1749,11 +1979,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97209 }
97210 }
97211
97212- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
97213+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
97214 check_current:
97215 /* Check if current node has a suitable gap */
97216 if (gap_start > high_limit)
97217 return -ENOMEM;
97218+
97219+ if (gap_end - gap_start > info->threadstack_offset)
97220+ gap_start += info->threadstack_offset;
97221+ else
97222+ gap_start = gap_end;
97223+
97224+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97225+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97226+ gap_start += sysctl_heap_stack_gap;
97227+ else
97228+ gap_start = gap_end;
97229+ }
97230+ if (vma->vm_flags & VM_GROWSDOWN) {
97231+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97232+ gap_end -= sysctl_heap_stack_gap;
97233+ else
97234+ gap_end = gap_start;
97235+ }
97236 if (gap_end >= low_limit && gap_end - gap_start >= length)
97237 goto found;
97238
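
check_heap_stack_gap() and the bookkeeping just added to unmapped_area() implement the sysctl_heap_stack_gap policy (64 KiB by default, per the sysctl added earlier in this file's diff): a new mapping may neither overlap a following vma nor end inside the guard gap below a stack that grows down, and every candidate gap in the search loop is shrunk accordingly. The core rule, restated standalone and omitting the GROWSUP and threadstack-offset cases (parameter names are mine):

#include <stdbool.h>

#define HEAP_STACK_GAP (64UL * 1024)	/* the sysctl's default */

/* may [addr, addr+len) be placed below a vma starting at vma_start? */
static bool gap_ok(unsigned long addr, unsigned long len,
		   unsigned long vma_start, bool vma_grows_down)
{
	if (addr + len > vma_start)		/* would overlap */
		return false;
	if (vma_grows_down)			/* keep the guard gap */
		return HEAP_STACK_GAP <= vma_start - addr - len;
	return true;
}
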
97239@@ -1803,7 +2051,7 @@ found:
97240 return gap_start;
97241 }
97242
97243-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
97244+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
97245 {
97246 struct mm_struct *mm = current->mm;
97247 struct vm_area_struct *vma;
97248@@ -1857,6 +2105,24 @@ check_current:
97249 gap_end = vma->vm_start;
97250 if (gap_end < low_limit)
97251 return -ENOMEM;
97252+
97253+ if (gap_end - gap_start > info->threadstack_offset)
97254+ gap_end -= info->threadstack_offset;
97255+ else
97256+ gap_end = gap_start;
97257+
97258+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97259+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97260+ gap_start += sysctl_heap_stack_gap;
97261+ else
97262+ gap_start = gap_end;
97263+ }
97264+ if (vma->vm_flags & VM_GROWSDOWN) {
97265+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97266+ gap_end -= sysctl_heap_stack_gap;
97267+ else
97268+ gap_end = gap_start;
97269+ }
97270 if (gap_start <= high_limit && gap_end - gap_start >= length)
97271 goto found;
97272
97273@@ -1920,6 +2186,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97274 struct mm_struct *mm = current->mm;
97275 struct vm_area_struct *vma;
97276 struct vm_unmapped_area_info info;
97277+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97278
97279 if (len > TASK_SIZE - mmap_min_addr)
97280 return -ENOMEM;
97281@@ -1927,11 +2194,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97282 if (flags & MAP_FIXED)
97283 return addr;
97284
97285+#ifdef CONFIG_PAX_RANDMMAP
97286+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97287+#endif
97288+
97289 if (addr) {
97290 addr = PAGE_ALIGN(addr);
97291 vma = find_vma(mm, addr);
97292 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97293- (!vma || addr + len <= vma->vm_start))
97294+ check_heap_stack_gap(vma, addr, len, offset))
97295 return addr;
97296 }
97297
97298@@ -1940,6 +2211,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97299 info.low_limit = mm->mmap_base;
97300 info.high_limit = TASK_SIZE;
97301 info.align_mask = 0;
97302+ info.threadstack_offset = offset;
97303 return vm_unmapped_area(&info);
97304 }
97305 #endif
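
The threadstack_offset that arch_get_unmapped_area() now passes down comes from gr_rand_threadstack_offset(), defined earlier in this mm/mmap.c diff: for MAP_STACK requests under RANDMMAP it yields a random slack of 1 to 256 pages, i.e. 4 KiB to 1 MiB with 4 KiB pages, so thread stacks no longer sit at predictable distances from each other. The same arithmetic, with rand() standing in for the kernel's prandom_u32():

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */

int main(void)
{
	srand(time(NULL));

	/* ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT, as in the patch */
	unsigned long off = (((unsigned long)rand() & 0xFF) + 1) << PAGE_SHIFT;

	printf("thread stack offset: %lu KiB\n", off >> 10);	/* 4..1024 */
	return 0;
}
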
97306@@ -1958,6 +2230,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97307 struct mm_struct *mm = current->mm;
97308 unsigned long addr = addr0;
97309 struct vm_unmapped_area_info info;
97310+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97311
97312 /* requested length too big for entire address space */
97313 if (len > TASK_SIZE - mmap_min_addr)
97314@@ -1966,12 +2239,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97315 if (flags & MAP_FIXED)
97316 return addr;
97317
97318+#ifdef CONFIG_PAX_RANDMMAP
97319+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97320+#endif
97321+
97322 /* requesting a specific address */
97323 if (addr) {
97324 addr = PAGE_ALIGN(addr);
97325 vma = find_vma(mm, addr);
97326 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97327- (!vma || addr + len <= vma->vm_start))
97328+ check_heap_stack_gap(vma, addr, len, offset))
97329 return addr;
97330 }
97331
97332@@ -1980,6 +2257,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97333 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
97334 info.high_limit = mm->mmap_base;
97335 info.align_mask = 0;
97336+ info.threadstack_offset = offset;
97337 addr = vm_unmapped_area(&info);
97338
97339 /*
97340@@ -1992,6 +2270,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97341 VM_BUG_ON(addr != -ENOMEM);
97342 info.flags = 0;
97343 info.low_limit = TASK_UNMAPPED_BASE;
97344+
97345+#ifdef CONFIG_PAX_RANDMMAP
97346+ if (mm->pax_flags & MF_PAX_RANDMMAP)
97347+ info.low_limit += mm->delta_mmap;
97348+#endif
97349+
97350 info.high_limit = TASK_SIZE;
97351 addr = vm_unmapped_area(&info);
97352 }
97353@@ -2092,6 +2376,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
97354 return vma;
97355 }
97356
97357+#ifdef CONFIG_PAX_SEGMEXEC
97358+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
97359+{
97360+ struct vm_area_struct *vma_m;
97361+
97362+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
97363+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
97364+ BUG_ON(vma->vm_mirror);
97365+ return NULL;
97366+ }
97367+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
97368+ vma_m = vma->vm_mirror;
97369+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
97370+ BUG_ON(vma->vm_file != vma_m->vm_file);
97371+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
97372+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
97373+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
97374+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
97375+ return vma_m;
97376+}
97377+#endif
97378+
97379 /*
97380 * Verify that the stack growth is acceptable and
97381 * update accounting. This is shared with both the
97382@@ -2109,8 +2415,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97383
97384 /* Stack limit test */
97385 actual_size = size;
97386- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
97387- actual_size -= PAGE_SIZE;
97388+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
97389 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
97390 return -ENOMEM;
97391
97392@@ -2121,6 +2426,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97393 locked = mm->locked_vm + grow;
97394 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
97395 limit >>= PAGE_SHIFT;
97396+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97397 if (locked > limit && !capable(CAP_IPC_LOCK))
97398 return -ENOMEM;
97399 }
97400@@ -2150,37 +2456,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97401 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
97402 * vma is the last one with address > vma->vm_end. Have to extend vma.
97403 */
97404+#ifndef CONFIG_IA64
97405+static
97406+#endif
97407 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97408 {
97409 int error;
97410+ bool locknext;
97411
97412 if (!(vma->vm_flags & VM_GROWSUP))
97413 return -EFAULT;
97414
97415+ /* Also guard against wrapping around to address 0. */
97416+ if (address < PAGE_ALIGN(address+1))
97417+ address = PAGE_ALIGN(address+1);
97418+ else
97419+ return -ENOMEM;
97420+
97421 /*
97422 * We must make sure the anon_vma is allocated
97423 * so that the anon_vma locking is not a noop.
97424 */
97425 if (unlikely(anon_vma_prepare(vma)))
97426 return -ENOMEM;
97427+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
97428+ if (locknext && anon_vma_prepare(vma->vm_next))
97429+ return -ENOMEM;
97430 vma_lock_anon_vma(vma);
97431+ if (locknext)
97432+ vma_lock_anon_vma(vma->vm_next);
97433
97434 /*
97435 * vma->vm_start/vm_end cannot change under us because the caller
97436 * is required to hold the mmap_sem in read mode. We need the
97437- * anon_vma lock to serialize against concurrent expand_stacks.
97438- * Also guard against wrapping around to address 0.
97439+ * anon_vma locks to serialize against concurrent expand_stacks
97440+ * and expand_upwards.
97441 */
97442- if (address < PAGE_ALIGN(address+4))
97443- address = PAGE_ALIGN(address+4);
97444- else {
97445- vma_unlock_anon_vma(vma);
97446- return -ENOMEM;
97447- }
97448 error = 0;
97449
97450 /* Somebody else might have raced and expanded it already */
97451- if (address > vma->vm_end) {
97452+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
97453+ error = -ENOMEM;
97454+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
97455 unsigned long size, grow;
97456
97457 size = address - vma->vm_start;
97458@@ -2215,6 +2532,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97459 }
97460 }
97461 }
97462+ if (locknext)
97463+ vma_unlock_anon_vma(vma->vm_next);
97464 vma_unlock_anon_vma(vma);
97465 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97466 validate_mm(vma->vm_mm);
97467@@ -2229,6 +2548,8 @@ int expand_downwards(struct vm_area_struct *vma,
97468 unsigned long address)
97469 {
97470 int error;
97471+ bool lockprev = false;
97472+ struct vm_area_struct *prev;
97473
97474 /*
97475 * We must make sure the anon_vma is allocated
97476@@ -2242,6 +2563,15 @@ int expand_downwards(struct vm_area_struct *vma,
97477 if (error)
97478 return error;
97479
97480+ prev = vma->vm_prev;
97481+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97482+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97483+#endif
97484+ if (lockprev && anon_vma_prepare(prev))
97485+ return -ENOMEM;
97486+ if (lockprev)
97487+ vma_lock_anon_vma(prev);
97488+
97489 vma_lock_anon_vma(vma);
97490
97491 /*
97492@@ -2251,9 +2581,17 @@ int expand_downwards(struct vm_area_struct *vma,
97493 */
97494
97495 /* Somebody else might have raced and expanded it already */
97496- if (address < vma->vm_start) {
97497+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97498+ error = -ENOMEM;
97499+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97500 unsigned long size, grow;
97501
97502+#ifdef CONFIG_PAX_SEGMEXEC
97503+ struct vm_area_struct *vma_m;
97504+
97505+ vma_m = pax_find_mirror_vma(vma);
97506+#endif
97507+
97508 size = vma->vm_end - address;
97509 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97510
97511@@ -2278,13 +2616,27 @@ int expand_downwards(struct vm_area_struct *vma,
97512 vma->vm_pgoff -= grow;
97513 anon_vma_interval_tree_post_update_vma(vma);
97514 vma_gap_update(vma);
97515+
97516+#ifdef CONFIG_PAX_SEGMEXEC
97517+ if (vma_m) {
97518+ anon_vma_interval_tree_pre_update_vma(vma_m);
97519+ vma_m->vm_start -= grow << PAGE_SHIFT;
97520+ vma_m->vm_pgoff -= grow;
97521+ anon_vma_interval_tree_post_update_vma(vma_m);
97522+ vma_gap_update(vma_m);
97523+ }
97524+#endif
97525+
97526 spin_unlock(&vma->vm_mm->page_table_lock);
97527
97528+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97529 perf_event_mmap(vma);
97530 }
97531 }
97532 }
97533 vma_unlock_anon_vma(vma);
97534+ if (lockprev)
97535+ vma_unlock_anon_vma(prev);
97536 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97537 validate_mm(vma->vm_mm);
97538 return error;
97539@@ -2384,6 +2736,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97540 do {
97541 long nrpages = vma_pages(vma);
97542
97543+#ifdef CONFIG_PAX_SEGMEXEC
97544+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97545+ vma = remove_vma(vma);
97546+ continue;
97547+ }
97548+#endif
97549+
97550 if (vma->vm_flags & VM_ACCOUNT)
97551 nr_accounted += nrpages;
97552 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97553@@ -2428,6 +2787,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97554 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97555 vma->vm_prev = NULL;
97556 do {
97557+
97558+#ifdef CONFIG_PAX_SEGMEXEC
97559+ if (vma->vm_mirror) {
97560+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97561+ vma->vm_mirror->vm_mirror = NULL;
97562+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
97563+ vma->vm_mirror = NULL;
97564+ }
97565+#endif
97566+
97567 vma_rb_erase(vma, &mm->mm_rb);
97568 mm->map_count--;
97569 tail_vma = vma;
97570@@ -2455,14 +2824,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97571 struct vm_area_struct *new;
97572 int err = -ENOMEM;
97573
97574+#ifdef CONFIG_PAX_SEGMEXEC
97575+ struct vm_area_struct *vma_m, *new_m = NULL;
97576+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97577+#endif
97578+
97579 if (is_vm_hugetlb_page(vma) && (addr &
97580 ~(huge_page_mask(hstate_vma(vma)))))
97581 return -EINVAL;
97582
97583+#ifdef CONFIG_PAX_SEGMEXEC
97584+ vma_m = pax_find_mirror_vma(vma);
97585+#endif
97586+
97587 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97588 if (!new)
97589 goto out_err;
97590
97591+#ifdef CONFIG_PAX_SEGMEXEC
97592+ if (vma_m) {
97593+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97594+ if (!new_m) {
97595+ kmem_cache_free(vm_area_cachep, new);
97596+ goto out_err;
97597+ }
97598+ }
97599+#endif
97600+
97601 /* most fields are the same, copy all, and then fixup */
97602 *new = *vma;
97603
97604@@ -2475,6 +2863,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97605 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97606 }
97607
97608+#ifdef CONFIG_PAX_SEGMEXEC
97609+ if (vma_m) {
97610+ *new_m = *vma_m;
97611+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
97612+ new_m->vm_mirror = new;
97613+ new->vm_mirror = new_m;
97614+
97615+ if (new_below)
97616+ new_m->vm_end = addr_m;
97617+ else {
97618+ new_m->vm_start = addr_m;
97619+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
97620+ }
97621+ }
97622+#endif
97623+
97624 err = vma_dup_policy(vma, new);
97625 if (err)
97626 goto out_free_vma;
97627@@ -2495,6 +2899,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97628 else
97629 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
97630
97631+#ifdef CONFIG_PAX_SEGMEXEC
97632+ if (!err && vma_m) {
97633+ struct mempolicy *pol = vma_policy(new);
97634+
97635+ if (anon_vma_clone(new_m, vma_m))
97636+ goto out_free_mpol;
97637+
97638+ mpol_get(pol);
97639+ set_vma_policy(new_m, pol);
97640+
97641+ if (new_m->vm_file)
97642+ get_file(new_m->vm_file);
97643+
97644+ if (new_m->vm_ops && new_m->vm_ops->open)
97645+ new_m->vm_ops->open(new_m);
97646+
97647+ if (new_below)
97648+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
97649+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
97650+ else
97651+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
97652+
97653+ if (err) {
97654+ if (new_m->vm_ops && new_m->vm_ops->close)
97655+ new_m->vm_ops->close(new_m);
97656+ if (new_m->vm_file)
97657+ fput(new_m->vm_file);
97658+ mpol_put(pol);
97659+ }
97660+ }
97661+#endif
97662+
97663 /* Success. */
97664 if (!err)
97665 return 0;
97666@@ -2504,10 +2940,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97667 new->vm_ops->close(new);
97668 if (new->vm_file)
97669 fput(new->vm_file);
97670- unlink_anon_vmas(new);
97671 out_free_mpol:
97672 mpol_put(vma_policy(new));
97673 out_free_vma:
97674+
97675+#ifdef CONFIG_PAX_SEGMEXEC
97676+ if (new_m) {
97677+ unlink_anon_vmas(new_m);
97678+ kmem_cache_free(vm_area_cachep, new_m);
97679+ }
97680+#endif
97681+
97682+ unlink_anon_vmas(new);
97683 kmem_cache_free(vm_area_cachep, new);
97684 out_err:
97685 return err;
97686@@ -2520,6 +2964,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97687 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97688 unsigned long addr, int new_below)
97689 {
97690+
97691+#ifdef CONFIG_PAX_SEGMEXEC
97692+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97693+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
97694+ if (mm->map_count >= sysctl_max_map_count-1)
97695+ return -ENOMEM;
97696+ } else
97697+#endif
97698+
97699 if (mm->map_count >= sysctl_max_map_count)
97700 return -ENOMEM;
97701
97702@@ -2531,11 +2984,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97703 * work. This now handles partial unmappings.
97704 * Jeremy Fitzhardinge <jeremy@goop.org>
97705 */
97706+#ifdef CONFIG_PAX_SEGMEXEC
97707 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97708 {
97709+ int ret = __do_munmap(mm, start, len);
97710+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
97711+ return ret;
97712+
97713+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
97714+}
97715+
97716+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97717+#else
97718+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97719+#endif
97720+{
97721 unsigned long end;
97722 struct vm_area_struct *vma, *prev, *last;
97723
97724+ /*
97725+ * mm->mmap_sem is required to protect against another thread
97726+ * changing the mappings in case we sleep.
97727+ */
97728+ verify_mm_writelocked(mm);
97729+
97730 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
97731 return -EINVAL;
97732
97733@@ -2613,6 +3085,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97734 /* Fix up all other VM information */
97735 remove_vma_list(mm, vma);
97736
97737+ track_exec_limit(mm, start, end, 0UL);
97738+
97739 return 0;
97740 }
97741
97742@@ -2621,6 +3095,13 @@ int vm_munmap(unsigned long start, size_t len)
97743 int ret;
97744 struct mm_struct *mm = current->mm;
97745
97746+
97747+#ifdef CONFIG_PAX_SEGMEXEC
97748+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
97749+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
97750+ return -EINVAL;
97751+#endif
97752+
97753 down_write(&mm->mmap_sem);
97754 ret = do_munmap(mm, start, len);
97755 up_write(&mm->mmap_sem);
97756@@ -2634,16 +3115,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
97757 return vm_munmap(addr, len);
97758 }
97759
97760-static inline void verify_mm_writelocked(struct mm_struct *mm)
97761-{
97762-#ifdef CONFIG_DEBUG_VM
97763- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97764- WARN_ON(1);
97765- up_read(&mm->mmap_sem);
97766- }
97767-#endif
97768-}
97769-
97770 /*
97771 * this is really a simplified "do_mmap". it only handles
97772 * anonymous maps. eventually we may be able to do some
97773@@ -2657,6 +3128,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97774 struct rb_node **rb_link, *rb_parent;
97775 pgoff_t pgoff = addr >> PAGE_SHIFT;
97776 int error;
97777+ unsigned long charged;
97778
97779 len = PAGE_ALIGN(len);
97780 if (!len)
97781@@ -2664,10 +3136,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97782
97783 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
97784
97785+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
97786+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
97787+ flags &= ~VM_EXEC;
97788+
97789+#ifdef CONFIG_PAX_MPROTECT
97790+ if (mm->pax_flags & MF_PAX_MPROTECT)
97791+ flags &= ~VM_MAYEXEC;
97792+#endif
97793+
97794+ }
97795+#endif
97796+
97797 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
97798 if (error & ~PAGE_MASK)
97799 return error;
97800
97801+ charged = len >> PAGE_SHIFT;
97802+
97803 error = mlock_future_check(mm, mm->def_flags, len);
97804 if (error)
97805 return error;
97806@@ -2681,21 +3167,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97807 /*
97808 * Clear old maps. this also does some error checking for us
97809 */
97810- munmap_back:
97811 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97812 if (do_munmap(mm, addr, len))
97813 return -ENOMEM;
97814- goto munmap_back;
97815+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97816 }
97817
97818 /* Check against address space limits *after* clearing old maps... */
97819- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
97820+ if (!may_expand_vm(mm, charged))
97821 return -ENOMEM;
97822
97823 if (mm->map_count > sysctl_max_map_count)
97824 return -ENOMEM;
97825
97826- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
97827+ if (security_vm_enough_memory_mm(mm, charged))
97828 return -ENOMEM;
97829
97830 /* Can we just expand an old private anonymous mapping? */
97831@@ -2709,7 +3194,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97832 */
97833 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97834 if (!vma) {
97835- vm_unacct_memory(len >> PAGE_SHIFT);
97836+ vm_unacct_memory(charged);
97837 return -ENOMEM;
97838 }
97839
97840@@ -2723,10 +3208,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97841 vma_link(mm, vma, prev, rb_link, rb_parent);
97842 out:
97843 perf_event_mmap(vma);
97844- mm->total_vm += len >> PAGE_SHIFT;
97845+ mm->total_vm += charged;
97846 if (flags & VM_LOCKED)
97847- mm->locked_vm += (len >> PAGE_SHIFT);
97848+ mm->locked_vm += charged;
97849 vma->vm_flags |= VM_SOFTDIRTY;
97850+ track_exec_limit(mm, addr, addr + len, flags);
97851 return addr;
97852 }
97853
97854@@ -2788,6 +3274,7 @@ void exit_mmap(struct mm_struct *mm)
97855 while (vma) {
97856 if (vma->vm_flags & VM_ACCOUNT)
97857 nr_accounted += vma_pages(vma);
97858+ vma->vm_mirror = NULL;
97859 vma = remove_vma(vma);
97860 }
97861 vm_unacct_memory(nr_accounted);
97862@@ -2805,6 +3292,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
97863 struct vm_area_struct *prev;
97864 struct rb_node **rb_link, *rb_parent;
97865
97866+#ifdef CONFIG_PAX_SEGMEXEC
97867+ struct vm_area_struct *vma_m = NULL;
97868+#endif
97869+
97870+ if (security_mmap_addr(vma->vm_start))
97871+ return -EPERM;
97872+
97873 /*
97874 * The vm_pgoff of a purely anonymous vma should be irrelevant
97875 * until its first write fault, when page's anon_vma and index
97876@@ -2828,7 +3322,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
97877 security_vm_enough_memory_mm(mm, vma_pages(vma)))
97878 return -ENOMEM;
97879
97880+#ifdef CONFIG_PAX_SEGMEXEC
97881+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
97882+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97883+ if (!vma_m)
97884+ return -ENOMEM;
97885+ }
97886+#endif
97887+
97888 vma_link(mm, vma, prev, rb_link, rb_parent);
97889+
97890+#ifdef CONFIG_PAX_SEGMEXEC
97891+ if (vma_m)
97892+ BUG_ON(pax_mirror_vma(vma_m, vma));
97893+#endif
97894+
97895 return 0;
97896 }
97897
97898@@ -2847,6 +3355,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
97899 struct rb_node **rb_link, *rb_parent;
97900 bool faulted_in_anon_vma = true;
97901
97902+ BUG_ON(vma->vm_mirror);
97903+
97904 /*
97905 * If anonymous vma has not yet been faulted, update new pgoff
97906 * to match new location, to increase its chance of merging.
97907@@ -2911,6 +3421,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
97908 return NULL;
97909 }
97910
97911+#ifdef CONFIG_PAX_SEGMEXEC
97912+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
97913+{
97914+ struct vm_area_struct *prev_m;
97915+ struct rb_node **rb_link_m, *rb_parent_m;
97916+ struct mempolicy *pol_m;
97917+
97918+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
97919+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
97920+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
97921+ *vma_m = *vma;
97922+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
97923+ if (anon_vma_clone(vma_m, vma))
97924+ return -ENOMEM;
97925+ pol_m = vma_policy(vma_m);
97926+ mpol_get(pol_m);
97927+ set_vma_policy(vma_m, pol_m);
97928+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
97929+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
97930+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
97931+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
97932+ if (vma_m->vm_file)
97933+ get_file(vma_m->vm_file);
97934+ if (vma_m->vm_ops && vma_m->vm_ops->open)
97935+ vma_m->vm_ops->open(vma_m);
97936+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
97937+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
97938+ vma_m->vm_mirror = vma;
97939+ vma->vm_mirror = vma_m;
97940+ return 0;
97941+}
97942+#endif
97943+
97944 /*
97945 * Return true if the calling process may expand its vm space by the passed
97946 * number of pages
97947@@ -2922,6 +3465,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
97948
97949 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
97950
97951+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
97952 if (cur + npages > lim)
97953 return 0;
97954 return 1;
97955@@ -3004,6 +3548,22 @@ static struct vm_area_struct *__install_special_mapping(
97956 vma->vm_start = addr;
97957 vma->vm_end = addr + len;
97958
97959+#ifdef CONFIG_PAX_MPROTECT
97960+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97961+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97962+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
97963+ return ERR_PTR(-EPERM);
97964+ if (!(vm_flags & VM_EXEC))
97965+ vm_flags &= ~VM_MAYEXEC;
97966+#else
97967+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97968+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97969+#endif
97970+ else
97971+ vm_flags &= ~VM_MAYWRITE;
97972+ }
97973+#endif
97974+
97975 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
97976 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97977
97978diff --git a/mm/mprotect.c b/mm/mprotect.c
97979index ace9345..63320dc 100644
97980--- a/mm/mprotect.c
97981+++ b/mm/mprotect.c
97982@@ -24,10 +24,18 @@
97983 #include <linux/migrate.h>
97984 #include <linux/perf_event.h>
97985 #include <linux/ksm.h>
97986+#include <linux/sched/sysctl.h>
97987+
97988+#ifdef CONFIG_PAX_MPROTECT
97989+#include <linux/elf.h>
97990+#include <linux/binfmts.h>
97991+#endif
97992+
97993 #include <asm/uaccess.h>
97994 #include <asm/pgtable.h>
97995 #include <asm/cacheflush.h>
97996 #include <asm/tlbflush.h>
97997+#include <asm/mmu_context.h>
97998
97999 /*
98000 * For a prot_numa update we only hold mmap_sem for read so there is a
98001@@ -251,6 +259,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
98002 return pages;
98003 }
98004
98005+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98006+/* called while holding the mmap semaphore for writing, except during stack expansion */
98007+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
98008+{
98009+ unsigned long oldlimit, newlimit = 0UL;
98010+
98011+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
98012+ return;
98013+
98014+ spin_lock(&mm->page_table_lock);
98015+ oldlimit = mm->context.user_cs_limit;
98016+ if ((prot & VM_EXEC) && oldlimit < end)
98017+ /* USER_CS limit moved up */
98018+ newlimit = end;
98019+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98020+ /* USER_CS limit moved down */
98021+ newlimit = start;
98022+
98023+ if (newlimit) {
98024+ mm->context.user_cs_limit = newlimit;
98025+
98026+#ifdef CONFIG_SMP
98027+ wmb();
98028+ cpus_clear(mm->context.cpu_user_cs_mask);
98029+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98030+#endif
98031+
98032+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98033+ }
98034+ spin_unlock(&mm->page_table_lock);
98035+ if (newlimit == end) {
98036+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
98037+
98038+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
98039+ if (is_vm_hugetlb_page(vma))
98040+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98041+ else
98042+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
98043+ }
98044+}
98045+#endif
98046+
98047 int
98048 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98049 unsigned long start, unsigned long end, unsigned long newflags)
98050@@ -263,11 +313,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98051 int error;
98052 int dirty_accountable = 0;
98053
98054+#ifdef CONFIG_PAX_SEGMEXEC
98055+ struct vm_area_struct *vma_m = NULL;
98056+ unsigned long start_m, end_m;
98057+
98058+ start_m = start + SEGMEXEC_TASK_SIZE;
98059+ end_m = end + SEGMEXEC_TASK_SIZE;
98060+#endif
98061+
98062 if (newflags == oldflags) {
98063 *pprev = vma;
98064 return 0;
98065 }
98066
98067+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98068+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98069+
98070+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98071+ return -ENOMEM;
98072+
98073+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98074+ return -ENOMEM;
98075+ }
98076+
98077 /*
98078 * If we make a private mapping writable we increase our commit;
98079 * but (without finer accounting) cannot reduce our commit if we
98080@@ -284,6 +352,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98081 }
98082 }
98083
98084+#ifdef CONFIG_PAX_SEGMEXEC
98085+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98086+ if (start != vma->vm_start) {
98087+ error = split_vma(mm, vma, start, 1);
98088+ if (error)
98089+ goto fail;
98090+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98091+ *pprev = (*pprev)->vm_next;
98092+ }
98093+
98094+ if (end != vma->vm_end) {
98095+ error = split_vma(mm, vma, end, 0);
98096+ if (error)
98097+ goto fail;
98098+ }
98099+
98100+ if (pax_find_mirror_vma(vma)) {
98101+ error = __do_munmap(mm, start_m, end_m - start_m);
98102+ if (error)
98103+ goto fail;
98104+ } else {
98105+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98106+ if (!vma_m) {
98107+ error = -ENOMEM;
98108+ goto fail;
98109+ }
98110+ vma->vm_flags = newflags;
98111+ error = pax_mirror_vma(vma_m, vma);
98112+ if (error) {
98113+ vma->vm_flags = oldflags;
98114+ goto fail;
98115+ }
98116+ }
98117+ }
98118+#endif
98119+
98120 /*
98121 * First try to merge with previous and/or next vma.
98122 */
98123@@ -314,7 +418,19 @@ success:
98124 * vm_flags and vm_page_prot are protected by the mmap_sem
98125 * held in write mode.
98126 */
98127+
98128+#ifdef CONFIG_PAX_SEGMEXEC
98129+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98130+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98131+#endif
98132+
98133 vma->vm_flags = newflags;
98134+
98135+#ifdef CONFIG_PAX_MPROTECT
98136+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98137+ mm->binfmt->handle_mprotect(vma, newflags);
98138+#endif
98139+
98140 dirty_accountable = vma_wants_writenotify(vma);
98141 vma_set_page_prot(vma);
98142
98143@@ -350,6 +466,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98144 end = start + len;
98145 if (end <= start)
98146 return -ENOMEM;
98147+
98148+#ifdef CONFIG_PAX_SEGMEXEC
98149+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98150+ if (end > SEGMEXEC_TASK_SIZE)
98151+ return -EINVAL;
98152+ } else
98153+#endif
98154+
98155+ if (end > TASK_SIZE)
98156+ return -EINVAL;
98157+
98158 if (!arch_validate_prot(prot))
98159 return -EINVAL;
98160
98161@@ -357,7 +484,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98162 /*
98163 * Does the application expect PROT_READ to imply PROT_EXEC:
98164 */
98165- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98166+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98167 prot |= PROT_EXEC;
98168
98169 vm_flags = calc_vm_prot_bits(prot);
98170@@ -389,6 +516,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98171 if (start > vma->vm_start)
98172 prev = vma;
98173
98174+#ifdef CONFIG_PAX_MPROTECT
98175+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98176+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
98177+#endif
98178+
98179 for (nstart = start ; ; ) {
98180 unsigned long newflags;
98181
98182@@ -399,6 +531,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98183
98184 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98185 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98186+ if (prot & (PROT_WRITE | PROT_EXEC))
98187+ gr_log_rwxmprotect(vma);
98188+
98189+ error = -EACCES;
98190+ goto out;
98191+ }
98192+
98193+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
98194 error = -EACCES;
98195 goto out;
98196 }
98197@@ -413,6 +553,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98198 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
98199 if (error)
98200 goto out;
98201+
98202+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
98203+
98204 nstart = tmp;
98205
98206 if (nstart < prev->vm_end)
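track_exec_limit() above emulates NX on CPUs without hardware support by tracking the highest executable address in mm->context.user_cs_limit: the limit moves up to end when an executable range extends past it, and down to start when the range holding it loses VM_EXEC. A standalone sketch of just that decision, with the locking, per-CPU mask update and segment reload omitted:

/*
 * Minimal sketch of the new-limit decision in track_exec_limit() above,
 * lifted out of the kernel context so it can be unit-tested.
 */
#include <assert.h>

#define VM_EXEC 0x4UL  /* matches the kernel flag bit */

static unsigned long new_cs_limit(unsigned long oldlimit, unsigned long start,
                                  unsigned long end, unsigned long prot)
{
    if ((prot & VM_EXEC) && oldlimit < end)
        return end;     /* USER_CS limit moves up */
    if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
        return start;   /* USER_CS limit moves down */
    return 0;           /* unchanged */
}

int main(void)
{
    /* exec mapping beyond the old limit raises it to its end */
    assert(new_cs_limit(0x1000, 0x2000, 0x3000, VM_EXEC) == 0x3000);
    /* dropping exec on the range holding the limit lowers it */
    assert(new_cs_limit(0x2800, 0x2000, 0x3000, 0) == 0x2000);
    /* non-exec change fully below the limit leaves it alone */
    assert(new_cs_limit(0x3000, 0x1000, 0x2000, 0) == 0);
    return 0;
}
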
98207diff --git a/mm/mremap.c b/mm/mremap.c
98208index 17fa018..6f7892b 100644
98209--- a/mm/mremap.c
98210+++ b/mm/mremap.c
98211@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
98212 continue;
98213 pte = ptep_get_and_clear(mm, old_addr, old_pte);
98214 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
98215+
98216+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98217+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
98218+ pte = pte_exprotect(pte);
98219+#endif
98220+
98221 pte = move_soft_dirty_pte(pte);
98222 set_pte_at(mm, new_addr, new_pte, pte);
98223 }
98224@@ -346,6 +352,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
98225 if (is_vm_hugetlb_page(vma))
98226 goto Einval;
98227
98228+#ifdef CONFIG_PAX_SEGMEXEC
98229+ if (pax_find_mirror_vma(vma))
98230+ goto Einval;
98231+#endif
98232+
98233 /* We can't remap across vm area boundaries */
98234 if (old_len > vma->vm_end - addr)
98235 goto Efault;
98236@@ -401,20 +412,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
98237 unsigned long ret = -EINVAL;
98238 unsigned long charged = 0;
98239 unsigned long map_flags;
98240+ unsigned long pax_task_size = TASK_SIZE;
98241
98242 if (new_addr & ~PAGE_MASK)
98243 goto out;
98244
98245- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
98246+#ifdef CONFIG_PAX_SEGMEXEC
98247+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98248+ pax_task_size = SEGMEXEC_TASK_SIZE;
98249+#endif
98250+
98251+ pax_task_size -= PAGE_SIZE;
98252+
98253+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
98254 goto out;
98255
98256 /* Check if the location we're moving into overlaps the
98257 * old location at all, and fail if it does.
98258 */
98259- if ((new_addr <= addr) && (new_addr+new_len) > addr)
98260- goto out;
98261-
98262- if ((addr <= new_addr) && (addr+old_len) > new_addr)
98263+ if (addr + old_len > new_addr && new_addr + new_len > addr)
98264 goto out;
98265
98266 ret = do_munmap(mm, new_addr, new_len);
98267@@ -483,6 +499,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98268 unsigned long ret = -EINVAL;
98269 unsigned long charged = 0;
98270 bool locked = false;
98271+ unsigned long pax_task_size = TASK_SIZE;
98272
98273 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
98274 return ret;
98275@@ -504,6 +521,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98276 if (!new_len)
98277 return ret;
98278
98279+#ifdef CONFIG_PAX_SEGMEXEC
98280+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98281+ pax_task_size = SEGMEXEC_TASK_SIZE;
98282+#endif
98283+
98284+ pax_task_size -= PAGE_SIZE;
98285+
98286+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
98287+ old_len > pax_task_size || addr > pax_task_size-old_len)
98288+ return ret;
98289+
98290 down_write(&current->mm->mmap_sem);
98291
98292 if (flags & MREMAP_FIXED) {
98293@@ -554,6 +582,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98294 new_addr = addr;
98295 }
98296 ret = addr;
98297+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
98298 goto out;
98299 }
98300 }
98301@@ -577,7 +606,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98302 goto out;
98303 }
98304
98305+ map_flags = vma->vm_flags;
98306 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
98307+ if (!(ret & ~PAGE_MASK)) {
98308+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
98309+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
98310+ }
98311 }
98312 out:
98313 if (ret & ~PAGE_MASK)
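The mremap_to() hunk above folds two one-sided overlap checks into the canonical interval-overlap predicate. A small harness, brute-forcing short ranges, confirms the two forms agree:

/*
 * The two overlap tests from the mremap_to() hunk above: the removed
 * pair of one-sided checks and the single predicate that replaced
 * them. This harness verifies they are equivalent for all small
 * non-empty ranges.
 */
#include <assert.h>

static int overlaps_old(unsigned long addr, unsigned long old_len,
                        unsigned long new_addr, unsigned long new_len)
{
    if (new_addr <= addr && new_addr + new_len > addr)
        return 1;
    if (addr <= new_addr && addr + old_len > new_addr)
        return 1;
    return 0;
}

static int overlaps_new(unsigned long addr, unsigned long old_len,
                        unsigned long new_addr, unsigned long new_len)
{
    return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
    unsigned long a, b, la, lb;

    for (a = 0; a < 8; a++)
        for (la = 1; la < 8; la++)
            for (b = 0; b < 8; b++)
                for (lb = 1; lb < 8; lb++)
                    assert(overlaps_old(a, la, b, lb) ==
                           overlaps_new(a, la, b, lb));
    return 0;
}
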
98314diff --git a/mm/nommu.c b/mm/nommu.c
98315index 28bd8c4..98a6fe3 100644
98316--- a/mm/nommu.c
98317+++ b/mm/nommu.c
98318@@ -71,7 +71,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
98319 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
98320 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
98321 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
98322-int heap_stack_gap = 0;
98323
98324 atomic_long_t mmap_pages_allocated;
98325
98326@@ -858,15 +857,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
98327 EXPORT_SYMBOL(find_vma);
98328
98329 /*
98330- * find a VMA
98331- * - we don't extend stack VMAs under NOMMU conditions
98332- */
98333-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
98334-{
98335- return find_vma(mm, addr);
98336-}
98337-
98338-/*
98339 * expand a stack to a given address
98340 * - not supported under NOMMU conditions
98341 */
98342@@ -1562,6 +1552,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98343
98344 /* most fields are the same, copy all, and then fixup */
98345 *new = *vma;
98346+ INIT_LIST_HEAD(&new->anon_vma_chain);
98347 *region = *vma->vm_region;
98348 new->vm_region = region;
98349
98350@@ -1895,7 +1886,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
98351 */
98352 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
98353 {
98354- unsigned long free, allowed, reserve;
98355+ long free, allowed, reserve;
98356
98357 vm_acct_memory(pages);
98358
98359@@ -1959,7 +1950,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
98360 */
98361 if (mm) {
98362 reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
98363- allowed -= min(mm->total_vm / 32, reserve);
98364+ allowed -= min_t(long, mm->total_vm / 32, reserve);
98365 }
98366
98367 if (percpu_counter_read_positive(&vm_committed_as) < allowed)
98368@@ -1992,8 +1983,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
98369 }
98370 EXPORT_SYMBOL(generic_file_remap_pages);
98371
98372-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98373- unsigned long addr, void *buf, int len, int write)
98374+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98375+ unsigned long addr, void *buf, size_t len, int write)
98376 {
98377 struct vm_area_struct *vma;
98378
98379@@ -2034,8 +2025,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98380 *
98381 * The caller must hold a reference on @mm.
98382 */
98383-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98384- void *buf, int len, int write)
98385+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
98386+ void *buf, size_t len, int write)
98387 {
98388 return __access_remote_vm(NULL, mm, addr, buf, len, write);
98389 }
98390@@ -2044,7 +2035,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98391 * Access another process' address space.
98392 * - source/target buffer must be kernel space
98393 */
98394-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
98395+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
98396 {
98397 struct mm_struct *mm;
98398
98399diff --git a/mm/page-writeback.c b/mm/page-writeback.c
98400index 6f43352..e44bf41 100644
98401--- a/mm/page-writeback.c
98402+++ b/mm/page-writeback.c
98403@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
98404 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
98405 * - the bdi dirty thresh drops quickly due to change of JBOD workload
98406 */
98407-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
98408+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
98409 unsigned long thresh,
98410 unsigned long bg_thresh,
98411 unsigned long dirty,
98412diff --git a/mm/page_alloc.c b/mm/page_alloc.c
98413index 8e20f9c..e235009 100644
98414--- a/mm/page_alloc.c
98415+++ b/mm/page_alloc.c
98416@@ -60,6 +60,7 @@
98417 #include <linux/hugetlb.h>
98418 #include <linux/sched/rt.h>
98419 #include <linux/page_owner.h>
98420+#include <linux/random.h>
98421
98422 #include <asm/sections.h>
98423 #include <asm/tlbflush.h>
98424@@ -358,7 +359,7 @@ out:
98425 * This usage means that zero-order pages may not be compound.
98426 */
98427
98428-static void free_compound_page(struct page *page)
98429+void free_compound_page(struct page *page)
98430 {
98431 __free_pages_ok(page, compound_order(page));
98432 }
98433@@ -511,7 +512,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
98434 __mod_zone_freepage_state(zone, (1 << order), migratetype);
98435 }
98436 #else
98437-struct page_ext_operations debug_guardpage_ops = { NULL, };
98438+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
98439 static inline void set_page_guard(struct zone *zone, struct page *page,
98440 unsigned int order, int migratetype) {}
98441 static inline void clear_page_guard(struct zone *zone, struct page *page,
98442@@ -802,6 +803,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98443 int i;
98444 int bad = 0;
98445
98446+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98447+ unsigned long index = 1UL << order;
98448+#endif
98449+
98450 VM_BUG_ON_PAGE(PageTail(page), page);
98451 VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);
98452
98453@@ -823,6 +828,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98454 debug_check_no_obj_freed(page_address(page),
98455 PAGE_SIZE << order);
98456 }
98457+
98458+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98459+ for (; index; --index)
98460+ sanitize_highpage(page + index - 1);
98461+#endif
98462+
98463 arch_free_page(page, order);
98464 kernel_map_pages(page, 1 << order, 0);
98465
98466@@ -846,6 +857,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98467 local_irq_restore(flags);
98468 }
98469
98470+#ifdef CONFIG_PAX_LATENT_ENTROPY
98471+bool __meminitdata extra_latent_entropy;
98472+
98473+static int __init setup_pax_extra_latent_entropy(char *str)
98474+{
98475+ extra_latent_entropy = true;
98476+ return 0;
98477+}
98478+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
98479+
98480+volatile u64 latent_entropy __latent_entropy;
98481+EXPORT_SYMBOL(latent_entropy);
98482+#endif
98483+
98484 void __init __free_pages_bootmem(struct page *page, unsigned int order)
98485 {
98486 unsigned int nr_pages = 1 << order;
98487@@ -861,6 +886,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
98488 __ClearPageReserved(p);
98489 set_page_count(p, 0);
98490
98491+#ifdef CONFIG_PAX_LATENT_ENTROPY
98492+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
98493+ u64 hash = 0;
98494+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
98495+ const u64 *data = lowmem_page_address(page);
98496+
98497+ for (index = 0; index < end; index++)
98498+ hash ^= hash + data[index];
98499+ latent_entropy ^= hash;
98500+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
98501+ }
98502+#endif
98503+
98504 page_zone(page)->managed_pages += nr_pages;
98505 set_page_refcounted(page);
98506 __free_pages(page, order);
98507@@ -986,8 +1024,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
98508 arch_alloc_page(page, order);
98509 kernel_map_pages(page, 1 << order, 1);
98510
98511+#ifndef CONFIG_PAX_MEMORY_SANITIZE
98512 if (gfp_flags & __GFP_ZERO)
98513 prep_zero_page(page, order, gfp_flags);
98514+#endif
98515
98516 if (order && (gfp_flags & __GFP_COMP))
98517 prep_compound_page(page, order);
98518@@ -1702,7 +1742,7 @@ again:
98519 }
98520
98521 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
98522- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98523+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98524 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
98525 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98526
98527@@ -2023,7 +2063,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
98528 do {
98529 mod_zone_page_state(zone, NR_ALLOC_BATCH,
98530 high_wmark_pages(zone) - low_wmark_pages(zone) -
98531- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98532+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98533 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98534 } while (zone++ != preferred_zone);
98535 }
98536@@ -2382,8 +2422,15 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
98537 if (high_zoneidx < ZONE_NORMAL)
98538 goto out;
98539 /* The OOM killer does not compensate for light reclaim */
98540- if (!(gfp_mask & __GFP_FS))
98541+ if (!(gfp_mask & __GFP_FS)) {
98542+ /*
98543+ * XXX: Page reclaim didn't yield anything,
98544+ * and the OOM killer can't be invoked, but
98545+ * keep looping as per should_alloc_retry().
98546+ */
98547+ *did_some_progress = 1;
98548 goto out;
98549+ }
98550 /*
98551 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
98552 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
98553@@ -5776,7 +5823,7 @@ static void __setup_per_zone_wmarks(void)
98554
98555 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
98556 high_wmark_pages(zone) - low_wmark_pages(zone) -
98557- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98558+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98559
98560 setup_zone_migrate_reserve(zone);
98561 spin_unlock_irqrestore(&zone->lock, flags);
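The PAX_LATENT_ENTROPY hunk above folds each 64-bit word of early boot pages into a running hash and mixes the result into the entropy pool via add_device_randomness(). A standalone sketch of the mixing loop, with a static buffer standing in for lowmem_page_address(page):

/*
 * Standalone sketch of the PAX_LATENT_ENTROPY mixing loop from
 * __free_pages_bootmem() above: each 64-bit word of a page is folded
 * into a running hash with `hash ^= hash + word`.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096

static uint64_t fold_pages(const uint64_t *data, unsigned int nr_pages)
{
    uint64_t hash = 0;
    size_t index, end = (size_t)PAGE_SIZE * nr_pages / sizeof(hash);

    for (index = 0; index < end; index++)
        hash ^= hash + data[index];
    return hash;
}

int main(void)
{
    static uint64_t page[PAGE_SIZE / sizeof(uint64_t)];
    size_t i;

    for (i = 0; i < sizeof(page) / sizeof(page[0]); i++)
        page[i] = i;    /* pretend the page held boot-time data */

    printf("latent entropy contribution: %#llx\n",
           (unsigned long long)fold_pages(page, 1));
    return 0;
}
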
98562diff --git a/mm/percpu.c b/mm/percpu.c
98563index d39e2f4..de5f4b4 100644
98564--- a/mm/percpu.c
98565+++ b/mm/percpu.c
98566@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
98567 static unsigned int pcpu_high_unit_cpu __read_mostly;
98568
98569 /* the address of the first chunk which starts with the kernel static area */
98570-void *pcpu_base_addr __read_mostly;
98571+void *pcpu_base_addr __read_only;
98572 EXPORT_SYMBOL_GPL(pcpu_base_addr);
98573
98574 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
98575diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
98576index 5077afc..846c9ef 100644
98577--- a/mm/process_vm_access.c
98578+++ b/mm/process_vm_access.c
98579@@ -13,6 +13,7 @@
98580 #include <linux/uio.h>
98581 #include <linux/sched.h>
98582 #include <linux/highmem.h>
98583+#include <linux/security.h>
98584 #include <linux/ptrace.h>
98585 #include <linux/slab.h>
98586 #include <linux/syscalls.h>
98587@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98588 ssize_t iov_len;
98589 size_t total_len = iov_iter_count(iter);
98590
98591+ return -ENOSYS; // PaX: until properly audited
98592+
98593 /*
98594 * Work out how many pages of struct pages we're going to need
98595 * when eventually calling get_user_pages
98596 */
98597 for (i = 0; i < riovcnt; i++) {
98598 iov_len = rvec[i].iov_len;
98599- if (iov_len > 0) {
98600- nr_pages_iov = ((unsigned long)rvec[i].iov_base
98601- + iov_len)
98602- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
98603- / PAGE_SIZE + 1;
98604- nr_pages = max(nr_pages, nr_pages_iov);
98605- }
98606+ if (iov_len <= 0)
98607+ continue;
98608+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
98609+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
98610+ nr_pages = max(nr_pages, nr_pages_iov);
98611 }
98612
98613 if (nr_pages == 0)
98614@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98615 goto free_proc_pages;
98616 }
98617
98618+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
98619+ rc = -EPERM;
98620+ goto put_task_struct;
98621+ }
98622+
98623 mm = mm_access(task, PTRACE_MODE_ATTACH);
98624 if (!mm || IS_ERR(mm)) {
98625 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
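The restructured loop in process_vm_rw_core() above computes an upper bound on the page frames an iovec spans. A standalone version of the formula with a few worked cases; note it uses base + len rather than base + len - 1, so it can overestimate by one page when the buffer ends exactly on a page boundary, which is harmless since the result only sizes the pinned-page array:

/*
 * Standalone version of the iovec page-count estimate from
 * process_vm_rw_core() above.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
    return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
    printf("%lu\n", pages_spanned(0x0fff, 2));      /* crosses a boundary: 2 */
    printf("%lu\n", pages_spanned(0x1000, 16));     /* within one page:    1 */
    printf("%lu\n", pages_spanned(0x1000, 0x1000)); /* upper bound:        2 */
    return 0;
}
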
98626diff --git a/mm/rmap.c b/mm/rmap.c
98627index 71cd5bd..e259089 100644
98628--- a/mm/rmap.c
98629+++ b/mm/rmap.c
98630@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98631 struct anon_vma *anon_vma = vma->anon_vma;
98632 struct anon_vma_chain *avc;
98633
98634+#ifdef CONFIG_PAX_SEGMEXEC
98635+ struct anon_vma_chain *avc_m = NULL;
98636+#endif
98637+
98638 might_sleep();
98639 if (unlikely(!anon_vma)) {
98640 struct mm_struct *mm = vma->vm_mm;
98641@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98642 if (!avc)
98643 goto out_enomem;
98644
98645+#ifdef CONFIG_PAX_SEGMEXEC
98646+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
98647+ if (!avc_m)
98648+ goto out_enomem_free_avc;
98649+#endif
98650+
98651 anon_vma = find_mergeable_anon_vma(vma);
98652 allocated = NULL;
98653 if (!anon_vma) {
98654@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98655 /* page_table_lock to protect against threads */
98656 spin_lock(&mm->page_table_lock);
98657 if (likely(!vma->anon_vma)) {
98658+
98659+#ifdef CONFIG_PAX_SEGMEXEC
98660+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
98661+
98662+ if (vma_m) {
98663+ BUG_ON(vma_m->anon_vma);
98664+ vma_m->anon_vma = anon_vma;
98665+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
98666+ anon_vma->degree++;
98667+ avc_m = NULL;
98668+ }
98669+#endif
98670+
98671 vma->anon_vma = anon_vma;
98672 anon_vma_chain_link(vma, avc, anon_vma);
98673 /* vma reference or self-parent link for new root */
98674@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98675
98676 if (unlikely(allocated))
98677 put_anon_vma(allocated);
98678+
98679+#ifdef CONFIG_PAX_SEGMEXEC
98680+ if (unlikely(avc_m))
98681+ anon_vma_chain_free(avc_m);
98682+#endif
98683+
98684 if (unlikely(avc))
98685 anon_vma_chain_free(avc);
98686 }
98687 return 0;
98688
98689 out_enomem_free_avc:
98690+
98691+#ifdef CONFIG_PAX_SEGMEXEC
98692+ if (avc_m)
98693+ anon_vma_chain_free(avc_m);
98694+#endif
98695+
98696 anon_vma_chain_free(avc);
98697 out_enomem:
98698 return -ENOMEM;
98699@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
98700 * good chance of avoiding scanning the whole hierarchy when it searches where
98701 * page is mapped.
98702 */
98703-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98704+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
98705 {
98706 struct anon_vma_chain *avc, *pavc;
98707 struct anon_vma *root = NULL;
98708@@ -296,7 +331,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98709 * the corresponding VMA in the parent process is attached to.
98710 * Returns 0 on success, non-zero on failure.
98711 */
98712-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
98713+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
98714 {
98715 struct anon_vma_chain *avc;
98716 struct anon_vma *anon_vma;
98717@@ -416,8 +451,10 @@ static void anon_vma_ctor(void *data)
98718 void __init anon_vma_init(void)
98719 {
98720 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
98721- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
98722- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
98723+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
98724+ anon_vma_ctor);
98725+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
98726+ SLAB_PANIC|SLAB_NO_SANITIZE);
98727 }
98728
98729 /*
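The anon_vma_prepare() hunks above pre-allocate the mirror's anon_vma_chain with GFP_KERNEL before taking page_table_lock, publish it under the lock only if the vma still lacks an anon_vma, and free it on the way out if it went unused. A generic sketch of that allocate-outside-lock pattern; the names here are illustrative, not kernel API:

/*
 * Sketch of the pattern the anon_vma_prepare() hunks above follow:
 * sleeping allocations happen before the spinlock, publication happens
 * under it, and losers of the race free their unused allocation after.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *vma_anon;      /* stands in for vma->anon_vma */
static void *vma_m_anon;    /* stands in for the SEGMEXEC mirror's */

static int prepare(int have_mirror)
{
    /* GFP_KERNEL-style allocations must happen before the spinlock */
    void *avc = malloc(32);
    void *avc_m = have_mirror ? malloc(32) : NULL;

    if (!avc || (have_mirror && !avc_m)) {
        free(avc);
        free(avc_m);
        return -1;          /* -ENOMEM in the kernel code */
    }

    pthread_mutex_lock(&page_table_lock);
    if (!vma_anon) {        /* we won the race: publish ours */
        vma_anon = avc;
        avc = NULL;
        if (avc_m) {
            vma_m_anon = avc_m;
            avc_m = NULL;
        }
    }
    pthread_mutex_unlock(&page_table_lock);

    free(avc);              /* NULL if published above */
    free(avc_m);
    return 0;
}

int main(void)
{
    return prepare(1) ? 1 : 0;
}
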
98730diff --git a/mm/shmem.c b/mm/shmem.c
98731index 993e6ba..a962ba3 100644
98732--- a/mm/shmem.c
98733+++ b/mm/shmem.c
98734@@ -33,7 +33,7 @@
98735 #include <linux/swap.h>
98736 #include <linux/aio.h>
98737
98738-static struct vfsmount *shm_mnt;
98739+struct vfsmount *shm_mnt;
98740
98741 #ifdef CONFIG_SHMEM
98742 /*
98743@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
98744 #define BOGO_DIRENT_SIZE 20
98745
98746 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
98747-#define SHORT_SYMLINK_LEN 128
98748+#define SHORT_SYMLINK_LEN 64
98749
98750 /*
98751 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
98752@@ -2558,6 +2558,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
98753 static int shmem_xattr_validate(const char *name)
98754 {
98755 struct { const char *prefix; size_t len; } arr[] = {
98756+
98757+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98758+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
98759+#endif
98760+
98761 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
98762 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
98763 };
98764@@ -2613,6 +2618,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
98765 if (err)
98766 return err;
98767
98768+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98769+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
98770+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
98771+ return -EOPNOTSUPP;
98772+ if (size > 8)
98773+ return -EINVAL;
98774+ }
98775+#endif
98776+
98777 return simple_xattr_set(&info->xattrs, name, value, size, flags);
98778 }
98779
98780@@ -2996,8 +3010,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
98781 int err = -ENOMEM;
98782
98783 /* Round up to L1_CACHE_BYTES to resist false sharing */
98784- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
98785- L1_CACHE_BYTES), GFP_KERNEL);
98786+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
98787 if (!sbinfo)
98788 return -ENOMEM;
98789
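shmem_setxattr() above accepts user.* attributes only for the PaX flags xattr and caps the value at 8 bytes. A hedged userspace sketch of setting it with setxattr(2); the path and flag string below are illustrative:

/*
 * Userspace sketch of setting the PaX flags xattr that the
 * shmem_setxattr() hunk above whitelists. The attribute name matches
 * XATTR_NAME_PAX_FLAGS in the patch ("user.pax.flags"); values longer
 * than 8 bytes are rejected with -EINVAL by the kernel code above.
 */
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "/tmp/testbin";
    const char *flags = "em";   /* illustrative PaX flag letters */

    if (setxattr(path, "user.pax.flags", flags, strlen(flags), 0)) {
        perror("setxattr");
        return 1;
    }
    return 0;
}
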
98790diff --git a/mm/slab.c b/mm/slab.c
98791index 65b5dcb..d53d866 100644
98792--- a/mm/slab.c
98793+++ b/mm/slab.c
98794@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98795 if ((x)->max_freeable < i) \
98796 (x)->max_freeable = i; \
98797 } while (0)
98798-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
98799-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
98800-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
98801-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
98802+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
98803+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
98804+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
98805+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
98806+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
98807+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
98808 #else
98809 #define STATS_INC_ACTIVE(x) do { } while (0)
98810 #define STATS_DEC_ACTIVE(x) do { } while (0)
98811@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98812 #define STATS_INC_ALLOCMISS(x) do { } while (0)
98813 #define STATS_INC_FREEHIT(x) do { } while (0)
98814 #define STATS_INC_FREEMISS(x) do { } while (0)
98815+#define STATS_INC_SANITIZED(x) do { } while (0)
98816+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
98817 #endif
98818
98819 #if DEBUG
98820@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
98821 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
98822 */
98823 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
98824- const struct page *page, void *obj)
98825+ const struct page *page, const void *obj)
98826 {
98827 u32 offset = (obj - page->s_mem);
98828 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
98829@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
98830 * structures first. Without this, further allocations will bug.
98831 */
98832 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
98833- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
98834+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
98835 slab_state = PARTIAL_NODE;
98836
98837 slab_early_init = 0;
98838@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
98839
98840 cachep = find_mergeable(size, align, flags, name, ctor);
98841 if (cachep) {
98842- cachep->refcount++;
98843+ atomic_inc(&cachep->refcount);
98844
98845 /*
98846 * Adjust the object sizes so that we clear
98847@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
98848 struct array_cache *ac = cpu_cache_get(cachep);
98849
98850 check_irq_off();
98851+
98852+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98853+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
98854+ STATS_INC_NOT_SANITIZED(cachep);
98855+ else {
98856+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
98857+
98858+ if (cachep->ctor)
98859+ cachep->ctor(objp);
98860+
98861+ STATS_INC_SANITIZED(cachep);
98862+ }
98863+#endif
98864+
98865 kmemleak_free_recursive(objp, cachep->flags);
98866 objp = cache_free_debugcheck(cachep, objp, caller);
98867
98868@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
98869 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
98870 }
98871
98872-void *__kmalloc_node(size_t size, gfp_t flags, int node)
98873+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
98874 {
98875 return __do_kmalloc_node(size, flags, node, _RET_IP_);
98876 }
98877@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
98878 * @flags: the type of memory to allocate (see kmalloc).
98879 * @caller: function caller for debug tracking of the caller
98880 */
98881-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
98882+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
98883 unsigned long caller)
98884 {
98885 struct kmem_cache *cachep;
98886@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
98887
98888 if (unlikely(ZERO_OR_NULL_PTR(objp)))
98889 return;
98890+ VM_BUG_ON(!virt_addr_valid(objp));
98891 local_irq_save(flags);
98892 kfree_debugcheck(objp);
98893 c = virt_to_cache(objp);
98894@@ -3984,14 +4003,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
98895 }
98896 /* cpu stats */
98897 {
98898- unsigned long allochit = atomic_read(&cachep->allochit);
98899- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
98900- unsigned long freehit = atomic_read(&cachep->freehit);
98901- unsigned long freemiss = atomic_read(&cachep->freemiss);
98902+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
98903+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
98904+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
98905+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
98906
98907 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
98908 allochit, allocmiss, freehit, freemiss);
98909 }
98910+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98911+ {
98912+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
98913+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
98914+
98915+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
98916+ }
98917+#endif
98918 #endif
98919 }
98920
98921@@ -4199,13 +4226,69 @@ static const struct file_operations proc_slabstats_operations = {
98922 static int __init slab_proc_init(void)
98923 {
98924 #ifdef CONFIG_DEBUG_SLAB_LEAK
98925- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
98926+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
98927 #endif
98928 return 0;
98929 }
98930 module_init(slab_proc_init);
98931 #endif
98932
98933+bool is_usercopy_object(const void *ptr)
98934+{
98935+ struct page *page;
98936+ struct kmem_cache *cachep;
98937+
98938+ if (ZERO_OR_NULL_PTR(ptr))
98939+ return false;
98940+
98941+ if (!slab_is_available())
98942+ return false;
98943+
98944+ if (!virt_addr_valid(ptr))
98945+ return false;
98946+
98947+ page = virt_to_head_page(ptr);
98948+
98949+ if (!PageSlab(page))
98950+ return false;
98951+
98952+ cachep = page->slab_cache;
98953+ return cachep->flags & SLAB_USERCOPY;
98954+}
98955+
98956+#ifdef CONFIG_PAX_USERCOPY
98957+const char *check_heap_object(const void *ptr, unsigned long n)
98958+{
98959+ struct page *page;
98960+ struct kmem_cache *cachep;
98961+ unsigned int objnr;
98962+ unsigned long offset;
98963+
98964+ if (ZERO_OR_NULL_PTR(ptr))
98965+ return "<null>";
98966+
98967+ if (!virt_addr_valid(ptr))
98968+ return NULL;
98969+
98970+ page = virt_to_head_page(ptr);
98971+
98972+ if (!PageSlab(page))
98973+ return NULL;
98974+
98975+ cachep = page->slab_cache;
98976+ if (!(cachep->flags & SLAB_USERCOPY))
98977+ return cachep->name;
98978+
98979+ objnr = obj_to_index(cachep, page, ptr);
98980+ BUG_ON(objnr >= cachep->num);
98981+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
98982+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
98983+ return NULL;
98984+
98985+ return cachep->name;
98986+}
98987+#endif
98988+
98989 /**
98990 * ksize - get the actual amount of memory allocated for a given object
98991 * @objp: Pointer to the object
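check_heap_object() above validates the copy span with offset <= object_size && n <= object_size - offset, which is immune to the unsigned wraparound that the naive offset + n <= object_size form suffers. A standalone version with the failing case spelled out:

/*
 * Standalone version of the bounds test used by check_heap_object()
 * above. Checking `offset <= size` first makes `size - offset` safe,
 * so no addition that could wrap is ever performed.
 */
#include <assert.h>
#include <limits.h>

static int span_ok(unsigned long offset, unsigned long n, unsigned long size)
{
    return offset <= size && n <= size - offset;
}

int main(void)
{
    assert(span_ok(0, 64, 64));     /* exact fit */
    assert(!span_ok(32, 64, 64));   /* runs past the object */
    /* naive `offset + n <= size` would wrap to 15 here and pass */
    assert(!span_ok(16, ULONG_MAX, 64));
    return 0;
}
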
98992diff --git a/mm/slab.h b/mm/slab.h
98993index 1cf40054..10ad563 100644
98994--- a/mm/slab.h
98995+++ b/mm/slab.h
98996@@ -22,7 +22,7 @@ struct kmem_cache {
98997 unsigned int align; /* Alignment as calculated */
98998 unsigned long flags; /* Active flags on the slab */
98999 const char *name; /* Slab name for sysfs */
99000- int refcount; /* Use counter */
99001+ atomic_t refcount; /* Use counter */
99002 void (*ctor)(void *); /* Called on object slot creation */
99003 struct list_head list; /* List of all slab caches on the system */
99004 };
99005@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
99006 /* The slab cache that manages slab cache information */
99007 extern struct kmem_cache *kmem_cache;
99008
99009+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99010+#ifdef CONFIG_X86_64
99011+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
99012+#else
99013+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
99014+#endif
99015+enum pax_sanitize_mode {
99016+ PAX_SANITIZE_SLAB_OFF = 0,
99017+ PAX_SANITIZE_SLAB_FAST,
99018+ PAX_SANITIZE_SLAB_FULL,
99019+};
99020+extern enum pax_sanitize_mode pax_sanitize_slab;
99021+#endif
99022+
99023 unsigned long calculate_alignment(unsigned long flags,
99024 unsigned long align, unsigned long size);
99025
99026@@ -116,7 +130,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
99027
99028 /* Legal flag mask for kmem_cache_create(), for various configurations */
99029 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
99030- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
99031+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
99032+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
99033
99034 #if defined(CONFIG_DEBUG_SLAB)
99035 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
99036@@ -300,6 +315,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
99037 return s;
99038
99039 page = virt_to_head_page(x);
99040+
99041+ BUG_ON(!PageSlab(page));
99042+
99043 cachep = page->slab_cache;
99044 if (slab_equal_or_root(cachep, s))
99045 return cachep;
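Tying the slab.h definitions above to the __cache_free() hunk in mm/slab.c: freed objects are overwritten with the poison byte and the constructor is re-run so ctor-initialized caches stay consistent. On x86_64 the 0xFE pattern yields a non-canonical pointer (0xfefefefefefefefe), so a use-after-free dereference faults immediately. A minimal sketch:

/*
 * Sketch of the free-path sanitization performed by __cache_free()
 * above: poison the dead object, then re-run the constructor.
 */
#include <stdio.h>
#include <string.h>

#ifdef __x86_64__
#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
#else
#define PAX_MEMORY_SANITIZE_VALUE '\xff'
#endif

struct obj { void *ptr; };

static void obj_ctor(void *p)   /* stands in for cachep->ctor */
{
    ((struct obj *)p)->ptr = NULL;
}

static void sanitize_free(void *p, size_t object_size, void (*ctor)(void *))
{
    memset(p, PAX_MEMORY_SANITIZE_VALUE, object_size);
    if (ctor)
        ctor(p);
}

int main(void)
{
    struct obj o = { .ptr = &o };

    sanitize_free(&o, sizeof(o), NULL);
    printf("poisoned pointer: %p\n", o.ptr);    /* 0xfefe... on x86_64 */
    sanitize_free(&o, sizeof(o), obj_ctor);
    printf("after ctor: %p\n", o.ptr);          /* NULL again */
    return 0;
}
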
99046diff --git a/mm/slab_common.c b/mm/slab_common.c
99047index e03dd6f..c475838 100644
99048--- a/mm/slab_common.c
99049+++ b/mm/slab_common.c
99050@@ -25,11 +25,35 @@
99051
99052 #include "slab.h"
99053
99054-enum slab_state slab_state;
99055+enum slab_state slab_state __read_only;
99056 LIST_HEAD(slab_caches);
99057 DEFINE_MUTEX(slab_mutex);
99058 struct kmem_cache *kmem_cache;
99059
99060+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99061+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
99062+static int __init pax_sanitize_slab_setup(char *str)
99063+{
99064+ if (!str)
99065+ return 0;
99066+
99067+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
99068+ pr_info("PaX slab sanitization: %s\n", "disabled");
99069+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
99070+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
99071+ pr_info("PaX slab sanitization: %s\n", "fast");
99072+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
99073+ } else if (!strcmp(str, "full")) {
99074+ pr_info("PaX slab sanitization: %s\n", "full");
99075+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
99076+ } else
99077+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
99078+
99079+ return 0;
99080+}
99081+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
99082+#endif
99083+
99084 /*
99085 * Set of flags that will prevent slab merging
99086 */
99087@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
99088 * Merge control. If this is set then no merging of slab caches will occur.
99089 * (Could be removed. This was introduced to pacify the merge skeptics.)
99090 */
99091-static int slab_nomerge;
99092+static int slab_nomerge = 1;
99093
99094 static int __init setup_slab_nomerge(char *str)
99095 {
99096@@ -218,7 +242,7 @@ int slab_unmergeable(struct kmem_cache *s)
99097 /*
99098 * We may have set a slab to be unmergeable during bootstrap.
99099 */
99100- if (s->refcount < 0)
99101+ if (atomic_read(&s->refcount) < 0)
99102 return 1;
99103
99104 return 0;
99105@@ -322,7 +346,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
99106 if (err)
99107 goto out_free_cache;
99108
99109- s->refcount = 1;
99110+ atomic_set(&s->refcount, 1);
99111 list_add(&s->list, &slab_caches);
99112 out:
99113 if (err)
99114@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
99115 */
99116 flags &= CACHE_CREATE_MASK;
99117
99118+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99119+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
99120+ flags |= SLAB_NO_SANITIZE;
99121+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
99122+ flags &= ~SLAB_NO_SANITIZE;
99123+#endif
99124+
99125 s = __kmem_cache_alias(name, size, align, flags, ctor);
99126 if (s)
99127 goto out_unlock;
99128@@ -505,8 +536,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99129
99130 mutex_lock(&slab_mutex);
99131
99132- s->refcount--;
99133- if (s->refcount)
99134+ if (!atomic_dec_and_test(&s->refcount))
99135 goto out_unlock;
99136
99137 if (memcg_cleanup_cache_params(s) != 0)
99138@@ -526,7 +556,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99139 rcu_barrier();
99140
99141 memcg_free_cache_params(s);
99142-#ifdef SLAB_SUPPORTS_SYSFS
99143+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99144 sysfs_slab_remove(s);
99145 #else
99146 slab_kmem_cache_release(s);
99147@@ -582,7 +612,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
99148 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
99149 name, size, err);
99150
99151- s->refcount = -1; /* Exempt from merging for now */
99152+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
99153 }
99154
99155 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99156@@ -595,7 +625,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99157
99158 create_boot_cache(s, name, size, flags);
99159 list_add(&s->list, &slab_caches);
99160- s->refcount = 1;
99161+ atomic_set(&s->refcount, 1);
99162 return s;
99163 }
99164
99165@@ -607,6 +637,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
99166 EXPORT_SYMBOL(kmalloc_dma_caches);
99167 #endif
99168
99169+#ifdef CONFIG_PAX_USERCOPY_SLABS
99170+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
99171+EXPORT_SYMBOL(kmalloc_usercopy_caches);
99172+#endif
99173+
99174 /*
99175 * Conversion table for small slabs sizes / 8 to the index in the
99176 * kmalloc array. This is necessary for slabs < 192 since we have non power
99177@@ -671,6 +706,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
99178 return kmalloc_dma_caches[index];
99179
99180 #endif
99181+
99182+#ifdef CONFIG_PAX_USERCOPY_SLABS
99183+ if (unlikely((flags & GFP_USERCOPY)))
99184+ return kmalloc_usercopy_caches[index];
99185+
99186+#endif
99187+
99188 return kmalloc_caches[index];
99189 }
99190
99191@@ -727,7 +769,7 @@ void __init create_kmalloc_caches(unsigned long flags)
99192 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
99193 if (!kmalloc_caches[i]) {
99194 kmalloc_caches[i] = create_kmalloc_cache(NULL,
99195- 1 << i, flags);
99196+ 1 << i, SLAB_USERCOPY | flags);
99197 }
99198
99199 /*
99200@@ -736,10 +778,10 @@ void __init create_kmalloc_caches(unsigned long flags)
99201 * earlier power of two caches
99202 */
99203 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
99204- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
99205+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
99206
99207 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
99208- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
99209+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
99210 }
99211
99212 /* Kmalloc array is now usable */
99213@@ -772,6 +814,23 @@ void __init create_kmalloc_caches(unsigned long flags)
99214 }
99215 }
99216 #endif
99217+
99218+#ifdef CONFIG_PAX_USERCOPY_SLABS
99219+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
99220+ struct kmem_cache *s = kmalloc_caches[i];
99221+
99222+ if (s) {
99223+ int size = kmalloc_size(i);
99224+ char *n = kasprintf(GFP_NOWAIT,
99225+ "usercopy-kmalloc-%d", size);
99226+
99227+ BUG_ON(!n);
99228+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
99229+ size, SLAB_USERCOPY | flags);
99230+ }
99231+ }
99232+#endif
99233+
99234 }
99235 #endif /* !CONFIG_SLOB */
99236
99237@@ -830,6 +889,9 @@ static void print_slabinfo_header(struct seq_file *m)
99238 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
99239 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
99240 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
99241+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99242+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
99243+#endif
99244 #endif
99245 seq_putc(m, '\n');
99246 }
99247@@ -964,7 +1026,7 @@ static int __init slab_proc_init(void)
99248 module_init(slab_proc_init);
99249 #endif /* CONFIG_SLABINFO */
99250
99251-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
99252+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
99253 gfp_t flags)
99254 {
99255 void *ret;
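
The slab_common.c hunks above do two things: they convert the cache refcount to atomic_t so concurrent create/destroy paths cannot race on a plain integer, and they route kmalloc() requests carrying GFP_USERCOPY into a parallel set of "usercopy-kmalloc-*" caches, so objects that may be copied to or from userland never share a slab with unrelated kernel objects. A minimal user-space sketch of the cache-selection logic (SHIFT_HIGH, FLAG_USERCOPY and print_cache are illustrative stand-ins, not kernel symbols):

#include <stdio.h>

#define SHIFT_HIGH    13      /* largest cache here: 1 << 13 == 8192 bytes */
#define FLAG_USERCOPY 0x2     /* stand-in for GFP_USERCOPY */

static int size_index(unsigned long size)
{
    int i = 3;                /* smallest cache: 1 << 3 == 8 bytes */

    while ((1UL << i) < size && i < SHIFT_HIGH)
        i++;
    return i;
}

static void print_cache(unsigned long size, unsigned int flags)
{
    int index = size_index(size);

    if (flags & FLAG_USERCOPY)  /* mirrors the GFP_USERCOPY branch above */
        printf("usercopy-kmalloc-%lu\n", 1UL << index);
    else
        printf("kmalloc-%lu\n", 1UL << index);
}

int main(void)
{
    print_cache(100, 0);              /* kmalloc-128 */
    print_cache(100, FLAG_USERCOPY);  /* usercopy-kmalloc-128 */
    return 0;
}
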
99256diff --git a/mm/slob.c b/mm/slob.c
99257index 96a8620..46b3f12 100644
99258--- a/mm/slob.c
99259+++ b/mm/slob.c
99260@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
99261 /*
99262 * Return the size of a slob block.
99263 */
99264-static slobidx_t slob_units(slob_t *s)
99265+static slobidx_t slob_units(const slob_t *s)
99266 {
99267 if (s->units > 0)
99268 return s->units;
99269@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
99270 /*
99271 * Return the next free slob block pointer after this one.
99272 */
99273-static slob_t *slob_next(slob_t *s)
99274+static slob_t *slob_next(const slob_t *s)
99275 {
99276 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
99277 slobidx_t next;
99278@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
99279 /*
99280 * Returns true if s is the last free block in its page.
99281 */
99282-static int slob_last(slob_t *s)
99283+static int slob_last(const slob_t *s)
99284 {
99285 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
99286 }
99287
99288-static void *slob_new_pages(gfp_t gfp, int order, int node)
99289+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
99290 {
99291- void *page;
99292+ struct page *page;
99293
99294 #ifdef CONFIG_NUMA
99295 if (node != NUMA_NO_NODE)
99296@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
99297 if (!page)
99298 return NULL;
99299
99300- return page_address(page);
99301+ __SetPageSlab(page);
99302+ return page;
99303 }
99304
99305-static void slob_free_pages(void *b, int order)
99306+static void slob_free_pages(struct page *sp, int order)
99307 {
99308 if (current->reclaim_state)
99309 current->reclaim_state->reclaimed_slab += 1 << order;
99310- free_pages((unsigned long)b, order);
99311+ __ClearPageSlab(sp);
99312+ page_mapcount_reset(sp);
99313+ sp->private = 0;
99314+ __free_pages(sp, order);
99315 }
99316
99317 /*
99318@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99319
99320 /* Not enough space: must allocate a new page */
99321 if (!b) {
99322- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99323- if (!b)
99324+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99325+ if (!sp)
99326 return NULL;
99327- sp = virt_to_page(b);
99328- __SetPageSlab(sp);
99329+ b = page_address(sp);
99330
99331 spin_lock_irqsave(&slob_lock, flags);
99332 sp->units = SLOB_UNITS(PAGE_SIZE);
99333 sp->freelist = b;
99334+ sp->private = 0;
99335 INIT_LIST_HEAD(&sp->lru);
99336 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
99337 set_slob_page_free(sp, slob_list);
99338@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99339 /*
99340 * slob_free: entry point into the slob allocator.
99341 */
99342-static void slob_free(void *block, int size)
99343+static void slob_free(struct kmem_cache *c, void *block, int size)
99344 {
99345 struct page *sp;
99346 slob_t *prev, *next, *b = (slob_t *)block;
99347@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
99348 if (slob_page_free(sp))
99349 clear_slob_page_free(sp);
99350 spin_unlock_irqrestore(&slob_lock, flags);
99351- __ClearPageSlab(sp);
99352- page_mapcount_reset(sp);
99353- slob_free_pages(b, 0);
99354+ slob_free_pages(sp, 0);
99355 return;
99356 }
99357
99358+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99359+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
99360+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
99361+#endif
99362+
99363 if (!slob_page_free(sp)) {
99364 /* This slob page is about to become partially free. Easy! */
99365 sp->units = units;
99366@@ -424,11 +431,10 @@ out:
99367 */
99368
99369 static __always_inline void *
99370-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99371+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
99372 {
99373- unsigned int *m;
99374- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99375- void *ret;
99376+ slob_t *m;
99377+ void *ret = NULL;
99378
99379 gfp &= gfp_allowed_mask;
99380
99381@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99382
99383 if (!m)
99384 return NULL;
99385- *m = size;
99386+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
99387+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
99388+ m[0].units = size;
99389+ m[1].units = align;
99390 ret = (void *)m + align;
99391
99392 trace_kmalloc_node(caller, ret,
99393 size, size + align, gfp, node);
99394 } else {
99395 unsigned int order = get_order(size);
99396+ struct page *page;
99397
99398 if (likely(order))
99399 gfp |= __GFP_COMP;
99400- ret = slob_new_pages(gfp, order, node);
99401+ page = slob_new_pages(gfp, order, node);
99402+ if (page) {
99403+ ret = page_address(page);
99404+ page->private = size;
99405+ }
99406
99407 trace_kmalloc_node(caller, ret,
99408 size, PAGE_SIZE << order, gfp, node);
99409 }
99410
99411- kmemleak_alloc(ret, size, 1, gfp);
99412 return ret;
99413 }
99414
99415-void *__kmalloc(size_t size, gfp_t gfp)
99416+static __always_inline void *
99417+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99418+{
99419+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99420+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
99421+
99422+ if (!ZERO_OR_NULL_PTR(ret))
99423+ kmemleak_alloc(ret, size, 1, gfp);
99424+ return ret;
99425+}
99426+
99427+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
99428 {
99429 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
99430 }
99431@@ -491,34 +515,112 @@ void kfree(const void *block)
99432 return;
99433 kmemleak_free(block);
99434
99435+ VM_BUG_ON(!virt_addr_valid(block));
99436 sp = virt_to_page(block);
99437- if (PageSlab(sp)) {
99438+ VM_BUG_ON(!PageSlab(sp));
99439+ if (!sp->private) {
99440 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99441- unsigned int *m = (unsigned int *)(block - align);
99442- slob_free(m, *m + align);
99443- } else
99444+ slob_t *m = (slob_t *)(block - align);
99445+ slob_free(NULL, m, m[0].units + align);
99446+ } else {
99447+ __ClearPageSlab(sp);
99448+ page_mapcount_reset(sp);
99449+ sp->private = 0;
99450 __free_pages(sp, compound_order(sp));
99451+ }
99452 }
99453 EXPORT_SYMBOL(kfree);
99454
99455+bool is_usercopy_object(const void *ptr)
99456+{
99457+ if (!slab_is_available())
99458+ return false;
99459+
99460+ // PAX: TODO
99461+
99462+ return false;
99463+}
99464+
99465+#ifdef CONFIG_PAX_USERCOPY
99466+const char *check_heap_object(const void *ptr, unsigned long n)
99467+{
99468+ struct page *page;
99469+ const slob_t *free;
99470+ const void *base;
99471+ unsigned long flags;
99472+
99473+ if (ZERO_OR_NULL_PTR(ptr))
99474+ return "<null>";
99475+
99476+ if (!virt_addr_valid(ptr))
99477+ return NULL;
99478+
99479+ page = virt_to_head_page(ptr);
99480+ if (!PageSlab(page))
99481+ return NULL;
99482+
99483+ if (page->private) {
99484+ base = page;
99485+ if (base <= ptr && n <= page->private - (ptr - base))
99486+ return NULL;
99487+ return "<slob>";
99488+ }
99489+
99490+ /* some tricky double walking to find the chunk */
99491+ spin_lock_irqsave(&slob_lock, flags);
99492+ base = (void *)((unsigned long)ptr & PAGE_MASK);
99493+ free = page->freelist;
99494+
99495+ while (!slob_last(free) && (void *)free <= ptr) {
99496+ base = free + slob_units(free);
99497+ free = slob_next(free);
99498+ }
99499+
99500+ while (base < (void *)free) {
99501+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
99502+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
99503+ int offset;
99504+
99505+ if (ptr < base + align)
99506+ break;
99507+
99508+ offset = ptr - base - align;
99509+ if (offset >= m) {
99510+ base += size;
99511+ continue;
99512+ }
99513+
99514+ if (n > m - offset)
99515+ break;
99516+
99517+ spin_unlock_irqrestore(&slob_lock, flags);
99518+ return NULL;
99519+ }
99520+
99521+ spin_unlock_irqrestore(&slob_lock, flags);
99522+ return "<slob>";
99523+}
99524+#endif
99525+
99526 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
99527 size_t ksize(const void *block)
99528 {
99529 struct page *sp;
99530 int align;
99531- unsigned int *m;
99532+ slob_t *m;
99533
99534 BUG_ON(!block);
99535 if (unlikely(block == ZERO_SIZE_PTR))
99536 return 0;
99537
99538 sp = virt_to_page(block);
99539- if (unlikely(!PageSlab(sp)))
99540- return PAGE_SIZE << compound_order(sp);
99541+ VM_BUG_ON(!PageSlab(sp));
99542+ if (sp->private)
99543+ return sp->private;
99544
99545 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99546- m = (unsigned int *)(block - align);
99547- return SLOB_UNITS(*m) * SLOB_UNIT;
99548+ m = (slob_t *)(block - align);
99549+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
99550 }
99551 EXPORT_SYMBOL(ksize);
99552
99553@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
99554
99555 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
99556 {
99557- void *b;
99558+ void *b = NULL;
99559
99560 flags &= gfp_allowed_mask;
99561
99562 lockdep_trace_alloc(flags);
99563
99564+#ifdef CONFIG_PAX_USERCOPY_SLABS
99565+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
99566+#else
99567 if (c->size < PAGE_SIZE) {
99568 b = slob_alloc(c->size, flags, c->align, node);
99569 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99570 SLOB_UNITS(c->size) * SLOB_UNIT,
99571 flags, node);
99572 } else {
99573- b = slob_new_pages(flags, get_order(c->size), node);
99574+ struct page *sp;
99575+
99576+ sp = slob_new_pages(flags, get_order(c->size), node);
99577+ if (sp) {
99578+ b = page_address(sp);
99579+ sp->private = c->size;
99580+ }
99581 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99582 PAGE_SIZE << get_order(c->size),
99583 flags, node);
99584 }
99585+#endif
99586
99587 if (b && c->ctor)
99588 c->ctor(b);
99589@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
99590 EXPORT_SYMBOL(kmem_cache_alloc);
99591
99592 #ifdef CONFIG_NUMA
99593-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
99594+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
99595 {
99596 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
99597 }
99598@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
99599 EXPORT_SYMBOL(kmem_cache_alloc_node);
99600 #endif
99601
99602-static void __kmem_cache_free(void *b, int size)
99603+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
99604 {
99605- if (size < PAGE_SIZE)
99606- slob_free(b, size);
99607+ struct page *sp;
99608+
99609+ sp = virt_to_page(b);
99610+ BUG_ON(!PageSlab(sp));
99611+ if (!sp->private)
99612+ slob_free(c, b, size);
99613 else
99614- slob_free_pages(b, get_order(size));
99615+ slob_free_pages(sp, get_order(size));
99616 }
99617
99618 static void kmem_rcu_free(struct rcu_head *head)
99619@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
99620 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
99621 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
99622
99623- __kmem_cache_free(b, slob_rcu->size);
99624+ __kmem_cache_free(NULL, b, slob_rcu->size);
99625 }
99626
99627 void kmem_cache_free(struct kmem_cache *c, void *b)
99628 {
99629+ int size = c->size;
99630+
99631+#ifdef CONFIG_PAX_USERCOPY_SLABS
99632+ if (size + c->align < PAGE_SIZE) {
99633+ size += c->align;
99634+ b -= c->align;
99635+ }
99636+#endif
99637+
99638 kmemleak_free_recursive(b, c->flags);
99639 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
99640 struct slob_rcu *slob_rcu;
99641- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
99642- slob_rcu->size = c->size;
99643+ slob_rcu = b + (size - sizeof(struct slob_rcu));
99644+ slob_rcu->size = size;
99645 call_rcu(&slob_rcu->head, kmem_rcu_free);
99646 } else {
99647- __kmem_cache_free(b, c->size);
99648+ __kmem_cache_free(c, b, size);
99649 }
99650
99651+#ifdef CONFIG_PAX_USERCOPY_SLABS
99652+ trace_kfree(_RET_IP_, b);
99653+#else
99654 trace_kmem_cache_free(_RET_IP_, b);
99655+#endif
99656+
99657 }
99658 EXPORT_SYMBOL(kmem_cache_free);
99659
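
The SLOB rework above widens the kmalloc bookkeeping header from a single unsigned int holding the size to two slob_t entries holding size and alignment (m[0].units and m[1].units), which is what lets kfree(), ksize() and check_heap_object() recover exact object bounds later; the BUILD_BUG_ONs guarantee the minimum alignment can hold the two-entry header. A user-space sketch of that header scheme, assuming hypothetical helpers hdr_alloc/hdr_size:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

typedef struct { long units; } slob_t;   /* simplified stand-in */

/* Allocate with a two-entry header recording size and alignment. */
static void *hdr_alloc(size_t size, size_t align)
{
    slob_t *m;

    assert(align >= 2 * sizeof(slob_t));  /* header must fit, cf. BUILD_BUG_ON */
    m = malloc(align + size);
    if (!m)
        return NULL;
    m[0].units = (long)size;   /* mirrors m[0].units = size  */
    m[1].units = (long)align;  /* mirrors m[1].units = align */
    return (char *)m + align;
}

/* Recover the object size from the header, as ksize() does above. */
static size_t hdr_size(const void *p, size_t align)
{
    const slob_t *m = (const slob_t *)((const char *)p - align);

    return (size_t)m[0].units;
}

int main(void)
{
    size_t align = 2 * sizeof(slob_t);
    void *p = hdr_alloc(100, align);

    assert(p && hdr_size(p, align) == 100);
    memset(p, 0, 100);                 /* object bounds are now known */
    free((char *)p - align);
    return 0;
}
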
99660diff --git a/mm/slub.c b/mm/slub.c
99661index fe376fe..2f5757c 100644
99662--- a/mm/slub.c
99663+++ b/mm/slub.c
99664@@ -197,7 +197,7 @@ struct track {
99665
99666 enum track_item { TRACK_ALLOC, TRACK_FREE };
99667
99668-#ifdef CONFIG_SYSFS
99669+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99670 static int sysfs_slab_add(struct kmem_cache *);
99671 static int sysfs_slab_alias(struct kmem_cache *, const char *);
99672 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
99673@@ -535,7 +535,7 @@ static void print_track(const char *s, struct track *t)
99674 if (!t->addr)
99675 return;
99676
99677- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
99678+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
99679 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
99680 #ifdef CONFIG_STACKTRACE
99681 {
99682@@ -2652,6 +2652,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
99683
99684 slab_free_hook(s, x);
99685
99686+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99687+ if (!(s->flags & SLAB_NO_SANITIZE)) {
99688+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
99689+ if (s->ctor)
99690+ s->ctor(x);
99691+ }
99692+#endif
99693+
99694 redo:
99695 /*
99696 * Determine the currently cpus per cpu slab.
99697@@ -2989,6 +2997,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
99698 s->inuse = size;
99699
99700 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
99701+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99702+ (!(flags & SLAB_NO_SANITIZE)) ||
99703+#endif
99704 s->ctor)) {
99705 /*
99706 * Relocate free pointer after the object if it is not
99707@@ -3243,7 +3254,7 @@ static int __init setup_slub_min_objects(char *str)
99708
99709 __setup("slub_min_objects=", setup_slub_min_objects);
99710
99711-void *__kmalloc(size_t size, gfp_t flags)
99712+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
99713 {
99714 struct kmem_cache *s;
99715 void *ret;
99716@@ -3279,7 +3290,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
99717 return ptr;
99718 }
99719
99720-void *__kmalloc_node(size_t size, gfp_t flags, int node)
99721+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
99722 {
99723 struct kmem_cache *s;
99724 void *ret;
99725@@ -3308,6 +3319,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
99726 EXPORT_SYMBOL(__kmalloc_node);
99727 #endif
99728
99729+bool is_usercopy_object(const void *ptr)
99730+{
99731+ struct page *page;
99732+ struct kmem_cache *s;
99733+
99734+ if (ZERO_OR_NULL_PTR(ptr))
99735+ return false;
99736+
99737+ if (!slab_is_available())
99738+ return false;
99739+
99740+ if (!virt_addr_valid(ptr))
99741+ return false;
99742+
99743+ page = virt_to_head_page(ptr);
99744+
99745+ if (!PageSlab(page))
99746+ return false;
99747+
99748+ s = page->slab_cache;
99749+ return s->flags & SLAB_USERCOPY;
99750+}
99751+
99752+#ifdef CONFIG_PAX_USERCOPY
99753+const char *check_heap_object(const void *ptr, unsigned long n)
99754+{
99755+ struct page *page;
99756+ struct kmem_cache *s;
99757+ unsigned long offset;
99758+
99759+ if (ZERO_OR_NULL_PTR(ptr))
99760+ return "<null>";
99761+
99762+ if (!virt_addr_valid(ptr))
99763+ return NULL;
99764+
99765+ page = virt_to_head_page(ptr);
99766+
99767+ if (!PageSlab(page))
99768+ return NULL;
99769+
99770+ s = page->slab_cache;
99771+ if (!(s->flags & SLAB_USERCOPY))
99772+ return s->name;
99773+
99774+ offset = (ptr - page_address(page)) % s->size;
99775+ if (offset <= s->object_size && n <= s->object_size - offset)
99776+ return NULL;
99777+
99778+ return s->name;
99779+}
99780+#endif
99781+
99782 size_t ksize(const void *object)
99783 {
99784 struct page *page;
99785@@ -3336,6 +3400,7 @@ void kfree(const void *x)
99786 if (unlikely(ZERO_OR_NULL_PTR(x)))
99787 return;
99788
99789+ VM_BUG_ON(!virt_addr_valid(x));
99790 page = virt_to_head_page(x);
99791 if (unlikely(!PageSlab(page))) {
99792 BUG_ON(!PageCompound(page));
99793@@ -3631,7 +3696,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99794 int i;
99795 struct kmem_cache *c;
99796
99797- s->refcount++;
99798+ atomic_inc(&s->refcount);
99799
99800 /*
99801 * Adjust the object sizes so that we clear
99802@@ -3650,7 +3715,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99803 }
99804
99805 if (sysfs_slab_alias(s, name)) {
99806- s->refcount--;
99807+ atomic_dec(&s->refcount);
99808 s = NULL;
99809 }
99810 }
99811@@ -3767,7 +3832,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
99812 }
99813 #endif
99814
99815-#ifdef CONFIG_SYSFS
99816+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99817 static int count_inuse(struct page *page)
99818 {
99819 return page->inuse;
99820@@ -4048,7 +4113,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
99821 len += sprintf(buf + len, "%7ld ", l->count);
99822
99823 if (l->addr)
99824+#ifdef CONFIG_GRKERNSEC_HIDESYM
99825+ len += sprintf(buf + len, "%pS", NULL);
99826+#else
99827 len += sprintf(buf + len, "%pS", (void *)l->addr);
99828+#endif
99829 else
99830 len += sprintf(buf + len, "<not-available>");
99831
99832@@ -4150,12 +4219,12 @@ static void __init resiliency_test(void)
99833 validate_slab_cache(kmalloc_caches[9]);
99834 }
99835 #else
99836-#ifdef CONFIG_SYSFS
99837+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99838 static void resiliency_test(void) {};
99839 #endif
99840 #endif
99841
99842-#ifdef CONFIG_SYSFS
99843+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99844 enum slab_stat_type {
99845 SL_ALL, /* All slabs */
99846 SL_PARTIAL, /* Only partially allocated slabs */
99847@@ -4392,13 +4461,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
99848 {
99849 if (!s->ctor)
99850 return 0;
99851+#ifdef CONFIG_GRKERNSEC_HIDESYM
99852+ return sprintf(buf, "%pS\n", NULL);
99853+#else
99854 return sprintf(buf, "%pS\n", s->ctor);
99855+#endif
99856 }
99857 SLAB_ATTR_RO(ctor);
99858
99859 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
99860 {
99861- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
99862+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
99863 }
99864 SLAB_ATTR_RO(aliases);
99865
99866@@ -4486,6 +4559,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
99867 SLAB_ATTR_RO(cache_dma);
99868 #endif
99869
99870+#ifdef CONFIG_PAX_USERCOPY_SLABS
99871+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
99872+{
99873+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
99874+}
99875+SLAB_ATTR_RO(usercopy);
99876+#endif
99877+
99878+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99879+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
99880+{
99881+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
99882+}
99883+SLAB_ATTR_RO(sanitize);
99884+#endif
99885+
99886 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
99887 {
99888 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
99889@@ -4541,7 +4630,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
99890 * as well as cause other issues like converting a mergeable
99891 * cache into an unmergeable one.
99892 */
99893- if (s->refcount > 1)
99894+ if (atomic_read(&s->refcount) > 1)
99895 return -EINVAL;
99896
99897 s->flags &= ~SLAB_TRACE;
99898@@ -4661,7 +4750,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
99899 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
99900 size_t length)
99901 {
99902- if (s->refcount > 1)
99903+ if (atomic_read(&s->refcount) > 1)
99904 return -EINVAL;
99905
99906 s->flags &= ~SLAB_FAILSLAB;
99907@@ -4831,6 +4920,12 @@ static struct attribute *slab_attrs[] = {
99908 #ifdef CONFIG_ZONE_DMA
99909 &cache_dma_attr.attr,
99910 #endif
99911+#ifdef CONFIG_PAX_USERCOPY_SLABS
99912+ &usercopy_attr.attr,
99913+#endif
99914+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99915+ &sanitize_attr.attr,
99916+#endif
99917 #ifdef CONFIG_NUMA
99918 &remote_node_defrag_ratio_attr.attr,
99919 #endif
99920@@ -5075,6 +5170,7 @@ static char *create_unique_id(struct kmem_cache *s)
99921 return name;
99922 }
99923
99924+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99925 static int sysfs_slab_add(struct kmem_cache *s)
99926 {
99927 int err;
99928@@ -5148,6 +5244,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
99929 kobject_del(&s->kobj);
99930 kobject_put(&s->kobj);
99931 }
99932+#endif
99933
99934 /*
99935 * Need to buffer aliases during bootup until sysfs becomes
99936@@ -5161,6 +5258,7 @@ struct saved_alias {
99937
99938 static struct saved_alias *alias_list;
99939
99940+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99941 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99942 {
99943 struct saved_alias *al;
99944@@ -5183,6 +5281,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99945 alias_list = al;
99946 return 0;
99947 }
99948+#endif
99949
99950 static int __init slab_sysfs_init(void)
99951 {
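
For SLUB, the check_heap_object() added above reduces the usercopy check to arithmetic: take the offset of the pointer within its slab modulo s->size (object plus metadata), and allow the copy only if it starts inside an object and ends within s->object_size. The same test in standalone form (copy_allowed and the layout constants are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* slab_size stands in for s->size, obj_size for s->object_size. */
static bool copy_allowed(unsigned long ptr, unsigned long slab_base,
                         unsigned long slab_size, unsigned long obj_size,
                         unsigned long n)
{
    unsigned long offset = (ptr - slab_base) % slab_size;

    /* starts inside an object and does not run past its end */
    return offset <= obj_size && n <= obj_size - offset;
}

int main(void)
{
    /* 64-byte objects padded to 96 bytes each, slab at 0x1000 */
    printf("%d\n", copy_allowed(0x1000 + 96 + 8, 0x1000, 96, 64, 32)); /* 1 */
    printf("%d\n", copy_allowed(0x1000 + 96 + 8, 0x1000, 96, 64, 64)); /* 0 */
    return 0;
}
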
99952diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
99953index 4cba9c2..b4f9fcc 100644
99954--- a/mm/sparse-vmemmap.c
99955+++ b/mm/sparse-vmemmap.c
99956@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
99957 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
99958 if (!p)
99959 return NULL;
99960- pud_populate(&init_mm, pud, p);
99961+ pud_populate_kernel(&init_mm, pud, p);
99962 }
99963 return pud;
99964 }
99965@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
99966 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
99967 if (!p)
99968 return NULL;
99969- pgd_populate(&init_mm, pgd, p);
99970+ pgd_populate_kernel(&init_mm, pgd, p);
99971 }
99972 return pgd;
99973 }
99974diff --git a/mm/sparse.c b/mm/sparse.c
99975index d1b48b6..6e8590e 100644
99976--- a/mm/sparse.c
99977+++ b/mm/sparse.c
99978@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
99979
99980 for (i = 0; i < PAGES_PER_SECTION; i++) {
99981 if (PageHWPoison(&memmap[i])) {
99982- atomic_long_sub(1, &num_poisoned_pages);
99983+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
99984 ClearPageHWPoison(&memmap[i]);
99985 }
99986 }
99987diff --git a/mm/swap.c b/mm/swap.c
99988index 8a12b33..7068e78 100644
99989--- a/mm/swap.c
99990+++ b/mm/swap.c
99991@@ -31,6 +31,7 @@
99992 #include <linux/memcontrol.h>
99993 #include <linux/gfp.h>
99994 #include <linux/uio.h>
99995+#include <linux/hugetlb.h>
99996
99997 #include "internal.h"
99998
99999@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
100000
100001 __page_cache_release(page);
100002 dtor = get_compound_page_dtor(page);
100003+ if (!PageHuge(page))
100004+ BUG_ON(dtor != free_compound_page);
100005 (*dtor)(page);
100006 }
100007
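
The __put_compound_page() hunk above refuses to call a compound-page destructor through a pointer that does not match the only expected target for non-hugetlb pages, turning a corrupted or hijacked dtor pointer into an immediate BUG. The shape of that check in plain C, with legit_dtor standing in for free_compound_page:

#include <assert.h>

static void legit_dtor(void *p) { (void)p; }

static void call_checked(void (*dtor)(void *), void *p)
{
    /* mirrors BUG_ON(dtor != free_compound_page) for non-huge pages */
    assert(dtor == legit_dtor);
    dtor(p);
}

int main(void)
{
    call_checked(legit_dtor, (void *)0);
    return 0;
}
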
100008diff --git a/mm/swapfile.c b/mm/swapfile.c
100009index 63f55cc..31874e6 100644
100010--- a/mm/swapfile.c
100011+++ b/mm/swapfile.c
100012@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
100013
100014 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
100015 /* Activity counter to indicate that a swapon or swapoff has occurred */
100016-static atomic_t proc_poll_event = ATOMIC_INIT(0);
100017+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
100018
100019 static inline unsigned char swap_count(unsigned char ent)
100020 {
100021@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
100022 spin_unlock(&swap_lock);
100023
100024 err = 0;
100025- atomic_inc(&proc_poll_event);
100026+ atomic_inc_unchecked(&proc_poll_event);
100027 wake_up_interruptible(&proc_poll_wait);
100028
100029 out_dput:
100030@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
100031
100032 poll_wait(file, &proc_poll_wait, wait);
100033
100034- if (seq->poll_event != atomic_read(&proc_poll_event)) {
100035- seq->poll_event = atomic_read(&proc_poll_event);
100036+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
100037+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100038 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
100039 }
100040
100041@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
100042 return ret;
100043
100044 seq = file->private_data;
100045- seq->poll_event = atomic_read(&proc_poll_event);
100046+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100047 return 0;
100048 }
100049
100050@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
100051 (frontswap_map) ? "FS" : "");
100052
100053 mutex_unlock(&swapon_mutex);
100054- atomic_inc(&proc_poll_event);
100055+ atomic_inc_unchecked(&proc_poll_event);
100056 wake_up_interruptible(&proc_poll_wait);
100057
100058 if (S_ISREG(inode->i_mode))
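
The proc_poll_event conversions above follow the PaX REFCOUNT convention used throughout this patch: atomic_t increments are overflow-checked, so counters that are allowed to wrap (event counters, sequence numbers) move to atomic_unchecked_t, which skips the check. A rough user-space model of the distinction using GCC atomic builtins; the overflow handling here is a simplification of what the real feature does in arch code:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static void atomic_inc_checked(int *v)
{
    int old = __atomic_fetch_add(v, 1, __ATOMIC_RELAXED);

    if (old == INT_MAX) {     /* the add wrapped: treat as fatal */
        fprintf(stderr, "refcount overflow\n");
        abort();
    }
}

static void atomic_inc_unchecked(int *v)
{
    __atomic_fetch_add(v, 1, __ATOMIC_RELAXED);   /* wrapping is fine */
}

int main(void)
{
    int event = INT_MAX;
    int ref = INT_MAX;

    atomic_inc_unchecked(&event);   /* wraps silently, as intended */
    printf("event = %d\n", event);  /* INT_MIN */

    atomic_inc_checked(&ref);       /* aborts here */
    return 0;
}
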
100059diff --git a/mm/util.c b/mm/util.c
100060index fec39d4..3e60325 100644
100061--- a/mm/util.c
100062+++ b/mm/util.c
100063@@ -195,6 +195,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
100064 void arch_pick_mmap_layout(struct mm_struct *mm)
100065 {
100066 mm->mmap_base = TASK_UNMAPPED_BASE;
100067+
100068+#ifdef CONFIG_PAX_RANDMMAP
100069+ if (mm->pax_flags & MF_PAX_RANDMMAP)
100070+ mm->mmap_base += mm->delta_mmap;
100071+#endif
100072+
100073 mm->get_unmapped_area = arch_get_unmapped_area;
100074 }
100075 #endif
100076@@ -371,6 +377,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
100077 if (!mm->arg_end)
100078 goto out_mm; /* Shh! No looking before we're done */
100079
100080+ if (gr_acl_handle_procpidmem(task))
100081+ goto out_mm;
100082+
100083 len = mm->arg_end - mm->arg_start;
100084
100085 if (len > buflen)
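
With PAX_RANDMMAP, the arch_pick_mmap_layout() hunk above offsets the mmap base by a per-process delta_mmap chosen at exec time. A toy model of the effect (the base constant and the number of random bits are illustrative, not the kernel's values):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TASK_UNMAPPED_BASE 0x2aaaaaaaa000ULL
#define PAGE_SHIFT 12

int main(void)
{
    uint64_t delta_mmap, mmap_base;

    srand((unsigned int)time(NULL));
    /* e.g. up to 16 random bits of page offset; widths vary per arch */
    delta_mmap = ((uint64_t)rand() & 0xffff) << PAGE_SHIFT;
    mmap_base = TASK_UNMAPPED_BASE + delta_mmap;
    printf("mmap_base = %#llx\n", (unsigned long long)mmap_base);
    return 0;
}
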
100086diff --git a/mm/vmalloc.c b/mm/vmalloc.c
100087index 39c3388..25c19be 100644
100088--- a/mm/vmalloc.c
100089+++ b/mm/vmalloc.c
100090@@ -40,6 +40,21 @@ struct vfree_deferred {
100091 };
100092 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
100093
100094+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100095+struct stack_deferred_llist {
100096+ struct llist_head list;
100097+ void *stack;
100098+ void *lowmem_stack;
100099+};
100100+
100101+struct stack_deferred {
100102+ struct stack_deferred_llist list;
100103+ struct work_struct wq;
100104+};
100105+
100106+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
100107+#endif
100108+
100109 static void __vunmap(const void *, int);
100110
100111 static void free_work(struct work_struct *w)
100112@@ -47,12 +62,30 @@ static void free_work(struct work_struct *w)
100113 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100114 struct llist_node *llnode = llist_del_all(&p->list);
100115 while (llnode) {
100116- void *p = llnode;
100117+ void *x = llnode;
100118 llnode = llist_next(llnode);
100119- __vunmap(p, 1);
100120+ __vunmap(x, 1);
100121 }
100122 }
100123
100124+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100125+static void unmap_work(struct work_struct *w)
100126+{
100127+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
100128+ struct llist_node *llnode = llist_del_all(&p->list.list);
100129+ while (llnode) {
100130+ struct stack_deferred_llist *x =
100131+ llist_entry((struct llist_head *)llnode,
100132+ struct stack_deferred_llist, list);
100133+ void *stack = ACCESS_ONCE(x->stack);
100134+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
100135+ llnode = llist_next(llnode);
100136+ __vunmap(stack, 0);
100137+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
100138+ }
100139+}
100140+#endif
100141+
100142 /*** Page table manipulation functions ***/
100143
100144 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100145@@ -61,8 +94,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100146
100147 pte = pte_offset_kernel(pmd, addr);
100148 do {
100149- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100150- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100151+
100152+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100153+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
100154+ BUG_ON(!pte_exec(*pte));
100155+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
100156+ continue;
100157+ }
100158+#endif
100159+
100160+ {
100161+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100162+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100163+ }
100164 } while (pte++, addr += PAGE_SIZE, addr != end);
100165 }
100166
100167@@ -122,16 +166,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
100168 pte = pte_alloc_kernel(pmd, addr);
100169 if (!pte)
100170 return -ENOMEM;
100171+
100172+ pax_open_kernel();
100173 do {
100174 struct page *page = pages[*nr];
100175
100176- if (WARN_ON(!pte_none(*pte)))
100177+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100178+ if (pgprot_val(prot) & _PAGE_NX)
100179+#endif
100180+
100181+ if (!pte_none(*pte)) {
100182+ pax_close_kernel();
100183+ WARN_ON(1);
100184 return -EBUSY;
100185- if (WARN_ON(!page))
100186+ }
100187+ if (!page) {
100188+ pax_close_kernel();
100189+ WARN_ON(1);
100190 return -ENOMEM;
100191+ }
100192 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
100193 (*nr)++;
100194 } while (pte++, addr += PAGE_SIZE, addr != end);
100195+ pax_close_kernel();
100196 return 0;
100197 }
100198
100199@@ -141,7 +198,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
100200 pmd_t *pmd;
100201 unsigned long next;
100202
100203- pmd = pmd_alloc(&init_mm, pud, addr);
100204+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
100205 if (!pmd)
100206 return -ENOMEM;
100207 do {
100208@@ -158,7 +215,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
100209 pud_t *pud;
100210 unsigned long next;
100211
100212- pud = pud_alloc(&init_mm, pgd, addr);
100213+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
100214 if (!pud)
100215 return -ENOMEM;
100216 do {
100217@@ -218,6 +275,12 @@ int is_vmalloc_or_module_addr(const void *x)
100218 if (addr >= MODULES_VADDR && addr < MODULES_END)
100219 return 1;
100220 #endif
100221+
100222+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100223+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
100224+ return 1;
100225+#endif
100226+
100227 return is_vmalloc_addr(x);
100228 }
100229
100230@@ -238,8 +301,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
100231
100232 if (!pgd_none(*pgd)) {
100233 pud_t *pud = pud_offset(pgd, addr);
100234+#ifdef CONFIG_X86
100235+ if (!pud_large(*pud))
100236+#endif
100237 if (!pud_none(*pud)) {
100238 pmd_t *pmd = pmd_offset(pud, addr);
100239+#ifdef CONFIG_X86
100240+ if (!pmd_large(*pmd))
100241+#endif
100242 if (!pmd_none(*pmd)) {
100243 pte_t *ptep, pte;
100244
100245@@ -341,7 +410,7 @@ static void purge_vmap_area_lazy(void);
100246 * Allocate a region of KVA of the specified size and alignment, within the
100247 * vstart and vend.
100248 */
100249-static struct vmap_area *alloc_vmap_area(unsigned long size,
100250+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
100251 unsigned long align,
100252 unsigned long vstart, unsigned long vend,
100253 int node, gfp_t gfp_mask)
100254@@ -1182,13 +1251,23 @@ void __init vmalloc_init(void)
100255 for_each_possible_cpu(i) {
100256 struct vmap_block_queue *vbq;
100257 struct vfree_deferred *p;
100258+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100259+ struct stack_deferred *p2;
100260+#endif
100261
100262 vbq = &per_cpu(vmap_block_queue, i);
100263 spin_lock_init(&vbq->lock);
100264 INIT_LIST_HEAD(&vbq->free);
100265+
100266 p = &per_cpu(vfree_deferred, i);
100267 init_llist_head(&p->list);
100268 INIT_WORK(&p->wq, free_work);
100269+
100270+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100271+ p2 = &per_cpu(stack_deferred, i);
100272+ init_llist_head(&p2->list.list);
100273+ INIT_WORK(&p2->wq, unmap_work);
100274+#endif
100275 }
100276
100277 /* Import existing vmlist entries. */
100278@@ -1313,6 +1392,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
100279 struct vm_struct *area;
100280
100281 BUG_ON(in_interrupt());
100282+
100283+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100284+ if (flags & VM_KERNEXEC) {
100285+ if (start != VMALLOC_START || end != VMALLOC_END)
100286+ return NULL;
100287+ start = (unsigned long)MODULES_EXEC_VADDR;
100288+ end = (unsigned long)MODULES_EXEC_END;
100289+ }
100290+#endif
100291+
100292 if (flags & VM_IOREMAP)
100293 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
100294
100295@@ -1518,6 +1607,23 @@ void vunmap(const void *addr)
100296 }
100297 EXPORT_SYMBOL(vunmap);
100298
100299+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100300+void unmap_process_stacks(struct task_struct *task)
100301+{
100302+ if (unlikely(in_interrupt())) {
100303+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
100304+ struct stack_deferred_llist *list = task->stack;
100305+ list->stack = task->stack;
100306+ list->lowmem_stack = task->lowmem_stack;
100307+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
100308+ schedule_work(&p->wq);
100309+ } else {
100310+ __vunmap(task->stack, 0);
100311+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
100312+ }
100313+}
100314+#endif
100315+
100316 /**
100317 * vmap - map an array of pages into virtually contiguous space
100318 * @pages: array of page pointers
100319@@ -1538,6 +1644,11 @@ void *vmap(struct page **pages, unsigned int count,
100320 if (count > totalram_pages)
100321 return NULL;
100322
100323+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100324+ if (!(pgprot_val(prot) & _PAGE_NX))
100325+ flags |= VM_KERNEXEC;
100326+#endif
100327+
100328 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
100329 __builtin_return_address(0));
100330 if (!area)
100331@@ -1640,6 +1751,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
100332 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
100333 goto fail;
100334
100335+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100336+ if (!(pgprot_val(prot) & _PAGE_NX))
100337+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
100338+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
100339+ else
100340+#endif
100341+
100342 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
100343 start, end, node, gfp_mask, caller);
100344 if (!area)
100345@@ -1816,10 +1934,9 @@ EXPORT_SYMBOL(vzalloc_node);
100346 * For tight control over page level allocator and protection flags
100347 * use __vmalloc() instead.
100348 */
100349-
100350 void *vmalloc_exec(unsigned long size)
100351 {
100352- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
100353+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
100354 NUMA_NO_NODE, __builtin_return_address(0));
100355 }
100356
100357@@ -2126,6 +2243,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
100358 {
100359 struct vm_struct *area;
100360
100361+ BUG_ON(vma->vm_mirror);
100362+
100363 size = PAGE_ALIGN(size);
100364
100365 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
100366@@ -2608,7 +2727,11 @@ static int s_show(struct seq_file *m, void *p)
100367 v->addr, v->addr + v->size, v->size);
100368
100369 if (v->caller)
100370+#ifdef CONFIG_GRKERNSEC_HIDESYM
100371+ seq_printf(m, " %pK", v->caller);
100372+#else
100373 seq_printf(m, " %pS", v->caller);
100374+#endif
100375
100376 if (v->nr_pages)
100377 seq_printf(m, " pages=%d", v->nr_pages);
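
The unmap_process_stacks() added above frees a task's vmapped kernel stack, but __vunmap() cannot run in interrupt context, so in that case the stack is queued on a per-CPU llist and handed to unmap_work() via schedule_work(). A simplified, single-threaded user-space model of that defer-and-drain pattern (names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct deferred {
    struct deferred *next;
    void *stack;
};

static struct deferred *pending;   /* stands in for the per-CPU llist */

static void free_stack(void *stack)
{
    printf("freeing %p\n", stack);
    free(stack);
}

static void release_stack(void *stack, bool in_interrupt)
{
    if (in_interrupt) {            /* cannot unmap here: defer it */
        struct deferred *d = malloc(sizeof(*d));

        if (!d)
            exit(1);
        d->stack = stack;
        d->next = pending;
        pending = d;
    } else {
        free_stack(stack);         /* safe context: free directly */
    }
}

static void drain_work(void)       /* the unmap_work() side */
{
    while (pending) {
        struct deferred *d = pending;

        pending = d->next;
        free_stack(d->stack);
        free(d);
    }
}

int main(void)
{
    release_stack(malloc(64), true);   /* queued */
    release_stack(malloc(64), false);  /* freed immediately */
    drain_work();                      /* worker frees the queued one */
    return 0;
}
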
100378diff --git a/mm/vmstat.c b/mm/vmstat.c
100379index 1284f89..2e895e31 100644
100380--- a/mm/vmstat.c
100381+++ b/mm/vmstat.c
100382@@ -24,6 +24,7 @@
100383 #include <linux/mm_inline.h>
100384 #include <linux/page_ext.h>
100385 #include <linux/page_owner.h>
100386+#include <linux/grsecurity.h>
100387
100388 #include "internal.h"
100389
100390@@ -83,7 +84,7 @@ void vm_events_fold_cpu(int cpu)
100391 *
100392 * vm_stat contains the global counters
100393 */
100394-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100395+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100396 EXPORT_SYMBOL(vm_stat);
100397
100398 #ifdef CONFIG_SMP
100399@@ -435,7 +436,7 @@ static int fold_diff(int *diff)
100400
100401 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100402 if (diff[i]) {
100403- atomic_long_add(diff[i], &vm_stat[i]);
100404+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
100405 changes++;
100406 }
100407 return changes;
100408@@ -473,7 +474,7 @@ static int refresh_cpu_vm_stats(void)
100409 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
100410 if (v) {
100411
100412- atomic_long_add(v, &zone->vm_stat[i]);
100413+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100414 global_diff[i] += v;
100415 #ifdef CONFIG_NUMA
100416 /* 3 seconds idle till flush */
100417@@ -537,7 +538,7 @@ void cpu_vm_stats_fold(int cpu)
100418
100419 v = p->vm_stat_diff[i];
100420 p->vm_stat_diff[i] = 0;
100421- atomic_long_add(v, &zone->vm_stat[i]);
100422+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100423 global_diff[i] += v;
100424 }
100425 }
100426@@ -557,8 +558,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
100427 if (pset->vm_stat_diff[i]) {
100428 int v = pset->vm_stat_diff[i];
100429 pset->vm_stat_diff[i] = 0;
100430- atomic_long_add(v, &zone->vm_stat[i]);
100431- atomic_long_add(v, &vm_stat[i]);
100432+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100433+ atomic_long_add_unchecked(v, &vm_stat[i]);
100434 }
100435 }
100436 #endif
100437@@ -1291,10 +1292,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
100438 stat_items_size += sizeof(struct vm_event_state);
100439 #endif
100440
100441- v = kmalloc(stat_items_size, GFP_KERNEL);
100442+ v = kzalloc(stat_items_size, GFP_KERNEL);
100443 m->private = v;
100444 if (!v)
100445 return ERR_PTR(-ENOMEM);
100446+
100447+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100448+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
100449+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
100450+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
100451+ && !in_group_p(grsec_proc_gid)
100452+#endif
100453+ )
100454+ return (unsigned long *)m->private + *pos;
100455+#endif
100456+#endif
100457+
100458 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100459 v[i] = global_page_state(i);
100460 v += NR_VM_ZONE_STAT_ITEMS;
100461@@ -1526,10 +1539,16 @@ static int __init setup_vmstat(void)
100462 cpu_notifier_register_done();
100463 #endif
100464 #ifdef CONFIG_PROC_FS
100465- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
100466- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
100467- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100468- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
100469+ {
100470+ mode_t gr_mode = S_IRUGO;
100471+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100472+ gr_mode = S_IRUSR;
100473+#endif
100474+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
100475+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
100476+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100477+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
100478+ }
100479 #endif
100480 return 0;
100481 }
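
With GRKERNSEC_PROC_ADD, the vmstat_start() hunk above zeroes the buffer up front (kzalloc instead of kmalloc) and returns early for unprivileged readers, so they see all-zero statistics rather than real counter values; the companion hunk narrows several /proc file modes to S_IRUSR. A user-space sketch of the zero-fill-then-gate idea (vmstat_snapshot is a hypothetical helper):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

static long *vmstat_snapshot(bool privileged, size_t nstats)
{
    long *v = calloc(nstats, sizeof(*v));   /* kzalloc(): zero-filled */

    if (!v)
        return NULL;
    if (!privileged)
        return v;                  /* early out: caller sees zeros */
    for (size_t i = 0; i < nstats; i++)
        v[i] = (long)(1000 + i);   /* pretend these are real counters */
    return v;
}

int main(void)
{
    long *pub = vmstat_snapshot(false, 4);
    long *priv = vmstat_snapshot(true, 4);

    printf("unpriv: %ld  priv: %ld\n", pub[0], priv[0]);
    free(pub);
    free(priv);
    return 0;
}
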
100482diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
100483index 64c6bed..b79a5de 100644
100484--- a/net/8021q/vlan.c
100485+++ b/net/8021q/vlan.c
100486@@ -481,7 +481,7 @@ out:
100487 return NOTIFY_DONE;
100488 }
100489
100490-static struct notifier_block vlan_notifier_block __read_mostly = {
100491+static struct notifier_block vlan_notifier_block = {
100492 .notifier_call = vlan_device_event,
100493 };
100494
100495@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
100496 err = -EPERM;
100497 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
100498 break;
100499- if ((args.u.name_type >= 0) &&
100500- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
100501+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
100502 struct vlan_net *vn;
100503
100504 vn = net_generic(net, vlan_net_id);
100505diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
100506index 8ac8a5c..991defc 100644
100507--- a/net/8021q/vlan_netlink.c
100508+++ b/net/8021q/vlan_netlink.c
100509@@ -238,7 +238,7 @@ nla_put_failure:
100510 return -EMSGSIZE;
100511 }
100512
100513-struct rtnl_link_ops vlan_link_ops __read_mostly = {
100514+struct rtnl_link_ops vlan_link_ops = {
100515 .kind = "vlan",
100516 .maxtype = IFLA_VLAN_MAX,
100517 .policy = vlan_policy,
100518diff --git a/net/9p/client.c b/net/9p/client.c
100519index e86a9bea..e91f70e 100644
100520--- a/net/9p/client.c
100521+++ b/net/9p/client.c
100522@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
100523 len - inline_len);
100524 } else {
100525 err = copy_from_user(ename + inline_len,
100526- uidata, len - inline_len);
100527+ (char __force_user *)uidata, len - inline_len);
100528 if (err) {
100529 err = -EFAULT;
100530 goto out_err;
100531@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
100532 kernel_buf = 1;
100533 indata = data;
100534 } else
100535- indata = (__force char *)udata;
100536+ indata = (__force_kernel char *)udata;
100537 /*
100538 * response header len is 11
100539 * PDU Header(7) + IO Size (4)
100540@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
100541 kernel_buf = 1;
100542 odata = data;
100543 } else
100544- odata = (char *)udata;
100545+ odata = (char __force_kernel *)udata;
100546 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
100547 P9_ZC_HDR_SZ, kernel_buf, "dqd",
100548 fid->fid, offset, rsize);
100549diff --git a/net/9p/mod.c b/net/9p/mod.c
100550index 6ab36ae..6f1841b 100644
100551--- a/net/9p/mod.c
100552+++ b/net/9p/mod.c
100553@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
100554 void v9fs_register_trans(struct p9_trans_module *m)
100555 {
100556 spin_lock(&v9fs_trans_lock);
100557- list_add_tail(&m->list, &v9fs_trans_list);
100558+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
100559 spin_unlock(&v9fs_trans_lock);
100560 }
100561 EXPORT_SYMBOL(v9fs_register_trans);
100562@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
100563 void v9fs_unregister_trans(struct p9_trans_module *m)
100564 {
100565 spin_lock(&v9fs_trans_lock);
100566- list_del_init(&m->list);
100567+ pax_list_del_init((struct list_head *)&m->list);
100568 spin_unlock(&v9fs_trans_lock);
100569 }
100570 EXPORT_SYMBOL(v9fs_unregister_trans);
100571diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
100572index 80d08f6..de63fd1 100644
100573--- a/net/9p/trans_fd.c
100574+++ b/net/9p/trans_fd.c
100575@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
100576 oldfs = get_fs();
100577 set_fs(get_ds());
100578 /* The cast to a user pointer is valid due to the set_fs() */
100579- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
100580+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
100581 set_fs(oldfs);
100582
100583 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
100584diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
100585index af46bc4..f9adfcd 100644
100586--- a/net/appletalk/atalk_proc.c
100587+++ b/net/appletalk/atalk_proc.c
100588@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
100589 struct proc_dir_entry *p;
100590 int rc = -ENOMEM;
100591
100592- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
100593+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
100594 if (!atalk_proc_dir)
100595 goto out;
100596
100597diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
100598index 876fbe8..8bbea9f 100644
100599--- a/net/atm/atm_misc.c
100600+++ b/net/atm/atm_misc.c
100601@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
100602 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
100603 return 1;
100604 atm_return(vcc, truesize);
100605- atomic_inc(&vcc->stats->rx_drop);
100606+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100607 return 0;
100608 }
100609 EXPORT_SYMBOL(atm_charge);
100610@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
100611 }
100612 }
100613 atm_return(vcc, guess);
100614- atomic_inc(&vcc->stats->rx_drop);
100615+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100616 return NULL;
100617 }
100618 EXPORT_SYMBOL(atm_alloc_charge);
100619@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
100620
100621 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100622 {
100623-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100624+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100625 __SONET_ITEMS
100626 #undef __HANDLE_ITEM
100627 }
100628@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
100629
100630 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100631 {
100632-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100633+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
100634 __SONET_ITEMS
100635 #undef __HANDLE_ITEM
100636 }
100637diff --git a/net/atm/lec.c b/net/atm/lec.c
100638index 4b98f89..5a2f6cb 100644
100639--- a/net/atm/lec.c
100640+++ b/net/atm/lec.c
100641@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
100642 }
100643
100644 static struct lane2_ops lane2_ops = {
100645- lane2_resolve, /* resolve, spec 3.1.3 */
100646- lane2_associate_req, /* associate_req, spec 3.1.4 */
100647- NULL /* associate indicator, spec 3.1.5 */
100648+ .resolve = lane2_resolve,
100649+ .associate_req = lane2_associate_req,
100650+ .associate_indicator = NULL
100651 };
100652
100653 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
100654diff --git a/net/atm/lec.h b/net/atm/lec.h
100655index 4149db1..f2ab682 100644
100656--- a/net/atm/lec.h
100657+++ b/net/atm/lec.h
100658@@ -48,7 +48,7 @@ struct lane2_ops {
100659 const u8 *tlvs, u32 sizeoftlvs);
100660 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
100661 const u8 *tlvs, u32 sizeoftlvs);
100662-};
100663+} __no_const;
100664
100665 /*
100666 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
100667diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
100668index d1b2d9a..d549f7f 100644
100669--- a/net/atm/mpoa_caches.c
100670+++ b/net/atm/mpoa_caches.c
100671@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
100672
100673
100674 static struct in_cache_ops ingress_ops = {
100675- in_cache_add_entry, /* add_entry */
100676- in_cache_get, /* get */
100677- in_cache_get_with_mask, /* get_with_mask */
100678- in_cache_get_by_vcc, /* get_by_vcc */
100679- in_cache_put, /* put */
100680- in_cache_remove_entry, /* remove_entry */
100681- cache_hit, /* cache_hit */
100682- clear_count_and_expired, /* clear_count */
100683- check_resolving_entries, /* check_resolving */
100684- refresh_entries, /* refresh */
100685- in_destroy_cache /* destroy_cache */
100686+ .add_entry = in_cache_add_entry,
100687+ .get = in_cache_get,
100688+ .get_with_mask = in_cache_get_with_mask,
100689+ .get_by_vcc = in_cache_get_by_vcc,
100690+ .put = in_cache_put,
100691+ .remove_entry = in_cache_remove_entry,
100692+ .cache_hit = cache_hit,
100693+ .clear_count = clear_count_and_expired,
100694+ .check_resolving = check_resolving_entries,
100695+ .refresh = refresh_entries,
100696+ .destroy_cache = in_destroy_cache
100697 };
100698
100699 static struct eg_cache_ops egress_ops = {
100700- eg_cache_add_entry, /* add_entry */
100701- eg_cache_get_by_cache_id, /* get_by_cache_id */
100702- eg_cache_get_by_tag, /* get_by_tag */
100703- eg_cache_get_by_vcc, /* get_by_vcc */
100704- eg_cache_get_by_src_ip, /* get_by_src_ip */
100705- eg_cache_put, /* put */
100706- eg_cache_remove_entry, /* remove_entry */
100707- update_eg_cache_entry, /* update */
100708- clear_expired, /* clear_expired */
100709- eg_destroy_cache /* destroy_cache */
100710+ .add_entry = eg_cache_add_entry,
100711+ .get_by_cache_id = eg_cache_get_by_cache_id,
100712+ .get_by_tag = eg_cache_get_by_tag,
100713+ .get_by_vcc = eg_cache_get_by_vcc,
100714+ .get_by_src_ip = eg_cache_get_by_src_ip,
100715+ .put = eg_cache_put,
100716+ .remove_entry = eg_cache_remove_entry,
100717+ .update = update_eg_cache_entry,
100718+ .clear_expired = clear_expired,
100719+ .destroy_cache = eg_destroy_cache
100720 };
100721
100722
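
The lec.c and mpoa_caches.c hunks convert positional ops-structure initializers to designated ones: each handler is bound to a named field, so the binding survives reordering of the structure, and any member left unmentioned is zero-initialized instead of silently receiving the next value in line. The general C idiom:

#include <stdio.h>

struct ops {
    int  (*get)(int);
    void (*put)(int);
};

static int my_get(int x) { return x + 1; }
static void my_put(int x) { printf("put %d\n", x); }

/* Designated form: order-independent; unnamed members become NULL. */
static struct ops example_ops = {
    .put = my_put,
    .get = my_get,
};

int main(void)
{
    printf("%d\n", example_ops.get(41));
    example_ops.put(7);
    return 0;
}
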
100723diff --git a/net/atm/proc.c b/net/atm/proc.c
100724index bbb6461..cf04016 100644
100725--- a/net/atm/proc.c
100726+++ b/net/atm/proc.c
100727@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
100728 const struct k_atm_aal_stats *stats)
100729 {
100730 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
100731- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
100732- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
100733- atomic_read(&stats->rx_drop));
100734+ atomic_read_unchecked(&stats->tx), atomic_read_unchecked(&stats->tx_err),
100735+ atomic_read_unchecked(&stats->rx), atomic_read_unchecked(&stats->rx_err),
100736+ atomic_read_unchecked(&stats->rx_drop));
100737 }
100738
100739 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
100740diff --git a/net/atm/resources.c b/net/atm/resources.c
100741index 0447d5d..3cf4728 100644
100742--- a/net/atm/resources.c
100743+++ b/net/atm/resources.c
100744@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
100745 static void copy_aal_stats(struct k_atm_aal_stats *from,
100746 struct atm_aal_stats *to)
100747 {
100748-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100749+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100750 __AAL_STAT_ITEMS
100751 #undef __HANDLE_ITEM
100752 }
100753@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
100754 static void subtract_aal_stats(struct k_atm_aal_stats *from,
100755 struct atm_aal_stats *to)
100756 {
100757-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100758+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
100759 __AAL_STAT_ITEMS
100760 #undef __HANDLE_ITEM
100761 }
100762diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
100763index 919a5ce..cc6b444 100644
100764--- a/net/ax25/sysctl_net_ax25.c
100765+++ b/net/ax25/sysctl_net_ax25.c
100766@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
100767 {
100768 char path[sizeof("net/ax25/") + IFNAMSIZ];
100769 int k;
100770- struct ctl_table *table;
100771+ ctl_table_no_const *table;
100772
100773 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
100774 if (!table)
100775diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
100776index 1e80539..676c37a 100644
100777--- a/net/batman-adv/bat_iv_ogm.c
100778+++ b/net/batman-adv/bat_iv_ogm.c
100779@@ -313,7 +313,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
100780
100781 /* randomize initial seqno to avoid collision */
100782 get_random_bytes(&random_seqno, sizeof(random_seqno));
100783- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100784+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100785
100786 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
100787 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
100788@@ -918,9 +918,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
100789 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
100790
100791 /* change sequence number to network order */
100792- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
100793+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
100794 batadv_ogm_packet->seqno = htonl(seqno);
100795- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
100796+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
100797
100798 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
100799
100800@@ -1597,7 +1597,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
100801 return;
100802
100803 /* could be changed by schedule_own_packet() */
100804- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
100805+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
100806
100807 if (ogm_packet->flags & BATADV_DIRECTLINK)
100808 has_directlink_flag = true;
100809diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
100810index 00f9e14..e1c7203 100644
100811--- a/net/batman-adv/fragmentation.c
100812+++ b/net/batman-adv/fragmentation.c
100813@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
100814 frag_header.packet_type = BATADV_UNICAST_FRAG;
100815 frag_header.version = BATADV_COMPAT_VERSION;
100816 frag_header.ttl = BATADV_TTL;
100817- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
100818+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
100819 frag_header.reserved = 0;
100820 frag_header.no = 0;
100821 frag_header.total_size = htons(skb->len);
100822diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
100823index 5467955..75ad4e3 100644
100824--- a/net/batman-adv/soft-interface.c
100825+++ b/net/batman-adv/soft-interface.c
100826@@ -296,7 +296,7 @@ send:
100827 primary_if->net_dev->dev_addr);
100828
100829 /* set broadcast sequence number */
100830- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
100831+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
100832 bcast_packet->seqno = htonl(seqno);
100833
100834 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
100835@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100836 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
100837
100838 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
100839- atomic_set(&bat_priv->bcast_seqno, 1);
100840+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
100841 atomic_set(&bat_priv->tt.vn, 0);
100842 atomic_set(&bat_priv->tt.local_changes, 0);
100843 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
100844@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100845
100846 /* randomize initial seqno to avoid collision */
100847 get_random_bytes(&random_seqno, sizeof(random_seqno));
100848- atomic_set(&bat_priv->frag_seqno, random_seqno);
100849+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
100850
100851 bat_priv->primary_if = NULL;
100852 bat_priv->num_ifaces = 0;
100853@@ -983,7 +983,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
100854 return 0;
100855 }
100856
100857-struct rtnl_link_ops batadv_link_ops __read_mostly = {
100858+struct rtnl_link_ops batadv_link_ops = {
100859 .kind = "batadv",
100860 .priv_size = sizeof(struct batadv_priv),
100861 .setup = batadv_softif_init_early,
100862diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
100863index 8854c05..ee5d5497 100644
100864--- a/net/batman-adv/types.h
100865+++ b/net/batman-adv/types.h
100866@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
100867 struct batadv_hard_iface_bat_iv {
100868 unsigned char *ogm_buff;
100869 int ogm_buff_len;
100870- atomic_t ogm_seqno;
100871+ atomic_unchecked_t ogm_seqno;
100872 };
100873
100874 /**
100875@@ -768,7 +768,7 @@ struct batadv_priv {
100876 atomic_t bonding;
100877 atomic_t fragmentation;
100878 atomic_t packet_size_max;
100879- atomic_t frag_seqno;
100880+ atomic_unchecked_t frag_seqno;
100881 #ifdef CONFIG_BATMAN_ADV_BLA
100882 atomic_t bridge_loop_avoidance;
100883 #endif
100884@@ -787,7 +787,7 @@ struct batadv_priv {
100885 #endif
100886 uint32_t isolation_mark;
100887 uint32_t isolation_mark_mask;
100888- atomic_t bcast_seqno;
100889+ atomic_unchecked_t bcast_seqno;
100890 atomic_t bcast_queue_left;
100891 atomic_t batman_queue_left;
100892 char num_ifaces;
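
The batman-adv hunks above all apply one pattern: sequence counters that are expected to wrap (the OGM, fragment and broadcast seqnos) move from atomic_t to atomic_unchecked_t, the type this patch introduces to opt a counter out of PAX_REFCOUNT's overflow detection, while genuine reference counts keep the checked type. A minimal sketch of the split, assuming the *_unchecked primitives this patch adds elsewhere (they do not exist upstream):

/* Sketch only: checked vs. unchecked atomics under PAX_REFCOUNT. */
#include <linux/atomic.h>

static atomic_t           obj_refcnt;   /* real refcount: overflow must be caught  */
static atomic_unchecked_t ogm_seqno;    /* wrapping seqno: overflow is benign      */

static void example(void)
{
	atomic_inc(&obj_refcnt);            /* overflow detected under PAX_REFCOUNT */
	atomic_inc_unchecked(&ogm_seqno);   /* plain increment, free to wrap        */
}
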
100893diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
100894index 2c245fd..dccf543 100644
100895--- a/net/bluetooth/hci_sock.c
100896+++ b/net/bluetooth/hci_sock.c
100897@@ -1067,7 +1067,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
100898 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
100899 }
100900
100901- len = min_t(unsigned int, len, sizeof(uf));
100902+ len = min((size_t)len, sizeof(uf));
100903 if (copy_from_user(&uf, optval, len)) {
100904 err = -EFAULT;
100905 break;
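
The hci_sock change above swaps min_t(unsigned int, len, sizeof(uf)) for a full-width min over size_t: the length is widened once, up front, instead of being narrowed to unsigned int before the comparison, which also keeps the value visible to the patch's size_overflow instrumentation. A standalone illustration of the clamp-then-copy idiom the bluetooth hunks converge on (names and values are illustrative, and memcpy stands in for copy_from_user):

#include <stddef.h>
#include <string.h>
#include <stdio.h>

static int set_option(const void *optval, int optlen)
{
	struct { unsigned int mask[2]; } uf = { { 0, 0 } };
	size_t len = (size_t)optlen;        /* widen once; a negative optlen
	                                       becomes huge and is clamped   */
	if (len > sizeof(uf))               /* never copy past the struct    */
		len = sizeof(uf);           /* i.e. min(len, sizeof(uf))     */

	memcpy(&uf, optval, len);           /* stands in for copy_from_user  */
	printf("copied %zu bytes, mask0=%u\n", len, uf.mask[0]);
	return 0;
}

int main(void)
{
	unsigned int big[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	return set_option(big, sizeof(big)); /* clamped to sizeof(uf) == 8   */
}
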
100906diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
100907index d04dc00..d25d576 100644
100908--- a/net/bluetooth/l2cap_core.c
100909+++ b/net/bluetooth/l2cap_core.c
100910@@ -3524,8 +3524,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
100911 break;
100912
100913 case L2CAP_CONF_RFC:
100914- if (olen == sizeof(rfc))
100915- memcpy(&rfc, (void *)val, olen);
100916+ if (olen != sizeof(rfc))
100917+ break;
100918+
100919+ memcpy(&rfc, (void *)val, olen);
100920
100921 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
100922 rfc.mode != chan->mode)
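
The l2cap_parse_conf_rsp hunk tightens an exact-size check: previously a mismatched olen merely skipped the memcpy and the code went on to read an uninitialized rfc, whereas the patched version breaks out of the case entirely, so a malformed option is never consulted. The validate-before-use shape, as standalone C (struct and values are illustrative):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct rfc_opt { uint8_t mode, txwin; uint16_t mtu; };

static int parse_opt(const void *val, size_t olen)
{
	struct rfc_opt rfc;

	if (olen != sizeof(rfc))     /* malformed option: stop here,     */
		return -1;           /* never read uninitialized fields  */

	memcpy(&rfc, val, olen);
	printf("mode=%u mtu=%u\n", rfc.mode, rfc.mtu);
	return 0;
}

int main(void)
{
	struct rfc_opt good = { 3, 63, 672 };
	parse_opt(&good, sizeof(good));          /* accepted             */
	if (parse_opt(&good, 2) < 0)             /* truncated: rejected  */
		puts("short option rejected");
	return 0;
}
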
100923diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
100924index f65caf4..c07110c 100644
100925--- a/net/bluetooth/l2cap_sock.c
100926+++ b/net/bluetooth/l2cap_sock.c
100927@@ -634,7 +634,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
100928 struct sock *sk = sock->sk;
100929 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
100930 struct l2cap_options opts;
100931- int len, err = 0;
100932+ int err = 0;
100933+ size_t len = optlen;
100934 u32 opt;
100935
100936 BT_DBG("sk %p", sk);
100937@@ -661,7 +662,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
100938 opts.max_tx = chan->max_tx;
100939 opts.txwin_size = chan->tx_win;
100940
100941- len = min_t(unsigned int, sizeof(opts), optlen);
100942+ len = min(sizeof(opts), len);
100943 if (copy_from_user((char *) &opts, optval, len)) {
100944 err = -EFAULT;
100945 break;
100946@@ -748,7 +749,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100947 struct bt_security sec;
100948 struct bt_power pwr;
100949 struct l2cap_conn *conn;
100950- int len, err = 0;
100951+ int err = 0;
100952+ size_t len = optlen;
100953 u32 opt;
100954
100955 BT_DBG("sk %p", sk);
100956@@ -772,7 +774,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100957
100958 sec.level = BT_SECURITY_LOW;
100959
100960- len = min_t(unsigned int, sizeof(sec), optlen);
100961+ len = min(sizeof(sec), len);
100962 if (copy_from_user((char *) &sec, optval, len)) {
100963 err = -EFAULT;
100964 break;
100965@@ -868,7 +870,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100966
100967 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
100968
100969- len = min_t(unsigned int, sizeof(pwr), optlen);
100970+ len = min(sizeof(pwr), len);
100971 if (copy_from_user((char *) &pwr, optval, len)) {
100972 err = -EFAULT;
100973 break;
100974diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
100975index 2348176..b9b6cf2 100644
100976--- a/net/bluetooth/rfcomm/sock.c
100977+++ b/net/bluetooth/rfcomm/sock.c
100978@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
100979 struct sock *sk = sock->sk;
100980 struct bt_security sec;
100981 int err = 0;
100982- size_t len;
100983+ size_t len = optlen;
100984 u32 opt;
100985
100986 BT_DBG("sk %p", sk);
100987@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
100988
100989 sec.level = BT_SECURITY_LOW;
100990
100991- len = min_t(unsigned int, sizeof(sec), optlen);
100992+ len = min(sizeof(sec), len);
100993 if (copy_from_user((char *) &sec, optval, len)) {
100994 err = -EFAULT;
100995 break;
100996diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
100997index 8e385a0..a5bdd8e 100644
100998--- a/net/bluetooth/rfcomm/tty.c
100999+++ b/net/bluetooth/rfcomm/tty.c
101000@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
101001 BT_DBG("tty %p id %d", tty, tty->index);
101002
101003 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
101004- dev->channel, dev->port.count);
101005+ dev->channel, atomic_read(&dev->port.count));
101006
101007 err = tty_port_open(&dev->port, tty, filp);
101008 if (err)
101009@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
101010 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
101011
101012 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
101013- dev->port.count);
101014+ atomic_read(&dev->port.count));
101015
101016 tty_port_close(&dev->port, tty, filp);
101017 }
101018diff --git a/net/bridge/br.c b/net/bridge/br.c
101019index 44425af..4ee730e 100644
101020--- a/net/bridge/br.c
101021+++ b/net/bridge/br.c
101022@@ -147,6 +147,8 @@ static int __init br_init(void)
101023 {
101024 int err;
101025
101026+ BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
101027+
101028 err = stp_proto_register(&br_stp_proto);
101029 if (err < 0) {
101030 pr_err("bridge: can't register sap for STP\n");
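
The br_init hunk adds a compile-time guard: bridge code stores a struct br_input_skb_cb inside the fixed 48-byte skb->cb scratch area, and the BUILD_BUG_ON turns any future growth of that struct into a build failure instead of silent memory corruption. The idiom, sketched against the kernel's own macros of this series:

/* Kernel-style sketch of the compile-time size guard added above. */
#include <linux/bug.h>        /* BUILD_BUG_ON()                    */
#include <linux/kernel.h>     /* FIELD_SIZEOF()                    */
#include <linux/skbuff.h>
#include <linux/init.h>

struct example_cb {
	unsigned long flags;
	void *state;
};

static int __init example_init(void)
{
	/* Fails the build if example_cb ever outgrows skb->cb. */
	BUILD_BUG_ON(sizeof(struct example_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	return 0;
}
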
101031diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
101032index 9f5eb55..45ab9c5 100644
101033--- a/net/bridge/br_netlink.c
101034+++ b/net/bridge/br_netlink.c
101035@@ -566,7 +566,7 @@ static struct rtnl_af_ops br_af_ops = {
101036 .get_link_af_size = br_get_link_af_size,
101037 };
101038
101039-struct rtnl_link_ops br_link_ops __read_mostly = {
101040+struct rtnl_link_ops br_link_ops = {
101041 .kind = "bridge",
101042 .priv_size = sizeof(struct net_bridge),
101043 .setup = br_dev_setup,
101044diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
101045index d9a8c05..8dadc6c6 100644
101046--- a/net/bridge/netfilter/ebtables.c
101047+++ b/net/bridge/netfilter/ebtables.c
101048@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
101049 tmp.valid_hooks = t->table->valid_hooks;
101050 }
101051 mutex_unlock(&ebt_mutex);
101052- if (copy_to_user(user, &tmp, *len) != 0) {
101053+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101054 BUGPRINT("c2u Didn't work\n");
101055 ret = -EFAULT;
101056 break;
101057@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101058 goto out;
101059 tmp.valid_hooks = t->valid_hooks;
101060
101061- if (copy_to_user(user, &tmp, *len) != 0) {
101062+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101063 ret = -EFAULT;
101064 break;
101065 }
101066@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101067 tmp.entries_size = t->table->entries_size;
101068 tmp.valid_hooks = t->table->valid_hooks;
101069
101070- if (copy_to_user(user, &tmp, *len) != 0) {
101071+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101072 ret = -EFAULT;
101073 break;
101074 }
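
All three ebtables hunks add the same guard: *len is a user-controlled reply length, and copying *len bytes out of a fixed-size kernel struct would disclose adjacent memory once *len exceeds sizeof(tmp). Bounding the copy first turns the potential infoleak into -EFAULT. The shape of the fix as standalone C (memcpy stands in for copy_to_user; names are illustrative):

#include <stddef.h>
#include <string.h>
#include <stdio.h>

struct reply { char name[32]; unsigned int nentries, entries_size; };

static int copy_reply(void *user, size_t ulen, const struct reply *tmp)
{
	/* Refuse oversized requests instead of leaking memory past *tmp. */
	if (ulen > sizeof(*tmp))
		return -1;                  /* the kernel returns -EFAULT  */
	memcpy(user, tmp, ulen);
	return 0;
}

int main(void)
{
	struct reply tmp = { "filter", 4, 128 };
	char buf[64];

	printf("exact: %d\n", copy_reply(buf, sizeof(tmp), &tmp)); /*  0 */
	printf("over:  %d\n", copy_reply(buf, sizeof(buf), &tmp)); /* -1 */
	return 0;
}
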
101075diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
101076index f5afda1..dcf770a 100644
101077--- a/net/caif/cfctrl.c
101078+++ b/net/caif/cfctrl.c
101079@@ -10,6 +10,7 @@
101080 #include <linux/spinlock.h>
101081 #include <linux/slab.h>
101082 #include <linux/pkt_sched.h>
101083+#include <linux/sched.h>
101084 #include <net/caif/caif_layer.h>
101085 #include <net/caif/cfpkt.h>
101086 #include <net/caif/cfctrl.h>
101087@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
101088 memset(&dev_info, 0, sizeof(dev_info));
101089 dev_info.id = 0xff;
101090 cfsrvl_init(&this->serv, 0, &dev_info, false);
101091- atomic_set(&this->req_seq_no, 1);
101092- atomic_set(&this->rsp_seq_no, 1);
101093+ atomic_set_unchecked(&this->req_seq_no, 1);
101094+ atomic_set_unchecked(&this->rsp_seq_no, 1);
101095 this->serv.layer.receive = cfctrl_recv;
101096 sprintf(this->serv.layer.name, "ctrl");
101097 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
101098@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
101099 struct cfctrl_request_info *req)
101100 {
101101 spin_lock_bh(&ctrl->info_list_lock);
101102- atomic_inc(&ctrl->req_seq_no);
101103- req->sequence_no = atomic_read(&ctrl->req_seq_no);
101104+ atomic_inc_unchecked(&ctrl->req_seq_no);
101105+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
101106 list_add_tail(&req->list, &ctrl->list);
101107 spin_unlock_bh(&ctrl->info_list_lock);
101108 }
101109@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
101110 if (p != first)
101111 pr_warn("Requests are not received in order\n");
101112
101113- atomic_set(&ctrl->rsp_seq_no,
101114+ atomic_set_unchecked(&ctrl->rsp_seq_no,
101115 p->sequence_no);
101116 list_del(&p->list);
101117 goto out;
101118diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
101119index 67a4a36..8d28068 100644
101120--- a/net/caif/chnl_net.c
101121+++ b/net/caif/chnl_net.c
101122@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
101123 };
101124
101125
101126-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
101127+static struct rtnl_link_ops ipcaif_link_ops = {
101128 .kind = "caif",
101129 .priv_size = sizeof(struct chnl_net),
101130 .setup = ipcaif_net_setup,
101131diff --git a/net/can/af_can.c b/net/can/af_can.c
101132index 66e0804..da61b8f 100644
101133--- a/net/can/af_can.c
101134+++ b/net/can/af_can.c
101135@@ -881,7 +881,7 @@ static const struct net_proto_family can_family_ops = {
101136 };
101137
101138 /* notifier block for netdevice event */
101139-static struct notifier_block can_netdev_notifier __read_mostly = {
101140+static struct notifier_block can_netdev_notifier = {
101141 .notifier_call = can_notifier,
101142 };
101143
101144diff --git a/net/can/bcm.c b/net/can/bcm.c
101145index ee9ffd9..dfdf3d4 100644
101146--- a/net/can/bcm.c
101147+++ b/net/can/bcm.c
101148@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
101149 }
101150
101151 /* create /proc/net/can-bcm directory */
101152- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
101153+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
101154 return 0;
101155 }
101156
101157diff --git a/net/can/gw.c b/net/can/gw.c
101158index 295f62e..0c3b09e 100644
101159--- a/net/can/gw.c
101160+++ b/net/can/gw.c
101161@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
101162 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
101163
101164 static HLIST_HEAD(cgw_list);
101165-static struct notifier_block notifier;
101166
101167 static struct kmem_cache *cgw_cache __read_mostly;
101168
101169@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
101170 return err;
101171 }
101172
101173+static struct notifier_block notifier = {
101174+ .notifier_call = cgw_notifier
101175+};
101176+
101177 static __init int cgw_module_init(void)
101178 {
101179 /* sanitize given module parameter */
101180@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
101181 return -ENOMEM;
101182
101183 /* set notifier */
101184- notifier.notifier_call = cgw_notifier;
101185 register_netdevice_notifier(&notifier);
101186
101187 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
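
The can/gw.c change is a constification enabler rather than a behavioral fix: initializing the notifier_block at compile time, instead of assigning .notifier_call at runtime in cgw_module_init, lets the structure be placed in read-only memory under the patch's constify plugin. The before/after shape:

/* Kernel-style sketch: designated initializer instead of runtime setup. */
#include <linux/notifier.h>

static int example_notifier(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	return NOTIFY_DONE;
}

/* After: fully initialized at build time, eligible for read-only data. */
static struct notifier_block example_nb = {
	.notifier_call = example_notifier,
};

/* Before (what the patch removes):
 *	static struct notifier_block example_nb;
 *	...
 *	example_nb.notifier_call = example_notifier;    <- runtime write
 */
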
101188diff --git a/net/can/proc.c b/net/can/proc.c
101189index 1a19b98..df2b4ec 100644
101190--- a/net/can/proc.c
101191+++ b/net/can/proc.c
101192@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
101193 void can_init_proc(void)
101194 {
101195 /* create /proc/net/can directory */
101196- can_dir = proc_mkdir("can", init_net.proc_net);
101197+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
101198
101199 if (!can_dir) {
101200 printk(KERN_INFO "can: failed to create /proc/net/can . "
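
Both CAN proc directories switch from proc_mkdir to proc_mkdir_restrict, a helper this patch adds so that users restricted by GRKERNSEC_PROC cannot browse the entries at all. Usage mirrors proc_mkdir; a sketch assuming the grsec-only helper:

/* Sketch: restricted procfs directory (proc_mkdir_restrict is grsec-only). */
#include <linux/proc_fs.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <net/net_namespace.h>

static struct proc_dir_entry *example_dir;

static int __init example_proc_init(void)
{
	/* Same signature as proc_mkdir(), but the directory honours the
	 * GRKERNSEC_PROC visibility policy for unprivileged users.      */
	example_dir = proc_mkdir_restrict("example", init_net.proc_net);
	return example_dir ? 0 : -ENOMEM;
}
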
101201diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
101202index 33a2f20..371bd09 100644
101203--- a/net/ceph/messenger.c
101204+++ b/net/ceph/messenger.c
101205@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
101206 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
101207
101208 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
101209-static atomic_t addr_str_seq = ATOMIC_INIT(0);
101210+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
101211
101212 static struct page *zero_page; /* used in certain error cases */
101213
101214@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
101215 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
101216 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
101217
101218- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101219+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101220 s = addr_str[i];
101221
101222 switch (ss->ss_family) {
101223diff --git a/net/compat.c b/net/compat.c
101224index 3236b41..7d8687f 100644
101225--- a/net/compat.c
101226+++ b/net/compat.c
101227@@ -93,20 +93,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
101228
101229 #define CMSG_COMPAT_FIRSTHDR(msg) \
101230 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
101231- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
101232+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
101233 (struct compat_cmsghdr __user *)NULL)
101234
101235 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
101236 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
101237 (ucmlen) <= (unsigned long) \
101238 ((mhdr)->msg_controllen - \
101239- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
101240+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
101241
101242 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
101243 struct compat_cmsghdr __user *cmsg, int cmsg_len)
101244 {
101245 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
101246- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
101247+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
101248 msg->msg_controllen)
101249 return NULL;
101250 return (struct compat_cmsghdr __user *)ptr;
101251@@ -196,7 +196,7 @@ Efault:
101252
101253 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
101254 {
101255- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101256+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101257 struct compat_cmsghdr cmhdr;
101258 struct compat_timeval ctv;
101259 struct compat_timespec cts[3];
101260@@ -252,7 +252,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
101261
101262 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
101263 {
101264- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101265+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101266 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
101267 int fdnum = scm->fp->count;
101268 struct file **fp = scm->fp->fp;
101269@@ -340,7 +340,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
101270 return -EFAULT;
101271 old_fs = get_fs();
101272 set_fs(KERNEL_DS);
101273- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
101274+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
101275 set_fs(old_fs);
101276
101277 return err;
101278@@ -401,7 +401,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
101279 len = sizeof(ktime);
101280 old_fs = get_fs();
101281 set_fs(KERNEL_DS);
101282- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
101283+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
101284 set_fs(old_fs);
101285
101286 if (!err) {
101287@@ -544,7 +544,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101288 case MCAST_JOIN_GROUP:
101289 case MCAST_LEAVE_GROUP:
101290 {
101291- struct compat_group_req __user *gr32 = (void *)optval;
101292+ struct compat_group_req __user *gr32 = (void __user *)optval;
101293 struct group_req __user *kgr =
101294 compat_alloc_user_space(sizeof(struct group_req));
101295 u32 interface;
101296@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101297 case MCAST_BLOCK_SOURCE:
101298 case MCAST_UNBLOCK_SOURCE:
101299 {
101300- struct compat_group_source_req __user *gsr32 = (void *)optval;
101301+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
101302 struct group_source_req __user *kgsr = compat_alloc_user_space(
101303 sizeof(struct group_source_req));
101304 u32 interface;
101305@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101306 }
101307 case MCAST_MSFILTER:
101308 {
101309- struct compat_group_filter __user *gf32 = (void *)optval;
101310+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101311 struct group_filter __user *kgf;
101312 u32 interface, fmode, numsrc;
101313
101314@@ -624,7 +624,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
101315 char __user *optval, int __user *optlen,
101316 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
101317 {
101318- struct compat_group_filter __user *gf32 = (void *)optval;
101319+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101320 struct group_filter __user *kgf;
101321 int __user *koptlen;
101322 u32 interface, fmode, numsrc;
101323@@ -777,7 +777,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
101324
101325 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
101326 return -EINVAL;
101327- if (copy_from_user(a, args, nas[call]))
101328+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
101329 return -EFAULT;
101330 a0 = a[0];
101331 a1 = a[1];
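
net/compat.c is peppered with __force_user/__force_kernel casts because, under this patch's UDEREF separation, user and kernel pointers live in genuinely distinct address spaces, so the normally sparse-only __user annotation becomes load-bearing. Fields like msg_control hold a user pointer behind a plain kernel type, and each deliberate crossing of the boundary is marked. A compressed sketch of the annotation style (the attributes compile to nothing under plain gcc):

/* Sketch of the address-space annotations the compat hunks rely on. */
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct msg_like {
	void *msg_control;   /* may point at user memory despite the type */
};

static int peek_cmsg(struct msg_like *msg, void *buf, unsigned long len)
{
	/* The __force cast marks a deliberate kernel-to-user
	 * reinterpretation; without it, sparse flags the mismatch. */
	void __user *ucm = (void __force __user *)msg->msg_control;

	return copy_from_user(buf, ucm, len) ? -EFAULT : 0;
}
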
101332diff --git a/net/core/datagram.c b/net/core/datagram.c
101333index df493d6..1145766 100644
101334--- a/net/core/datagram.c
101335+++ b/net/core/datagram.c
101336@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
101337 }
101338
101339 kfree_skb(skb);
101340- atomic_inc(&sk->sk_drops);
101341+ atomic_inc_unchecked(&sk->sk_drops);
101342 sk_mem_reclaim_partial(sk);
101343
101344 return err;
101345diff --git a/net/core/dev.c b/net/core/dev.c
101346index 7fe8292..133045e 100644
101347--- a/net/core/dev.c
101348+++ b/net/core/dev.c
101349@@ -1680,14 +1680,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
101350 {
101351 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
101352 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
101353- atomic_long_inc(&dev->rx_dropped);
101354+ atomic_long_inc_unchecked(&dev->rx_dropped);
101355 kfree_skb(skb);
101356 return NET_RX_DROP;
101357 }
101358 }
101359
101360 if (unlikely(!is_skb_forwardable(dev, skb))) {
101361- atomic_long_inc(&dev->rx_dropped);
101362+ atomic_long_inc_unchecked(&dev->rx_dropped);
101363 kfree_skb(skb);
101364 return NET_RX_DROP;
101365 }
101366@@ -2958,7 +2958,7 @@ recursion_alert:
101367 drop:
101368 rcu_read_unlock_bh();
101369
101370- atomic_long_inc(&dev->tx_dropped);
101371+ atomic_long_inc_unchecked(&dev->tx_dropped);
101372 kfree_skb_list(skb);
101373 return rc;
101374 out:
101375@@ -3301,7 +3301,7 @@ enqueue:
101376
101377 local_irq_restore(flags);
101378
101379- atomic_long_inc(&skb->dev->rx_dropped);
101380+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101381 kfree_skb(skb);
101382 return NET_RX_DROP;
101383 }
101384@@ -3378,7 +3378,7 @@ int netif_rx_ni(struct sk_buff *skb)
101385 }
101386 EXPORT_SYMBOL(netif_rx_ni);
101387
101388-static void net_tx_action(struct softirq_action *h)
101389+static __latent_entropy void net_tx_action(void)
101390 {
101391 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101392
101393@@ -3711,7 +3711,7 @@ ncls:
101394 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
101395 } else {
101396 drop:
101397- atomic_long_inc(&skb->dev->rx_dropped);
101398+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101399 kfree_skb(skb);
101400 /* Jamal, now you will not be able to escape explaining
101401 * me how you were going to use this. :-)
101402@@ -4599,7 +4599,7 @@ out_unlock:
101403 return work;
101404 }
101405
101406-static void net_rx_action(struct softirq_action *h)
101407+static __latent_entropy void net_rx_action(void)
101408 {
101409 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101410 unsigned long time_limit = jiffies + 2;
101411@@ -6610,8 +6610,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
101412 } else {
101413 netdev_stats_to_stats64(storage, &dev->stats);
101414 }
101415- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
101416- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
101417+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
101418+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
101419 return storage;
101420 }
101421 EXPORT_SYMBOL(dev_get_stats);
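
Two things happen to net_tx_action/net_rx_action above: the handlers are marked __latent_entropy, telling the patch's latent_entropy gcc plugin to instrument them so per-invocation state gets mixed into the entropy pool, and the unused softirq_action argument is dropped to match the narrower handler type this patch uses for softirqs. A sketch of the marking (the attribute is grsec-only on this kernel series):

/* Sketch: a handler marked for the latent_entropy gcc plugin. */
#include <linux/compiler.h>

static unsigned int work_done;

static __latent_entropy void example_softirq_action(void)
{
	/* The plugin injects code here that stirs local state into
	 * the kernel's entropy pool each time the handler runs.    */
	work_done++;
}
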
101422diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
101423index b94b1d2..da3ed7c 100644
101424--- a/net/core/dev_ioctl.c
101425+++ b/net/core/dev_ioctl.c
101426@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
101427 no_module = !dev;
101428 if (no_module && capable(CAP_NET_ADMIN))
101429 no_module = request_module("netdev-%s", name);
101430- if (no_module && capable(CAP_SYS_MODULE))
101431+ if (no_module && capable(CAP_SYS_MODULE)) {
101432+#ifdef CONFIG_GRKERNSEC_MODHARDEN
101433+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
101434+#else
101435 request_module("%s", name);
101436+#endif
101437+ }
101438 }
101439 EXPORT_SYMBOL(dev_load);
101440
101441diff --git a/net/core/filter.c b/net/core/filter.c
101442index ec9baea..dd6195d 100644
101443--- a/net/core/filter.c
101444+++ b/net/core/filter.c
101445@@ -533,7 +533,11 @@ do_pass:
101446
101447 /* Unknown instruction. */
101448 default:
101449- goto err;
101450+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
101451+ fp->code, fp->jt, fp->jf, fp->k);
101452+ kfree(addrs);
101453+ BUG();
101454+ return -EINVAL;
101455 }
101456
101457 insn++;
101458@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
101459 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
101460 int pc, ret = 0;
101461
101462- BUILD_BUG_ON(BPF_MEMWORDS > 16);
101463+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
101464
101465 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
101466 if (!masks)
101467@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
101468 if (!fp)
101469 return -ENOMEM;
101470
101471- memcpy(fp->insns, fprog->filter, fsize);
101472+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
101473
101474 fp->len = fprog->len;
101475 /* Since unattached filters are not copied back to user
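
The filter.c hunks harden the classic-BPF converter in two ways: an opcode the translator does not recognize now logs the instruction, frees the scratch array, and BUGs instead of quietly erroring out (reaching the default case means the earlier checker was bypassed, so the translator fails closed), and the BPF_MEMWORDS build assertion is tightened from "> 16" to "!= 16" because check_load_and_stores sizes its per-cell bitmask for exactly 16 scratch words. A sketch of the exact-size assertion (local names, not the kernel's):

/* Sketch: assert the exact scratch-memory size the validator assumes. */
#include <linux/bug.h>

#define EXAMPLE_MEMWORDS 16

static void example_check(void)
{
	unsigned short memvalid = 0;   /* one bit per scratch cell */

	/* A 16-bit mask covers exactly 16 cells: more OR fewer words
	 * would silently break the tracking, hence "!=".             */
	BUILD_BUG_ON(EXAMPLE_MEMWORDS != 8 * sizeof(memvalid));
	(void)memvalid;
}
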
101476diff --git a/net/core/flow.c b/net/core/flow.c
101477index a0348fd..340f65d 100644
101478--- a/net/core/flow.c
101479+++ b/net/core/flow.c
101480@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
101481 static int flow_entry_valid(struct flow_cache_entry *fle,
101482 struct netns_xfrm *xfrm)
101483 {
101484- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
101485+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
101486 return 0;
101487 if (fle->object && !fle->object->ops->check(fle->object))
101488 return 0;
101489@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
101490 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
101491 fcp->hash_count++;
101492 }
101493- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
101494+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
101495 flo = fle->object;
101496 if (!flo)
101497 goto ret_object;
101498@@ -263,7 +263,7 @@ nocache:
101499 }
101500 flo = resolver(net, key, family, dir, flo, ctx);
101501 if (fle) {
101502- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
101503+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
101504 if (!IS_ERR(flo))
101505 fle->object = flo;
101506 else
101507@@ -379,7 +379,7 @@ done:
101508 static void flow_cache_flush_task(struct work_struct *work)
101509 {
101510 struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
101511- flow_cache_gc_work);
101512+ flow_cache_flush_work);
101513 struct net *net = container_of(xfrm, struct net, xfrm);
101514
101515 flow_cache_flush(net);
101516diff --git a/net/core/neighbour.c b/net/core/neighbour.c
101517index 8d614c9..55752ea 100644
101518--- a/net/core/neighbour.c
101519+++ b/net/core/neighbour.c
101520@@ -2802,7 +2802,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
101521 void __user *buffer, size_t *lenp, loff_t *ppos)
101522 {
101523 int size, ret;
101524- struct ctl_table tmp = *ctl;
101525+ ctl_table_no_const tmp = *ctl;
101526
101527 tmp.extra1 = &zero;
101528 tmp.extra2 = &unres_qlen_max;
101529@@ -2864,7 +2864,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
101530 void __user *buffer,
101531 size_t *lenp, loff_t *ppos)
101532 {
101533- struct ctl_table tmp = *ctl;
101534+ ctl_table_no_const tmp = *ctl;
101535 int ret;
101536
101537 tmp.extra1 = &zero;
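
neighbour.c shows the ctl_table_no_const pattern used throughout the patch: the constify plugin makes struct ctl_table read-only everywhere, so code that legitimately builds a table on the stack (here, to swap in temporary extra1/extra2 bounds) declares its local copy with the patch-provided non-const typedef instead. Sketch assuming that grsec-only typedef:

/* Sketch of the ctl_table_no_const idiom. */
#include <linux/sysctl.h>

static int example_handler(struct ctl_table *ctl, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	static int zero, max = 65535;
	ctl_table_no_const tmp = *ctl;   /* writable stack copy of a      */
					 /* now read-only template        */
	tmp.extra1 = &zero;              /* per-call clamp; never touches */
	tmp.extra2 = &max;               /* the constified original       */

	return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
}
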
101538diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
101539index 2bf8329..2eb1423 100644
101540--- a/net/core/net-procfs.c
101541+++ b/net/core/net-procfs.c
101542@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
101543 struct rtnl_link_stats64 temp;
101544 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
101545
101546- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101547+ if (gr_proc_is_restricted())
101548+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101549+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101550+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
101551+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
101552+ else
101553+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101554 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101555 dev->name, stats->rx_bytes, stats->rx_packets,
101556 stats->rx_errors,
101557@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
101558 return 0;
101559 }
101560
101561-static const struct seq_operations dev_seq_ops = {
101562+const struct seq_operations dev_seq_ops = {
101563 .start = dev_seq_start,
101564 .next = dev_seq_next,
101565 .stop = dev_seq_stop,
101566@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
101567
101568 static int softnet_seq_open(struct inode *inode, struct file *file)
101569 {
101570- return seq_open(file, &softnet_seq_ops);
101571+ return seq_open_restrict(file, &softnet_seq_ops);
101572 }
101573
101574 static const struct file_operations softnet_seq_fops = {
101575@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
101576 else
101577 seq_printf(seq, "%04x", ntohs(pt->type));
101578
101579+#ifdef CONFIG_GRKERNSEC_HIDESYM
101580+ seq_printf(seq, " %-8s %pf\n",
101581+ pt->dev ? pt->dev->name : "", NULL);
101582+#else
101583 seq_printf(seq, " %-8s %pf\n",
101584 pt->dev ? pt->dev->name : "", pt->func);
101585+#endif
101586 }
101587
101588 return 0;
101589diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
101590index 9993412..2a4672b 100644
101591--- a/net/core/net-sysfs.c
101592+++ b/net/core/net-sysfs.c
101593@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
101594 {
101595 struct net_device *netdev = to_net_dev(dev);
101596 return sprintf(buf, fmt_dec,
101597- atomic_read(&netdev->carrier_changes));
101598+ atomic_read_unchecked(&netdev->carrier_changes));
101599 }
101600 static DEVICE_ATTR_RO(carrier_changes);
101601
101602diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
101603index ce780c7..6d296b3 100644
101604--- a/net/core/net_namespace.c
101605+++ b/net/core/net_namespace.c
101606@@ -448,7 +448,7 @@ static int __register_pernet_operations(struct list_head *list,
101607 int error;
101608 LIST_HEAD(net_exit_list);
101609
101610- list_add_tail(&ops->list, list);
101611+ pax_list_add_tail((struct list_head *)&ops->list, list);
101612 if (ops->init || (ops->id && ops->size)) {
101613 for_each_net(net) {
101614 error = ops_init(ops, net);
101615@@ -461,7 +461,7 @@ static int __register_pernet_operations(struct list_head *list,
101616
101617 out_undo:
101618 /* If I have an error cleanup all namespaces I initialized */
101619- list_del(&ops->list);
101620+ pax_list_del((struct list_head *)&ops->list);
101621 ops_exit_list(ops, &net_exit_list);
101622 ops_free_list(ops, &net_exit_list);
101623 return error;
101624@@ -472,7 +472,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
101625 struct net *net;
101626 LIST_HEAD(net_exit_list);
101627
101628- list_del(&ops->list);
101629+ pax_list_del((struct list_head *)&ops->list);
101630 for_each_net(net)
101631 list_add_tail(&net->exit_list, &net_exit_list);
101632 ops_exit_list(ops, &net_exit_list);
101633@@ -606,7 +606,7 @@ int register_pernet_device(struct pernet_operations *ops)
101634 mutex_lock(&net_mutex);
101635 error = register_pernet_operations(&pernet_list, ops);
101636 if (!error && (first_device == &pernet_list))
101637- first_device = &ops->list;
101638+ first_device = (struct list_head *)&ops->list;
101639 mutex_unlock(&net_mutex);
101640 return error;
101641 }
101642diff --git a/net/core/netpoll.c b/net/core/netpoll.c
101643index e0ad5d1..04fa7f7 100644
101644--- a/net/core/netpoll.c
101645+++ b/net/core/netpoll.c
101646@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101647 struct udphdr *udph;
101648 struct iphdr *iph;
101649 struct ethhdr *eth;
101650- static atomic_t ip_ident;
101651+ static atomic_unchecked_t ip_ident;
101652 struct ipv6hdr *ip6h;
101653
101654 udp_len = len + sizeof(*udph);
101655@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101656 put_unaligned(0x45, (unsigned char *)iph);
101657 iph->tos = 0;
101658 put_unaligned(htons(ip_len), &(iph->tot_len));
101659- iph->id = htons(atomic_inc_return(&ip_ident));
101660+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
101661 iph->frag_off = 0;
101662 iph->ttl = 64;
101663 iph->protocol = IPPROTO_UDP;
101664diff --git a/net/core/pktgen.c b/net/core/pktgen.c
101665index da934fc..d82fded 100644
101666--- a/net/core/pktgen.c
101667+++ b/net/core/pktgen.c
101668@@ -3752,7 +3752,7 @@ static int __net_init pg_net_init(struct net *net)
101669 pn->net = net;
101670 INIT_LIST_HEAD(&pn->pktgen_threads);
101671 pn->pktgen_exiting = false;
101672- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
101673+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
101674 if (!pn->proc_dir) {
101675 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
101676 return -ENODEV;
101677diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
101678index 446cbaf..255153c 100644
101679--- a/net/core/rtnetlink.c
101680+++ b/net/core/rtnetlink.c
101681@@ -60,7 +60,7 @@ struct rtnl_link {
101682 rtnl_doit_func doit;
101683 rtnl_dumpit_func dumpit;
101684 rtnl_calcit_func calcit;
101685-};
101686+} __no_const;
101687
101688 static DEFINE_MUTEX(rtnl_mutex);
101689
101690@@ -306,10 +306,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
101691 * to use the ops for creating device. So do not
101692 * fill up dellink as well. That disables rtnl_dellink.
101693 */
101694- if (ops->setup && !ops->dellink)
101695- ops->dellink = unregister_netdevice_queue;
101696+ if (ops->setup && !ops->dellink) {
101697+ pax_open_kernel();
101698+ *(void **)&ops->dellink = unregister_netdevice_queue;
101699+ pax_close_kernel();
101700+ }
101701
101702- list_add_tail(&ops->list, &link_ops);
101703+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
101704 return 0;
101705 }
101706 EXPORT_SYMBOL_GPL(__rtnl_link_register);
101707@@ -356,7 +359,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
101708 for_each_net(net) {
101709 __rtnl_kill_links(net, ops);
101710 }
101711- list_del(&ops->list);
101712+ pax_list_del((struct list_head *)&ops->list);
101713 }
101714 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
101715
101716@@ -1035,7 +1038,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
101717 (dev->ifalias &&
101718 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
101719 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
101720- atomic_read(&dev->carrier_changes)))
101721+ atomic_read_unchecked(&dev->carrier_changes)))
101722 goto nla_put_failure;
101723
101724 if (1) {
101725@@ -2102,6 +2105,10 @@ replay:
101726 if (IS_ERR(dest_net))
101727 return PTR_ERR(dest_net);
101728
101729+ err = -EPERM;
101730+ if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
101731+ goto out;
101732+
101733 dev = rtnl_create_link(dest_net, ifname, name_assign_type, ops, tb);
101734 if (IS_ERR(dev)) {
101735 err = PTR_ERR(dev);
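
Beyond the constify plumbing, the rtnetlink hunk adds a real permission check: an RTM_NEWLINK replay can create a device in a different network namespace, and the patch requires the sender to hold CAP_NET_ADMIN in that destination namespace before rtnl_create_link runs. The guard pattern, sketched:

/* Kernel-style sketch of the namespace-aware capability gate added above. */
#include <linux/capability.h>
#include <linux/netlink.h>
#include <linux/errno.h>
#include <net/net_namespace.h>

static int example_newlink(struct sk_buff *skb, struct net *dest_net)
{
	/* Check the capability against the *destination* namespace's
	 * user-ns, not merely the caller's current one.               */
	if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ... safe to proceed to rtnl_create_link() ... */
	return 0;
}
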
101736diff --git a/net/core/scm.c b/net/core/scm.c
101737index 3b6899b..cf36238 100644
101738--- a/net/core/scm.c
101739+++ b/net/core/scm.c
101740@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
101741 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101742 {
101743 struct cmsghdr __user *cm
101744- = (__force struct cmsghdr __user *)msg->msg_control;
101745+ = (struct cmsghdr __force_user *)msg->msg_control;
101746 struct cmsghdr cmhdr;
101747 int cmlen = CMSG_LEN(len);
101748 int err;
101749@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101750 err = -EFAULT;
101751 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
101752 goto out;
101753- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
101754+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
101755 goto out;
101756 cmlen = CMSG_SPACE(len);
101757 if (msg->msg_controllen < cmlen)
101758@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
101759 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101760 {
101761 struct cmsghdr __user *cm
101762- = (__force struct cmsghdr __user*)msg->msg_control;
101763+ = (struct cmsghdr __force_user *)msg->msg_control;
101764
101765 int fdmax = 0;
101766 int fdnum = scm->fp->count;
101767@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101768 if (fdnum < fdmax)
101769 fdmax = fdnum;
101770
101771- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
101772+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
101773 i++, cmfptr++)
101774 {
101775 struct socket *sock;
101776diff --git a/net/core/skbuff.c b/net/core/skbuff.c
101777index 395c15b..7f39726 100644
101778--- a/net/core/skbuff.c
101779+++ b/net/core/skbuff.c
101780@@ -2123,7 +2123,7 @@ EXPORT_SYMBOL(__skb_checksum);
101781 __wsum skb_checksum(const struct sk_buff *skb, int offset,
101782 int len, __wsum csum)
101783 {
101784- const struct skb_checksum_ops ops = {
101785+ static const struct skb_checksum_ops ops = {
101786 .update = csum_partial_ext,
101787 .combine = csum_block_add_ext,
101788 };
101789@@ -3363,12 +3363,14 @@ void __init skb_init(void)
101790 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
101791 sizeof(struct sk_buff),
101792 0,
101793- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101794+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101795+ SLAB_NO_SANITIZE,
101796 NULL);
101797 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
101798 sizeof(struct sk_buff_fclones),
101799 0,
101800- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101801+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101802+ SLAB_NO_SANITIZE,
101803 NULL);
101804 }
101805
101806diff --git a/net/core/sock.c b/net/core/sock.c
101807index 1c7a33d..a3817e2 100644
101808--- a/net/core/sock.c
101809+++ b/net/core/sock.c
101810@@ -441,7 +441,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101811 struct sk_buff_head *list = &sk->sk_receive_queue;
101812
101813 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
101814- atomic_inc(&sk->sk_drops);
101815+ atomic_inc_unchecked(&sk->sk_drops);
101816 trace_sock_rcvqueue_full(sk, skb);
101817 return -ENOMEM;
101818 }
101819@@ -451,7 +451,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101820 return err;
101821
101822 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
101823- atomic_inc(&sk->sk_drops);
101824+ atomic_inc_unchecked(&sk->sk_drops);
101825 return -ENOBUFS;
101826 }
101827
101828@@ -464,7 +464,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101829 skb_dst_force(skb);
101830
101831 spin_lock_irqsave(&list->lock, flags);
101832- skb->dropcount = atomic_read(&sk->sk_drops);
101833+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
101834 __skb_queue_tail(list, skb);
101835 spin_unlock_irqrestore(&list->lock, flags);
101836
101837@@ -484,7 +484,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101838 skb->dev = NULL;
101839
101840 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
101841- atomic_inc(&sk->sk_drops);
101842+ atomic_inc_unchecked(&sk->sk_drops);
101843 goto discard_and_relse;
101844 }
101845 if (nested)
101846@@ -502,7 +502,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101847 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
101848 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
101849 bh_unlock_sock(sk);
101850- atomic_inc(&sk->sk_drops);
101851+ atomic_inc_unchecked(&sk->sk_drops);
101852 goto discard_and_relse;
101853 }
101854
101855@@ -888,6 +888,7 @@ set_rcvbuf:
101856 }
101857 break;
101858
101859+#ifndef GRKERNSEC_BPF_HARDEN
101860 case SO_ATTACH_BPF:
101861 ret = -EINVAL;
101862 if (optlen == sizeof(u32)) {
101863@@ -900,7 +901,7 @@ set_rcvbuf:
101864 ret = sk_attach_bpf(ufd, sk);
101865 }
101866 break;
101867-
101868+#endif
101869 case SO_DETACH_FILTER:
101870 ret = sk_detach_filter(sk);
101871 break;
101872@@ -1004,12 +1005,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
101873 struct timeval tm;
101874 } v;
101875
101876- int lv = sizeof(int);
101877- int len;
101878+ unsigned int lv = sizeof(int);
101879+ unsigned int len;
101880
101881 if (get_user(len, optlen))
101882 return -EFAULT;
101883- if (len < 0)
101884+ if (len > INT_MAX)
101885 return -EINVAL;
101886
101887 memset(&v, 0, sizeof(v));
101888@@ -1147,11 +1148,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
101889
101890 case SO_PEERNAME:
101891 {
101892- char address[128];
101893+ char address[_K_SS_MAXSIZE];
101894
101895 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
101896 return -ENOTCONN;
101897- if (lv < len)
101898+ if (lv < len || sizeof address < len)
101899 return -EINVAL;
101900 if (copy_to_user(optval, address, len))
101901 return -EFAULT;
101902@@ -1236,7 +1237,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
101903
101904 if (len > lv)
101905 len = lv;
101906- if (copy_to_user(optval, &v, len))
101907+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
101908 return -EFAULT;
101909 lenout:
101910 if (put_user(len, optlen))
101911@@ -2349,7 +2350,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
101912 */
101913 smp_wmb();
101914 atomic_set(&sk->sk_refcnt, 1);
101915- atomic_set(&sk->sk_drops, 0);
101916+ atomic_set_unchecked(&sk->sk_drops, 0);
101917 }
101918 EXPORT_SYMBOL(sock_init_data);
101919
101920@@ -2477,6 +2478,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
101921 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
101922 int level, int type)
101923 {
101924+ struct sock_extended_err ee;
101925 struct sock_exterr_skb *serr;
101926 struct sk_buff *skb;
101927 int copied, err;
101928@@ -2498,7 +2500,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
101929 sock_recv_timestamp(msg, sk, skb);
101930
101931 serr = SKB_EXT_ERR(skb);
101932- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
101933+ ee = serr->ee;
101934+ put_cmsg(msg, level, type, sizeof ee, &ee);
101935
101936 msg->msg_flags |= MSG_ERRQUEUE;
101937 err = copied;
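
The sock.c hunks close several copy-out holes at once: the getsockopt length becomes unsigned so sign games cannot slip past the bound, SO_PEERNAME gets a buffer sized to the real sockaddr_storage maximum plus a second check that the request also fits that buffer, the final copy_to_user refuses len > sizeof(v), and sock_recv_errqueue copies serr->ee to a stack local so the cmsg payload is exactly sizeof(ee) bytes. The recurring shape is "clamp against the kernel object, not the caller's claim", as in this standalone sketch of the SO_PEERNAME double bound (values illustrative, memcpy standing in for copy_to_user):

#include <stddef.h>
#include <string.h>
#include <stdio.h>

#define K_SS_MAXSIZE 128                /* sockaddr_storage upper bound */

static int get_peer_name(void *ubuf, size_t len)
{
	char address[K_SS_MAXSIZE];
	size_t lv = 16;                 /* actual name length (illustrative) */

	memset(address, 0, sizeof(address));

	/* Reject requests longer than the name OR the kernel buffer. */
	if (lv < len || sizeof(address) < len)
		return -1;              /* -EINVAL in the kernel */
	memcpy(ubuf, address, len);     /* stands in for copy_to_user */
	return 0;
}

int main(void)
{
	char buf[16];
	printf("ok:  %d\n", get_peer_name(buf, sizeof(buf))); /*  0 */
	printf("big: %d\n", get_peer_name(buf, 256));         /* -1 */
	return 0;
}
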
101938diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
101939index ad704c7..ca48aff 100644
101940--- a/net/core/sock_diag.c
101941+++ b/net/core/sock_diag.c
101942@@ -9,26 +9,33 @@
101943 #include <linux/inet_diag.h>
101944 #include <linux/sock_diag.h>
101945
101946-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
101947+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
101948 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
101949 static DEFINE_MUTEX(sock_diag_table_mutex);
101950
101951 int sock_diag_check_cookie(void *sk, __u32 *cookie)
101952 {
101953+#ifndef CONFIG_GRKERNSEC_HIDESYM
101954 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
101955 cookie[1] != INET_DIAG_NOCOOKIE) &&
101956 ((u32)(unsigned long)sk != cookie[0] ||
101957 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
101958 return -ESTALE;
101959 else
101960+#endif
101961 return 0;
101962 }
101963 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
101964
101965 void sock_diag_save_cookie(void *sk, __u32 *cookie)
101966 {
101967+#ifdef CONFIG_GRKERNSEC_HIDESYM
101968+ cookie[0] = 0;
101969+ cookie[1] = 0;
101970+#else
101971 cookie[0] = (u32)(unsigned long)sk;
101972 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
101973+#endif
101974 }
101975 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
101976
101977@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
101978 mutex_lock(&sock_diag_table_mutex);
101979 if (sock_diag_handlers[hndl->family])
101980 err = -EBUSY;
101981- else
101982+ else {
101983+ pax_open_kernel();
101984 sock_diag_handlers[hndl->family] = hndl;
101985+ pax_close_kernel();
101986+ }
101987 mutex_unlock(&sock_diag_table_mutex);
101988
101989 return err;
101990@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
101991
101992 mutex_lock(&sock_diag_table_mutex);
101993 BUG_ON(sock_diag_handlers[family] != hnld);
101994+ pax_open_kernel();
101995 sock_diag_handlers[family] = NULL;
101996+ pax_close_kernel();
101997 mutex_unlock(&sock_diag_table_mutex);
101998 }
101999 EXPORT_SYMBOL_GPL(sock_diag_unregister);
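
sock_diag_handlers becomes __read_only above, so the two registration paths that must still write it are bracketed with pax_open_kernel()/pax_close_kernel(), the patch's primitive for briefly lifting write protection (on x86, typically by toggling CR0.WP) around a sanctioned write to otherwise read-only data. Sketch assuming those grsec-only primitives:

/* Sketch of the pax_open_kernel() bracket; __read_only,
 * pax_open_kernel and pax_close_kernel are all grsec-only. */
#include <linux/socket.h>
#include <linux/sock_diag.h>

static const struct sock_diag_handler *handlers[AF_MAX] __read_only;

static void register_handler(int family, const struct sock_diag_handler *h)
{
	pax_open_kernel();      /* temporarily allow writes to rodata */
	handlers[family] = h;   /* the one sanctioned write           */
	pax_close_kernel();     /* write protection back on           */
}
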
102000diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
102001index 31baba2..c71485b 100644
102002--- a/net/core/sysctl_net_core.c
102003+++ b/net/core/sysctl_net_core.c
102004@@ -34,7 +34,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
102005 {
102006 unsigned int orig_size, size;
102007 int ret, i;
102008- struct ctl_table tmp = {
102009+ ctl_table_no_const tmp = {
102010 .data = &size,
102011 .maxlen = sizeof(size),
102012 .mode = table->mode
102013@@ -202,7 +202,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102014 void __user *buffer, size_t *lenp, loff_t *ppos)
102015 {
102016 char id[IFNAMSIZ];
102017- struct ctl_table tbl = {
102018+ ctl_table_no_const tbl = {
102019 .data = id,
102020 .maxlen = IFNAMSIZ,
102021 };
102022@@ -220,7 +220,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102023 static int proc_do_rss_key(struct ctl_table *table, int write,
102024 void __user *buffer, size_t *lenp, loff_t *ppos)
102025 {
102026- struct ctl_table fake_table;
102027+ ctl_table_no_const fake_table;
102028 char buf[NETDEV_RSS_KEY_LEN * 3];
102029
102030 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
102031@@ -284,7 +284,7 @@ static struct ctl_table net_core_table[] = {
102032 .mode = 0444,
102033 .proc_handler = proc_do_rss_key,
102034 },
102035-#ifdef CONFIG_BPF_JIT
102036+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
102037 {
102038 .procname = "bpf_jit_enable",
102039 .data = &bpf_jit_enable,
102040@@ -400,13 +400,12 @@ static struct ctl_table netns_core_table[] = {
102041
102042 static __net_init int sysctl_core_net_init(struct net *net)
102043 {
102044- struct ctl_table *tbl;
102045+ ctl_table_no_const *tbl = NULL;
102046
102047 net->core.sysctl_somaxconn = SOMAXCONN;
102048
102049- tbl = netns_core_table;
102050 if (!net_eq(net, &init_net)) {
102051- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
102052+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
102053 if (tbl == NULL)
102054 goto err_dup;
102055
102056@@ -416,17 +415,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
102057 if (net->user_ns != &init_user_ns) {
102058 tbl[0].procname = NULL;
102059 }
102060- }
102061-
102062- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102063+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102064+ } else
102065+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
102066 if (net->core.sysctl_hdr == NULL)
102067 goto err_reg;
102068
102069 return 0;
102070
102071 err_reg:
102072- if (tbl != netns_core_table)
102073- kfree(tbl);
102074+ kfree(tbl);
102075 err_dup:
102076 return -ENOMEM;
102077 }
102078@@ -441,7 +439,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
102079 kfree(tbl);
102080 }
102081
102082-static __net_initdata struct pernet_operations sysctl_core_ops = {
102083+static __net_initconst struct pernet_operations sysctl_core_ops = {
102084 .init = sysctl_core_net_init,
102085 .exit = sysctl_core_net_exit,
102086 };
102087diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
102088index 8102286..a0c2755 100644
102089--- a/net/decnet/af_decnet.c
102090+++ b/net/decnet/af_decnet.c
102091@@ -466,6 +466,7 @@ static struct proto dn_proto = {
102092 .sysctl_rmem = sysctl_decnet_rmem,
102093 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
102094 .obj_size = sizeof(struct dn_sock),
102095+ .slab_flags = SLAB_USERCOPY,
102096 };
102097
102098 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
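
The one-line af_decnet change is the flip side of the patch's PAX_USERCOPY hardening: copies between userspace and slab objects are only permitted from caches whitelisted with the grsec-only SLAB_USERCOPY flag, and since DECnet socket objects are legitimately copied to and from userspace, their proto slab must carry it. Sketch of the whitelisting:

/* Sketch: whitelisting a proto's socket slab for user<->kernel copies.
 * SLAB_USERCOPY is grsec-only; .slab_flags itself is upstream.        */
#include <net/sock.h>

static struct proto example_proto = {
	.name       = "EXAMPLE",
	.obj_size   = sizeof(struct sock),
	/* Without this, PAX_USERCOPY rejects copy_{to,from}_user()
	 * calls that target objects from this proto's slab cache.  */
	.slab_flags = SLAB_USERCOPY,
};
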
102099diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
102100index 4400da7..3429972 100644
102101--- a/net/decnet/dn_dev.c
102102+++ b/net/decnet/dn_dev.c
102103@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
102104 .extra1 = &min_t3,
102105 .extra2 = &max_t3
102106 },
102107- {0}
102108+ { }
102109 },
102110 };
102111
102112diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
102113index 5325b54..a0d4d69 100644
102114--- a/net/decnet/sysctl_net_decnet.c
102115+++ b/net/decnet/sysctl_net_decnet.c
102116@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
102117
102118 if (len > *lenp) len = *lenp;
102119
102120- if (copy_to_user(buffer, addr, len))
102121+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
102122 return -EFAULT;
102123
102124 *lenp = len;
102125@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
102126
102127 if (len > *lenp) len = *lenp;
102128
102129- if (copy_to_user(buffer, devname, len))
102130+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
102131 return -EFAULT;
102132
102133 *lenp = len;
102134diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
102135index a2c7e4c..3dc9f67 100644
102136--- a/net/hsr/hsr_netlink.c
102137+++ b/net/hsr/hsr_netlink.c
102138@@ -102,7 +102,7 @@ nla_put_failure:
102139 return -EMSGSIZE;
102140 }
102141
102142-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
102143+static struct rtnl_link_ops hsr_link_ops = {
102144 .kind = "hsr",
102145 .maxtype = IFLA_HSR_MAX,
102146 .policy = hsr_policy,
102147diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
102148index 27eaa65..7083217 100644
102149--- a/net/ieee802154/6lowpan_rtnl.c
102150+++ b/net/ieee802154/6lowpan_rtnl.c
102151@@ -642,7 +642,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
102152 dev_put(real_dev);
102153 }
102154
102155-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
102156+static struct rtnl_link_ops lowpan_link_ops = {
102157 .kind = "lowpan",
102158 .priv_size = sizeof(struct lowpan_dev_info),
102159 .setup = lowpan_setup,
102160diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
102161index 9d980ed..7d01e12 100644
102162--- a/net/ieee802154/reassembly.c
102163+++ b/net/ieee802154/reassembly.c
102164@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
102165
102166 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102167 {
102168- struct ctl_table *table;
102169+ ctl_table_no_const *table = NULL;
102170 struct ctl_table_header *hdr;
102171 struct netns_ieee802154_lowpan *ieee802154_lowpan =
102172 net_ieee802154_lowpan(net);
102173
102174- table = lowpan_frags_ns_ctl_table;
102175 if (!net_eq(net, &init_net)) {
102176- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
102177+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
102178 GFP_KERNEL);
102179 if (table == NULL)
102180 goto err_alloc;
102181@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102182 /* Don't export sysctls to unprivileged users */
102183 if (net->user_ns != &init_user_ns)
102184 table[0].procname = NULL;
102185- }
102186-
102187- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102188+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102189+ } else
102190+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
102191 if (hdr == NULL)
102192 goto err_reg;
102193
102194@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102195 return 0;
102196
102197 err_reg:
102198- if (!net_eq(net, &init_net))
102199- kfree(table);
102200+ kfree(table);
102201 err_alloc:
102202 return -ENOMEM;
102203 }
102204diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
102205index a44773c..a6ae415 100644
102206--- a/net/ipv4/af_inet.c
102207+++ b/net/ipv4/af_inet.c
102208@@ -1392,7 +1392,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
102209 return ip_recv_error(sk, msg, len, addr_len);
102210 #if IS_ENABLED(CONFIG_IPV6)
102211 if (sk->sk_family == AF_INET6)
102212- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
102213+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
102214 #endif
102215 return -EINVAL;
102216 }
102217diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
102218index 214882e..ec032f6 100644
102219--- a/net/ipv4/devinet.c
102220+++ b/net/ipv4/devinet.c
102221@@ -69,7 +69,8 @@
102222
102223 static struct ipv4_devconf ipv4_devconf = {
102224 .data = {
102225- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102226+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102227+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102228 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102229 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102230 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102231@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
102232
102233 static struct ipv4_devconf ipv4_devconf_dflt = {
102234 .data = {
102235- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102236+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102237+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102238 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102239 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102240 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102241@@ -1548,7 +1550,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
102242 idx = 0;
102243 head = &net->dev_index_head[h];
102244 rcu_read_lock();
102245- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102246+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102247 net->dev_base_seq;
102248 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102249 if (idx < s_idx)
102250@@ -1866,7 +1868,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
102251 idx = 0;
102252 head = &net->dev_index_head[h];
102253 rcu_read_lock();
102254- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102255+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102256 net->dev_base_seq;
102257 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102258 if (idx < s_idx)
102259@@ -2101,7 +2103,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
102260 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
102261 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
102262
102263-static struct devinet_sysctl_table {
102264+static const struct devinet_sysctl_table {
102265 struct ctl_table_header *sysctl_header;
102266 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
102267 } devinet_sysctl = {
102268@@ -2233,7 +2235,7 @@ static __net_init int devinet_init_net(struct net *net)
102269 int err;
102270 struct ipv4_devconf *all, *dflt;
102271 #ifdef CONFIG_SYSCTL
102272- struct ctl_table *tbl = ctl_forward_entry;
102273+ ctl_table_no_const *tbl = NULL;
102274 struct ctl_table_header *forw_hdr;
102275 #endif
102276
102277@@ -2251,7 +2253,7 @@ static __net_init int devinet_init_net(struct net *net)
102278 goto err_alloc_dflt;
102279
102280 #ifdef CONFIG_SYSCTL
102281- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
102282+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
102283 if (tbl == NULL)
102284 goto err_alloc_ctl;
102285
102286@@ -2271,7 +2273,10 @@ static __net_init int devinet_init_net(struct net *net)
102287 goto err_reg_dflt;
102288
102289 err = -ENOMEM;
102290- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102291+ if (!net_eq(net, &init_net))
102292+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102293+ else
102294+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
102295 if (forw_hdr == NULL)
102296 goto err_reg_ctl;
102297 net->ipv4.forw_hdr = forw_hdr;
102298@@ -2287,8 +2292,7 @@ err_reg_ctl:
102299 err_reg_dflt:
102300 __devinet_sysctl_unregister(all);
102301 err_reg_all:
102302- if (tbl != ctl_forward_entry)
102303- kfree(tbl);
102304+ kfree(tbl);
102305 err_alloc_ctl:
102306 #endif
102307 if (dflt != &ipv4_devconf_dflt)
102308diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
102309index 23104a3..9f5570b 100644
102310--- a/net/ipv4/fib_frontend.c
102311+++ b/net/ipv4/fib_frontend.c
102312@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
102313 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102314 fib_sync_up(dev);
102315 #endif
102316- atomic_inc(&net->ipv4.dev_addr_genid);
102317+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102318 rt_cache_flush(dev_net(dev));
102319 break;
102320 case NETDEV_DOWN:
102321 fib_del_ifaddr(ifa, NULL);
102322- atomic_inc(&net->ipv4.dev_addr_genid);
102323+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102324 if (ifa->ifa_dev->ifa_list == NULL) {
102325 /* Last address was deleted from this interface.
102326 * Disable IP.
102327@@ -1060,7 +1060,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
102328 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102329 fib_sync_up(dev);
102330 #endif
102331- atomic_inc(&net->ipv4.dev_addr_genid);
102332+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102333 rt_cache_flush(net);
102334 break;
102335 case NETDEV_DOWN:
102336diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
102337index f99f41b..1879da9 100644
102338--- a/net/ipv4/fib_semantics.c
102339+++ b/net/ipv4/fib_semantics.c
102340@@ -770,7 +770,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
102341 nh->nh_saddr = inet_select_addr(nh->nh_dev,
102342 nh->nh_gw,
102343 nh->nh_parent->fib_scope);
102344- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
102345+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
102346
102347 return nh->nh_saddr;
102348 }
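
Note on the two fib hunks above: dev_addr_genid is a generation counter, not a reference count, so it is switched to the *_unchecked atomic operations. Under PAX_REFCOUNT, grsecurity instruments plain atomic_t to detect overflow and treats a wrap as a refcount bug; counters whose wraparound is expected and harmless, such as generation numbers and the sk_drops statistics changed elsewhere in this patch, opt out via atomic_unchecked_t. A userspace sketch of the distinction, using a C11 atomic as a stand-in:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Wraparound of a generation number only forces an extra cache refresh,
     * so no overflow detection is wanted; this mirrors atomic_unchecked_t. */
    static atomic_uint dev_addr_genid;

    int main(void)
    {
        atomic_fetch_add(&dev_addr_genid, 1);    /* atomic_inc_unchecked() */
        printf("genid=%u\n", atomic_load(&dev_addr_genid));
        return 0;
    }
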
102349diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
102350index 9111a4e..3576905 100644
102351--- a/net/ipv4/inet_hashtables.c
102352+++ b/net/ipv4/inet_hashtables.c
102353@@ -18,6 +18,7 @@
102354 #include <linux/sched.h>
102355 #include <linux/slab.h>
102356 #include <linux/wait.h>
102357+#include <linux/security.h>
102358
102359 #include <net/inet_connection_sock.h>
102360 #include <net/inet_hashtables.h>
102361@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
102362 return inet_ehashfn(net, laddr, lport, faddr, fport);
102363 }
102364
102365+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
102366+
102367 /*
102368 * Allocate and initialize a new local port bind bucket.
102369 * The bindhash mutex for snum's hash chain must be held here.
102370@@ -554,6 +557,8 @@ ok:
102371 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
102372 spin_unlock(&head->lock);
102373
102374+ gr_update_task_in_ip_table(inet_sk(sk));
102375+
102376 if (tw) {
102377 inet_twsk_deschedule(tw, death_row);
102378 while (twrefcnt) {
102379diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
102380index 241afd7..31b95d5 100644
102381--- a/net/ipv4/inetpeer.c
102382+++ b/net/ipv4/inetpeer.c
102383@@ -461,7 +461,7 @@ relookup:
102384 if (p) {
102385 p->daddr = *daddr;
102386 atomic_set(&p->refcnt, 1);
102387- atomic_set(&p->rid, 0);
102388+ atomic_set_unchecked(&p->rid, 0);
102389 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
102390 p->rate_tokens = 0;
102391 /* 60*HZ is arbitrary, but chosen enough high so that the first
102392diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
102393index e5b6d0d..187c8b0 100644
102394--- a/net/ipv4/ip_fragment.c
102395+++ b/net/ipv4/ip_fragment.c
102396@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
102397 return 0;
102398
102399 start = qp->rid;
102400- end = atomic_inc_return(&peer->rid);
102401+ end = atomic_inc_return_unchecked(&peer->rid);
102402 qp->rid = end;
102403
102404 rc = qp->q.fragments && (end - start) > max;
102405@@ -745,12 +745,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
102406
102407 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102408 {
102409- struct ctl_table *table;
102410+ ctl_table_no_const *table = NULL;
102411 struct ctl_table_header *hdr;
102412
102413- table = ip4_frags_ns_ctl_table;
102414 if (!net_eq(net, &init_net)) {
102415- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102416+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102417 if (table == NULL)
102418 goto err_alloc;
102419
102420@@ -764,9 +763,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102421 /* Don't export sysctls to unprivileged users */
102422 if (net->user_ns != &init_user_ns)
102423 table[0].procname = NULL;
102424- }
102425+ hdr = register_net_sysctl(net, "net/ipv4", table);
102426+ } else
102427+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
102428
102429- hdr = register_net_sysctl(net, "net/ipv4", table);
102430 if (hdr == NULL)
102431 goto err_reg;
102432
102433@@ -774,8 +774,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102434 return 0;
102435
102436 err_reg:
102437- if (!net_eq(net, &init_net))
102438- kfree(table);
102439+ kfree(table);
102440 err_alloc:
102441 return -ENOMEM;
102442 }
102443diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
102444index 4f4bf5b..2c936fe 100644
102445--- a/net/ipv4/ip_gre.c
102446+++ b/net/ipv4/ip_gre.c
102447@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
102448 module_param(log_ecn_error, bool, 0644);
102449 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102450
102451-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
102452+static struct rtnl_link_ops ipgre_link_ops;
102453 static int ipgre_tunnel_init(struct net_device *dev);
102454
102455 static int ipgre_net_id __read_mostly;
102456@@ -816,7 +816,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
102457 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
102458 };
102459
102460-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102461+static struct rtnl_link_ops ipgre_link_ops = {
102462 .kind = "gre",
102463 .maxtype = IFLA_GRE_MAX,
102464 .policy = ipgre_policy,
102465@@ -830,7 +830,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102466 .fill_info = ipgre_fill_info,
102467 };
102468
102469-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
102470+static struct rtnl_link_ops ipgre_tap_ops = {
102471 .kind = "gretap",
102472 .maxtype = IFLA_GRE_MAX,
102473 .policy = ipgre_policy,
102474diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
102475index 3d4da2c..40f9c29 100644
102476--- a/net/ipv4/ip_input.c
102477+++ b/net/ipv4/ip_input.c
102478@@ -147,6 +147,10 @@
102479 #include <linux/mroute.h>
102480 #include <linux/netlink.h>
102481
102482+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102483+extern int grsec_enable_blackhole;
102484+#endif
102485+
102486 /*
102487 * Process Router Attention IP option (RFC 2113)
102488 */
102489@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
102490 if (!raw) {
102491 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
102492 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
102493+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102494+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
102495+#endif
102496 icmp_send(skb, ICMP_DEST_UNREACH,
102497 ICMP_PROT_UNREACH, 0);
102498 }
102499diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
102500index 6b85adb..cd7e5d3 100644
102501--- a/net/ipv4/ip_sockglue.c
102502+++ b/net/ipv4/ip_sockglue.c
102503@@ -1193,7 +1193,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102504 len = min_t(unsigned int, len, opt->optlen);
102505 if (put_user(len, optlen))
102506 return -EFAULT;
102507- if (copy_to_user(optval, opt->__data, len))
102508+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
102509+ copy_to_user(optval, opt->__data, len))
102510 return -EFAULT;
102511 return 0;
102512 }
102513@@ -1324,7 +1325,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102514 if (sk->sk_type != SOCK_STREAM)
102515 return -ENOPROTOOPT;
102516
102517- msg.msg_control = (__force void *) optval;
102518+ msg.msg_control = (__force_kernel void *) optval;
102519 msg.msg_controllen = len;
102520 msg.msg_flags = flags;
102521
102522diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
102523index 1a7e979..fd05aa4 100644
102524--- a/net/ipv4/ip_vti.c
102525+++ b/net/ipv4/ip_vti.c
102526@@ -45,7 +45,7 @@
102527 #include <net/net_namespace.h>
102528 #include <net/netns/generic.h>
102529
102530-static struct rtnl_link_ops vti_link_ops __read_mostly;
102531+static struct rtnl_link_ops vti_link_ops;
102532
102533 static int vti_net_id __read_mostly;
102534 static int vti_tunnel_init(struct net_device *dev);
102535@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
102536 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
102537 };
102538
102539-static struct rtnl_link_ops vti_link_ops __read_mostly = {
102540+static struct rtnl_link_ops vti_link_ops = {
102541 .kind = "vti",
102542 .maxtype = IFLA_VTI_MAX,
102543 .policy = vti_policy,
102544diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
102545index 7fa18bc..bea16af 100644
102546--- a/net/ipv4/ipconfig.c
102547+++ b/net/ipv4/ipconfig.c
102548@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
102549
102550 mm_segment_t oldfs = get_fs();
102551 set_fs(get_ds());
102552- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102553+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102554 set_fs(oldfs);
102555 return res;
102556 }
102557@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
102558
102559 mm_segment_t oldfs = get_fs();
102560 set_fs(get_ds());
102561- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102562+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102563 set_fs(oldfs);
102564 return res;
102565 }
102566@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
102567
102568 mm_segment_t oldfs = get_fs();
102569 set_fs(get_ds());
102570- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
102571+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
102572 set_fs(oldfs);
102573 return res;
102574 }
102575diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
102576index 40403114..c35c647 100644
102577--- a/net/ipv4/ipip.c
102578+++ b/net/ipv4/ipip.c
102579@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102580 static int ipip_net_id __read_mostly;
102581
102582 static int ipip_tunnel_init(struct net_device *dev);
102583-static struct rtnl_link_ops ipip_link_ops __read_mostly;
102584+static struct rtnl_link_ops ipip_link_ops;
102585
102586 static int ipip_err(struct sk_buff *skb, u32 info)
102587 {
102588@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
102589 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
102590 };
102591
102592-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
102593+static struct rtnl_link_ops ipip_link_ops = {
102594 .kind = "ipip",
102595 .maxtype = IFLA_IPTUN_MAX,
102596 .policy = ipip_policy,
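
Note on the rtnl_link_ops hunks above (ip_gre, ip_vti, ipip; the ipv6 tunnels receive the same treatment below): the patch only removes __read_mostly. grsecurity's constify plugin moves ops structures that are never legitimately written into read-only memory, and an explicit __read_mostly section placement would conflict with that; the end state is function-pointer tables the kernel cannot silently overwrite. A small sketch of that end state, with illustrative names:

    #include <stdio.h>

    struct link_ops { const char *kind; void (*setup)(void); };

    static void demo_setup(void) { puts("setup"); }

    /* const puts the table in .rodata, so the function pointers cannot be
     * retargeted at runtime; the constify plugin achieves the same placement
     * without the explicit keyword on structures it recognizes as ops tables. */
    static const struct link_ops demo_link_ops = {
        .kind  = "demo",
        .setup = demo_setup,
    };

    int main(void) { demo_link_ops.setup(); return 0; }
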
102597diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
102598index f95b6f9..2ee2097 100644
102599--- a/net/ipv4/netfilter/arp_tables.c
102600+++ b/net/ipv4/netfilter/arp_tables.c
102601@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
102602 #endif
102603
102604 static int get_info(struct net *net, void __user *user,
102605- const int *len, int compat)
102606+ int len, int compat)
102607 {
102608 char name[XT_TABLE_MAXNAMELEN];
102609 struct xt_table *t;
102610 int ret;
102611
102612- if (*len != sizeof(struct arpt_getinfo)) {
102613- duprintf("length %u != %Zu\n", *len,
102614+ if (len != sizeof(struct arpt_getinfo)) {
102615+ duprintf("length %u != %Zu\n", len,
102616 sizeof(struct arpt_getinfo));
102617 return -EINVAL;
102618 }
102619@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
102620 info.size = private->size;
102621 strcpy(info.name, name);
102622
102623- if (copy_to_user(user, &info, *len) != 0)
102624+ if (copy_to_user(user, &info, len) != 0)
102625 ret = -EFAULT;
102626 else
102627 ret = 0;
102628@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
102629
102630 switch (cmd) {
102631 case ARPT_SO_GET_INFO:
102632- ret = get_info(sock_net(sk), user, len, 1);
102633+ ret = get_info(sock_net(sk), user, *len, 1);
102634 break;
102635 case ARPT_SO_GET_ENTRIES:
102636 ret = compat_get_entries(sock_net(sk), user, len);
102637@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
102638
102639 switch (cmd) {
102640 case ARPT_SO_GET_INFO:
102641- ret = get_info(sock_net(sk), user, len, 0);
102642+ ret = get_info(sock_net(sk), user, *len, 0);
102643 break;
102644
102645 case ARPT_SO_GET_ENTRIES:
102646diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
102647index 99e810f..3711b81 100644
102648--- a/net/ipv4/netfilter/ip_tables.c
102649+++ b/net/ipv4/netfilter/ip_tables.c
102650@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
102651 #endif
102652
102653 static int get_info(struct net *net, void __user *user,
102654- const int *len, int compat)
102655+ int len, int compat)
102656 {
102657 char name[XT_TABLE_MAXNAMELEN];
102658 struct xt_table *t;
102659 int ret;
102660
102661- if (*len != sizeof(struct ipt_getinfo)) {
102662- duprintf("length %u != %zu\n", *len,
102663+ if (len != sizeof(struct ipt_getinfo)) {
102664+ duprintf("length %u != %zu\n", len,
102665 sizeof(struct ipt_getinfo));
102666 return -EINVAL;
102667 }
102668@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
102669 info.size = private->size;
102670 strcpy(info.name, name);
102671
102672- if (copy_to_user(user, &info, *len) != 0)
102673+ if (copy_to_user(user, &info, len) != 0)
102674 ret = -EFAULT;
102675 else
102676 ret = 0;
102677@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102678
102679 switch (cmd) {
102680 case IPT_SO_GET_INFO:
102681- ret = get_info(sock_net(sk), user, len, 1);
102682+ ret = get_info(sock_net(sk), user, *len, 1);
102683 break;
102684 case IPT_SO_GET_ENTRIES:
102685 ret = compat_get_entries(sock_net(sk), user, len);
102686@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102687
102688 switch (cmd) {
102689 case IPT_SO_GET_INFO:
102690- ret = get_info(sock_net(sk), user, len, 0);
102691+ ret = get_info(sock_net(sk), user, *len, 0);
102692 break;
102693
102694 case IPT_SO_GET_ENTRIES:
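
Note on the get_info() changes in arp_tables and ip_tables above (ip6_tables receives the identical change below): taking len by value instead of const int * means the caller snapshots the length once, so the size check and the copy_to_user() necessarily operate on the same value, closing the check-versus-use (double-fetch) window. A userspace sketch of the shape:

    #include <stdio.h>
    #include <string.h>

    static char info[64];

    /* len arrives by value: the bounds check and the copy below cannot
     * disagree, which is the point of the get_info() signature change. */
    static int get_info_fixed(char *dst, int len)
    {
        if (len != (int)sizeof(info))
            return -1;
        memcpy(dst, info, (size_t)len);  /* copy_to_user() in the kernel */
        return 0;
    }

    int main(void)
    {
        char buf[64];
        return get_info_fixed(buf, sizeof(buf)) ? 1 : 0;
    }
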
102695diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102696index e90f83a..3e6acca 100644
102697--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
102698+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102699@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
102700 spin_lock_init(&cn->lock);
102701
102702 #ifdef CONFIG_PROC_FS
102703- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
102704+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
102705 if (!cn->procdir) {
102706 pr_err("Unable to proc dir entry\n");
102707 return -ENOMEM;
102708diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
102709index 2a3720f..d32b565 100644
102710--- a/net/ipv4/ping.c
102711+++ b/net/ipv4/ping.c
102712@@ -59,7 +59,7 @@ struct ping_table {
102713 };
102714
102715 static struct ping_table ping_table;
102716-struct pingv6_ops pingv6_ops;
102717+struct pingv6_ops *pingv6_ops;
102718 EXPORT_SYMBOL_GPL(pingv6_ops);
102719
102720 static u16 ping_port_rover;
102721@@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk)
102722 kgid_t low, high;
102723 int ret = 0;
102724
102725+ if (sk->sk_family == AF_INET6)
102726+ sk->sk_ipv6only = 1;
102727+
102728 inet_get_ping_group_range_net(net, &low, &high);
102729 if (gid_lte(low, group) && gid_lte(group, high))
102730 return 0;
102731@@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
102732 if (addr_len < sizeof(*addr))
102733 return -EINVAL;
102734
102735+ if (addr->sin_family != AF_INET &&
102736+ !(addr->sin_family == AF_UNSPEC &&
102737+ addr->sin_addr.s_addr == htonl(INADDR_ANY)))
102738+ return -EAFNOSUPPORT;
102739+
102740 pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
102741 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
102742
102743@@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
102744 return -EINVAL;
102745
102746 if (addr->sin6_family != AF_INET6)
102747- return -EINVAL;
102748+ return -EAFNOSUPPORT;
102749
102750 pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
102751 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
102752@@ -350,7 +358,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
102753 return -ENODEV;
102754 }
102755 }
102756- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
102757+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
102758 scoped);
102759 rcu_read_unlock();
102760
102761@@ -558,7 +566,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102762 }
102763 #if IS_ENABLED(CONFIG_IPV6)
102764 } else if (skb->protocol == htons(ETH_P_IPV6)) {
102765- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
102766+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
102767 #endif
102768 }
102769
102770@@ -576,7 +584,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102771 info, (u8 *)icmph);
102772 #if IS_ENABLED(CONFIG_IPV6)
102773 } else if (family == AF_INET6) {
102774- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
102775+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
102776 info, (u8 *)icmph);
102777 #endif
102778 }
102779@@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
102780 if (msg->msg_namelen < sizeof(*usin))
102781 return -EINVAL;
102782 if (usin->sin_family != AF_INET)
102783- return -EINVAL;
102784+ return -EAFNOSUPPORT;
102785 daddr = usin->sin_addr.s_addr;
102786 /* no remote port */
102787 } else {
102788@@ -911,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
102789 }
102790
102791 if (inet6_sk(sk)->rxopt.all)
102792- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
102793+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
102794 if (skb->protocol == htons(ETH_P_IPV6) &&
102795 inet6_sk(sk)->rxopt.all)
102796- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
102797+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
102798 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
102799 ip_cmsg_recv(msg, skb);
102800 #endif
102801@@ -1109,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
102802 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
102803 0, sock_i_ino(sp),
102804 atomic_read(&sp->sk_refcnt), sp,
102805- atomic_read(&sp->sk_drops));
102806+ atomic_read_unchecked(&sp->sk_drops));
102807 }
102808
102809 static int ping_v4_seq_show(struct seq_file *seq, void *v)
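
Note on the ping.c hunk above: pingv6_ops changes from a writable global struct, whose fields the ipv6 module patches one by one at init, into a pointer swapped between two static tables (real_pingv6_ops and dummy_pingv6_ops, defined in the ipv6 ping hunk later in this patch). Callers pay one extra dereference; in exchange the function pointers live in fixed objects and registration and teardown become single pointer stores. A userspace sketch with illustrative names:

    #include <stdio.h>

    struct pingv6_like_ops { int (*chk_addr)(int addr); };

    static int real_chk_addr(int addr)  { return addr != 0; }
    static int dummy_chk_addr(int addr) { (void)addr; return 0; }

    static const struct pingv6_like_ops real_ops  = { .chk_addr = real_chk_addr };
    static const struct pingv6_like_ops dummy_ops = { .chk_addr = dummy_chk_addr };

    static const struct pingv6_like_ops *ops = &dummy_ops; /* ipv6 not loaded yet */

    int main(void)
    {
        ops = &real_ops;                 /* pingv6_init(): one pointer store */
        printf("loaded:   %d\n", ops->chk_addr(1));
        ops = &dummy_ops;                /* pingv6_exit(): one pointer store */
        printf("unloaded: %d\n", ops->chk_addr(1));
        return 0;
    }
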
102810diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
102811index 0bb68df..59405fc 100644
102812--- a/net/ipv4/raw.c
102813+++ b/net/ipv4/raw.c
102814@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
102815 int raw_rcv(struct sock *sk, struct sk_buff *skb)
102816 {
102817 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
102818- atomic_inc(&sk->sk_drops);
102819+ atomic_inc_unchecked(&sk->sk_drops);
102820 kfree_skb(skb);
102821 return NET_RX_DROP;
102822 }
102823@@ -774,16 +774,20 @@ static int raw_init(struct sock *sk)
102824
102825 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
102826 {
102827+ struct icmp_filter filter;
102828+
102829 if (optlen > sizeof(struct icmp_filter))
102830 optlen = sizeof(struct icmp_filter);
102831- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
102832+ if (copy_from_user(&filter, optval, optlen))
102833 return -EFAULT;
102834+ raw_sk(sk)->filter = filter;
102835 return 0;
102836 }
102837
102838 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
102839 {
102840 int len, ret = -EFAULT;
102841+ struct icmp_filter filter;
102842
102843 if (get_user(len, optlen))
102844 goto out;
102845@@ -793,8 +797,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
102846 if (len > sizeof(struct icmp_filter))
102847 len = sizeof(struct icmp_filter);
102848 ret = -EFAULT;
102849- if (put_user(len, optlen) ||
102850- copy_to_user(optval, &raw_sk(sk)->filter, len))
102851+ filter = raw_sk(sk)->filter;
102852+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
102853 goto out;
102854 ret = 0;
102855 out: return ret;
102856@@ -1023,7 +1027,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
102857 0, 0L, 0,
102858 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
102859 0, sock_i_ino(sp),
102860- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
102861+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
102862 }
102863
102864 static int raw_seq_show(struct seq_file *seq, void *v)
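
Note on the raw.c icmp-filter hunks above: both directions now go through a struct icmp_filter on the stack. The set path commits to raw_sk(sk)->filter in one assignment only after copy_from_user() fully succeeded, so socket state is never left partially overwritten, and the get path bounds len against the local object before copy_to_user(). The rawv6 hunk near the end of this section applies the same fix for icmp6_filter. A userspace sketch of the set direction:

    #include <stdio.h>
    #include <string.h>

    struct icmp_filter_like { unsigned int data; };

    static struct icmp_filter_like sock_filter;  /* raw_sk(sk)->filter stand-in */

    static int set_filter(const void *user, size_t optlen)
    {
        struct icmp_filter_like tmp = { 0 };

        if (optlen > sizeof(tmp))
            optlen = sizeof(tmp);
        memcpy(&tmp, user, optlen);  /* copy_from_user() in the kernel version */
        sock_filter = tmp;           /* commit only after the whole copy worked */
        return 0;
    }

    int main(void)
    {
        struct icmp_filter_like f = { 0xdeadbeefu };
        set_filter(&f, sizeof(f));
        printf("%#x\n", sock_filter.data);
        return 0;
    }
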
102865diff --git a/net/ipv4/route.c b/net/ipv4/route.c
102866index 52e1f2b..e736cb4 100644
102867--- a/net/ipv4/route.c
102868+++ b/net/ipv4/route.c
102869@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
102870
102871 static int rt_cache_seq_open(struct inode *inode, struct file *file)
102872 {
102873- return seq_open(file, &rt_cache_seq_ops);
102874+ return seq_open_restrict(file, &rt_cache_seq_ops);
102875 }
102876
102877 static const struct file_operations rt_cache_seq_fops = {
102878@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
102879
102880 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
102881 {
102882- return seq_open(file, &rt_cpu_seq_ops);
102883+ return seq_open_restrict(file, &rt_cpu_seq_ops);
102884 }
102885
102886 static const struct file_operations rt_cpu_seq_fops = {
102887@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
102888
102889 static int rt_acct_proc_open(struct inode *inode, struct file *file)
102890 {
102891- return single_open(file, rt_acct_proc_show, NULL);
102892+ return single_open_restrict(file, rt_acct_proc_show, NULL);
102893 }
102894
102895 static const struct file_operations rt_acct_proc_fops = {
102896@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
102897
102898 #define IP_IDENTS_SZ 2048u
102899 struct ip_ident_bucket {
102900- atomic_t id;
102901+ atomic_unchecked_t id;
102902 u32 stamp32;
102903 };
102904
102905-static struct ip_ident_bucket *ip_idents __read_mostly;
102906+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
102907
102908 /* In order to protect privacy, we add a perturbation to identifiers
102909 * if one generator is seldom used. This makes hard for an attacker
102910@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
102911 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
102912 delta = prandom_u32_max(now - old);
102913
102914- return atomic_add_return(segs + delta, &bucket->id) - segs;
102915+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
102916 }
102917 EXPORT_SYMBOL(ip_idents_reserve);
102918
102919@@ -2628,34 +2628,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
102920 .maxlen = sizeof(int),
102921 .mode = 0200,
102922 .proc_handler = ipv4_sysctl_rtcache_flush,
102923+ .extra1 = &init_net,
102924 },
102925 { },
102926 };
102927
102928 static __net_init int sysctl_route_net_init(struct net *net)
102929 {
102930- struct ctl_table *tbl;
102931+ ctl_table_no_const *tbl = NULL;
102932
102933- tbl = ipv4_route_flush_table;
102934 if (!net_eq(net, &init_net)) {
102935- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
102936+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
102937 if (tbl == NULL)
102938 goto err_dup;
102939
102940 /* Don't export sysctls to unprivileged users */
102941 if (net->user_ns != &init_user_ns)
102942 tbl[0].procname = NULL;
102943- }
102944- tbl[0].extra1 = net;
102945+ tbl[0].extra1 = net;
102946+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
102947+ } else
102948+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
102949
102950- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
102951 if (net->ipv4.route_hdr == NULL)
102952 goto err_reg;
102953 return 0;
102954
102955 err_reg:
102956- if (tbl != ipv4_route_flush_table)
102957- kfree(tbl);
102958+ kfree(tbl);
102959 err_dup:
102960 return -ENOMEM;
102961 }
102962@@ -2678,8 +2678,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
102963
102964 static __net_init int rt_genid_init(struct net *net)
102965 {
102966- atomic_set(&net->ipv4.rt_genid, 0);
102967- atomic_set(&net->fnhe_genid, 0);
102968+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
102969+ atomic_set_unchecked(&net->fnhe_genid, 0);
102970 get_random_bytes(&net->ipv4.dev_addr_genid,
102971 sizeof(net->ipv4.dev_addr_genid));
102972 return 0;
102973@@ -2722,11 +2722,7 @@ int __init ip_rt_init(void)
102974 {
102975 int rc = 0;
102976
102977- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
102978- if (!ip_idents)
102979- panic("IP: failed to allocate ip_idents\n");
102980-
102981- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
102982+ prandom_bytes(ip_idents, sizeof(ip_idents));
102983
102984 #ifdef CONFIG_IP_ROUTE_CLASSID
102985 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
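
Note on the route.c hunk above: ip_idents becomes a static array rather than a boot-time kmalloc(). The panic-on-allocation-failure path disappears, the buckets live in the kernel image where they can be neither freed nor relocated, and sizeof(ip_idents) now names the whole array, so the seeding call needs no IP_IDENTS_SZ * sizeof(*ip_idents) arithmetic. A userspace sketch:

    #include <stdio.h>
    #include <stdlib.h>

    #define IP_IDENTS_SZ 2048u
    struct bucket { unsigned int id; unsigned int stamp32; };

    static struct bucket ip_idents[IP_IDENTS_SZ]; /* was: kmalloc + panic on NULL */

    int main(void)
    {
        /* prandom_bytes(ip_idents, sizeof(ip_idents)) in the kernel version */
        unsigned char *p = (unsigned char *)ip_idents;
        for (size_t i = 0; i < sizeof(ip_idents); i++)
            p[i] = (unsigned char)rand();
        printf("first id=%u\n", ip_idents[0].id);
        return 0;
    }
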
102986diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
102987index e0ee384..e2688d9 100644
102988--- a/net/ipv4/sysctl_net_ipv4.c
102989+++ b/net/ipv4/sysctl_net_ipv4.c
102990@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
102991 container_of(table->data, struct net, ipv4.ip_local_ports.range);
102992 int ret;
102993 int range[2];
102994- struct ctl_table tmp = {
102995+ ctl_table_no_const tmp = {
102996 .data = &range,
102997 .maxlen = sizeof(range),
102998 .mode = table->mode,
102999@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
103000 int ret;
103001 gid_t urange[2];
103002 kgid_t low, high;
103003- struct ctl_table tmp = {
103004+ ctl_table_no_const tmp = {
103005 .data = &urange,
103006 .maxlen = sizeof(urange),
103007 .mode = table->mode,
103008@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
103009 void __user *buffer, size_t *lenp, loff_t *ppos)
103010 {
103011 char val[TCP_CA_NAME_MAX];
103012- struct ctl_table tbl = {
103013+ ctl_table_no_const tbl = {
103014 .data = val,
103015 .maxlen = TCP_CA_NAME_MAX,
103016 };
103017@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
103018 void __user *buffer, size_t *lenp,
103019 loff_t *ppos)
103020 {
103021- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
103022+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
103023 int ret;
103024
103025 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103026@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
103027 void __user *buffer, size_t *lenp,
103028 loff_t *ppos)
103029 {
103030- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
103031+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
103032 int ret;
103033
103034 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103035@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
103036 void __user *buffer, size_t *lenp,
103037 loff_t *ppos)
103038 {
103039- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103040+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103041 struct tcp_fastopen_context *ctxt;
103042 int ret;
103043 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
103044@@ -881,13 +881,12 @@ static struct ctl_table ipv4_net_table[] = {
103045
103046 static __net_init int ipv4_sysctl_init_net(struct net *net)
103047 {
103048- struct ctl_table *table;
103049+ ctl_table_no_const *table = NULL;
103050
103051- table = ipv4_net_table;
103052 if (!net_eq(net, &init_net)) {
103053 int i;
103054
103055- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
103056+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
103057 if (table == NULL)
103058 goto err_alloc;
103059
103060@@ -896,7 +895,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
103061 table[i].data += (void *)net - (void *)&init_net;
103062 }
103063
103064- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103065+ if (!net_eq(net, &init_net))
103066+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103067+ else
103068+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
103069 if (net->ipv4.ipv4_hdr == NULL)
103070 goto err_reg;
103071
103072diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
103073index 075ab4d..623bb9d 100644
103074--- a/net/ipv4/tcp_input.c
103075+++ b/net/ipv4/tcp_input.c
103076@@ -766,7 +766,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
103077 * without any lock. We want to make sure compiler wont store
103078 * intermediate values in this location.
103079 */
103080- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
103081+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
103082 sk->sk_max_pacing_rate);
103083 }
103084
103085@@ -4528,7 +4528,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
103086 * simplifies code)
103087 */
103088 static void
103089-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103090+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103091 struct sk_buff *head, struct sk_buff *tail,
103092 u32 start, u32 end)
103093 {
103094@@ -5506,6 +5506,7 @@ discard:
103095 tcp_paws_reject(&tp->rx_opt, 0))
103096 goto discard_and_undo;
103097
103098+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
103099 if (th->syn) {
103100 /* We see SYN without ACK. It is attempt of
103101 * simultaneous connect with crossed SYNs.
103102@@ -5556,6 +5557,7 @@ discard:
103103 goto discard;
103104 #endif
103105 }
103106+#endif
103107 /* "fifth, if neither of the SYN or RST bits is set then
103108 * drop the segment and return."
103109 */
103110@@ -5602,7 +5604,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
103111 goto discard;
103112
103113 if (th->syn) {
103114- if (th->fin)
103115+ if (th->fin || th->urg || th->psh)
103116 goto discard;
103117 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
103118 return 1;
103119diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
103120index d22f544..62f6787 100644
103121--- a/net/ipv4/tcp_ipv4.c
103122+++ b/net/ipv4/tcp_ipv4.c
103123@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
103124 int sysctl_tcp_low_latency __read_mostly;
103125 EXPORT_SYMBOL(sysctl_tcp_low_latency);
103126
103127+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103128+extern int grsec_enable_blackhole;
103129+#endif
103130+
103131 #ifdef CONFIG_TCP_MD5SIG
103132 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
103133 __be32 daddr, __be32 saddr, const struct tcphdr *th);
103134@@ -1473,6 +1477,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
103135 return 0;
103136
103137 reset:
103138+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103139+ if (!grsec_enable_blackhole)
103140+#endif
103141 tcp_v4_send_reset(rsk, skb);
103142 discard:
103143 kfree_skb(skb);
103144@@ -1637,12 +1644,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
103145 TCP_SKB_CB(skb)->sacked = 0;
103146
103147 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
103148- if (!sk)
103149+ if (!sk) {
103150+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103151+ ret = 1;
103152+#endif
103153 goto no_tcp_socket;
103154-
103155+ }
103156 process:
103157- if (sk->sk_state == TCP_TIME_WAIT)
103158+ if (sk->sk_state == TCP_TIME_WAIT) {
103159+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103160+ ret = 2;
103161+#endif
103162 goto do_time_wait;
103163+ }
103164
103165 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
103166 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
103167@@ -1698,6 +1712,10 @@ csum_error:
103168 bad_packet:
103169 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
103170 } else {
103171+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103172+ if (!grsec_enable_blackhole || (ret == 1 &&
103173+ (skb->dev->flags & IFF_LOOPBACK)))
103174+#endif
103175 tcp_v4_send_reset(NULL, skb);
103176 }
103177
103178diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
103179index 63d2680..2db9d6b 100644
103180--- a/net/ipv4/tcp_minisocks.c
103181+++ b/net/ipv4/tcp_minisocks.c
103182@@ -27,6 +27,10 @@
103183 #include <net/inet_common.h>
103184 #include <net/xfrm.h>
103185
103186+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103187+extern int grsec_enable_blackhole;
103188+#endif
103189+
103190 int sysctl_tcp_syncookies __read_mostly = 1;
103191 EXPORT_SYMBOL(sysctl_tcp_syncookies);
103192
103193@@ -739,7 +743,10 @@ embryonic_reset:
103194 * avoid becoming vulnerable to outside attack aiming at
103195 * resetting legit local connections.
103196 */
103197- req->rsk_ops->send_reset(sk, skb);
103198+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103199+ if (!grsec_enable_blackhole)
103200+#endif
103201+ req->rsk_ops->send_reset(sk, skb);
103202 } else if (fastopen) { /* received a valid RST pkt */
103203 reqsk_fastopen_remove(sk, req, true);
103204 tcp_reset(sk);
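
Note on the GRKERNSEC_BLACKHOLE hunks (ip_input, tcp_ipv4, tcp_minisocks above; tcp_timer and udp below): they all share one shape. Each point where the stack would emit a diagnostic reply to an unsolicited packet, a TCP RST or an ICMP unreachable, is wrapped so the reply is suppressed when the grsec_enable_blackhole sysctl is set, with loopback traffic exempted; a remote port scan then sees silence instead of a fingerprintable response. A sketch of the gating shape:

    #include <stdbool.h>
    #include <stdio.h>

    /* sysctl-backed in grsecurity; the kernel code additionally compiles the
     * whole test out when CONFIG_GRKERNSEC_BLACKHOLE is not set */
    static int grsec_enable_blackhole = 1;

    static void send_reset(bool from_loopback)
    {
        if (grsec_enable_blackhole && !from_loopback)
            return;             /* stay silent toward remote probes */
        puts("RST sent");
    }

    int main(void)
    {
        send_reset(false);      /* remote: suppressed */
        send_reset(true);       /* loopback: still answered */
        return 0;
    }
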
103205diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
103206index ebf5ff5..4d1ff32 100644
103207--- a/net/ipv4/tcp_probe.c
103208+++ b/net/ipv4/tcp_probe.c
103209@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
103210 if (cnt + width >= len)
103211 break;
103212
103213- if (copy_to_user(buf + cnt, tbuf, width))
103214+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
103215 return -EFAULT;
103216 cnt += width;
103217 }
103218diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
103219index 1829c7f..c0b3d52 100644
103220--- a/net/ipv4/tcp_timer.c
103221+++ b/net/ipv4/tcp_timer.c
103222@@ -22,6 +22,10 @@
103223 #include <linux/gfp.h>
103224 #include <net/tcp.h>
103225
103226+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103227+extern int grsec_lastack_retries;
103228+#endif
103229+
103230 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
103231 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
103232 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
103233@@ -191,6 +195,13 @@ static int tcp_write_timeout(struct sock *sk)
103234 }
103235 }
103236
103237+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103238+ if ((sk->sk_state == TCP_LAST_ACK) &&
103239+ (grsec_lastack_retries > 0) &&
103240+ (grsec_lastack_retries < retry_until))
103241+ retry_until = grsec_lastack_retries;
103242+#endif
103243+
103244 if (retransmits_timed_out(sk, retry_until,
103245 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
103246 /* Has it gone just too far? */
103247diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
103248index 13b4dcf..b866a2a 100644
103249--- a/net/ipv4/udp.c
103250+++ b/net/ipv4/udp.c
103251@@ -87,6 +87,7 @@
103252 #include <linux/types.h>
103253 #include <linux/fcntl.h>
103254 #include <linux/module.h>
103255+#include <linux/security.h>
103256 #include <linux/socket.h>
103257 #include <linux/sockios.h>
103258 #include <linux/igmp.h>
103259@@ -114,6 +115,10 @@
103260 #include <net/busy_poll.h>
103261 #include "udp_impl.h"
103262
103263+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103264+extern int grsec_enable_blackhole;
103265+#endif
103266+
103267 struct udp_table udp_table __read_mostly;
103268 EXPORT_SYMBOL(udp_table);
103269
103270@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
103271 return true;
103272 }
103273
103274+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
103275+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
103276+
103277 /*
103278 * This routine is called by the ICMP module when it gets some
103279 * sort of error condition. If err < 0 then the socket should
103280@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103281 dport = usin->sin_port;
103282 if (dport == 0)
103283 return -EINVAL;
103284+
103285+ err = gr_search_udp_sendmsg(sk, usin);
103286+ if (err)
103287+ return err;
103288 } else {
103289 if (sk->sk_state != TCP_ESTABLISHED)
103290 return -EDESTADDRREQ;
103291+
103292+ err = gr_search_udp_sendmsg(sk, NULL);
103293+ if (err)
103294+ return err;
103295+
103296 daddr = inet->inet_daddr;
103297 dport = inet->inet_dport;
103298 /* Open fast path for connected socket.
103299@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
103300 IS_UDPLITE(sk));
103301 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103302 IS_UDPLITE(sk));
103303- atomic_inc(&sk->sk_drops);
103304+ atomic_inc_unchecked(&sk->sk_drops);
103305 __skb_unlink(skb, rcvq);
103306 __skb_queue_tail(&list_kill, skb);
103307 }
103308@@ -1275,6 +1292,10 @@ try_again:
103309 if (!skb)
103310 goto out;
103311
103312+ err = gr_search_udp_recvmsg(sk, skb);
103313+ if (err)
103314+ goto out_free;
103315+
103316 ulen = skb->len - sizeof(struct udphdr);
103317 copied = len;
103318 if (copied > ulen)
103319@@ -1307,7 +1328,7 @@ try_again:
103320 if (unlikely(err)) {
103321 trace_kfree_skb(skb, udp_recvmsg);
103322 if (!peeked) {
103323- atomic_inc(&sk->sk_drops);
103324+ atomic_inc_unchecked(&sk->sk_drops);
103325 UDP_INC_STATS_USER(sock_net(sk),
103326 UDP_MIB_INERRORS, is_udplite);
103327 }
103328@@ -1605,7 +1626,7 @@ csum_error:
103329 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
103330 drop:
103331 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
103332- atomic_inc(&sk->sk_drops);
103333+ atomic_inc_unchecked(&sk->sk_drops);
103334 kfree_skb(skb);
103335 return -1;
103336 }
103337@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
103338 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
103339
103340 if (!skb1) {
103341- atomic_inc(&sk->sk_drops);
103342+ atomic_inc_unchecked(&sk->sk_drops);
103343 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
103344 IS_UDPLITE(sk));
103345 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103346@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
103347 goto csum_error;
103348
103349 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
103350+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103351+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
103352+#endif
103353 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
103354
103355 /*
103356@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
103357 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103358 0, sock_i_ino(sp),
103359 atomic_read(&sp->sk_refcnt), sp,
103360- atomic_read(&sp->sk_drops));
103361+ atomic_read_unchecked(&sp->sk_drops));
103362 }
103363
103364 int udp4_seq_show(struct seq_file *seq, void *v)
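
Note on the udp.c hunk above: udp_sendmsg() and udp_recvmsg() gain calls to gr_search_udp_sendmsg()/gr_search_udp_recvmsg(), grsecurity RBAC hooks declared extern here and implemented in the grsecurity core, which this section does not show. The visible calling convention is the usual early-return security hook: zero means proceed, a negative errno aborts before any socket state is touched. A sketch of that convention, with a hypothetical policy body:

    #include <errno.h>
    #include <stdio.h>

    /* illustrative policy only; the real gr_search_udp_* semantics live in
     * grsecurity's RBAC code, outside this patch section */
    static int hook_allow_send(int dport)
    {
        return dport == 9 ? -EPERM : 0;
    }

    static int sendmsg_like(int dport)
    {
        int err = hook_allow_send(dport); /* gr_search_udp_sendmsg() in the patch */
        if (err)
            return err;                   /* fail before touching any state */
        printf("sent to port %d\n", dport);
        return 0;
    }

    int main(void)
    {
        return (sendmsg_like(9) == -EPERM && sendmsg_like(53) == 0) ? 0 : 1;
    }
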
103365diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
103366index 6156f68..d6ab46d 100644
103367--- a/net/ipv4/xfrm4_policy.c
103368+++ b/net/ipv4/xfrm4_policy.c
103369@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
103370 fl4->flowi4_tos = iph->tos;
103371 }
103372
103373-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
103374+static int xfrm4_garbage_collect(struct dst_ops *ops)
103375 {
103376 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
103377
103378- xfrm4_policy_afinfo.garbage_collect(net);
103379+ xfrm_garbage_collect_deferred(net);
103380 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
103381 }
103382
103383@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
103384
103385 static int __net_init xfrm4_net_init(struct net *net)
103386 {
103387- struct ctl_table *table;
103388+ ctl_table_no_const *table = NULL;
103389 struct ctl_table_header *hdr;
103390
103391- table = xfrm4_policy_table;
103392 if (!net_eq(net, &init_net)) {
103393- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103394+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103395 if (!table)
103396 goto err_alloc;
103397
103398 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
103399- }
103400-
103401- hdr = register_net_sysctl(net, "net/ipv4", table);
103402+ hdr = register_net_sysctl(net, "net/ipv4", table);
103403+ } else
103404+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
103405 if (!hdr)
103406 goto err_reg;
103407
103408@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
103409 return 0;
103410
103411 err_reg:
103412- if (!net_eq(net, &init_net))
103413- kfree(table);
103414+ kfree(table);
103415 err_alloc:
103416 return -ENOMEM;
103417 }
103418diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
103419index f7c8bbe..534fa31 100644
103420--- a/net/ipv6/addrconf.c
103421+++ b/net/ipv6/addrconf.c
103422@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
103423 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103424 .mtu6 = IPV6_MIN_MTU,
103425 .accept_ra = 1,
103426- .accept_redirects = 1,
103427+ .accept_redirects = 0,
103428 .autoconf = 1,
103429 .force_mld_version = 0,
103430 .mldv1_unsolicited_report_interval = 10 * HZ,
103431@@ -208,7 +208,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
103432 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103433 .mtu6 = IPV6_MIN_MTU,
103434 .accept_ra = 1,
103435- .accept_redirects = 1,
103436+ .accept_redirects = 0,
103437 .autoconf = 1,
103438 .force_mld_version = 0,
103439 .mldv1_unsolicited_report_interval = 10 * HZ,
103440@@ -604,7 +604,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
103441 idx = 0;
103442 head = &net->dev_index_head[h];
103443 rcu_read_lock();
103444- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
103445+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
103446 net->dev_base_seq;
103447 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103448 if (idx < s_idx)
103449@@ -2420,7 +2420,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
103450 p.iph.ihl = 5;
103451 p.iph.protocol = IPPROTO_IPV6;
103452 p.iph.ttl = 64;
103453- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
103454+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
103455
103456 if (ops->ndo_do_ioctl) {
103457 mm_segment_t oldfs = get_fs();
103458@@ -3569,16 +3569,23 @@ static const struct file_operations if6_fops = {
103459 .release = seq_release_net,
103460 };
103461
103462+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
103463+extern void unregister_ipv6_seq_ops_addr(void);
103464+
103465 static int __net_init if6_proc_net_init(struct net *net)
103466 {
103467- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
103468+ register_ipv6_seq_ops_addr(&if6_seq_ops);
103469+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
103470+ unregister_ipv6_seq_ops_addr();
103471 return -ENOMEM;
103472+ }
103473 return 0;
103474 }
103475
103476 static void __net_exit if6_proc_net_exit(struct net *net)
103477 {
103478 remove_proc_entry("if_inet6", net->proc_net);
103479+ unregister_ipv6_seq_ops_addr();
103480 }
103481
103482 static struct pernet_operations if6_proc_net_ops = {
103483@@ -4194,7 +4201,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
103484 s_ip_idx = ip_idx = cb->args[2];
103485
103486 rcu_read_lock();
103487- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103488+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103489 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
103490 idx = 0;
103491 head = &net->dev_index_head[h];
103492@@ -4572,6 +4579,22 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
103493 return 0;
103494 }
103495
103496+static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
103497+ [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
103498+ [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
103499+};
103500+
103501+static int inet6_validate_link_af(const struct net_device *dev,
103502+ const struct nlattr *nla)
103503+{
103504+ struct nlattr *tb[IFLA_INET6_MAX + 1];
103505+
103506+ if (dev && !__in6_dev_get(dev))
103507+ return -EAFNOSUPPORT;
103508+
103509+ return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy);
103510+}
103511+
103512 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
103513 {
103514 int err = -EINVAL;
103515@@ -4824,7 +4847,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103516 rt_genid_bump_ipv6(net);
103517 break;
103518 }
103519- atomic_inc(&net->ipv6.dev_addr_genid);
103520+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
103521 }
103522
103523 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103524@@ -4844,7 +4867,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
103525 int *valp = ctl->data;
103526 int val = *valp;
103527 loff_t pos = *ppos;
103528- struct ctl_table lctl;
103529+ ctl_table_no_const lctl;
103530 int ret;
103531
103532 /*
103533@@ -4929,7 +4952,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
103534 int *valp = ctl->data;
103535 int val = *valp;
103536 loff_t pos = *ppos;
103537- struct ctl_table lctl;
103538+ ctl_table_no_const lctl;
103539 int ret;
103540
103541 /*
103542@@ -5393,6 +5416,7 @@ static struct rtnl_af_ops inet6_ops = {
103543 .family = AF_INET6,
103544 .fill_link_af = inet6_fill_link_af,
103545 .get_link_af_size = inet6_get_link_af_size,
103546+ .validate_link_af = inet6_validate_link_af,
103547 .set_link_af = inet6_set_link_af,
103548 };
103549
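
Note on the addrconf hunk above: inet6_validate_link_af, wired into inet6_ops as .validate_link_af, parses IFLA_INET6 attributes against inet6_af_policy and confirms the device actually has an inet6_dev before set_link_af is ever reached, so malformed netlink input is rejected before any state changes. A userspace sketch of validate-before-apply, with an illustrative fixed-size policy table standing in for the nla_policy array:

    #include <stdio.h>

    struct attr { int type; size_t len; };

    enum { ATTR_ADDR_GEN_MODE, ATTR_TOKEN, ATTR_MAX };
    static const size_t policy_len[ATTR_MAX] = { 1, 16 }; /* NLA_U8, in6_addr */

    /* nla_parse_nested() performs this role in the patch: every attribute
     * must match the policy before anything is applied */
    static int validate(const struct attr *a)
    {
        if (a->type < 0 || a->type >= ATTR_MAX)
            return -1;
        return a->len == policy_len[a->type] ? 0 : -1;
    }

    int main(void)
    {
        struct attr ok = { ATTR_TOKEN, 16 }, bad = { ATTR_TOKEN, 4 };
        printf("ok=%d bad=%d\n", validate(&ok), validate(&bad));
        return 0;
    }
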
103550diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
103551index e8c4400..a4cd5da 100644
103552--- a/net/ipv6/af_inet6.c
103553+++ b/net/ipv6/af_inet6.c
103554@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
103555 net->ipv6.sysctl.icmpv6_time = 1*HZ;
103556 net->ipv6.sysctl.flowlabel_consistency = 1;
103557 net->ipv6.sysctl.auto_flowlabels = 0;
103558- atomic_set(&net->ipv6.fib6_sernum, 1);
103559+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
103560
103561 err = ipv6_init_mibs(net);
103562 if (err)
103563diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
103564index 49f5e73..ae02d54 100644
103565--- a/net/ipv6/datagram.c
103566+++ b/net/ipv6/datagram.c
103567@@ -941,5 +941,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
103568 0,
103569 sock_i_ino(sp),
103570 atomic_read(&sp->sk_refcnt), sp,
103571- atomic_read(&sp->sk_drops));
103572+ atomic_read_unchecked(&sp->sk_drops));
103573 }
103574diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
103575index d674152..fb5a01d 100644
103576--- a/net/ipv6/icmp.c
103577+++ b/net/ipv6/icmp.c
103578@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
103579
103580 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
103581 {
103582- struct ctl_table *table;
103583+ ctl_table_no_const *table;
103584
103585 table = kmemdup(ipv6_icmp_table_template,
103586 sizeof(ipv6_icmp_table_template),
103587diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
103588index f1c6d5e..faabef6 100644
103589--- a/net/ipv6/ip6_fib.c
103590+++ b/net/ipv6/ip6_fib.c
103591@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
103592 int new, old;
103593
103594 do {
103595- old = atomic_read(&net->ipv6.fib6_sernum);
103596+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
103597 new = old < INT_MAX ? old + 1 : 1;
103598- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
103599+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
103600 old, new) != old);
103601 return new;
103602 }
103603diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
103604index 01ccc28..66861c7 100644
103605--- a/net/ipv6/ip6_gre.c
103606+++ b/net/ipv6/ip6_gre.c
103607@@ -71,8 +71,8 @@ struct ip6gre_net {
103608 struct net_device *fb_tunnel_dev;
103609 };
103610
103611-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
103612-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
103613+static struct rtnl_link_ops ip6gre_link_ops;
103614+static struct rtnl_link_ops ip6gre_tap_ops;
103615 static int ip6gre_tunnel_init(struct net_device *dev);
103616 static void ip6gre_tunnel_setup(struct net_device *dev);
103617 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
103618@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
103619 }
103620
103621
103622-static struct inet6_protocol ip6gre_protocol __read_mostly = {
103623+static struct inet6_protocol ip6gre_protocol = {
103624 .handler = ip6gre_rcv,
103625 .err_handler = ip6gre_err,
103626 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
103627@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
103628 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
103629 };
103630
103631-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103632+static struct rtnl_link_ops ip6gre_link_ops = {
103633 .kind = "ip6gre",
103634 .maxtype = IFLA_GRE_MAX,
103635 .policy = ip6gre_policy,
103636@@ -1664,7 +1664,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103637 .fill_info = ip6gre_fill_info,
103638 };
103639
103640-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
103641+static struct rtnl_link_ops ip6gre_tap_ops = {
103642 .kind = "ip6gretap",
103643 .maxtype = IFLA_GRE_MAX,
103644 .policy = ip6gre_policy,
103645diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
103646index 92b3da5..77837b8 100644
103647--- a/net/ipv6/ip6_tunnel.c
103648+++ b/net/ipv6/ip6_tunnel.c
103649@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103650
103651 static int ip6_tnl_dev_init(struct net_device *dev);
103652 static void ip6_tnl_dev_setup(struct net_device *dev);
103653-static struct rtnl_link_ops ip6_link_ops __read_mostly;
103654+static struct rtnl_link_ops ip6_link_ops;
103655
103656 static int ip6_tnl_net_id __read_mostly;
103657 struct ip6_tnl_net {
103658@@ -1771,7 +1771,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
103659 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
103660 };
103661
103662-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
103663+static struct rtnl_link_ops ip6_link_ops = {
103664 .kind = "ip6tnl",
103665 .maxtype = IFLA_IPTUN_MAX,
103666 .policy = ip6_tnl_policy,
103667diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
103668index ace10d0..97a8b49 100644
103669--- a/net/ipv6/ip6_vti.c
103670+++ b/net/ipv6/ip6_vti.c
103671@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103672
103673 static int vti6_dev_init(struct net_device *dev);
103674 static void vti6_dev_setup(struct net_device *dev);
103675-static struct rtnl_link_ops vti6_link_ops __read_mostly;
103676+static struct rtnl_link_ops vti6_link_ops;
103677
103678 static int vti6_net_id __read_mostly;
103679 struct vti6_net {
103680@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
103681 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
103682 };
103683
103684-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
103685+static struct rtnl_link_ops vti6_link_ops = {
103686 .kind = "vti6",
103687 .maxtype = IFLA_VTI_MAX,
103688 .policy = vti6_policy,
103689diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
103690index 66980d8d..8aef0d1 100644
103691--- a/net/ipv6/ipv6_sockglue.c
103692+++ b/net/ipv6/ipv6_sockglue.c
103693@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
103694 if (sk->sk_type != SOCK_STREAM)
103695 return -ENOPROTOOPT;
103696
103697- msg.msg_control = optval;
103698+ msg.msg_control = (void __force_kernel *)optval;
103699 msg.msg_controllen = len;
103700 msg.msg_flags = flags;
103701
103702diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
103703index e080fbb..412b3cf 100644
103704--- a/net/ipv6/netfilter/ip6_tables.c
103705+++ b/net/ipv6/netfilter/ip6_tables.c
103706@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
103707 #endif
103708
103709 static int get_info(struct net *net, void __user *user,
103710- const int *len, int compat)
103711+ int len, int compat)
103712 {
103713 char name[XT_TABLE_MAXNAMELEN];
103714 struct xt_table *t;
103715 int ret;
103716
103717- if (*len != sizeof(struct ip6t_getinfo)) {
103718- duprintf("length %u != %zu\n", *len,
103719+ if (len != sizeof(struct ip6t_getinfo)) {
103720+ duprintf("length %u != %zu\n", len,
103721 sizeof(struct ip6t_getinfo));
103722 return -EINVAL;
103723 }
103724@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
103725 info.size = private->size;
103726 strcpy(info.name, name);
103727
103728- if (copy_to_user(user, &info, *len) != 0)
103729+ if (copy_to_user(user, &info, len) != 0)
103730 ret = -EFAULT;
103731 else
103732 ret = 0;
103733@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103734
103735 switch (cmd) {
103736 case IP6T_SO_GET_INFO:
103737- ret = get_info(sock_net(sk), user, len, 1);
103738+ ret = get_info(sock_net(sk), user, *len, 1);
103739 break;
103740 case IP6T_SO_GET_ENTRIES:
103741 ret = compat_get_entries(sock_net(sk), user, len);
103742@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103743
103744 switch (cmd) {
103745 case IP6T_SO_GET_INFO:
103746- ret = get_info(sock_net(sk), user, len, 0);
103747+ ret = get_info(sock_net(sk), user, *len, 0);
103748 break;
103749
103750 case IP6T_SO_GET_ENTRIES:
103751diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
103752index 6f187c8..34b367f 100644
103753--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
103754+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
103755@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
103756
103757 static int nf_ct_frag6_sysctl_register(struct net *net)
103758 {
103759- struct ctl_table *table;
103760+ ctl_table_no_const *table = NULL;
103761 struct ctl_table_header *hdr;
103762
103763- table = nf_ct_frag6_sysctl_table;
103764 if (!net_eq(net, &init_net)) {
103765- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
103766+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
103767 GFP_KERNEL);
103768 if (table == NULL)
103769 goto err_alloc;
103770@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103771 table[2].data = &net->nf_frag.frags.high_thresh;
103772 table[2].extra1 = &net->nf_frag.frags.low_thresh;
103773 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
103774- }
103775-
103776- hdr = register_net_sysctl(net, "net/netfilter", table);
103777+ hdr = register_net_sysctl(net, "net/netfilter", table);
103778+ } else
103779+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
103780 if (hdr == NULL)
103781 goto err_reg;
103782
103783@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103784 return 0;
103785
103786 err_reg:
103787- if (!net_eq(net, &init_net))
103788- kfree(table);
103789+ kfree(table);
103790 err_alloc:
103791 return -ENOMEM;
103792 }
103793diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
103794index 2d31483..47aba96 100644
103795--- a/net/ipv6/ping.c
103796+++ b/net/ipv6/ping.c
103797@@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103798
103799 if (msg->msg_name) {
103800 DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name);
103801- if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
103802- u->sin6_family != AF_INET6) {
103803+ if (msg->msg_namelen < sizeof(*u))
103804 return -EINVAL;
103805+ if (u->sin6_family != AF_INET6) {
103806+ return -EAFNOSUPPORT;
103807 }
103808 if (sk->sk_bound_dev_if &&
103809 sk->sk_bound_dev_if != u->sin6_scope_id) {
103810@@ -241,6 +242,24 @@ static struct pernet_operations ping_v6_net_ops = {
103811 };
103812 #endif
103813
103814+static struct pingv6_ops real_pingv6_ops = {
103815+ .ipv6_recv_error = ipv6_recv_error,
103816+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
103817+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
103818+ .icmpv6_err_convert = icmpv6_err_convert,
103819+ .ipv6_icmp_error = ipv6_icmp_error,
103820+ .ipv6_chk_addr = ipv6_chk_addr,
103821+};
103822+
103823+static struct pingv6_ops dummy_pingv6_ops = {
103824+ .ipv6_recv_error = dummy_ipv6_recv_error,
103825+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
103826+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
103827+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
103828+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
103829+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
103830+};
103831+
103832 int __init pingv6_init(void)
103833 {
103834 #ifdef CONFIG_PROC_FS
103835@@ -248,13 +267,7 @@ int __init pingv6_init(void)
103836 if (ret)
103837 return ret;
103838 #endif
103839- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
103840- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
103841- pingv6_ops.ip6_datagram_recv_specific_ctl =
103842- ip6_datagram_recv_specific_ctl;
103843- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
103844- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
103845- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
103846+ pingv6_ops = &real_pingv6_ops;
103847 return inet6_register_protosw(&pingv6_protosw);
103848 }
103849
103850@@ -263,14 +276,9 @@ int __init pingv6_init(void)
103851 */
103852 void pingv6_exit(void)
103853 {
103854- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
103855- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
103856- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
103857- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
103858- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
103859- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
103860 #ifdef CONFIG_PROC_FS
103861 unregister_pernet_subsys(&ping_v6_net_ops);
103862 #endif
103863+ pingv6_ops = &dummy_pingv6_ops;
103864 inet6_unregister_protosw(&pingv6_protosw);
103865 }
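
Two things happen in ping.c. The first hunk is a correctness fix folded into the patch: an undersized msg_namelen still yields -EINVAL, but a wrong address family now yields -EAFNOSUPPORT, matching IPv4 behaviour. The rest converts pingv6_ops from a structure whose members are written one by one at init/exit time into two fully initialized static tables selected by a single pointer store, which is what lets the tables themselves be made read-only. A self-contained toy of the pointer-swap idea (all names hypothetical):

struct ping_ops {
        int (*recv_error)(void);
        int (*chk_addr)(void);
};

static int real_recv_error(void) { return 0; }
static int real_chk_addr(void)   { return 1; }
static int stub(void)            { return -1; }  /* "not ready" stub */

static struct ping_ops real_ops  = { real_recv_error, real_chk_addr };
static struct ping_ops dummy_ops = { stub, stub };

/* one pointer-sized store flips the whole ops table */
static struct ping_ops *live_ops = &dummy_ops;

static void ping_ops_init(void) { live_ops = &real_ops; }
static void ping_ops_exit(void) { live_ops = &dummy_ops; }
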
103866diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
103867index 679253d0..70b653c 100644
103868--- a/net/ipv6/proc.c
103869+++ b/net/ipv6/proc.c
103870@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
103871 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
103872 goto proc_snmp6_fail;
103873
103874- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
103875+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
103876 if (!net->mib.proc_net_devsnmp6)
103877 goto proc_dev_snmp6_fail;
103878 return 0;
103879diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
103880index ee25631..3c3ac5d 100644
103881--- a/net/ipv6/raw.c
103882+++ b/net/ipv6/raw.c
103883@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
103884 {
103885 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
103886 skb_checksum_complete(skb)) {
103887- atomic_inc(&sk->sk_drops);
103888+ atomic_inc_unchecked(&sk->sk_drops);
103889 kfree_skb(skb);
103890 return NET_RX_DROP;
103891 }
103892@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
103893 struct raw6_sock *rp = raw6_sk(sk);
103894
103895 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
103896- atomic_inc(&sk->sk_drops);
103897+ atomic_inc_unchecked(&sk->sk_drops);
103898 kfree_skb(skb);
103899 return NET_RX_DROP;
103900 }
103901@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
103902
103903 if (inet->hdrincl) {
103904 if (skb_checksum_complete(skb)) {
103905- atomic_inc(&sk->sk_drops);
103906+ atomic_inc_unchecked(&sk->sk_drops);
103907 kfree_skb(skb);
103908 return NET_RX_DROP;
103909 }
103910@@ -609,7 +609,7 @@ out:
103911 return err;
103912 }
103913
103914-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
103915+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
103916 struct flowi6 *fl6, struct dst_entry **dstp,
103917 unsigned int flags)
103918 {
103919@@ -916,12 +916,15 @@ do_confirm:
103920 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
103921 char __user *optval, int optlen)
103922 {
103923+ struct icmp6_filter filter;
103924+
103925 switch (optname) {
103926 case ICMPV6_FILTER:
103927 if (optlen > sizeof(struct icmp6_filter))
103928 optlen = sizeof(struct icmp6_filter);
103929- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
103930+ if (copy_from_user(&filter, optval, optlen))
103931 return -EFAULT;
103932+ raw6_sk(sk)->filter = filter;
103933 return 0;
103934 default:
103935 return -ENOPROTOOPT;
103936@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
103937 char __user *optval, int __user *optlen)
103938 {
103939 int len;
103940+ struct icmp6_filter filter;
103941
103942 switch (optname) {
103943 case ICMPV6_FILTER:
103944@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
103945 len = sizeof(struct icmp6_filter);
103946 if (put_user(len, optlen))
103947 return -EFAULT;
103948- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
103949+ filter = raw6_sk(sk)->filter;
103950+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
103951 return -EFAULT;
103952 return 0;
103953 default:
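
The last two raw.c hunks bounce the ICMPv6 filter through a stack copy so that copy_from_user()/copy_to_user() never operate directly on the socket object, keeping the user-accessible window to one correctly sized local under grsecurity's usercopy checking. A sketch with a hypothetical helper; note the hunk itself does not zero the stack copy, so a short optlen leaves its tail uninitialized before the struct assignment, which the memset below would avoid:

#include <linux/errno.h>
#include <linux/icmpv6.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int set_icmp_filter(struct icmp6_filter *dst,
                           const char __user *optval, int optlen)
{
        struct icmp6_filter filter;

        /* not in the hunk: avoids copying stale stack bytes into
         * the socket when optlen is shorter than the struct */
        memset(&filter, 0, sizeof(filter));
        if (optlen > sizeof(filter))
                optlen = sizeof(filter);
        if (copy_from_user(&filter, optval, optlen))
                return -EFAULT;
        *dst = filter;  /* kernel-to-kernel struct copy */
        return 0;
}
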
103954diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
103955index d7d70e6..bd5e9fc 100644
103956--- a/net/ipv6/reassembly.c
103957+++ b/net/ipv6/reassembly.c
103958@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
103959
103960 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
103961 {
103962- struct ctl_table *table;
103963+ ctl_table_no_const *table = NULL;
103964 struct ctl_table_header *hdr;
103965
103966- table = ip6_frags_ns_ctl_table;
103967 if (!net_eq(net, &init_net)) {
103968- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
103969+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
103970 if (table == NULL)
103971 goto err_alloc;
103972
103973@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
103974 /* Don't export sysctls to unprivileged users */
103975 if (net->user_ns != &init_user_ns)
103976 table[0].procname = NULL;
103977- }
103978+ hdr = register_net_sysctl(net, "net/ipv6", table);
103979+ } else
103980+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
103981
103982- hdr = register_net_sysctl(net, "net/ipv6", table);
103983 if (hdr == NULL)
103984 goto err_reg;
103985
103986@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
103987 return 0;
103988
103989 err_reg:
103990- if (!net_eq(net, &init_net))
103991- kfree(table);
103992+ kfree(table);
103993 err_alloc:
103994 return -ENOMEM;
103995 }
103996diff --git a/net/ipv6/route.c b/net/ipv6/route.c
103997index 49596535..663a24a 100644
103998--- a/net/ipv6/route.c
103999+++ b/net/ipv6/route.c
104000@@ -2978,7 +2978,7 @@ struct ctl_table ipv6_route_table_template[] = {
104001
104002 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
104003 {
104004- struct ctl_table *table;
104005+ ctl_table_no_const *table;
104006
104007 table = kmemdup(ipv6_route_table_template,
104008 sizeof(ipv6_route_table_template),
104009diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
104010index cdbfe5a..e13eb31 100644
104011--- a/net/ipv6/sit.c
104012+++ b/net/ipv6/sit.c
104013@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
104014 static void ipip6_dev_free(struct net_device *dev);
104015 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
104016 __be32 *v4dst);
104017-static struct rtnl_link_ops sit_link_ops __read_mostly;
104018+static struct rtnl_link_ops sit_link_ops;
104019
104020 static int sit_net_id __read_mostly;
104021 struct sit_net {
104022@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
104023 unregister_netdevice_queue(dev, head);
104024 }
104025
104026-static struct rtnl_link_ops sit_link_ops __read_mostly = {
104027+static struct rtnl_link_ops sit_link_ops = {
104028 .kind = "sit",
104029 .maxtype = IFLA_IPTUN_MAX,
104030 .policy = ipip6_policy,
104031diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
104032index c5c10fa..2577d51 100644
104033--- a/net/ipv6/sysctl_net_ipv6.c
104034+++ b/net/ipv6/sysctl_net_ipv6.c
104035@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
104036
104037 static int __net_init ipv6_sysctl_net_init(struct net *net)
104038 {
104039- struct ctl_table *ipv6_table;
104040+ ctl_table_no_const *ipv6_table;
104041 struct ctl_table *ipv6_route_table;
104042 struct ctl_table *ipv6_icmp_table;
104043 int err;
104044diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
104045index 9c0b54e..5e7bd8f 100644
104046--- a/net/ipv6/tcp_ipv6.c
104047+++ b/net/ipv6/tcp_ipv6.c
104048@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104049 }
104050 }
104051
104052+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104053+extern int grsec_enable_blackhole;
104054+#endif
104055+
104056 static void tcp_v6_hash(struct sock *sk)
104057 {
104058 if (sk->sk_state != TCP_CLOSE) {
104059@@ -1343,6 +1347,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
104060 return 0;
104061
104062 reset:
104063+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104064+ if (!grsec_enable_blackhole)
104065+#endif
104066 tcp_v6_send_reset(sk, skb);
104067 discard:
104068 if (opt_skb)
104069@@ -1443,12 +1450,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
104070
104071 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
104072 inet6_iif(skb));
104073- if (!sk)
104074+ if (!sk) {
104075+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104076+ ret = 1;
104077+#endif
104078 goto no_tcp_socket;
104079+ }
104080
104081 process:
104082- if (sk->sk_state == TCP_TIME_WAIT)
104083+ if (sk->sk_state == TCP_TIME_WAIT) {
104084+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104085+ ret = 2;
104086+#endif
104087 goto do_time_wait;
104088+ }
104089
104090 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
104091 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
104092@@ -1499,6 +1514,10 @@ csum_error:
104093 bad_packet:
104094 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
104095 } else {
104096+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104097+ if (!grsec_enable_blackhole || (ret == 1 &&
104098+ (skb->dev->flags & IFF_LOOPBACK)))
104099+#endif
104100 tcp_v6_send_reset(NULL, skb);
104101 }
104102
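
The GRKERNSEC_BLACKHOLE hunks above thread a small state variable through tcp_v6_rcv() (ret = 1 when no socket matched, 2 for TIME_WAIT) and then suppress the RST unless blackholing is off or the failed lookup happened on a loopback device. The decision they encode, pulled out into a hypothetical helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

extern int grsec_enable_blackhole;      /* sysctl-backed flag from the patch */

static bool may_send_reset(int lookup_state, const struct sk_buff *skb)
{
        if (!grsec_enable_blackhole)
                return true;    /* blackholing off: stock behaviour */
        if (lookup_state == 1 && (skb->dev->flags & IFF_LOOPBACK))
                return true;    /* loopback probes still get RSTs */
        return false;           /* everything else drops silently */
}

The udp.c hunk below applies the same gate to the ICMPv6 port-unreachable reply.
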
104103diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
104104index 189dc4a..458bec0 100644
104105--- a/net/ipv6/udp.c
104106+++ b/net/ipv6/udp.c
104107@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
104108 udp_ipv6_hash_secret + net_hash_mix(net));
104109 }
104110
104111+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104112+extern int grsec_enable_blackhole;
104113+#endif
104114+
104115 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
104116 {
104117 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
104118@@ -448,7 +452,7 @@ try_again:
104119 if (unlikely(err)) {
104120 trace_kfree_skb(skb, udpv6_recvmsg);
104121 if (!peeked) {
104122- atomic_inc(&sk->sk_drops);
104123+ atomic_inc_unchecked(&sk->sk_drops);
104124 if (is_udp4)
104125 UDP_INC_STATS_USER(sock_net(sk),
104126 UDP_MIB_INERRORS,
104127@@ -714,7 +718,7 @@ csum_error:
104128 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104129 drop:
104130 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104131- atomic_inc(&sk->sk_drops);
104132+ atomic_inc_unchecked(&sk->sk_drops);
104133 kfree_skb(skb);
104134 return -1;
104135 }
104136@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104137 if (likely(skb1 == NULL))
104138 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104139 if (!skb1) {
104140- atomic_inc(&sk->sk_drops);
104141+ atomic_inc_unchecked(&sk->sk_drops);
104142 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104143 IS_UDPLITE(sk));
104144 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104145@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104146 goto csum_error;
104147
104148 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104149+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104150+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104151+#endif
104152 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
104153
104154 kfree_skb(skb);
104155diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
104156index 48bf5a0..691985a 100644
104157--- a/net/ipv6/xfrm6_policy.c
104158+++ b/net/ipv6/xfrm6_policy.c
104159@@ -223,11 +223,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104160 }
104161 }
104162
104163-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
104164+static int xfrm6_garbage_collect(struct dst_ops *ops)
104165 {
104166 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
104167
104168- xfrm6_policy_afinfo.garbage_collect(net);
104169+ xfrm_garbage_collect_deferred(net);
104170 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
104171 }
104172
104173@@ -340,19 +340,19 @@ static struct ctl_table xfrm6_policy_table[] = {
104174
104175 static int __net_init xfrm6_net_init(struct net *net)
104176 {
104177- struct ctl_table *table;
104178+ ctl_table_no_const *table = NULL;
104179 struct ctl_table_header *hdr;
104180
104181- table = xfrm6_policy_table;
104182 if (!net_eq(net, &init_net)) {
104183- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104184+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104185 if (!table)
104186 goto err_alloc;
104187
104188 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
104189- }
104190+ hdr = register_net_sysctl(net, "net/ipv6", table);
104191+ } else
104192+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
104193
104194- hdr = register_net_sysctl(net, "net/ipv6", table);
104195 if (!hdr)
104196 goto err_reg;
104197
104198@@ -360,8 +360,7 @@ static int __net_init xfrm6_net_init(struct net *net)
104199 return 0;
104200
104201 err_reg:
104202- if (!net_eq(net, &init_net))
104203- kfree(table);
104204+ kfree(table);
104205 err_alloc:
104206 return -ENOMEM;
104207 }
104208diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
104209index c1d247e..9e5949d 100644
104210--- a/net/ipx/ipx_proc.c
104211+++ b/net/ipx/ipx_proc.c
104212@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
104213 struct proc_dir_entry *p;
104214 int rc = -ENOMEM;
104215
104216- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
104217+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
104218
104219 if (!ipx_proc_dir)
104220 goto out;
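
As with dev_snmp6 earlier, the ipx, irda, and llc proc directories are switched to proc_mkdir_restrict(), grsecurity's variant that applies the configured restricted /proc visibility instead of proc_mkdir()'s world-readable default. Usage is a drop-in replacement (hypothetical module shown; proc_mkdir_restrict() exists only in the patched tree):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <net/net_namespace.h>

static int __init myproto_proc_init(void)
{
        struct proc_dir_entry *dir;

        /* drop-in for proc_mkdir(); honours GRKERNSEC_PROC_* visibility */
        dir = proc_mkdir_restrict("myproto", init_net.proc_net);
        if (!dir)
                return -ENOMEM;
        return 0;
}
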
104221diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
104222index 40695b9..c1f2cef 100644
104223--- a/net/irda/ircomm/ircomm_tty.c
104224+++ b/net/irda/ircomm/ircomm_tty.c
104225@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104226 add_wait_queue(&port->open_wait, &wait);
104227
104228 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
104229- __FILE__, __LINE__, tty->driver->name, port->count);
104230+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104231
104232 spin_lock_irqsave(&port->lock, flags);
104233- port->count--;
104234+ atomic_dec(&port->count);
104235 port->blocked_open++;
104236 spin_unlock_irqrestore(&port->lock, flags);
104237
104238@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104239 }
104240
104241 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
104242- __FILE__, __LINE__, tty->driver->name, port->count);
104243+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104244
104245 schedule();
104246 }
104247@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104248
104249 spin_lock_irqsave(&port->lock, flags);
104250 if (!tty_hung_up_p(filp))
104251- port->count++;
104252+ atomic_inc(&port->count);
104253 port->blocked_open--;
104254 spin_unlock_irqrestore(&port->lock, flags);
104255
104256 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
104257- __FILE__, __LINE__, tty->driver->name, port->count);
104258+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104259
104260 if (!retval)
104261 port->flags |= ASYNC_NORMAL_ACTIVE;
104262@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
104263
104264 /* ++ is not atomic, so this should be protected - Jean II */
104265 spin_lock_irqsave(&self->port.lock, flags);
104266- self->port.count++;
104267+ atomic_inc(&self->port.count);
104268 spin_unlock_irqrestore(&self->port.lock, flags);
104269 tty_port_tty_set(&self->port, tty);
104270
104271 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
104272- self->line, self->port.count);
104273+ self->line, atomic_read(&self->port.count));
104274
104275 /* Not really used by us, but lets do it anyway */
104276 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
104277@@ -959,7 +959,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
104278 tty_kref_put(port->tty);
104279 }
104280 port->tty = NULL;
104281- port->count = 0;
104282+ atomic_set(&port->count, 0);
104283 spin_unlock_irqrestore(&port->lock, flags);
104284
104285 wake_up_interruptible(&port->open_wait);
104286@@ -1306,7 +1306,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
104287 seq_putc(m, '\n');
104288
104289 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
104290- seq_printf(m, "Open count: %d\n", self->port.count);
104291+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
104292 seq_printf(m, "Max data size: %d\n", self->max_data_size);
104293 seq_printf(m, "Max header size: %d\n", self->max_header_size);
104294
104295diff --git a/net/irda/irproc.c b/net/irda/irproc.c
104296index b9ac598..f88cc56 100644
104297--- a/net/irda/irproc.c
104298+++ b/net/irda/irproc.c
104299@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
104300 {
104301 int i;
104302
104303- proc_irda = proc_mkdir("irda", init_net.proc_net);
104304+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
104305 if (proc_irda == NULL)
104306 return;
104307
104308diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
104309index 2e9953b..ed06350 100644
104310--- a/net/iucv/af_iucv.c
104311+++ b/net/iucv/af_iucv.c
104312@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
104313 {
104314 char name[12];
104315
104316- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
104317+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104318 while (__iucv_get_sock_by_name(name)) {
104319 sprintf(name, "%08x",
104320- atomic_inc_return(&iucv_sk_list.autobind_name));
104321+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104322 }
104323 memcpy(iucv->src_name, name, 8);
104324 }
104325diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
104326index 2a6a1fd..6c112b0 100644
104327--- a/net/iucv/iucv.c
104328+++ b/net/iucv/iucv.c
104329@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
104330 return NOTIFY_OK;
104331 }
104332
104333-static struct notifier_block __refdata iucv_cpu_notifier = {
104334+static struct notifier_block iucv_cpu_notifier = {
104335 .notifier_call = iucv_cpu_notify,
104336 };
104337
104338diff --git a/net/key/af_key.c b/net/key/af_key.c
104339index f8ac939..1e189bf 100644
104340--- a/net/key/af_key.c
104341+++ b/net/key/af_key.c
104342@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
104343 static u32 get_acqseq(void)
104344 {
104345 u32 res;
104346- static atomic_t acqseq;
104347+ static atomic_unchecked_t acqseq;
104348
104349 do {
104350- res = atomic_inc_return(&acqseq);
104351+ res = atomic_inc_return_unchecked(&acqseq);
104352 } while (!res);
104353 return res;
104354 }
104355diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
104356index 781b3a2..73a7434 100644
104357--- a/net/l2tp/l2tp_eth.c
104358+++ b/net/l2tp/l2tp_eth.c
104359@@ -42,12 +42,12 @@ struct l2tp_eth {
104360 struct sock *tunnel_sock;
104361 struct l2tp_session *session;
104362 struct list_head list;
104363- atomic_long_t tx_bytes;
104364- atomic_long_t tx_packets;
104365- atomic_long_t tx_dropped;
104366- atomic_long_t rx_bytes;
104367- atomic_long_t rx_packets;
104368- atomic_long_t rx_errors;
104369+ atomic_long_unchecked_t tx_bytes;
104370+ atomic_long_unchecked_t tx_packets;
104371+ atomic_long_unchecked_t tx_dropped;
104372+ atomic_long_unchecked_t rx_bytes;
104373+ atomic_long_unchecked_t rx_packets;
104374+ atomic_long_unchecked_t rx_errors;
104375 };
104376
104377 /* via l2tp_session_priv() */
104378@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
104379 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
104380
104381 if (likely(ret == NET_XMIT_SUCCESS)) {
104382- atomic_long_add(len, &priv->tx_bytes);
104383- atomic_long_inc(&priv->tx_packets);
104384+ atomic_long_add_unchecked(len, &priv->tx_bytes);
104385+ atomic_long_inc_unchecked(&priv->tx_packets);
104386 } else {
104387- atomic_long_inc(&priv->tx_dropped);
104388+ atomic_long_inc_unchecked(&priv->tx_dropped);
104389 }
104390 return NETDEV_TX_OK;
104391 }
104392@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
104393 {
104394 struct l2tp_eth *priv = netdev_priv(dev);
104395
104396- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
104397- stats->tx_packets = atomic_long_read(&priv->tx_packets);
104398- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
104399- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
104400- stats->rx_packets = atomic_long_read(&priv->rx_packets);
104401- stats->rx_errors = atomic_long_read(&priv->rx_errors);
104402+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
104403+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
104404+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
104405+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
104406+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
104407+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
104408 return stats;
104409 }
104410
104411@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
104412 nf_reset(skb);
104413
104414 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
104415- atomic_long_inc(&priv->rx_packets);
104416- atomic_long_add(data_len, &priv->rx_bytes);
104417+ atomic_long_inc_unchecked(&priv->rx_packets);
104418+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
104419 } else {
104420- atomic_long_inc(&priv->rx_errors);
104421+ atomic_long_inc_unchecked(&priv->rx_errors);
104422 }
104423 return;
104424
104425 error:
104426- atomic_long_inc(&priv->rx_errors);
104427+ atomic_long_inc_unchecked(&priv->rx_errors);
104428 kfree_skb(skb);
104429 }
104430
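
The l2tp_eth counters, like sk->sk_drops and the iucv autobind name above, are pure statistics: wrap-around is harmless, so under PaX's REFCOUNT hardening they move to the *_unchecked atomic variants, which skip the overflow trap reserved for real reference counts. A minimal sketch (struct and helper hypothetical; the *_unchecked API comes from the patch itself):

#include <linux/atomic.h>
#include <linux/types.h>

struct link_stats {
        atomic_long_unchecked_t rx_packets;
        atomic_long_unchecked_t rx_errors;
};

static void account_rx(struct link_stats *s, bool ok)
{
        if (ok)
                atomic_long_inc_unchecked(&s->rx_packets);
        else
                atomic_long_inc_unchecked(&s->rx_errors);
}
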
104431diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
104432index 1a3c7e0..80f8b0c 100644
104433--- a/net/llc/llc_proc.c
104434+++ b/net/llc/llc_proc.c
104435@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
104436 int rc = -ENOMEM;
104437 struct proc_dir_entry *p;
104438
104439- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
104440+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
104441 if (!llc_proc_dir)
104442 goto out;
104443
104444diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
104445index e75d5c5..429fc95 100644
104446--- a/net/mac80211/cfg.c
104447+++ b/net/mac80211/cfg.c
104448@@ -543,7 +543,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
104449 ret = ieee80211_vif_use_channel(sdata, chandef,
104450 IEEE80211_CHANCTX_EXCLUSIVE);
104451 }
104452- } else if (local->open_count == local->monitors) {
104453+ } else if (local_read(&local->open_count) == local->monitors) {
104454 local->_oper_chandef = *chandef;
104455 ieee80211_hw_config(local, 0);
104456 }
104457@@ -3416,7 +3416,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
104458 else
104459 local->probe_req_reg--;
104460
104461- if (!local->open_count)
104462+ if (!local_read(&local->open_count))
104463 break;
104464
104465 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
104466@@ -3551,8 +3551,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
104467 if (chanctx_conf) {
104468 *chandef = sdata->vif.bss_conf.chandef;
104469 ret = 0;
104470- } else if (local->open_count > 0 &&
104471- local->open_count == local->monitors &&
104472+ } else if (local_read(&local->open_count) > 0 &&
104473+ local_read(&local->open_count) == local->monitors &&
104474 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
104475 if (local->use_chanctx)
104476 *chandef = local->monitor_chandef;
104477diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
104478index cc6e964..029a3a3 100644
104479--- a/net/mac80211/ieee80211_i.h
104480+++ b/net/mac80211/ieee80211_i.h
104481@@ -29,6 +29,7 @@
104482 #include <net/ieee80211_radiotap.h>
104483 #include <net/cfg80211.h>
104484 #include <net/mac80211.h>
104485+#include <asm/local.h>
104486 #include "key.h"
104487 #include "sta_info.h"
104488 #include "debug.h"
104489@@ -1114,7 +1115,7 @@ struct ieee80211_local {
104490 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
104491 spinlock_t queue_stop_reason_lock;
104492
104493- int open_count;
104494+ local_t open_count;
104495 int monitors, cooked_mntrs;
104496 /* number of interfaces with corresponding FIF_ flags */
104497 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
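
Changing ieee80211_local.open_count from int to local_t forces every access in the mac80211 hunks that follow through explicit local_read()/local_inc()/local_dec(), replacing plain non-atomic int arithmetic with single-word atomic operations that PaX can also overflow-check. The shape of the conversion (types and helpers hypothetical):

#include <linux/types.h>
#include <asm/local.h>

struct hw_state {
        local_t open_count;     /* was: int open_count */
};

static void hw_open(struct hw_state *st)  { local_inc(&st->open_count); }
static void hw_close(struct hw_state *st) { local_dec(&st->open_count); }

static bool hw_is_up(struct hw_state *st)
{
        return local_read(&st->open_count) > 0;
}
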
104498diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
104499index 4173553..e3b5a3f 100644
104500--- a/net/mac80211/iface.c
104501+++ b/net/mac80211/iface.c
104502@@ -543,7 +543,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104503 break;
104504 }
104505
104506- if (local->open_count == 0) {
104507+ if (local_read(&local->open_count) == 0) {
104508 res = drv_start(local);
104509 if (res)
104510 goto err_del_bss;
104511@@ -590,7 +590,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104512 res = drv_add_interface(local, sdata);
104513 if (res)
104514 goto err_stop;
104515- } else if (local->monitors == 0 && local->open_count == 0) {
104516+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
104517 res = ieee80211_add_virtual_monitor(local);
104518 if (res)
104519 goto err_stop;
104520@@ -700,7 +700,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104521 atomic_inc(&local->iff_promiscs);
104522
104523 if (coming_up)
104524- local->open_count++;
104525+ local_inc(&local->open_count);
104526
104527 if (hw_reconf_flags)
104528 ieee80211_hw_config(local, hw_reconf_flags);
104529@@ -738,7 +738,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104530 err_del_interface:
104531 drv_remove_interface(local, sdata);
104532 err_stop:
104533- if (!local->open_count)
104534+ if (!local_read(&local->open_count))
104535 drv_stop(local);
104536 err_del_bss:
104537 sdata->bss = NULL;
104538@@ -906,7 +906,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104539 }
104540
104541 if (going_down)
104542- local->open_count--;
104543+ local_dec(&local->open_count);
104544
104545 switch (sdata->vif.type) {
104546 case NL80211_IFTYPE_AP_VLAN:
104547@@ -968,7 +968,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104548 }
104549 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
104550
104551- if (local->open_count == 0)
104552+ if (local_read(&local->open_count) == 0)
104553 ieee80211_clear_tx_pending(local);
104554
104555 /*
104556@@ -1011,7 +1011,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104557 if (cancel_scan)
104558 flush_delayed_work(&local->scan_work);
104559
104560- if (local->open_count == 0) {
104561+ if (local_read(&local->open_count) == 0) {
104562 ieee80211_stop_device(local);
104563
104564 /* no reconfiguring after stop! */
104565@@ -1022,7 +1022,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104566 ieee80211_configure_filter(local);
104567 ieee80211_hw_config(local, hw_reconf_flags);
104568
104569- if (local->monitors == local->open_count)
104570+ if (local->monitors == local_read(&local->open_count))
104571 ieee80211_add_virtual_monitor(local);
104572 }
104573
104574diff --git a/net/mac80211/main.c b/net/mac80211/main.c
104575index 6ab99da..f9502d4 100644
104576--- a/net/mac80211/main.c
104577+++ b/net/mac80211/main.c
104578@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
104579 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
104580 IEEE80211_CONF_CHANGE_POWER);
104581
104582- if (changed && local->open_count) {
104583+ if (changed && local_read(&local->open_count)) {
104584 ret = drv_config(local, changed);
104585 /*
104586 * Goal:
104587diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
104588index 4a95fe3..0bfd713 100644
104589--- a/net/mac80211/pm.c
104590+++ b/net/mac80211/pm.c
104591@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104592 struct ieee80211_sub_if_data *sdata;
104593 struct sta_info *sta;
104594
104595- if (!local->open_count)
104596+ if (!local_read(&local->open_count))
104597 goto suspend;
104598
104599 ieee80211_scan_cancel(local);
104600@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104601 cancel_work_sync(&local->dynamic_ps_enable_work);
104602 del_timer_sync(&local->dynamic_ps_timer);
104603
104604- local->wowlan = wowlan && local->open_count;
104605+ local->wowlan = wowlan && local_read(&local->open_count);
104606 if (local->wowlan) {
104607 int err = drv_suspend(local, wowlan);
104608 if (err < 0) {
104609@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104610 WARN_ON(!list_empty(&local->chanctx_list));
104611
104612 /* stop hardware - this must stop RX */
104613- if (local->open_count)
104614+ if (local_read(&local->open_count))
104615 ieee80211_stop_device(local);
104616
104617 suspend:
104618diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
104619index d53355b..21f583a 100644
104620--- a/net/mac80211/rate.c
104621+++ b/net/mac80211/rate.c
104622@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
104623
104624 ASSERT_RTNL();
104625
104626- if (local->open_count)
104627+ if (local_read(&local->open_count))
104628 return -EBUSY;
104629
104630 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
104631diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
104632index 058686a..097821b 100644
104633--- a/net/mac80211/tx.c
104634+++ b/net/mac80211/tx.c
104635@@ -566,6 +566,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
104636 if (tx->sdata->control_port_no_encrypt)
104637 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
104638 info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
104639+ info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
104640 }
104641
104642 return TX_CONTINUE;
104643diff --git a/net/mac80211/util.c b/net/mac80211/util.c
104644index 974ebe7..57bcd3c 100644
104645--- a/net/mac80211/util.c
104646+++ b/net/mac80211/util.c
104647@@ -1757,7 +1757,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104648 }
104649 #endif
104650 /* everything else happens only if HW was up & running */
104651- if (!local->open_count)
104652+ if (!local_read(&local->open_count))
104653 goto wake_up;
104654
104655 /*
104656@@ -1987,7 +1987,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104657 local->in_reconfig = false;
104658 barrier();
104659
104660- if (local->monitors == local->open_count && local->monitors > 0)
104661+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
104662 ieee80211_add_virtual_monitor(local);
104663
104664 /*
104665diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
104666index b02660f..c0f791c 100644
104667--- a/net/netfilter/Kconfig
104668+++ b/net/netfilter/Kconfig
104669@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
104670
104671 To compile it as a module, choose M here. If unsure, say N.
104672
104673+config NETFILTER_XT_MATCH_GRADM
104674+ tristate '"gradm" match support'
104675+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
104676+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
104677+ ---help---
104678+	  The gradm match allows matching on whether grsecurity RBAC is
104679+	  enabled. It is useful when iptables rules are applied early on
104680+	  bootup to prevent connections to the machine (except from a
104681+	  trusted host) while the RBAC system is still disabled.
104682+
104683 config NETFILTER_XT_MATCH_HASHLIMIT
104684 tristate '"hashlimit" match support'
104685 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
104686diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
104687index 89f73a9..e4e5bd9 100644
104688--- a/net/netfilter/Makefile
104689+++ b/net/netfilter/Makefile
104690@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
104691 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
104692 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
104693 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
104694+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
104695 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
104696 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
104697 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
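
These two build hooks enable net/netfilter/xt_gradm.c, which is added as a new file at the end of this section. For orientation, the generic shape of an xtables match module of this kind looks like the sketch below; the match-data size and the RBAC test are placeholders, not the real implementation (note the struct deliberately omits __read_mostly, in line with this patch's treatment of ops structures such as sit_link_ops above):

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

static bool gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
        return true;    /* placeholder: real code tests RBAC state */
}

static struct xt_match gradm_mt_reg = {
        .name      = "gradm",
        .revision  = 0,
        .family    = NFPROTO_UNSPEC,
        .match     = gradm_mt,
        .matchsize = XT_ALIGN(4),       /* placeholder match-info size */
        .me        = THIS_MODULE,
};

static int __init gradm_mt_init(void)
{
        return xt_register_match(&gradm_mt_reg);
}

static void __exit gradm_mt_exit(void)
{
        xt_unregister_match(&gradm_mt_reg);
}

module_init(gradm_mt_init);
module_exit(gradm_mt_exit);
MODULE_LICENSE("GPL");
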
104698diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
104699index d259da3..6a32b2c 100644
104700--- a/net/netfilter/ipset/ip_set_core.c
104701+++ b/net/netfilter/ipset/ip_set_core.c
104702@@ -1952,7 +1952,7 @@ done:
104703 return ret;
104704 }
104705
104706-static struct nf_sockopt_ops so_set __read_mostly = {
104707+static struct nf_sockopt_ops so_set = {
104708 .pf = PF_INET,
104709 .get_optmin = SO_IP_SET,
104710 .get_optmax = SO_IP_SET + 1,
104711diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
104712index b0f7b62..0541842 100644
104713--- a/net/netfilter/ipvs/ip_vs_conn.c
104714+++ b/net/netfilter/ipvs/ip_vs_conn.c
104715@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
104716 /* Increase the refcnt counter of the dest */
104717 ip_vs_dest_hold(dest);
104718
104719- conn_flags = atomic_read(&dest->conn_flags);
104720+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
104721 if (cp->protocol != IPPROTO_UDP)
104722 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
104723 flags = cp->flags;
104724@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
104725
104726 cp->control = NULL;
104727 atomic_set(&cp->n_control, 0);
104728- atomic_set(&cp->in_pkts, 0);
104729+ atomic_set_unchecked(&cp->in_pkts, 0);
104730
104731 cp->packet_xmit = NULL;
104732 cp->app = NULL;
104733@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
104734
104735 /* Don't drop the entry if its number of incoming packets is not
104736 located in [0, 8] */
104737- i = atomic_read(&cp->in_pkts);
104738+ i = atomic_read_unchecked(&cp->in_pkts);
104739 if (i > 8 || i < 0) return 0;
104740
104741 if (!todrop_rate[i]) return 0;
104742diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
104743index b87ca32..76c7799 100644
104744--- a/net/netfilter/ipvs/ip_vs_core.c
104745+++ b/net/netfilter/ipvs/ip_vs_core.c
104746@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
104747 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
104748 /* do not touch skb anymore */
104749
104750- atomic_inc(&cp->in_pkts);
104751+ atomic_inc_unchecked(&cp->in_pkts);
104752 ip_vs_conn_put(cp);
104753 return ret;
104754 }
104755@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
104756 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
104757 pkts = sysctl_sync_threshold(ipvs);
104758 else
104759- pkts = atomic_add_return(1, &cp->in_pkts);
104760+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104761
104762 if (ipvs->sync_state & IP_VS_STATE_MASTER)
104763 ip_vs_sync_conn(net, cp, pkts);
104764diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
104765index b8295a4..17ff579 100644
104766--- a/net/netfilter/ipvs/ip_vs_ctl.c
104767+++ b/net/netfilter/ipvs/ip_vs_ctl.c
104768@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
104769 */
104770 ip_vs_rs_hash(ipvs, dest);
104771 }
104772- atomic_set(&dest->conn_flags, conn_flags);
104773+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
104774
104775 /* bind the service */
104776 old_svc = rcu_dereference_protected(dest->svc, 1);
104777@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
104778 * align with netns init in ip_vs_control_net_init()
104779 */
104780
104781-static struct ctl_table vs_vars[] = {
104782+static ctl_table_no_const vs_vars[] __read_only = {
104783 {
104784 .procname = "amemthresh",
104785 .maxlen = sizeof(int),
104786@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104787 " %-7s %-6d %-10d %-10d\n",
104788 &dest->addr.in6,
104789 ntohs(dest->port),
104790- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104791+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104792 atomic_read(&dest->weight),
104793 atomic_read(&dest->activeconns),
104794 atomic_read(&dest->inactconns));
104795@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104796 "%-7s %-6d %-10d %-10d\n",
104797 ntohl(dest->addr.ip),
104798 ntohs(dest->port),
104799- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104800+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104801 atomic_read(&dest->weight),
104802 atomic_read(&dest->activeconns),
104803 atomic_read(&dest->inactconns));
104804@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
104805
104806 entry.addr = dest->addr.ip;
104807 entry.port = dest->port;
104808- entry.conn_flags = atomic_read(&dest->conn_flags);
104809+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
104810 entry.weight = atomic_read(&dest->weight);
104811 entry.u_threshold = dest->u_threshold;
104812 entry.l_threshold = dest->l_threshold;
104813@@ -3039,7 +3039,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
104814 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
104815 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
104816 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
104817- (atomic_read(&dest->conn_flags) &
104818+ (atomic_read_unchecked(&dest->conn_flags) &
104819 IP_VS_CONN_F_FWD_MASK)) ||
104820 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
104821 atomic_read(&dest->weight)) ||
104822@@ -3672,7 +3672,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
104823 {
104824 int idx;
104825 struct netns_ipvs *ipvs = net_ipvs(net);
104826- struct ctl_table *tbl;
104827+ ctl_table_no_const *tbl;
104828
104829 atomic_set(&ipvs->dropentry, 0);
104830 spin_lock_init(&ipvs->dropentry_lock);
104831diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
104832index 127f140..553d652 100644
104833--- a/net/netfilter/ipvs/ip_vs_lblc.c
104834+++ b/net/netfilter/ipvs/ip_vs_lblc.c
104835@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
104836 * IPVS LBLC sysctl table
104837 */
104838 #ifdef CONFIG_SYSCTL
104839-static struct ctl_table vs_vars_table[] = {
104840+static ctl_table_no_const vs_vars_table[] __read_only = {
104841 {
104842 .procname = "lblc_expiration",
104843 .data = NULL,
104844diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
104845index 2229d2d..b32b785 100644
104846--- a/net/netfilter/ipvs/ip_vs_lblcr.c
104847+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
104848@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
104849 * IPVS LBLCR sysctl table
104850 */
104851
104852-static struct ctl_table vs_vars_table[] = {
104853+static ctl_table_no_const vs_vars_table[] __read_only = {
104854 {
104855 .procname = "lblcr_expiration",
104856 .data = NULL,
104857diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
104858index c47ffd7..d233a81 100644
104859--- a/net/netfilter/ipvs/ip_vs_sync.c
104860+++ b/net/netfilter/ipvs/ip_vs_sync.c
104861@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
104862 cp = cp->control;
104863 if (cp) {
104864 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104865- pkts = atomic_add_return(1, &cp->in_pkts);
104866+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104867 else
104868 pkts = sysctl_sync_threshold(ipvs);
104869 ip_vs_sync_conn(net, cp->control, pkts);
104870@@ -771,7 +771,7 @@ control:
104871 if (!cp)
104872 return;
104873 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104874- pkts = atomic_add_return(1, &cp->in_pkts);
104875+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104876 else
104877 pkts = sysctl_sync_threshold(ipvs);
104878 goto sloop;
104879@@ -900,7 +900,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
104880
104881 if (opt)
104882 memcpy(&cp->in_seq, opt, sizeof(*opt));
104883- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
104884+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
104885 cp->state = state;
104886 cp->old_state = cp->state;
104887 /*
104888diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
104889index 3aedbda..6a63567 100644
104890--- a/net/netfilter/ipvs/ip_vs_xmit.c
104891+++ b/net/netfilter/ipvs/ip_vs_xmit.c
104892@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
104893 else
104894 rc = NF_ACCEPT;
104895 /* do not touch skb anymore */
104896- atomic_inc(&cp->in_pkts);
104897+ atomic_inc_unchecked(&cp->in_pkts);
104898 goto out;
104899 }
104900
104901@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
104902 else
104903 rc = NF_ACCEPT;
104904 /* do not touch skb anymore */
104905- atomic_inc(&cp->in_pkts);
104906+ atomic_inc_unchecked(&cp->in_pkts);
104907 goto out;
104908 }
104909
104910diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
104911index a4b5e2a..13b1de3 100644
104912--- a/net/netfilter/nf_conntrack_acct.c
104913+++ b/net/netfilter/nf_conntrack_acct.c
104914@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
104915 #ifdef CONFIG_SYSCTL
104916 static int nf_conntrack_acct_init_sysctl(struct net *net)
104917 {
104918- struct ctl_table *table;
104919+ ctl_table_no_const *table;
104920
104921 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
104922 GFP_KERNEL);
104923diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
104924index 46d1b26..b7f3b76 100644
104925--- a/net/netfilter/nf_conntrack_core.c
104926+++ b/net/netfilter/nf_conntrack_core.c
104927@@ -1734,6 +1734,10 @@ void nf_conntrack_init_end(void)
104928 #define DYING_NULLS_VAL ((1<<30)+1)
104929 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
104930
104931+#ifdef CONFIG_GRKERNSEC_HIDESYM
104932+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
104933+#endif
104934+
104935 int nf_conntrack_init_net(struct net *net)
104936 {
104937 int ret = -ENOMEM;
104938@@ -1759,7 +1763,11 @@ int nf_conntrack_init_net(struct net *net)
104939 if (!net->ct.stat)
104940 goto err_pcpu_lists;
104941
104942+#ifdef CONFIG_GRKERNSEC_HIDESYM
104943+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
104944+#else
104945 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
104946+#endif
104947 if (!net->ct.slabname)
104948 goto err_slabname;
104949
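
With GRKERNSEC_HIDESYM enabled, embedding the %p-formatted netns pointer in the conntrack slab cache name would leak a kernel address through /proc/slabinfo, so the hunk derives the name from a monotonically increasing counter instead. The naming path in isolation (helper name hypothetical):

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static atomic_unchecked_t cache_id = ATOMIC_INIT(0);

static char *conntrack_cache_name(void)
{
        /* "%08x" of a counter instead of "%p" of the netns pointer */
        return kasprintf(GFP_KERNEL, "nf_conntrack_%08x",
                         atomic_inc_return_unchecked(&cache_id));
}
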
104950diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
104951index 4e78c57..ec8fb74 100644
104952--- a/net/netfilter/nf_conntrack_ecache.c
104953+++ b/net/netfilter/nf_conntrack_ecache.c
104954@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
104955 #ifdef CONFIG_SYSCTL
104956 static int nf_conntrack_event_init_sysctl(struct net *net)
104957 {
104958- struct ctl_table *table;
104959+ ctl_table_no_const *table;
104960
104961 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
104962 GFP_KERNEL);
104963diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
104964index bd9d315..989947e 100644
104965--- a/net/netfilter/nf_conntrack_helper.c
104966+++ b/net/netfilter/nf_conntrack_helper.c
104967@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
104968
104969 static int nf_conntrack_helper_init_sysctl(struct net *net)
104970 {
104971- struct ctl_table *table;
104972+ ctl_table_no_const *table;
104973
104974 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
104975 GFP_KERNEL);
104976diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
104977index b65d586..beec902 100644
104978--- a/net/netfilter/nf_conntrack_proto.c
104979+++ b/net/netfilter/nf_conntrack_proto.c
104980@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
104981
104982 static void
104983 nf_ct_unregister_sysctl(struct ctl_table_header **header,
104984- struct ctl_table **table,
104985+ ctl_table_no_const **table,
104986 unsigned int users)
104987 {
104988 if (users > 0)
104989diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
104990index fc823fa..8311af3 100644
104991--- a/net/netfilter/nf_conntrack_standalone.c
104992+++ b/net/netfilter/nf_conntrack_standalone.c
104993@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
104994
104995 static int nf_conntrack_standalone_init_sysctl(struct net *net)
104996 {
104997- struct ctl_table *table;
104998+ ctl_table_no_const *table;
104999
105000 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
105001 GFP_KERNEL);
105002diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
105003index 7a394df..bd91a8a 100644
105004--- a/net/netfilter/nf_conntrack_timestamp.c
105005+++ b/net/netfilter/nf_conntrack_timestamp.c
105006@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
105007 #ifdef CONFIG_SYSCTL
105008 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
105009 {
105010- struct ctl_table *table;
105011+ ctl_table_no_const *table;
105012
105013 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
105014 GFP_KERNEL);
105015diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
105016index 43c926c..a5731d8 100644
105017--- a/net/netfilter/nf_log.c
105018+++ b/net/netfilter/nf_log.c
105019@@ -362,7 +362,7 @@ static const struct file_operations nflog_file_ops = {
105020
105021 #ifdef CONFIG_SYSCTL
105022 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
105023-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
105024+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
105025
105026 static int nf_log_proc_dostring(struct ctl_table *table, int write,
105027 void __user *buffer, size_t *lenp, loff_t *ppos)
105028@@ -393,13 +393,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
105029 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
105030 mutex_unlock(&nf_log_mutex);
105031 } else {
105032+ ctl_table_no_const nf_log_table = *table;
105033+
105034 mutex_lock(&nf_log_mutex);
105035 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
105036 if (!logger)
105037- table->data = "NONE";
105038+ nf_log_table.data = "NONE";
105039 else
105040- table->data = logger->name;
105041- r = proc_dostring(table, write, buffer, lenp, ppos);
105042+ nf_log_table.data = logger->name;
105043+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
105044 mutex_unlock(&nf_log_mutex);
105045 }
105046
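
Since nf_log_sysctl_table is now declared __read_only, nf_log_proc_dostring() can no longer retarget table->data in place for the read case; the hunk copies the entry to the stack and hands the writable copy to proc_dostring(). The same pattern as a hypothetical helper (compiles within the patched tree, where ctl_table_no_const is defined):

#include <linux/sysctl.h>

static int log_name_dostring(struct ctl_table *ro_entry, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos, const char *logger_name)
{
        ctl_table_no_const shadow = *ro_entry; /* writable struct copy */

        shadow.data = (void *)(logger_name ? logger_name : "NONE");
        return proc_dostring(&shadow, write, buffer, lenp, ppos);
}
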
105047diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
105048index c68c1e5..8b5d670 100644
105049--- a/net/netfilter/nf_sockopt.c
105050+++ b/net/netfilter/nf_sockopt.c
105051@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
105052 }
105053 }
105054
105055- list_add(&reg->list, &nf_sockopts);
105056+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
105057 out:
105058 mutex_unlock(&nf_sockopt_mutex);
105059 return ret;
105060@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
105061 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
105062 {
105063 mutex_lock(&nf_sockopt_mutex);
105064- list_del(&reg->list);
105065+ pax_list_del((struct list_head *)&reg->list);
105066 mutex_unlock(&nf_sockopt_mutex);
105067 }
105068 EXPORT_SYMBOL(nf_unregister_sockopt);
105069diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
105070index 11d85b3..7fcc420 100644
105071--- a/net/netfilter/nfnetlink_log.c
105072+++ b/net/netfilter/nfnetlink_log.c
105073@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
105074 struct nfnl_log_net {
105075 spinlock_t instances_lock;
105076 struct hlist_head instance_table[INSTANCE_BUCKETS];
105077- atomic_t global_seq;
105078+ atomic_unchecked_t global_seq;
105079 };
105080
105081 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
105082@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
105083 /* global sequence number */
105084 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
105085 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
105086- htonl(atomic_inc_return(&log->global_seq))))
105087+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
105088 goto nla_put_failure;
105089
105090 if (data_len) {
105091diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
105092new file mode 100644
105093index 0000000..c566332
105094--- /dev/null
105095+++ b/net/netfilter/xt_gradm.c
105096@@ -0,0 +1,51 @@
105097+/*
105098+ * gradm match for netfilter
105099