grsecurity-3.1-3.19.1-201503122205.patch
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index a311db8..415b28c 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -642,7 +643,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -665,7 +688,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -693,7 +716,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -724,7 +747,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 176d4fe..17ceefa 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1191,6 +1191,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+
327 hashdist= [KNL,NUMA] Large hashes allocated during boot
328 are distributed across NUMA nodes. Defaults on
329 for 64-bit NUMA, off otherwise.
330@@ -2283,6 +2287,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
331 noexec=on: enable non-executable mappings (default)
332 noexec=off: disable non-executable mappings
333
334+ nopcid [X86-64]
335+ Disable PCID (Process-Context IDentifier) even if it
336+ is supported by the processor.
337+
338 nosmap [X86]
339 Disable SMAP (Supervisor Mode Access Prevention)
340 even if it is supported by processor.
341@@ -2584,6 +2592,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
342 the specified number of seconds. This is to be used if
343 your oopses keep scrolling off the screen.
344
345+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
346+ virtualization environments that don't cope well with the
347+ expand down segment used by UDEREF on X86-32 or the frequent
348+ page table updates on X86-64.
349+
350+ pax_sanitize_slab=
351+ Format: { 0 | 1 | off | fast | full }
352+ Options '0' and '1' are only provided for backward
353+ compatibility, 'off' or 'fast' should be used instead.
354+ 0|off : disable slab object sanitization
355+ 1|fast: enable slab object sanitization excluding
356+ whitelisted slabs (default)
357+ full : sanitize all slabs, even the whitelisted ones
358+
359+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
360+
361+ pax_extra_latent_entropy
362+ Enable a very simple form of latent entropy extraction
363+ from the first 4GB of memory as the bootmem allocator
364+ passes the memory pages to the buddy allocator.
365+
366+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
367+ when the processor supports PCID.
368+
369 pcbit= [HW,ISDN]
370
371 pcd. [PARIDE]
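
As an aside (illustrative, not part of the patch): the pax_sanitize_slab= value list above maps directly onto a three-way mode switch, with "0"/"off" and "1"/"fast" as documented aliases. A minimal userspace C sketch of that parsing, with hypothetical names:

#include <stdio.h>
#include <string.h>

/* Hypothetical modes mirroring the documented values. */
enum sanitize_mode { SANITIZE_OFF, SANITIZE_FAST, SANITIZE_FULL };

/* "0"/"off", "1"/"fast" and "full", exactly as listed above. */
static int parse_sanitize_slab(const char *str, enum sanitize_mode *mode)
{
	if (!strcmp(str, "0") || !strcmp(str, "off"))
		*mode = SANITIZE_OFF;
	else if (!strcmp(str, "1") || !strcmp(str, "fast"))
		*mode = SANITIZE_FAST;
	else if (!strcmp(str, "full"))
		*mode = SANITIZE_FULL;
	else
		return -1;	/* unrecognized value */
	return 0;
}

int main(void)
{
	enum sanitize_mode mode;
	if (parse_sanitize_slab("fast", &mode) == 0)
		printf("mode = %d\n", mode);	/* prints 1 (SANITIZE_FAST) */
	return 0;
}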
372diff --git a/Makefile b/Makefile
373index 688777b..2821d8c 100644
374--- a/Makefile
375+++ b/Makefile
376@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
377 HOSTCC = gcc
378 HOSTCXX = g++
379 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
380-HOSTCXXFLAGS = -O2
381+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
382+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
383+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
384
385 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
386 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
387@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
388 # Rules shared between *config targets and build targets
389
390 # Basic helpers built in scripts/
391-PHONY += scripts_basic
392-scripts_basic:
393+PHONY += scripts_basic gcc-plugins
394+scripts_basic: gcc-plugins
395 $(Q)$(MAKE) $(build)=scripts/basic
396 $(Q)rm -f .tmp_quiet_recordmcount
397
398@@ -622,6 +624,72 @@ endif
399 # Tell gcc to never replace conditional load with a non-conditional one
400 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
401
402+ifndef DISABLE_PAX_PLUGINS
403+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
404+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
405+else
406+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
407+endif
408+ifneq ($(PLUGINCC),)
409+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
410+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
411+endif
412+ifdef CONFIG_PAX_MEMORY_STACKLEAK
413+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
414+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
415+endif
416+ifdef CONFIG_KALLOCSTAT_PLUGIN
417+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
418+endif
419+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
420+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
421+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
422+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
423+endif
424+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
425+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
426+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
427+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
428+endif
429+endif
430+ifdef CONFIG_CHECKER_PLUGIN
431+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
432+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
433+endif
434+endif
435+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
436+ifdef CONFIG_PAX_SIZE_OVERFLOW
437+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
438+endif
439+ifdef CONFIG_PAX_LATENT_ENTROPY
440+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
441+endif
442+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
443+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
444+endif
445+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
446+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
447+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
448+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
449+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
450+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
451+ifeq ($(KBUILD_EXTMOD),)
452+gcc-plugins:
453+ $(Q)$(MAKE) $(build)=tools/gcc
454+else
455+gcc-plugins: ;
456+endif
457+else
458+gcc-plugins:
459+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
460+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
461+else
462+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
463+endif
464+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
465+endif
466+endif
467+
468 ifdef CONFIG_READABLE_ASM
469 # Disable optimizations that make assembler listings hard to read.
470 # reorder blocks reorders the control in the function
471@@ -714,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
472 else
473 KBUILD_CFLAGS += -g
474 endif
475-KBUILD_AFLAGS += -Wa,-gdwarf-2
476+KBUILD_AFLAGS += -Wa,--gdwarf-2
477 endif
478 ifdef CONFIG_DEBUG_INFO_DWARF4
479 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
480@@ -879,7 +947,7 @@ export mod_sign_cmd
481
482
483 ifeq ($(KBUILD_EXTMOD),)
484-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
485+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
486
487 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
488 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
489@@ -926,6 +994,8 @@ endif
490
491 # The actual objects are generated when descending,
492 # make sure no implicit rule kicks in
493+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
494+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
495 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
496
497 # Handle descending into subdirectories listed in $(vmlinux-dirs)
498@@ -935,7 +1005,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
499 # Error messages still appears in the original language
500
501 PHONY += $(vmlinux-dirs)
502-$(vmlinux-dirs): prepare scripts
503+$(vmlinux-dirs): gcc-plugins prepare scripts
504 $(Q)$(MAKE) $(build)=$@
505
506 define filechk_kernel.release
507@@ -978,10 +1048,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
508
509 archprepare: archheaders archscripts prepare1 scripts_basic
510
511+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
512+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
513 prepare0: archprepare FORCE
514 $(Q)$(MAKE) $(build)=.
515
516 # All the preparing..
517+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
518 prepare: prepare0
519
520 # Generate some files
521@@ -1095,6 +1168,8 @@ all: modules
522 # using awk while concatenating to the final file.
523
524 PHONY += modules
525+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
526+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
527 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
528 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
529 @$(kecho) ' Building modules, stage 2.';
530@@ -1110,7 +1185,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
531
532 # Target to prepare building external modules
533 PHONY += modules_prepare
534-modules_prepare: prepare scripts
535+modules_prepare: gcc-plugins prepare scripts
536
537 # Target to install modules
538 PHONY += modules_install
539@@ -1176,7 +1251,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
540 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
541 signing_key.priv signing_key.x509 x509.genkey \
542 extra_certificates signing_key.x509.keyid \
543- signing_key.x509.signer
544+ signing_key.x509.signer \
545+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
546+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
547+ tools/gcc/randomize_layout_seed.h
548
549 # clean - Delete most, but leave enough to build external modules
550 #
551@@ -1215,7 +1293,7 @@ distclean: mrproper
552 @find $(srctree) $(RCS_FIND_IGNORE) \
553 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
554 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
555- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
556+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
557 -type f -print | xargs rm -f
558
559
560@@ -1381,6 +1459,8 @@ PHONY += $(module-dirs) modules
561 $(module-dirs): crmodverdir $(objtree)/Module.symvers
562 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
563
564+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
565+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
566 modules: $(module-dirs)
567 @$(kecho) ' Building modules, stage 2.';
568 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
569@@ -1521,17 +1601,21 @@ else
570 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
571 endif
572
573-%.s: %.c prepare scripts FORCE
574+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
575+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
576+%.s: %.c gcc-plugins prepare scripts FORCE
577 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
578 %.i: %.c prepare scripts FORCE
579 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
580-%.o: %.c prepare scripts FORCE
581+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
582+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
583+%.o: %.c gcc-plugins prepare scripts FORCE
584 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
585 %.lst: %.c prepare scripts FORCE
586 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
587-%.s: %.S prepare scripts FORCE
588+%.s: %.S gcc-plugins prepare scripts FORCE
589 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
590-%.o: %.S prepare scripts FORCE
591+%.o: %.S gcc-plugins prepare scripts FORCE
592 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
593 %.symtypes: %.c prepare scripts FORCE
594 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
595@@ -1543,11 +1627,15 @@ endif
596 $(build)=$(build-dir)
597 # Make sure the latest headers are built for Documentation
598 Documentation/: headers_install
599-%/: prepare scripts FORCE
600+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
601+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
602+%/: gcc-plugins prepare scripts FORCE
603 $(cmd_crmodverdir)
604 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
605 $(build)=$(build-dir)
606-%.ko: prepare scripts FORCE
607+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
608+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
609+%.ko: gcc-plugins prepare scripts FORCE
610 $(cmd_crmodverdir)
611 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
612 $(build)=$(build-dir) $(@:.ko=.o)
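
For context (a sketch, not taken from the patch): every -fplugin= option composed above loads a shared object that must export plugin_is_GPL_compatible and a plugin_init entry point; gcc-plugin.sh probes whether such an object can be built, preferring HOSTCXX for gcc >= 4.8 since the plugin headers became C++ there. A minimal skeleton, with the build command line given only as an assumption:

/* Minimal GCC plugin skeleton (illustrative, not part of the patch).
 * Assumed build: gcc -shared -fPIC \
 *   -I`gcc -print-file-name=plugin`/include -o demo_plugin.so demo_plugin.c
 */
#include "gcc-plugin.h"
#include "plugin-version.h"

/* GCC refuses to load plugins that do not declare GPL compatibility. */
int plugin_is_GPL_compatible;

int plugin_init(struct plugin_name_args *plugin_info,
		struct plugin_gcc_version *version)
{
	/* Reject plugins built against a different GCC release. */
	if (!plugin_default_version_check(version, &gcc_version))
		return 1;

	/* A real plugin (constify, stackleak, size_overflow, ...) would
	 * register its passes or callbacks here via register_callback(). */
	return 0;
}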
613diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
614index 8f8eafb..3405f46 100644
615--- a/arch/alpha/include/asm/atomic.h
616+++ b/arch/alpha/include/asm/atomic.h
617@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
618 #define atomic_dec(v) atomic_sub(1,(v))
619 #define atomic64_dec(v) atomic64_sub(1,(v))
620
621+#define atomic64_read_unchecked(v) atomic64_read(v)
622+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
623+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
624+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
625+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
626+#define atomic64_inc_unchecked(v) atomic64_inc(v)
627+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
628+#define atomic64_dec_unchecked(v) atomic64_dec(v)
629+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
630+
631 #endif /* _ALPHA_ATOMIC_H */
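
The *_unchecked aliases above collapse into the plain operations because alpha carries no PAX_REFCOUNT instrumentation; on architectures that do implement it, the split behaves roughly like this userspace sketch (illustrative, names made up): checked ops trap on signed overflow, unchecked ops deliberately wrap.

#include <stdio.h>
#include <stdlib.h>

/* Checked op: trap (here: abort) instead of letting a counter wrap. */
static int checked_add(int a, int b)
{
	int res;
	if (__builtin_add_overflow(a, b, &res))	/* GCC/Clang builtin */
		abort();			/* stand-in for the REFCOUNT trap */
	return res;
}

/* Unchecked op: well-defined wraparound via unsigned arithmetic. */
static int unchecked_add(int a, int b)
{
	return (int)((unsigned int)a + (unsigned int)b);
}

int main(void)
{
	printf("%d\n", unchecked_add(0x7fffffff, 1));	/* wraps to INT_MIN */
	printf("%d\n", checked_add(1, 2));		/* fine: 3 */
	checked_add(0x7fffffff, 1);			/* aborts */
	return 0;
}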
632diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
633index ad368a9..fbe0f25 100644
634--- a/arch/alpha/include/asm/cache.h
635+++ b/arch/alpha/include/asm/cache.h
636@@ -4,19 +4,19 @@
637 #ifndef __ARCH_ALPHA_CACHE_H
638 #define __ARCH_ALPHA_CACHE_H
639
640+#include <linux/const.h>
641
642 /* Bytes per L1 (data) cache line. */
643 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
644-# define L1_CACHE_BYTES 64
645 # define L1_CACHE_SHIFT 6
646 #else
647 /* Both EV4 and EV5 are write-through, read-allocate,
648 direct-mapped, physical.
649 */
650-# define L1_CACHE_BYTES 32
651 # define L1_CACHE_SHIFT 5
652 #endif
653
654+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
655 #define SMP_CACHE_BYTES L1_CACHE_BYTES
656
657 #endif
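
The rewrite above derives L1_CACHE_BYTES from the shift instead of keeping two literals in sync, and uses _AC() so the constant is an unsigned long in C yet still assembles. Paraphrasing include/uapi/linux/const.h (a sketch for illustration):

#include <stdio.h>

/* _AC(X,Y) glues a type suffix on in C; under __ASSEMBLY__ it drops the
 * suffix the assembler would choke on, so one definition serves both. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	6
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)	/* 64, as unsigned long */

int main(void)
{
	printf("%lu\n", L1_CACHE_BYTES);	/* 64 */
	return 0;
}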
658diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
659index 968d999..d36b2df 100644
660--- a/arch/alpha/include/asm/elf.h
661+++ b/arch/alpha/include/asm/elf.h
662@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
663
664 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
665
666+#ifdef CONFIG_PAX_ASLR
667+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
668+
669+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
670+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
671+#endif
672+
673 /* $0 is set by ld.so to a pointer to a function which might be
674 registered using atexit. This provides a mean for the dynamic
675 linker to call DT_FINI functions for shared libraries that have
676diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
677index aab14a0..b4fa3e7 100644
678--- a/arch/alpha/include/asm/pgalloc.h
679+++ b/arch/alpha/include/asm/pgalloc.h
680@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
681 pgd_set(pgd, pmd);
682 }
683
684+static inline void
685+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
686+{
687+ pgd_populate(mm, pgd, pmd);
688+}
689+
690 extern pgd_t *pgd_alloc(struct mm_struct *mm);
691
692 static inline void
693diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
694index d8f9b7e..f6222fa 100644
695--- a/arch/alpha/include/asm/pgtable.h
696+++ b/arch/alpha/include/asm/pgtable.h
697@@ -102,6 +102,17 @@ struct vm_area_struct;
698 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
699 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
700 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
704+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
705+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
706+#else
707+# define PAGE_SHARED_NOEXEC PAGE_SHARED
708+# define PAGE_COPY_NOEXEC PAGE_COPY
709+# define PAGE_READONLY_NOEXEC PAGE_READONLY
710+#endif
711+
712 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
713
714 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
715diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
716index 2fd00b7..cfd5069 100644
717--- a/arch/alpha/kernel/module.c
718+++ b/arch/alpha/kernel/module.c
719@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
720
721 /* The small sections were sorted to the end of the segment.
722 The following should definitely cover them. */
723- gp = (u64)me->module_core + me->core_size - 0x8000;
724+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
725 got = sechdrs[me->arch.gotsecindex].sh_addr;
726
727 for (i = 0; i < n; i++) {
728diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
729index e51f578..16c64a3 100644
730--- a/arch/alpha/kernel/osf_sys.c
731+++ b/arch/alpha/kernel/osf_sys.c
732@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
733 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
734
735 static unsigned long
736-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
737- unsigned long limit)
738+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
739+ unsigned long limit, unsigned long flags)
740 {
741 struct vm_unmapped_area_info info;
742+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
743
744 info.flags = 0;
745 info.length = len;
746@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
747 info.high_limit = limit;
748 info.align_mask = 0;
749 info.align_offset = 0;
750+ info.threadstack_offset = offset;
751 return vm_unmapped_area(&info);
752 }
753
754@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
755 merely specific addresses, but regions of memory -- perhaps
756 this feature should be incorporated into all ports? */
757
758+#ifdef CONFIG_PAX_RANDMMAP
759+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
760+#endif
761+
762 if (addr) {
763- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
764+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
765 if (addr != (unsigned long) -ENOMEM)
766 return addr;
767 }
768
769 /* Next, try allocating at TASK_UNMAPPED_BASE. */
770- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
771- len, limit);
772+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
773+
774 if (addr != (unsigned long) -ENOMEM)
775 return addr;
776
777 /* Finally, try allocating in low memory. */
778- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
779+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
780
781 return addr;
782 }
783diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
784index 9d0ac09..479a962 100644
785--- a/arch/alpha/mm/fault.c
786+++ b/arch/alpha/mm/fault.c
787@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
788 __reload_thread(pcb);
789 }
790
791+#ifdef CONFIG_PAX_PAGEEXEC
792+/*
793+ * PaX: decide what to do with offenders (regs->pc = fault address)
794+ *
795+ * returns 1 when task should be killed
796+ * 2 when patched PLT trampoline was detected
797+ * 3 when unpatched PLT trampoline was detected
798+ */
799+static int pax_handle_fetch_fault(struct pt_regs *regs)
800+{
801+
802+#ifdef CONFIG_PAX_EMUPLT
803+ int err;
804+
805+ do { /* PaX: patched PLT emulation #1 */
806+ unsigned int ldah, ldq, jmp;
807+
808+ err = get_user(ldah, (unsigned int *)regs->pc);
809+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
810+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
811+
812+ if (err)
813+ break;
814+
815+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
816+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
817+ jmp == 0x6BFB0000U)
818+ {
819+ unsigned long r27, addr;
820+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
821+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
822+
823+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
824+ err = get_user(r27, (unsigned long *)addr);
825+ if (err)
826+ break;
827+
828+ regs->r27 = r27;
829+ regs->pc = r27;
830+ return 2;
831+ }
832+ } while (0);
833+
834+ do { /* PaX: patched PLT emulation #2 */
835+ unsigned int ldah, lda, br;
836+
837+ err = get_user(ldah, (unsigned int *)regs->pc);
838+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
839+ err |= get_user(br, (unsigned int *)(regs->pc+8));
840+
841+ if (err)
842+ break;
843+
844+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
845+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
846+ (br & 0xFFE00000U) == 0xC3E00000U)
847+ {
848+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
849+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
850+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
851+
852+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
853+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
854+ return 2;
855+ }
856+ } while (0);
857+
858+ do { /* PaX: unpatched PLT emulation */
859+ unsigned int br;
860+
861+ err = get_user(br, (unsigned int *)regs->pc);
862+
863+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
864+ unsigned int br2, ldq, nop, jmp;
865+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
866+
867+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
868+ err = get_user(br2, (unsigned int *)addr);
869+ err |= get_user(ldq, (unsigned int *)(addr+4));
870+ err |= get_user(nop, (unsigned int *)(addr+8));
871+ err |= get_user(jmp, (unsigned int *)(addr+12));
872+ err |= get_user(resolver, (unsigned long *)(addr+16));
873+
874+ if (err)
875+ break;
876+
877+ if (br2 == 0xC3600000U &&
878+ ldq == 0xA77B000CU &&
879+ nop == 0x47FF041FU &&
880+ jmp == 0x6B7B0000U)
881+ {
882+ regs->r28 = regs->pc+4;
883+ regs->r27 = addr+16;
884+ regs->pc = resolver;
885+ return 3;
886+ }
887+ }
888+ } while (0);
889+#endif
890+
891+ return 1;
892+}
893+
894+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
895+{
896+ unsigned long i;
897+
898+ printk(KERN_ERR "PAX: bytes at PC: ");
899+ for (i = 0; i < 5; i++) {
900+ unsigned int c;
901+ if (get_user(c, (unsigned int *)pc+i))
902+ printk(KERN_CONT "???????? ");
903+ else
904+ printk(KERN_CONT "%08x ", c);
905+ }
906+ printk("\n");
907+}
908+#endif
909
910 /*
911 * This routine handles page faults. It determines the address,
912@@ -133,8 +251,29 @@ retry:
913 good_area:
914 si_code = SEGV_ACCERR;
915 if (cause < 0) {
916- if (!(vma->vm_flags & VM_EXEC))
917+ if (!(vma->vm_flags & VM_EXEC)) {
918+
919+#ifdef CONFIG_PAX_PAGEEXEC
920+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
921+ goto bad_area;
922+
923+ up_read(&mm->mmap_sem);
924+ switch (pax_handle_fetch_fault(regs)) {
925+
926+#ifdef CONFIG_PAX_EMUPLT
927+ case 2:
928+ case 3:
929+ return;
930+#endif
931+
932+ }
933+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
934+ do_group_exit(SIGKILL);
935+#else
936 goto bad_area;
937+#endif
938+
939+ }
940 } else if (!cause) {
941 /* Allow reads even for write-only mappings */
942 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
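
The PLT emulation above works by reading the faulting instruction words with get_user() and matching them against fixed opcode masks. Pattern #1 (ldah $27 / ldq $27 / jmp ($27)), extracted into a standalone, compilable check; the masks and values are copied from the hunk, the sample buffer is made up:

#include <stdint.h>
#include <stdio.h>

static int looks_like_patched_plt(const uint32_t insn[3])
{
	return (insn[0] & 0xFFFF0000U) == 0x277B0000U &&	/* ldah $27,hi  */
	       (insn[1] & 0xFFFF0000U) == 0xA77B0000U &&	/* ldq  $27,lo  */
	       insn[2] == 0x6BFB0000U;				/* jmp  ($27)   */
}

int main(void)
{
	uint32_t sample[3] = { 0x277B0012U, 0xA77B8000U, 0x6BFB0000U };
	printf("match: %d\n", looks_like_patched_plt(sample));	/* 1 */
	return 0;
}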
943diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
944index 97d07ed..2931f2b 100644
945--- a/arch/arm/Kconfig
946+++ b/arch/arm/Kconfig
947@@ -1727,7 +1727,7 @@ config ALIGNMENT_TRAP
948
949 config UACCESS_WITH_MEMCPY
950 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
951- depends on MMU
952+ depends on MMU && !PAX_MEMORY_UDEREF
953 default y if CPU_FEROCEON
954 help
955 Implement faster copy_to_user and clear_user methods for CPU
956@@ -1991,6 +1991,7 @@ config XIP_PHYS_ADDR
957 config KEXEC
958 bool "Kexec system call (EXPERIMENTAL)"
959 depends on (!SMP || PM_SLEEP_SMP)
960+ depends on !GRKERNSEC_KMEM
961 help
962 kexec is a system call that implements the ability to shutdown your
963 current kernel, and to start another kernel. It is like a reboot
964diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
965index e22c119..eaa807d 100644
966--- a/arch/arm/include/asm/atomic.h
967+++ b/arch/arm/include/asm/atomic.h
968@@ -18,17 +18,41 @@
969 #include <asm/barrier.h>
970 #include <asm/cmpxchg.h>
971
972+#ifdef CONFIG_GENERIC_ATOMIC64
973+#include <asm-generic/atomic64.h>
974+#endif
975+
976 #define ATOMIC_INIT(i) { (i) }
977
978 #ifdef __KERNEL__
979
980+#ifdef CONFIG_THUMB2_KERNEL
981+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
982+#else
983+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
984+#endif
985+
986+#define _ASM_EXTABLE(from, to) \
987+" .pushsection __ex_table,\"a\"\n"\
988+" .align 3\n" \
989+" .long " #from ", " #to"\n" \
990+" .popsection"
991+
992 /*
993 * On ARM, ordinary assignment (str instruction) doesn't clear the local
994 * strex/ldrex monitor on some implementations. The reason we can use it for
995 * atomic_set() is the clrex or dummy strex done on every exception return.
996 */
997 #define atomic_read(v) ACCESS_ONCE((v)->counter)
998+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
999+{
1000+ return ACCESS_ONCE(v->counter);
1001+}
1002 #define atomic_set(v,i) (((v)->counter) = (i))
1003+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1004+{
1005+ v->counter = i;
1006+}
1007
1008 #if __LINUX_ARM_ARCH__ >= 6
1009
1010@@ -38,26 +62,50 @@
1011 * to ensure that the update happens.
1012 */
1013
1014-#define ATOMIC_OP(op, c_op, asm_op) \
1015-static inline void atomic_##op(int i, atomic_t *v) \
1016+#ifdef CONFIG_PAX_REFCOUNT
1017+#define __OVERFLOW_POST \
1018+ " bvc 3f\n" \
1019+ "2: " REFCOUNT_TRAP_INSN "\n"\
1020+ "3:\n"
1021+#define __OVERFLOW_POST_RETURN \
1022+ " bvc 3f\n" \
1023+" mov %0, %1\n" \
1024+ "2: " REFCOUNT_TRAP_INSN "\n"\
1025+ "3:\n"
1026+#define __OVERFLOW_EXTABLE \
1027+ "4:\n" \
1028+ _ASM_EXTABLE(2b, 4b)
1029+#else
1030+#define __OVERFLOW_POST
1031+#define __OVERFLOW_POST_RETURN
1032+#define __OVERFLOW_EXTABLE
1033+#endif
1034+
1035+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1036+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1037 { \
1038 unsigned long tmp; \
1039 int result; \
1040 \
1041 prefetchw(&v->counter); \
1042- __asm__ __volatile__("@ atomic_" #op "\n" \
1043+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1044 "1: ldrex %0, [%3]\n" \
1045 " " #asm_op " %0, %0, %4\n" \
1046+ post_op \
1047 " strex %1, %0, [%3]\n" \
1048 " teq %1, #0\n" \
1049-" bne 1b" \
1050+" bne 1b\n" \
1051+ extable \
1052 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1053 : "r" (&v->counter), "Ir" (i) \
1054 : "cc"); \
1055 } \
1056
1057-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1058-static inline int atomic_##op##_return(int i, atomic_t *v) \
1059+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op, , )\
1060+ __ATOMIC_OP(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1061+
1062+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1063+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1064 { \
1065 unsigned long tmp; \
1066 int result; \
1067@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1068 smp_mb(); \
1069 prefetchw(&v->counter); \
1070 \
1071- __asm__ __volatile__("@ atomic_" #op "_return\n" \
1072+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1073 "1: ldrex %0, [%3]\n" \
1074 " " #asm_op " %0, %0, %4\n" \
1075+ post_op \
1076 " strex %1, %0, [%3]\n" \
1077 " teq %1, #0\n" \
1078-" bne 1b" \
1079+" bne 1b\n" \
1080+ extable \
1081 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1082 : "r" (&v->counter), "Ir" (i) \
1083 : "cc"); \
1084@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1085 return result; \
1086 }
1087
1088+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op, , )\
1089+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1090+
1091 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1092 {
1093 int oldval;
1094@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1095 __asm__ __volatile__ ("@ atomic_add_unless\n"
1096 "1: ldrex %0, [%4]\n"
1097 " teq %0, %5\n"
1098-" beq 2f\n"
1099-" add %1, %0, %6\n"
1100+" beq 4f\n"
1101+" adds %1, %0, %6\n"
1102+
1103+#ifdef CONFIG_PAX_REFCOUNT
1104+" bvc 3f\n"
1105+"2: " REFCOUNT_TRAP_INSN "\n"
1106+"3:\n"
1107+#endif
1108+
1109 " strex %2, %1, [%4]\n"
1110 " teq %2, #0\n"
1111 " bne 1b\n"
1112-"2:"
1113+"4:"
1114+
1115+#ifdef CONFIG_PAX_REFCOUNT
1116+ _ASM_EXTABLE(2b, 4b)
1117+#endif
1118+
1119 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1120 : "r" (&v->counter), "r" (u), "r" (a)
1121 : "cc");
1122@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1123 return oldval;
1124 }
1125
1126+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1127+{
1128+ unsigned long oldval, res;
1129+
1130+ smp_mb();
1131+
1132+ do {
1133+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1134+ "ldrex %1, [%3]\n"
1135+ "mov %0, #0\n"
1136+ "teq %1, %4\n"
1137+ "strexeq %0, %5, [%3]\n"
1138+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1139+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1140+ : "cc");
1141+ } while (res);
1142+
1143+ smp_mb();
1144+
1145+ return oldval;
1146+}
1147+
1148 #else /* ARM_ARCH_6 */
1149
1150 #ifdef CONFIG_SMP
1151 #error SMP not supported on pre-ARMv6 CPUs
1152 #endif
1153
1154-#define ATOMIC_OP(op, c_op, asm_op) \
1155-static inline void atomic_##op(int i, atomic_t *v) \
1156+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1157+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1158 { \
1159 unsigned long flags; \
1160 \
1161@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1162 raw_local_irq_restore(flags); \
1163 } \
1164
1165-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1166-static inline int atomic_##op##_return(int i, atomic_t *v) \
1167+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1168+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1169+
1170+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1171+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1172 { \
1173 unsigned long flags; \
1174 int val; \
1175@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1176 return val; \
1177 }
1178
1179+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1180+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1181+
1182 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1183 {
1184 int ret;
1185@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1186 return ret;
1187 }
1188
1189+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1190+{
1191+ return atomic_cmpxchg((atomic_t *)v, old, new);
1192+}
1193+
1194 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1195 {
1196 int c, old;
1197@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1198
1199 #undef ATOMIC_OPS
1200 #undef ATOMIC_OP_RETURN
1201+#undef __ATOMIC_OP_RETURN
1202 #undef ATOMIC_OP
1203+#undef __ATOMIC_OP
1204
1205 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1206+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1207+{
1208+ return xchg(&v->counter, new);
1209+}
1210
1211 #define atomic_inc(v) atomic_add(1, v)
1212+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1213+{
1214+ atomic_add_unchecked(1, v);
1215+}
1216 #define atomic_dec(v) atomic_sub(1, v)
1217+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1218+{
1219+ atomic_sub_unchecked(1, v);
1220+}
1221
1222 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1223+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1224+{
1225+ return atomic_add_return_unchecked(1, v) == 0;
1226+}
1227 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1228 #define atomic_inc_return(v) (atomic_add_return(1, v))
1229+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1230+{
1231+ return atomic_add_return_unchecked(1, v);
1232+}
1233 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1234 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1235
1236@@ -216,6 +336,14 @@ typedef struct {
1237 long long counter;
1238 } atomic64_t;
1239
1240+#ifdef CONFIG_PAX_REFCOUNT
1241+typedef struct {
1242+ long long counter;
1243+} atomic64_unchecked_t;
1244+#else
1245+typedef atomic64_t atomic64_unchecked_t;
1246+#endif
1247+
1248 #define ATOMIC64_INIT(i) { (i) }
1249
1250 #ifdef CONFIG_ARM_LPAE
1251@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1252 return result;
1253 }
1254
1255+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1256+{
1257+ long long result;
1258+
1259+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1260+" ldrd %0, %H0, [%1]"
1261+ : "=&r" (result)
1262+ : "r" (&v->counter), "Qo" (v->counter)
1263+ );
1264+
1265+ return result;
1266+}
1267+
1268 static inline void atomic64_set(atomic64_t *v, long long i)
1269 {
1270 __asm__ __volatile__("@ atomic64_set\n"
1271@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1272 : "r" (&v->counter), "r" (i)
1273 );
1274 }
1275+
1276+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1277+{
1278+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1279+" strd %2, %H2, [%1]"
1280+ : "=Qo" (v->counter)
1281+ : "r" (&v->counter), "r" (i)
1282+ );
1283+}
1284 #else
1285 static inline long long atomic64_read(const atomic64_t *v)
1286 {
1287@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1288 return result;
1289 }
1290
1291+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1292+{
1293+ long long result;
1294+
1295+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1296+" ldrexd %0, %H0, [%1]"
1297+ : "=&r" (result)
1298+ : "r" (&v->counter), "Qo" (v->counter)
1299+ );
1300+
1301+ return result;
1302+}
1303+
1304 static inline void atomic64_set(atomic64_t *v, long long i)
1305 {
1306 long long tmp;
1307@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1308 : "r" (&v->counter), "r" (i)
1309 : "cc");
1310 }
1311+
1312+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1313+{
1314+ long long tmp;
1315+
1316+ prefetchw(&v->counter);
1317+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1318+"1: ldrexd %0, %H0, [%2]\n"
1319+" strexd %0, %3, %H3, [%2]\n"
1320+" teq %0, #0\n"
1321+" bne 1b"
1322+ : "=&r" (tmp), "=Qo" (v->counter)
1323+ : "r" (&v->counter), "r" (i)
1324+ : "cc");
1325+}
1326 #endif
1327
1328-#define ATOMIC64_OP(op, op1, op2) \
1329-static inline void atomic64_##op(long long i, atomic64_t *v) \
1330+#undef __OVERFLOW_POST_RETURN
1331+#define __OVERFLOW_POST_RETURN \
1332+ " bvc 3f\n" \
1333+" mov %0, %1\n" \
1334+" mov %H0, %H1\n" \
1335+ "2: " REFCOUNT_TRAP_INSN "\n"\
1336+ "3:\n"
1337+
1338+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1339+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1340 { \
1341 long long result; \
1342 unsigned long tmp; \
1343 \
1344 prefetchw(&v->counter); \
1345- __asm__ __volatile__("@ atomic64_" #op "\n" \
1346+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1347 "1: ldrexd %0, %H0, [%3]\n" \
1348 " " #op1 " %Q0, %Q0, %Q4\n" \
1349 " " #op2 " %R0, %R0, %R4\n" \
1350+ post_op \
1351 " strexd %1, %0, %H0, [%3]\n" \
1352 " teq %1, #0\n" \
1353-" bne 1b" \
1354+" bne 1b\n" \
1355+ extable \
1356 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1357 : "r" (&v->counter), "r" (i) \
1358 : "cc"); \
1359 } \
1360
1361-#define ATOMIC64_OP_RETURN(op, op1, op2) \
1362-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1363+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, , op1, op2, , ) \
1364+ __ATOMIC64_OP(op, _unchecked, op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1365+
1366+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1367+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1368 { \
1369 long long result; \
1370 unsigned long tmp; \
1371@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1372 smp_mb(); \
1373 prefetchw(&v->counter); \
1374 \
1375- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1376+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1377 "1: ldrexd %0, %H0, [%3]\n" \
1378 " " #op1 " %Q0, %Q0, %Q4\n" \
1379 " " #op2 " %R0, %R0, %R4\n" \
1380+ post_op \
1381 " strexd %1, %0, %H0, [%3]\n" \
1382 " teq %1, #0\n" \
1383-" bne 1b" \
1384+" bne 1b\n" \
1385+ extable \
1386 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1387 : "r" (&v->counter), "r" (i) \
1388 : "cc"); \
1389@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1390 return result; \
1391 }
1392
1393+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, , op1, op2, , ) \
1394+ __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1395+
1396 #define ATOMIC64_OPS(op, op1, op2) \
1397 ATOMIC64_OP(op, op1, op2) \
1398 ATOMIC64_OP_RETURN(op, op1, op2)
1399@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1400
1401 #undef ATOMIC64_OPS
1402 #undef ATOMIC64_OP_RETURN
1403+#undef __ATOMIC64_OP_RETURN
1404 #undef ATOMIC64_OP
1405+#undef __ATOMIC64_OP
1406+#undef __OVERFLOW_EXTABLE
1407+#undef __OVERFLOW_POST_RETURN
1408+#undef __OVERFLOW_POST
1409
1410 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1411 long long new)
1412@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1413 return oldval;
1414 }
1415
1416+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1417+ long long new)
1418+{
1419+ long long oldval;
1420+ unsigned long res;
1421+
1422+ smp_mb();
1423+
1424+ do {
1425+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1426+ "ldrexd %1, %H1, [%3]\n"
1427+ "mov %0, #0\n"
1428+ "teq %1, %4\n"
1429+ "teqeq %H1, %H4\n"
1430+ "strexdeq %0, %5, %H5, [%3]"
1431+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1432+ : "r" (&ptr->counter), "r" (old), "r" (new)
1433+ : "cc");
1434+ } while (res);
1435+
1436+ smp_mb();
1437+
1438+ return oldval;
1439+}
1440+
1441 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1442 {
1443 long long result;
1444@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1445 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1446 {
1447 long long result;
1448- unsigned long tmp;
1449+ u64 tmp;
1450
1451 smp_mb();
1452 prefetchw(&v->counter);
1453
1454 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1455-"1: ldrexd %0, %H0, [%3]\n"
1456-" subs %Q0, %Q0, #1\n"
1457-" sbc %R0, %R0, #0\n"
1458+"1: ldrexd %1, %H1, [%3]\n"
1459+" subs %Q0, %Q1, #1\n"
1460+" sbcs %R0, %R1, #0\n"
1461+
1462+#ifdef CONFIG_PAX_REFCOUNT
1463+" bvc 3f\n"
1464+" mov %Q0, %Q1\n"
1465+" mov %R0, %R1\n"
1466+"2: " REFCOUNT_TRAP_INSN "\n"
1467+"3:\n"
1468+#endif
1469+
1470 " teq %R0, #0\n"
1471-" bmi 2f\n"
1472+" bmi 4f\n"
1473 " strexd %1, %0, %H0, [%3]\n"
1474 " teq %1, #0\n"
1475 " bne 1b\n"
1476-"2:"
1477+"4:\n"
1478+
1479+#ifdef CONFIG_PAX_REFCOUNT
1480+ _ASM_EXTABLE(2b, 4b)
1481+#endif
1482+
1483 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1484 : "r" (&v->counter)
1485 : "cc");
1486@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1487 " teq %0, %5\n"
1488 " teqeq %H0, %H5\n"
1489 " moveq %1, #0\n"
1490-" beq 2f\n"
1491+" beq 4f\n"
1492 " adds %Q0, %Q0, %Q6\n"
1493-" adc %R0, %R0, %R6\n"
1494+" adcs %R0, %R0, %R6\n"
1495+
1496+#ifdef CONFIG_PAX_REFCOUNT
1497+" bvc 3f\n"
1498+"2: " REFCOUNT_TRAP_INSN "\n"
1499+"3:\n"
1500+#endif
1501+
1502 " strexd %2, %0, %H0, [%4]\n"
1503 " teq %2, #0\n"
1504 " bne 1b\n"
1505-"2:"
1506+"4:\n"
1507+
1508+#ifdef CONFIG_PAX_REFCOUNT
1509+ _ASM_EXTABLE(2b, 4b)
1510+#endif
1511+
1512 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1513 : "r" (&v->counter), "r" (u), "r" (a)
1514 : "cc");
1515@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1516
1517 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1518 #define atomic64_inc(v) atomic64_add(1LL, (v))
1519+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1520 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1521+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1522 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1523 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1524 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1525+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1526 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1527 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1528 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
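
All of the ldrex/strex loops above share the same load-linked/store-conditional shape: load, compute (with an overflow trap spliced in for the PAX_REFCOUNT variants), attempt the store, retry if another CPU raced in between. The portable C11 equivalent of the uninstrumented path, for reference (illustrative, not from the patch):

#include <stdatomic.h>
#include <stdio.h>

static long long add_return(_Atomic long long *v, long long i)
{
	long long old, new;
	do {
		old = atomic_load(v);		/* ~ ldrexd */
		new = old + i;			/* ~ adds/adc */
	} while (!atomic_compare_exchange_weak(v, &old, new)); /* ~ strexd + bne 1b */
	return new;
}

int main(void)
{
	_Atomic long long v = 41;
	printf("%lld\n", add_return(&v, 1));	/* 42 */
	return 0;
}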
1529diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1530index d2f81e6..3c4dba5 100644
1531--- a/arch/arm/include/asm/barrier.h
1532+++ b/arch/arm/include/asm/barrier.h
1533@@ -67,7 +67,7 @@
1534 do { \
1535 compiletime_assert_atomic_type(*p); \
1536 smp_mb(); \
1537- ACCESS_ONCE(*p) = (v); \
1538+ ACCESS_ONCE_RW(*p) = (v); \
1539 } while (0)
1540
1541 #define smp_load_acquire(p) \
1542diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1543index 75fe66b..ba3dee4 100644
1544--- a/arch/arm/include/asm/cache.h
1545+++ b/arch/arm/include/asm/cache.h
1546@@ -4,8 +4,10 @@
1547 #ifndef __ASMARM_CACHE_H
1548 #define __ASMARM_CACHE_H
1549
1550+#include <linux/const.h>
1551+
1552 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1553-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1554+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1555
1556 /*
1557 * Memory returned by kmalloc() may be used for DMA, so we must make
1558@@ -24,5 +26,6 @@
1559 #endif
1560
1561 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1562+#define __read_only __attribute__ ((__section__(".data..read_only")))
1563
1564 #endif
1565diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1566index 2d46862..a35415b 100644
1567--- a/arch/arm/include/asm/cacheflush.h
1568+++ b/arch/arm/include/asm/cacheflush.h
1569@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1570 void (*dma_unmap_area)(const void *, size_t, int);
1571
1572 void (*dma_flush_range)(const void *, const void *);
1573-};
1574+} __no_const;
1575
1576 /*
1577 * Select the calling method
1578diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1579index 5233151..87a71fa 100644
1580--- a/arch/arm/include/asm/checksum.h
1581+++ b/arch/arm/include/asm/checksum.h
1582@@ -37,7 +37,19 @@ __wsum
1583 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1584
1585 __wsum
1586-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1587+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1588+
1589+static inline __wsum
1590+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1591+{
1592+ __wsum ret;
1593+ pax_open_userland();
1594+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1595+ pax_close_userland();
1596+ return ret;
1597+}
1598+
1599+
1600
1601 /*
1602 * Fold a partial checksum without adding pseudo headers
1603diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1604index abb2c37..96db950 100644
1605--- a/arch/arm/include/asm/cmpxchg.h
1606+++ b/arch/arm/include/asm/cmpxchg.h
1607@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1608
1609 #define xchg(ptr,x) \
1610 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1611+#define xchg_unchecked(ptr,x) \
1612+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1613
1614 #include <asm-generic/cmpxchg-local.h>
1615
1616diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1617index 6ddbe44..b5e38b1a 100644
1618--- a/arch/arm/include/asm/domain.h
1619+++ b/arch/arm/include/asm/domain.h
1620@@ -48,18 +48,37 @@
1621 * Domain types
1622 */
1623 #define DOMAIN_NOACCESS 0
1624-#define DOMAIN_CLIENT 1
1625 #ifdef CONFIG_CPU_USE_DOMAINS
1626+#define DOMAIN_USERCLIENT 1
1627+#define DOMAIN_KERNELCLIENT 1
1628 #define DOMAIN_MANAGER 3
1629+#define DOMAIN_VECTORS DOMAIN_USER
1630 #else
1631+
1632+#ifdef CONFIG_PAX_KERNEXEC
1633 #define DOMAIN_MANAGER 1
1634+#define DOMAIN_KERNEXEC 3
1635+#else
1636+#define DOMAIN_MANAGER 1
1637+#endif
1638+
1639+#ifdef CONFIG_PAX_MEMORY_UDEREF
1640+#define DOMAIN_USERCLIENT 0
1641+#define DOMAIN_UDEREF 1
1642+#define DOMAIN_VECTORS DOMAIN_KERNEL
1643+#else
1644+#define DOMAIN_USERCLIENT 1
1645+#define DOMAIN_VECTORS DOMAIN_USER
1646+#endif
1647+#define DOMAIN_KERNELCLIENT 1
1648+
1649 #endif
1650
1651 #define domain_val(dom,type) ((type) << (2*(dom)))
1652
1653 #ifndef __ASSEMBLY__
1654
1655-#ifdef CONFIG_CPU_USE_DOMAINS
1656+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1657 static inline void set_domain(unsigned val)
1658 {
1659 asm volatile(
1660@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1661 isb();
1662 }
1663
1664-#define modify_domain(dom,type) \
1665- do { \
1666- struct thread_info *thread = current_thread_info(); \
1667- unsigned int domain = thread->cpu_domain; \
1668- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1669- thread->cpu_domain = domain | domain_val(dom, type); \
1670- set_domain(thread->cpu_domain); \
1671- } while (0)
1672-
1673+extern void modify_domain(unsigned int dom, unsigned int type);
1674 #else
1675 static inline void set_domain(unsigned val) { }
1676 static inline void modify_domain(unsigned dom, unsigned type) { }
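
The constants above pack a 2-bit access type per ARM protection domain into the Domain Access Control Register, which is exactly what domain_val() computes. One reading of the hunk is that UDEREF parks userland in its own domain as no-access by default (DOMAIN_USERCLIENT 0) and has modify_domain() open it around intentional user accesses; the sketch below only demonstrates the bit packing itself:

#include <stdio.h>

#define DOMAIN_NOACCESS	0
#define DOMAIN_CLIENT	1
#define DOMAIN_MANAGER	3

/* Verbatim from the hunk above: 2 bits per domain. */
#define domain_val(dom, type)	((type) << (2*(dom)))

int main(void)
{
	/* e.g. domain 0 = kernel as client, domain 1 = user no-access */
	unsigned int dacr = domain_val(0, DOMAIN_CLIENT) |
			    domain_val(1, DOMAIN_NOACCESS);
	printf("DACR = %#x\n", dacr);	/* 0x1 */
	return 0;
}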
1677diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1678index afb9caf..9a0bac0 100644
1679--- a/arch/arm/include/asm/elf.h
1680+++ b/arch/arm/include/asm/elf.h
1681@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1682 the loader. We need to make sure that it is out of the way of the program
1683 that it will "exec", and that there is sufficient room for the brk. */
1684
1685-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1686+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1687+
1688+#ifdef CONFIG_PAX_ASLR
1689+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1690+
1691+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1692+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1693+#endif
1694
1695 /* When the program starts, a1 contains a pointer to a function to be
1696 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1697@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1698 extern void elf_set_personality(const struct elf32_hdr *);
1699 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1700
1701-struct mm_struct;
1702-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1703-#define arch_randomize_brk arch_randomize_brk
1704-
1705 #ifdef CONFIG_MMU
1706 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1707 struct linux_binprm;
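
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits mixed into the respective base. One plausible reading, matching how PaX has historically consumed these constants although it is not shown in this hunk, is page-granular randomization, sketched here with a stand-in entropy source:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT		12
#define PAX_DELTA_MMAP_LEN	16	/* from the hunk: 16 bits for 32-bit personalities */

/* Hypothetical helper: PAX_DELTA_MMAP_LEN random page-granular bits. */
static unsigned long delta_mmap(void)
{
	unsigned long r = (unsigned long)rand();	/* stand-in entropy source */
	return (r & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
}

int main(void)
{
	/* 16 bits of page offsets move the mmap base within a 256 MiB window. */
	printf("offset = %#lx of max %#lx\n",
	       delta_mmap(), ((1UL << PAX_DELTA_MMAP_LEN) - 1) << PAGE_SHIFT);
	return 0;
}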
1708diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1709index de53547..52b9a28 100644
1710--- a/arch/arm/include/asm/fncpy.h
1711+++ b/arch/arm/include/asm/fncpy.h
1712@@ -81,7 +81,9 @@
1713 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1714 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1715 \
1716+ pax_open_kernel(); \
1717 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1718+ pax_close_kernel(); \
1719 flush_icache_range((unsigned long)(dest_buf), \
1720 (unsigned long)(dest_buf) + (size)); \
1721 \
1722diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1723index 53e69da..3fdc896 100644
1724--- a/arch/arm/include/asm/futex.h
1725+++ b/arch/arm/include/asm/futex.h
1726@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1727 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1728 return -EFAULT;
1729
1730+ pax_open_userland();
1731+
1732 smp_mb();
1733 /* Prefetching cannot fault */
1734 prefetchw(uaddr);
1735@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1736 : "cc", "memory");
1737 smp_mb();
1738
1739+ pax_close_userland();
1740+
1741 *uval = val;
1742 return ret;
1743 }
1744@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1745 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1746 return -EFAULT;
1747
1748+ pax_open_userland();
1749+
1750 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1751 "1: " TUSER(ldr) " %1, [%4]\n"
1752 " teq %1, %2\n"
1753@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1754 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1755 : "cc", "memory");
1756
1757+ pax_close_userland();
1758+
1759 *uval = val;
1760 return ret;
1761 }
1762@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1763 return -EFAULT;
1764
1765 pagefault_disable(); /* implies preempt_disable() */
1766+ pax_open_userland();
1767
1768 switch (op) {
1769 case FUTEX_OP_SET:
1770@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1771 ret = -ENOSYS;
1772 }
1773
1774+ pax_close_userland();
1775 pagefault_enable(); /* subsumes preempt_enable() */
1776
1777 if (!ret) {
1778diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1779index 83eb2f7..ed77159 100644
1780--- a/arch/arm/include/asm/kmap_types.h
1781+++ b/arch/arm/include/asm/kmap_types.h
1782@@ -4,6 +4,6 @@
1783 /*
1784 * This is the "bare minimum". AIO seems to require this.
1785 */
1786-#define KM_TYPE_NR 16
1787+#define KM_TYPE_NR 17
1788
1789 #endif
1790diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1791index 9e614a1..3302cca 100644
1792--- a/arch/arm/include/asm/mach/dma.h
1793+++ b/arch/arm/include/asm/mach/dma.h
1794@@ -22,7 +22,7 @@ struct dma_ops {
1795 int (*residue)(unsigned int, dma_t *); /* optional */
1796 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1797 const char *type;
1798-};
1799+} __do_const;
1800
1801 struct dma_struct {
1802 void *addr; /* single DMA address */
1803diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1804index f98c7f3..e5c626d 100644
1805--- a/arch/arm/include/asm/mach/map.h
1806+++ b/arch/arm/include/asm/mach/map.h
1807@@ -23,17 +23,19 @@ struct map_desc {
1808
1809 /* types 0-3 are defined in asm/io.h */
1810 enum {
1811- MT_UNCACHED = 4,
1812- MT_CACHECLEAN,
1813- MT_MINICLEAN,
1814+ MT_UNCACHED_RW = 4,
1815+ MT_CACHECLEAN_RO,
1816+ MT_MINICLEAN_RO,
1817 MT_LOW_VECTORS,
1818 MT_HIGH_VECTORS,
1819- MT_MEMORY_RWX,
1820+ __MT_MEMORY_RWX,
1821 MT_MEMORY_RW,
1822- MT_ROM,
1823- MT_MEMORY_RWX_NONCACHED,
1824+ MT_MEMORY_RX,
1825+ MT_ROM_RX,
1826+ MT_MEMORY_RW_NONCACHED,
1827+ MT_MEMORY_RX_NONCACHED,
1828 MT_MEMORY_RW_DTCM,
1829- MT_MEMORY_RWX_ITCM,
1830+ MT_MEMORY_RX_ITCM,
1831 MT_MEMORY_RW_SO,
1832 MT_MEMORY_DMA_READY,
1833 };
1834diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1835index 891a56b..48f337e 100644
1836--- a/arch/arm/include/asm/outercache.h
1837+++ b/arch/arm/include/asm/outercache.h
1838@@ -36,7 +36,7 @@ struct outer_cache_fns {
1839
1840 /* This is an ARM L2C thing */
1841 void (*write_sec)(unsigned long, unsigned);
1842-};
1843+} __no_const;
1844
1845 extern struct outer_cache_fns outer_cache;
1846
1847diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1848index 4355f0e..cd9168e 100644
1849--- a/arch/arm/include/asm/page.h
1850+++ b/arch/arm/include/asm/page.h
1851@@ -23,6 +23,7 @@
1852
1853 #else
1854
1855+#include <linux/compiler.h>
1856 #include <asm/glue.h>
1857
1858 /*
1859@@ -114,7 +115,7 @@ struct cpu_user_fns {
1860 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1861 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1862 unsigned long vaddr, struct vm_area_struct *vma);
1863-};
1864+} __no_const;
1865
1866 #ifdef MULTI_USER
1867 extern struct cpu_user_fns cpu_user;
1868diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1869index 19cfab5..3f5c7e9 100644
1870--- a/arch/arm/include/asm/pgalloc.h
1871+++ b/arch/arm/include/asm/pgalloc.h
1872@@ -17,6 +17,7 @@
1873 #include <asm/processor.h>
1874 #include <asm/cacheflush.h>
1875 #include <asm/tlbflush.h>
1876+#include <asm/system_info.h>
1877
1878 #define check_pgt_cache() do { } while (0)
1879
1880@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1881 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1882 }
1883
1884+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1885+{
1886+ pud_populate(mm, pud, pmd);
1887+}
1888+
1889 #else /* !CONFIG_ARM_LPAE */
1890
1891 /*
1892@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1893 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1894 #define pmd_free(mm, pmd) do { } while (0)
1895 #define pud_populate(mm,pmd,pte) BUG()
1896+#define pud_populate_kernel(mm,pmd,pte) BUG()
1897
1898 #endif /* CONFIG_ARM_LPAE */
1899
1900@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1901 __free_page(pte);
1902 }
1903
1904+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1905+{
1906+#ifdef CONFIG_ARM_LPAE
1907+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1908+#else
1909+ if (addr & SECTION_SIZE)
1910+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1911+ else
1912+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1913+#endif
1914+ flush_pmd_entry(pmdp);
1915+}
1916+
1917 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1918 pmdval_t prot)
1919 {
1920diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1921index 5e68278..1869bae 100644
1922--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1923+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1924@@ -27,7 +27,7 @@
1925 /*
1926 * - section
1927 */
1928-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1929+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1930 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1931 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1932 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1933@@ -39,6 +39,7 @@
1934 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1935 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1936 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1937+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1938
1939 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1940 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1941@@ -68,6 +69,7 @@
1942 * - extended small page/tiny page
1943 */
1944 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1945+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1946 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1947 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1948 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1949diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1950index f027941..f36ce30 100644
1951--- a/arch/arm/include/asm/pgtable-2level.h
1952+++ b/arch/arm/include/asm/pgtable-2level.h
1953@@ -126,6 +126,9 @@
1954 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1955 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1956
1957+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1958+#define L_PTE_PXN (_AT(pteval_t, 0))
1959+
1960 /*
1961 * These are the memory types, defined to be compatible with
1962 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1963diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1964index a31ecdad..95e98d4 100644
1965--- a/arch/arm/include/asm/pgtable-3level.h
1966+++ b/arch/arm/include/asm/pgtable-3level.h
1967@@ -81,6 +81,7 @@
1968 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1969 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1970 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1971+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1972 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1973 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1974 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1975@@ -92,10 +93,12 @@
1976 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1977 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1978 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1979+#define PMD_SECT_RDONLY PMD_SECT_AP2
1980
1981 /*
1982 * To be used in assembly code with the upper page attributes.
1983 */
1984+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1985 #define L_PTE_XN_HIGH (1 << (54 - 32))
1986 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1987
1988diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1989index d5cac54..906ea3e 100644
1990--- a/arch/arm/include/asm/pgtable.h
1991+++ b/arch/arm/include/asm/pgtable.h
1992@@ -33,6 +33,9 @@
1993 #include <asm/pgtable-2level.h>
1994 #endif
1995
1996+#define ktla_ktva(addr) (addr)
1997+#define ktva_ktla(addr) (addr)
1998+
1999 /*
2000 * Just any arbitrary offset to the start of the vmalloc VM area: the
2001 * current 8MB value just means that there will be an 8MB "hole" after the
2002@@ -48,6 +51,9 @@
2003 #define LIBRARY_TEXT_START 0x0c000000
2004
2005 #ifndef __ASSEMBLY__
2006+extern pteval_t __supported_pte_mask;
2007+extern pmdval_t __supported_pmd_mask;
2008+
2009 extern void __pte_error(const char *file, int line, pte_t);
2010 extern void __pmd_error(const char *file, int line, pmd_t);
2011 extern void __pgd_error(const char *file, int line, pgd_t);
2012@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2013 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2014 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2015
2016+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2017+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2018+
2019+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2020+#include <asm/domain.h>
2021+#include <linux/thread_info.h>
2022+#include <linux/preempt.h>
2023+
2024+static inline int test_domain(int domain, int domaintype)
2025+{
2026+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2027+}
2028+#endif
2029+
2030+#ifdef CONFIG_PAX_KERNEXEC
2031+static inline unsigned long pax_open_kernel(void) {
2032+#ifdef CONFIG_ARM_LPAE
2033+ /* TODO */
2034+#else
2035+ preempt_disable();
2036+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2037+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2038+#endif
2039+ return 0;
2040+}
2041+
2042+static inline unsigned long pax_close_kernel(void) {
2043+#ifdef CONFIG_ARM_LPAE
2044+ /* TODO */
2045+#else
2046+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2047+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2048+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2049+ preempt_enable_no_resched();
2050+#endif
2051+ return 0;
2052+}
2053+#else
2054+static inline unsigned long pax_open_kernel(void) { return 0; }
2055+static inline unsigned long pax_close_kernel(void) { return 0; }
2056+#endif
2057+
2058 /*
2059 * This is the lowest virtual address we can permit any user space
2060 * mapping to be mapped at. This is particularly important for
2061@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2062 /*
2063 * The pgprot_* and protection_map entries will be fixed up in runtime
2064 * to include the cachable and bufferable bits based on memory policy,
2065- * as well as any architecture dependent bits like global/ASID and SMP
2066- * shared mapping bits.
2067+ * as well as any architecture dependent bits like global/ASID, PXN,
2068+ * and SMP shared mapping bits.
2069 */
2070 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2071
2072@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2073 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2074 {
2075 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2076- L_PTE_NONE | L_PTE_VALID;
2077+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2078 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2079 return pte;
2080 }
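[Editor's sketch] The pax_open_kernel()/pax_close_kernel() pair defined above brackets writes to otherwise read-only kernel mappings by flipping DOMAIN_KERNEL between client and manager; later hunks in this patch (fncpy.h, tls.h, fiq.c, tcm.c, patch.c) use exactly this pattern around memcpy(). A userspace sketch of the bracketing discipline, with stubs standing in for the kernel primitives:

    #include <stdio.h>
    #include <string.h>

    /* stubs: the real functions rewrite the DACR, as in the hunk above */
    static unsigned long pax_open_kernel(void)  { puts("DACR: DOMAIN_KERNEL -> manager"); return 0; }
    static unsigned long pax_close_kernel(void) { puts("DACR: DOMAIN_KERNEL -> client");  return 0; }

    static void patch_readonly(char *dst, const char *src, size_t len)
    {
        pax_open_kernel();      /* lift write protection for this CPU */
        memcpy(dst, src, len);  /* the one write that needed it */
        pax_close_kernel();     /* restore enforcement immediately */
    }

    int main(void)
    {
        char vector_slot[8] = "old";  /* stand-in for a write-protected page */
        patch_readonly(vector_slot, "new", 4);
        puts(vector_slot);
        return 0;
    }

The design point is that the window is kept as narrow as possible: preemption is disabled inside the real pax_open_kernel() so the relaxed DACR never leaks to another task.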
2081diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2082index c25ef3e..735f14b 100644
2083--- a/arch/arm/include/asm/psci.h
2084+++ b/arch/arm/include/asm/psci.h
2085@@ -32,7 +32,7 @@ struct psci_operations {
2086 int (*affinity_info)(unsigned long target_affinity,
2087 unsigned long lowest_affinity_level);
2088 int (*migrate_info_type)(void);
2089-};
2090+} __no_const;
2091
2092 extern struct psci_operations psci_ops;
2093 extern struct smp_operations psci_smp_ops;
2094diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2095index 18f5a55..5072a40 100644
2096--- a/arch/arm/include/asm/smp.h
2097+++ b/arch/arm/include/asm/smp.h
2098@@ -107,7 +107,7 @@ struct smp_operations {
2099 int (*cpu_disable)(unsigned int cpu);
2100 #endif
2101 #endif
2102-};
2103+} __no_const;
2104
2105 struct of_cpu_method {
2106 const char *method;
2107diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2108index d890e41..3921292 100644
2109--- a/arch/arm/include/asm/thread_info.h
2110+++ b/arch/arm/include/asm/thread_info.h
2111@@ -78,9 +78,9 @@ struct thread_info {
2112 .flags = 0, \
2113 .preempt_count = INIT_PREEMPT_COUNT, \
2114 .addr_limit = KERNEL_DS, \
2115- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2116- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2117- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2118+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2119+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2120+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2121 .restart_block = { \
2122 .fn = do_no_restart_syscall, \
2123 }, \
2124@@ -159,7 +159,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2125 #define TIF_SYSCALL_AUDIT 9
2126 #define TIF_SYSCALL_TRACEPOINT 10
2127 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2128-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2129+/* within 8 bits of TIF_SYSCALL_TRACE
2130+ * to meet flexible second operand requirements
2131+ */
2132+#define TIF_GRSEC_SETXID 12
2133+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2134 #define TIF_USING_IWMMXT 17
2135 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2136 #define TIF_RESTORE_SIGMASK 20
2137@@ -173,10 +177,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2138 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2139 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2140 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2141+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2142
2143 /* Checks for any syscall work in entry-common.S */
2144 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2145- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2146+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2147
2148 /*
2149 * Change these and you break ASM code in entry-common.S
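[Editor's note] The "flexible second operand requirements" comment above is an ARM immediate-encoding constraint: a data-processing immediate is an 8-bit value rotated by an even amount, so the combined _TIF_SYSCALL_WORK mask must fit inside one 8-bit window. With TIF_SYSCALL_TRACE at bit 8 (its usual value on this kernel) and TIF_GRSEC_SETXID at bit 12, the mask stays encodable; a quick check:

    #include <assert.h>

    int main(void)
    {
        /* bits 8..12: TRACE, AUDIT, TRACEPOINT, SECCOMP, GRSEC_SETXID */
        unsigned mask = (1u << 8) | (1u << 9) | (1u << 10) | (1u << 11) | (1u << 12);

        /* 0x1f00 is 0x1f shifted into one byte window: a single ARM immediate */
        assert(mask == 0x1f00u && mask == (0x1fu << 8));
        return 0;
    }

Had TIF_GRSEC_SETXID landed above bit 15, the mask would straddle two byte windows and the tst in entry-common.S would need an extra instruction.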
2150diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2151index 5f833f7..76e6644 100644
2152--- a/arch/arm/include/asm/tls.h
2153+++ b/arch/arm/include/asm/tls.h
2154@@ -3,6 +3,7 @@
2155
2156 #include <linux/compiler.h>
2157 #include <asm/thread_info.h>
2158+#include <asm/pgtable.h>
2159
2160 #ifdef __ASSEMBLY__
2161 #include <asm/asm-offsets.h>
2162@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2163 * at 0xffff0fe0 must be used instead. (see
2164 * entry-armv.S for details)
2165 */
2166+ pax_open_kernel();
2167 *((unsigned int *)0xffff0ff0) = val;
2168+ pax_close_kernel();
2169 #endif
2170 }
2171
2172diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2173index 4767eb9..bf00668 100644
2174--- a/arch/arm/include/asm/uaccess.h
2175+++ b/arch/arm/include/asm/uaccess.h
2176@@ -18,6 +18,7 @@
2177 #include <asm/domain.h>
2178 #include <asm/unified.h>
2179 #include <asm/compiler.h>
2180+#include <asm/pgtable.h>
2181
2182 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2183 #include <asm-generic/uaccess-unaligned.h>
2184@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2185 static inline void set_fs(mm_segment_t fs)
2186 {
2187 current_thread_info()->addr_limit = fs;
2188- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2189+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2190 }
2191
2192 #define segment_eq(a,b) ((a) == (b))
2193
2194+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2195+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2196+
2197+static inline void pax_open_userland(void)
2198+{
2199+
2200+#ifdef CONFIG_PAX_MEMORY_UDEREF
2201+ if (segment_eq(get_fs(), USER_DS)) {
2202+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2203+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2204+ }
2205+#endif
2206+
2207+}
2208+
2209+static inline void pax_close_userland(void)
2210+{
2211+
2212+#ifdef CONFIG_PAX_MEMORY_UDEREF
2213+ if (segment_eq(get_fs(), USER_DS)) {
2214+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2215+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2216+ }
2217+#endif
2218+
2219+}
2220+
2221 #define __addr_ok(addr) ({ \
2222 unsigned long flag; \
2223 __asm__("cmp %2, %0; movlo %0, #0" \
2224@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2225
2226 #define get_user(x,p) \
2227 ({ \
2228+ int __e; \
2229 might_fault(); \
2230- __get_user_check(x,p); \
2231+ pax_open_userland(); \
2232+ __e = __get_user_check(x,p); \
2233+ pax_close_userland(); \
2234+ __e; \
2235 })
2236
2237 extern int __put_user_1(void *, unsigned int);
2238@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2239
2240 #define put_user(x,p) \
2241 ({ \
2242+ int __e; \
2243 might_fault(); \
2244- __put_user_check(x,p); \
2245+ pax_open_userland(); \
2246+ __e = __put_user_check(x,p); \
2247+ pax_close_userland(); \
2248+ __e; \
2249 })
2250
2251 #else /* CONFIG_MMU */
2252@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2253
2254 #endif /* CONFIG_MMU */
2255
2256+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2257 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2258
2259 #define user_addr_max() \
2260@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2261 #define __get_user(x,ptr) \
2262 ({ \
2263 long __gu_err = 0; \
2264+ pax_open_userland(); \
2265 __get_user_err((x),(ptr),__gu_err); \
2266+ pax_close_userland(); \
2267 __gu_err; \
2268 })
2269
2270 #define __get_user_error(x,ptr,err) \
2271 ({ \
2272+ pax_open_userland(); \
2273 __get_user_err((x),(ptr),err); \
2274+ pax_close_userland(); \
2275 (void) 0; \
2276 })
2277
2278@@ -368,13 +409,17 @@ do { \
2279 #define __put_user(x,ptr) \
2280 ({ \
2281 long __pu_err = 0; \
2282+ pax_open_userland(); \
2283 __put_user_err((x),(ptr),__pu_err); \
2284+ pax_close_userland(); \
2285 __pu_err; \
2286 })
2287
2288 #define __put_user_error(x,ptr,err) \
2289 ({ \
2290+ pax_open_userland(); \
2291 __put_user_err((x),(ptr),err); \
2292+ pax_close_userland(); \
2293 (void) 0; \
2294 })
2295
2296@@ -474,11 +519,44 @@ do { \
2297
2298
2299 #ifdef CONFIG_MMU
2300-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2301-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2302+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2303+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2304+
2305+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2306+{
2307+ unsigned long ret;
2308+
2309+ check_object_size(to, n, false);
2310+ pax_open_userland();
2311+ ret = ___copy_from_user(to, from, n);
2312+ pax_close_userland();
2313+ return ret;
2314+}
2315+
2316+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2317+{
2318+ unsigned long ret;
2319+
2320+ check_object_size(from, n, true);
2321+ pax_open_userland();
2322+ ret = ___copy_to_user(to, from, n);
2323+ pax_close_userland();
2324+ return ret;
2325+}
2326+
2327 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2328-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2329+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2330 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2331+
2332+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2333+{
2334+ unsigned long ret;
2335+ pax_open_userland();
2336+ ret = ___clear_user(addr, n);
2337+ pax_close_userland();
2338+ return ret;
2339+}
2340+
2341 #else
2342 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2343 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2344@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2345
2346 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2347 {
2348+ if ((long)n < 0)
2349+ return n;
2350+
2351 if (access_ok(VERIFY_READ, from, n))
2352 n = __copy_from_user(to, from, n);
2353 else /* security hole - plug it */
2354@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2355
2356 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2357 {
2358+ if ((long)n < 0)
2359+ return n;
2360+
2361 if (access_ok(VERIFY_WRITE, to, n))
2362 n = __copy_to_user(to, from, n);
2363 return n;
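[Editor's sketch] The (long)n < 0 guard added to copy_from_user()/copy_to_user() above rejects sizes that went "negative" through a signed/unsigned mix-up before any range check or copying happens. A toy illustration of the failure mode it catches; copy_checked() is a stand-in, not the kernel function:

    #include <stdio.h>

    static unsigned long copy_checked(unsigned long n)
    {
        if ((long)n < 0)  /* e.g. n == (unsigned long)-4 from a bad subtraction */
            return n;     /* report everything as uncopied */
        /* ... access_ok() and the actual copy would run here ... */
        return 0;
    }

    int main(void)
    {
        printf("uncopied = %lu\n", copy_checked((unsigned long)-4)); /* rejected */
        printf("uncopied = %lu\n", copy_checked(16));                /* would proceed */
        return 0;
    }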
2364diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2365index 5af0ed1..cea83883 100644
2366--- a/arch/arm/include/uapi/asm/ptrace.h
2367+++ b/arch/arm/include/uapi/asm/ptrace.h
2368@@ -92,7 +92,7 @@
2369 * ARMv7 groups of PSR bits
2370 */
2371 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2372-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2373+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2374 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2375 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2376
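[Editor's note] The PSR_ISET_MASK correction above is bit arithmetic: the ISA-state bits in the CPSR are J at bit 24 and T at bit 5, so the mask is (1 << 24) | (1 << 5) = 0x01000020; the old value 0x01000010 had picked bit 4 instead of the T bit. A one-line sanity check:

    #include <assert.h>

    int main(void)
    {
        assert(((1u << 24) | (1u << 5)) == 0x01000020);  /* new, correct mask */
        assert(((1u << 24) | (1u << 4)) == 0x01000010);  /* old value: wrong bit */
        return 0;
    }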
2377diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2378index a88671c..1cc895e 100644
2379--- a/arch/arm/kernel/armksyms.c
2380+++ b/arch/arm/kernel/armksyms.c
2381@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2382
2383 /* networking */
2384 EXPORT_SYMBOL(csum_partial);
2385-EXPORT_SYMBOL(csum_partial_copy_from_user);
2386+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2387 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2388 EXPORT_SYMBOL(__csum_ipv6_magic);
2389
2390@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2391 #ifdef CONFIG_MMU
2392 EXPORT_SYMBOL(copy_page);
2393
2394-EXPORT_SYMBOL(__copy_from_user);
2395-EXPORT_SYMBOL(__copy_to_user);
2396-EXPORT_SYMBOL(__clear_user);
2397+EXPORT_SYMBOL(___copy_from_user);
2398+EXPORT_SYMBOL(___copy_to_user);
2399+EXPORT_SYMBOL(___clear_user);
2400
2401 EXPORT_SYMBOL(__get_user_1);
2402 EXPORT_SYMBOL(__get_user_2);
2403diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2404index 2f5555d..d493c91 100644
2405--- a/arch/arm/kernel/entry-armv.S
2406+++ b/arch/arm/kernel/entry-armv.S
2407@@ -47,6 +47,87 @@
2408 9997:
2409 .endm
2410
2411+ .macro pax_enter_kernel
2412+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2413+ @ make aligned space for saved DACR
2414+ sub sp, sp, #8
2415+ @ save regs
2416+ stmdb sp!, {r1, r2}
2417+ @ read DACR from cpu_domain into r1
2418+ mov r2, sp
2419+ @ assume 8K pages, since we have to split the immediate in two
2420+ bic r2, r2, #(0x1fc0)
2421+ bic r2, r2, #(0x3f)
2422+ ldr r1, [r2, #TI_CPU_DOMAIN]
2423+ @ store old DACR on stack
2424+ str r1, [sp, #8]
2425+#ifdef CONFIG_PAX_KERNEXEC
2426+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2427+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2428+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2429+#endif
2430+#ifdef CONFIG_PAX_MEMORY_UDEREF
2431+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2432+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2433+#endif
2434+ @ write r1 to current_thread_info()->cpu_domain
2435+ str r1, [r2, #TI_CPU_DOMAIN]
2436+ @ write r1 to DACR
2437+ mcr p15, 0, r1, c3, c0, 0
2438+ @ instruction sync
2439+ instr_sync
2440+ @ restore regs
2441+ ldmia sp!, {r1, r2}
2442+#endif
2443+ .endm
2444+
2445+ .macro pax_open_userland
2446+#ifdef CONFIG_PAX_MEMORY_UDEREF
2447+ @ save regs
2448+ stmdb sp!, {r0, r1}
2449+ @ read DACR from cpu_domain into r1
2450+ mov r0, sp
2451+ @ assume 8K pages, since we have to split the immediate in two
2452+ bic r0, r0, #(0x1fc0)
2453+ bic r0, r0, #(0x3f)
2454+ ldr r1, [r0, #TI_CPU_DOMAIN]
2455+ @ set current DOMAIN_USER to DOMAIN_UDEREF (client)
2456+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2457+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2458+ @ write r1 to current_thread_info()->cpu_domain
2459+ str r1, [r0, #TI_CPU_DOMAIN]
2460+ @ write r1 to DACR
2461+ mcr p15, 0, r1, c3, c0, 0
2462+ @ instruction sync
2463+ instr_sync
2464+ @ restore regs
2465+ ldmia sp!, {r0, r1}
2466+#endif
2467+ .endm
2468+
2469+ .macro pax_close_userland
2470+#ifdef CONFIG_PAX_MEMORY_UDEREF
2471+ @ save regs
2472+ stmdb sp!, {r0, r1}
2473+ @ read DACR from cpu_domain into r1
2474+ mov r0, sp
2475+ @ assume 8K pages, since we have to split the immediate in two
2476+ bic r0, r0, #(0x1fc0)
2477+ bic r0, r0, #(0x3f)
2478+ ldr r1, [r0, #TI_CPU_DOMAIN]
2479+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2480+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2481+ @ write r1 to current_thread_info()->cpu_domain
2482+ str r1, [r0, #TI_CPU_DOMAIN]
2483+ @ write r1 to DACR
2484+ mcr p15, 0, r1, c3, c0, 0
2485+ @ instruction sync
2486+ instr_sync
2487+ @ restore regs
2488+ ldmia sp!, {r0, r1}
2489+#endif
2490+ .endm
2491+
2492 .macro pabt_helper
2493 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2494 #ifdef MULTI_PABORT
2495@@ -89,11 +170,15 @@
2496 * Invalid mode handlers
2497 */
2498 .macro inv_entry, reason
2499+
2500+ pax_enter_kernel
2501+
2502 sub sp, sp, #S_FRAME_SIZE
2503 ARM( stmib sp, {r1 - lr} )
2504 THUMB( stmia sp, {r0 - r12} )
2505 THUMB( str sp, [sp, #S_SP] )
2506 THUMB( str lr, [sp, #S_LR] )
2507+
2508 mov r1, #\reason
2509 .endm
2510
2511@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2512 .macro svc_entry, stack_hole=0, trace=1
2513 UNWIND(.fnstart )
2514 UNWIND(.save {r0 - pc} )
2515+
2516+ pax_enter_kernel
2517+
2518 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2519+
2520 #ifdef CONFIG_THUMB2_KERNEL
2521 SPFIX( str r0, [sp] ) @ temporarily saved
2522 SPFIX( mov r0, sp )
2523@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2524 ldmia r0, {r3 - r5}
2525 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2526 mov r6, #-1 @ "" "" "" ""
2527+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2528+ @ offset sp by 8 as done in pax_enter_kernel
2529+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2530+#else
2531 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2532+#endif
2533 SPFIX( addeq r2, r2, #4 )
2534 str r3, [sp, #-4]! @ save the "real" r0 copied
2535 @ from the exception stack
2536@@ -368,6 +462,9 @@ ENDPROC(__fiq_abt)
2537 .macro usr_entry, trace=1
2538 UNWIND(.fnstart )
2539 UNWIND(.cantunwind ) @ don't unwind the user space
2540+
2541+ pax_enter_kernel_user
2542+
2543 sub sp, sp, #S_FRAME_SIZE
2544 ARM( stmib sp, {r1 - r12} )
2545 THUMB( stmia sp, {r0 - r12} )
2546@@ -478,7 +575,9 @@ __und_usr:
2547 tst r3, #PSR_T_BIT @ Thumb mode?
2548 bne __und_usr_thumb
2549 sub r4, r2, #4 @ ARM instr at LR - 4
2550+ pax_open_userland
2551 1: ldrt r0, [r4]
2552+ pax_close_userland
2553 ARM_BE8(rev r0, r0) @ little endian instruction
2554
2555 @ r0 = 32-bit ARM instruction which caused the exception
2556@@ -512,11 +611,15 @@ __und_usr_thumb:
2557 */
2558 .arch armv6t2
2559 #endif
2560+ pax_open_userland
2561 2: ldrht r5, [r4]
2562+ pax_close_userland
2563 ARM_BE8(rev16 r5, r5) @ little endian instruction
2564 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2565 blo __und_usr_fault_16 @ 16bit undefined instruction
2566+ pax_open_userland
2567 3: ldrht r0, [r2]
2568+ pax_close_userland
2569 ARM_BE8(rev16 r0, r0) @ little endian instruction
2570 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2571 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2572@@ -546,7 +649,8 @@ ENDPROC(__und_usr)
2573 */
2574 .pushsection .fixup, "ax"
2575 .align 2
2576-4: str r4, [sp, #S_PC] @ retry current instruction
2577+4: pax_close_userland
2578+ str r4, [sp, #S_PC] @ retry current instruction
2579 ret r9
2580 .popsection
2581 .pushsection __ex_table,"a"
2582@@ -766,7 +870,7 @@ ENTRY(__switch_to)
2583 THUMB( str lr, [ip], #4 )
2584 ldr r4, [r2, #TI_TP_VALUE]
2585 ldr r5, [r2, #TI_TP_VALUE + 4]
2586-#ifdef CONFIG_CPU_USE_DOMAINS
2587+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2588 ldr r6, [r2, #TI_CPU_DOMAIN]
2589 #endif
2590 switch_tls r1, r4, r5, r3, r7
2591@@ -775,7 +879,7 @@ ENTRY(__switch_to)
2592 ldr r8, =__stack_chk_guard
2593 ldr r7, [r7, #TSK_STACK_CANARY]
2594 #endif
2595-#ifdef CONFIG_CPU_USE_DOMAINS
2596+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2597 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2598 #endif
2599 mov r5, r0
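[Editor's note] All of the DACR macros above locate thread_info with the trick the "assume 8K pages" comments describe: the kernel stack is 8 KiB aligned with thread_info at its base, so clearing the low 13 bits of sp finds it. Because an ARM immediate is an 8-bit value rotated by an even amount, the 0x1fff mask cannot be encoded in one instruction and is split across two bics (0x1fc0 and 0x3f). A check that the split mask is equivalent:

    #include <assert.h>

    int main(void)
    {
        unsigned long sp = 0xc1234f7cUL;                 /* hypothetical kernel sp */
        unsigned long ti = sp & ~0x1fc0UL & ~0x3fUL;     /* two bics, as in the asm */
        assert(ti == (sp & ~0x1fffUL));                  /* same as one 8K-base mask */
        return 0;
    }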
2600diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2601index f8ccc21..83d192f 100644
2602--- a/arch/arm/kernel/entry-common.S
2603+++ b/arch/arm/kernel/entry-common.S
2604@@ -11,18 +11,46 @@
2605 #include <asm/assembler.h>
2606 #include <asm/unistd.h>
2607 #include <asm/ftrace.h>
2608+#include <asm/domain.h>
2609 #include <asm/unwind.h>
2610
2611+#include "entry-header.S"
2612+
2613 #ifdef CONFIG_NEED_RET_TO_USER
2614 #include <mach/entry-macro.S>
2615 #else
2616 .macro arch_ret_to_user, tmp1, tmp2
2617+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2618+ @ save regs
2619+ stmdb sp!, {r1, r2}
2620+ @ read DACR from cpu_domain into r1
2621+ mov r2, sp
2622+ @ assume 8K pages, since we have to split the immediate in two
2623+ bic r2, r2, #(0x1fc0)
2624+ bic r2, r2, #(0x3f)
2625+ ldr r1, [r2, #TI_CPU_DOMAIN]
2626+#ifdef CONFIG_PAX_KERNEXEC
2627+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2628+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2629+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2630+#endif
2631+#ifdef CONFIG_PAX_MEMORY_UDEREF
2632+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2633+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2634+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2635+#endif
2636+ @ write r1 to current_thread_info()->cpu_domain
2637+ str r1, [r2, #TI_CPU_DOMAIN]
2638+ @ write r1 to DACR
2639+ mcr p15, 0, r1, c3, c0, 0
2640+ @ instruction sync
2641+ instr_sync
2642+ @ restore regs
2643+ ldmia sp!, {r1, r2}
2644+#endif
2645 .endm
2646 #endif
2647
2648-#include "entry-header.S"
2649-
2650-
2651 .align 5
2652 /*
2653 * This is the fast syscall return path. We do as little as
2654@@ -171,6 +199,12 @@ ENTRY(vector_swi)
2655 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2656 #endif
2657
2658+ /*
2659+ * do this here to avoid a performance hit of wrapping the code above
2660+ * that directly dereferences userland to parse the SWI instruction
2661+ */
2662+ pax_enter_kernel_user
2663+
2664 adr tbl, sys_call_table @ load syscall table pointer
2665
2666 #if defined(CONFIG_OABI_COMPAT)
2667diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2668index 1a0045a..9b4f34d 100644
2669--- a/arch/arm/kernel/entry-header.S
2670+++ b/arch/arm/kernel/entry-header.S
2671@@ -196,6 +196,60 @@
2672 msr cpsr_c, \rtemp @ switch back to the SVC mode
2673 .endm
2674
2675+ .macro pax_enter_kernel_user
2676+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2677+ @ save regs
2678+ stmdb sp!, {r0, r1}
2679+ @ read DACR from cpu_domain into r1
2680+ mov r0, sp
2681+ @ assume 8K pages, since we have to split the immediate in two
2682+ bic r0, r0, #(0x1fc0)
2683+ bic r0, r0, #(0x3f)
2684+ ldr r1, [r0, #TI_CPU_DOMAIN]
2685+#ifdef CONFIG_PAX_MEMORY_UDEREF
2686+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2687+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2688+#endif
2689+#ifdef CONFIG_PAX_KERNEXEC
2690+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2691+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2692+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2693+#endif
2694+ @ write r1 to current_thread_info()->cpu_domain
2695+ str r1, [r0, #TI_CPU_DOMAIN]
2696+ @ write r1 to DACR
2697+ mcr p15, 0, r1, c3, c0, 0
2698+ @ instruction sync
2699+ instr_sync
2700+ @ restore regs
2701+ ldmia sp!, {r0, r1}
2702+#endif
2703+ .endm
2704+
2705+ .macro pax_exit_kernel
2706+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2707+ @ save regs
2708+ stmdb sp!, {r0, r1}
2709+ @ read old DACR from stack into r1
2710+ ldr r1, [sp, #(8 + S_SP)]
2711+ sub r1, r1, #8
2712+ ldr r1, [r1]
2713+
2714+ @ write r1 to current_thread_info()->cpu_domain
2715+ mov r0, sp
2716+ @ assume 8K pages, since we have to split the immediate in two
2717+ bic r0, r0, #(0x1fc0)
2718+ bic r0, r0, #(0x3f)
2719+ str r1, [r0, #TI_CPU_DOMAIN]
2720+ @ write r1 to DACR
2721+ mcr p15, 0, r1, c3, c0, 0
2722+ @ instruction sync
2723+ instr_sync
2724+ @ restore regs
2725+ ldmia sp!, {r0, r1}
2726+#endif
2727+ .endm
2728+
2729 #ifndef CONFIG_THUMB2_KERNEL
2730 .macro svc_exit, rpsr, irq = 0
2731 .if \irq != 0
2732@@ -215,6 +269,9 @@
2733 blne trace_hardirqs_off
2734 #endif
2735 .endif
2736+
2737+ pax_exit_kernel
2738+
2739 msr spsr_cxsf, \rpsr
2740 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2741 @ We must avoid clrex due to Cortex-A15 erratum #830321
2742@@ -291,6 +348,9 @@
2743 blne trace_hardirqs_off
2744 #endif
2745 .endif
2746+
2747+ pax_exit_kernel
2748+
2749 ldr lr, [sp, #S_SP] @ top of the stack
2750 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2751
2752diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2753index 059c3da..8e45cfc 100644
2754--- a/arch/arm/kernel/fiq.c
2755+++ b/arch/arm/kernel/fiq.c
2756@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2757 void *base = vectors_page;
2758 unsigned offset = FIQ_OFFSET;
2759
2760+ pax_open_kernel();
2761 memcpy(base + offset, start, length);
2762+ pax_close_kernel();
2763+
2764 if (!cache_is_vipt_nonaliasing())
2765 flush_icache_range((unsigned long)base + offset, offset +
2766 length);
2767diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2768index 664eee8..f470938 100644
2769--- a/arch/arm/kernel/head.S
2770+++ b/arch/arm/kernel/head.S
2771@@ -437,7 +437,7 @@ __enable_mmu:
2772 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2773 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2774 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2775- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2776+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2777 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2778 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2779 #endif
2780diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2781index bea7db9..a210d10 100644
2782--- a/arch/arm/kernel/module.c
2783+++ b/arch/arm/kernel/module.c
2784@@ -38,12 +38,39 @@
2785 #endif
2786
2787 #ifdef CONFIG_MMU
2788-void *module_alloc(unsigned long size)
2789+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2790 {
2791+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2792+ return NULL;
2793 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2794- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2795+ GFP_KERNEL, prot, NUMA_NO_NODE,
2796 __builtin_return_address(0));
2797 }
2798+
2799+void *module_alloc(unsigned long size)
2800+{
2801+
2802+#ifdef CONFIG_PAX_KERNEXEC
2803+ return __module_alloc(size, PAGE_KERNEL);
2804+#else
2805+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2806+#endif
2807+
2808+}
2809+
2810+#ifdef CONFIG_PAX_KERNEXEC
2811+void module_memfree_exec(void *module_region)
2812+{
2813+ module_memfree(module_region);
2814+}
2815+EXPORT_SYMBOL(module_memfree_exec);
2816+
2817+void *module_alloc_exec(unsigned long size)
2818+{
2819+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2820+}
2821+EXPORT_SYMBOL(module_alloc_exec);
2822+#endif
2823 #endif
2824
2825 int
2826diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2827index 5038960..4aa71d8 100644
2828--- a/arch/arm/kernel/patch.c
2829+++ b/arch/arm/kernel/patch.c
2830@@ -67,6 +67,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2831 else
2832 __acquire(&patch_lock);
2833
2834+ pax_open_kernel();
2835 if (thumb2 && __opcode_is_thumb16(insn)) {
2836 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2837 size = sizeof(u16);
2838@@ -98,6 +99,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2839 *(u32 *)waddr = insn;
2840 size = sizeof(u32);
2841 }
2842+ pax_close_kernel();
2843
2844 if (waddr != addr) {
2845 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2846diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2847index fdfa3a7..5d208b8 100644
2848--- a/arch/arm/kernel/process.c
2849+++ b/arch/arm/kernel/process.c
2850@@ -207,6 +207,7 @@ void machine_power_off(void)
2851
2852 if (pm_power_off)
2853 pm_power_off();
2854+ BUG();
2855 }
2856
2857 /*
2858@@ -220,7 +221,7 @@ void machine_power_off(void)
2859 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2860 * to use. Implementing such co-ordination would be essentially impossible.
2861 */
2862-void machine_restart(char *cmd)
2863+__noreturn void machine_restart(char *cmd)
2864 {
2865 local_irq_disable();
2866 smp_send_stop();
2867@@ -246,8 +247,8 @@ void __show_regs(struct pt_regs *regs)
2868
2869 show_regs_print_info(KERN_DEFAULT);
2870
2871- print_symbol("PC is at %s\n", instruction_pointer(regs));
2872- print_symbol("LR is at %s\n", regs->ARM_lr);
2873+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2874+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2875 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2876 "sp : %08lx ip : %08lx fp : %08lx\n",
2877 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2878@@ -424,12 +425,6 @@ unsigned long get_wchan(struct task_struct *p)
2879 return 0;
2880 }
2881
2882-unsigned long arch_randomize_brk(struct mm_struct *mm)
2883-{
2884- unsigned long range_end = mm->brk + 0x02000000;
2885- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2886-}
2887-
2888 #ifdef CONFIG_MMU
2889 #ifdef CONFIG_KUSER_HELPERS
2890 /*
2891@@ -445,7 +440,7 @@ static struct vm_area_struct gate_vma = {
2892
2893 static int __init gate_vma_init(void)
2894 {
2895- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2896+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2897 return 0;
2898 }
2899 arch_initcall(gate_vma_init);
2900@@ -474,81 +469,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2901 return is_gate_vma(vma) ? "[vectors]" : NULL;
2902 }
2903
2904-/* If possible, provide a placement hint at a random offset from the
2905- * stack for the signal page.
2906- */
2907-static unsigned long sigpage_addr(const struct mm_struct *mm,
2908- unsigned int npages)
2909-{
2910- unsigned long offset;
2911- unsigned long first;
2912- unsigned long last;
2913- unsigned long addr;
2914- unsigned int slots;
2915-
2916- first = PAGE_ALIGN(mm->start_stack);
2917-
2918- last = TASK_SIZE - (npages << PAGE_SHIFT);
2919-
2920- /* No room after stack? */
2921- if (first > last)
2922- return 0;
2923-
2924- /* Just enough room? */
2925- if (first == last)
2926- return first;
2927-
2928- slots = ((last - first) >> PAGE_SHIFT) + 1;
2929-
2930- offset = get_random_int() % slots;
2931-
2932- addr = first + (offset << PAGE_SHIFT);
2933-
2934- return addr;
2935-}
2936-
2937-static struct page *signal_page;
2938-extern struct page *get_signal_page(void);
2939-
2940-static const struct vm_special_mapping sigpage_mapping = {
2941- .name = "[sigpage]",
2942- .pages = &signal_page,
2943-};
2944-
2945 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2946 {
2947 struct mm_struct *mm = current->mm;
2948- struct vm_area_struct *vma;
2949- unsigned long addr;
2950- unsigned long hint;
2951- int ret = 0;
2952-
2953- if (!signal_page)
2954- signal_page = get_signal_page();
2955- if (!signal_page)
2956- return -ENOMEM;
2957
2958 down_write(&mm->mmap_sem);
2959- hint = sigpage_addr(mm, 1);
2960- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2961- if (IS_ERR_VALUE(addr)) {
2962- ret = addr;
2963- goto up_fail;
2964- }
2965-
2966- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2967- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2968- &sigpage_mapping);
2969-
2970- if (IS_ERR(vma)) {
2971- ret = PTR_ERR(vma);
2972- goto up_fail;
2973- }
2974-
2975- mm->context.sigpage = addr;
2976-
2977- up_fail:
2978+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2979 up_write(&mm->mmap_sem);
2980- return ret;
2981+ return 0;
2982 }
2983 #endif
2984diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2985index f73891b..cf3004e 100644
2986--- a/arch/arm/kernel/psci.c
2987+++ b/arch/arm/kernel/psci.c
2988@@ -28,7 +28,7 @@
2989 #include <asm/psci.h>
2990 #include <asm/system_misc.h>
2991
2992-struct psci_operations psci_ops;
2993+struct psci_operations psci_ops __read_only;
2994
2995 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2996 typedef int (*psci_initcall_t)(const struct device_node *);
2997diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2998index ef9119f..31995a3 100644
2999--- a/arch/arm/kernel/ptrace.c
3000+++ b/arch/arm/kernel/ptrace.c
3001@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3002 regs->ARM_ip = ip;
3003 }
3004
3005+#ifdef CONFIG_GRKERNSEC_SETXID
3006+extern void gr_delayed_cred_worker(void);
3007+#endif
3008+
3009 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3010 {
3011 current_thread_info()->syscall = scno;
3012
3013+#ifdef CONFIG_GRKERNSEC_SETXID
3014+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3015+ gr_delayed_cred_worker();
3016+#endif
3017+
3018 /* Do the secure computing check first; failures should be fast. */
3019 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3020 if (secure_computing() == -1)
3021diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3022index e55408e..14d9998 100644
3023--- a/arch/arm/kernel/setup.c
3024+++ b/arch/arm/kernel/setup.c
3025@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3026 unsigned int elf_hwcap2 __read_mostly;
3027 EXPORT_SYMBOL(elf_hwcap2);
3028
3029+pteval_t __supported_pte_mask __read_only;
3030+pmdval_t __supported_pmd_mask __read_only;
3031
3032 #ifdef MULTI_CPU
3033-struct processor processor __read_mostly;
3034+struct processor processor __read_only;
3035 #endif
3036 #ifdef MULTI_TLB
3037-struct cpu_tlb_fns cpu_tlb __read_mostly;
3038+struct cpu_tlb_fns cpu_tlb __read_only;
3039 #endif
3040 #ifdef MULTI_USER
3041-struct cpu_user_fns cpu_user __read_mostly;
3042+struct cpu_user_fns cpu_user __read_only;
3043 #endif
3044 #ifdef MULTI_CACHE
3045-struct cpu_cache_fns cpu_cache __read_mostly;
3046+struct cpu_cache_fns cpu_cache __read_only;
3047 #endif
3048 #ifdef CONFIG_OUTER_CACHE
3049-struct outer_cache_fns outer_cache __read_mostly;
3050+struct outer_cache_fns outer_cache __read_only;
3051 EXPORT_SYMBOL(outer_cache);
3052 #endif
3053
3054@@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
3055 asm("mrc p15, 0, %0, c0, c1, 4"
3056 : "=r" (mmfr0));
3057 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3058- (mmfr0 & 0x000000f0) >= 0x00000030)
3059+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3060 cpu_arch = CPU_ARCH_ARMv7;
3061- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3062+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3063+ __supported_pte_mask |= L_PTE_PXN;
3064+ __supported_pmd_mask |= PMD_PXNTABLE;
3065+ }
3066+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3067 (mmfr0 & 0x000000f0) == 0x00000020)
3068 cpu_arch = CPU_ARCH_ARMv6;
3069 else
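[Editor's sketch] The setup.c hunk above derives PXN availability from ID_MMFR0: bits [3:0] hold the VMSA support level, and (per the ARM ARM) values 4 and 5 denote VMSAv7 variants that implement the PXN bit, which is what gates setting L_PTE_PXN and PMD_PXNTABLE in __supported_*_mask. A small decoding sketch with illustrative register values:

    #include <stdio.h>

    static int vmsa_has_pxn(unsigned mmfr0)
    {
        unsigned vmsa = mmfr0 & 0x0000000f;  /* VMSA support field */
        return vmsa == 4 || vmsa == 5;       /* VMSAv7 + PXN, or + long descriptors */
    }

    int main(void)
    {
        printf("%d\n", vmsa_has_pxn(0x00000005));  /* 1: PXN available */
        printf("%d\n", vmsa_has_pxn(0x00000003));  /* 0: plain VMSAv7 */
        return 0;
    }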
3070diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3071index 8aa6f1b..0899e08 100644
3072--- a/arch/arm/kernel/signal.c
3073+++ b/arch/arm/kernel/signal.c
3074@@ -24,8 +24,6 @@
3075
3076 extern const unsigned long sigreturn_codes[7];
3077
3078-static unsigned long signal_return_offset;
3079-
3080 #ifdef CONFIG_CRUNCH
3081 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3082 {
3083@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3084 * except when the MPU has protected the vectors
3085 * page from PL0
3086 */
3087- retcode = mm->context.sigpage + signal_return_offset +
3088- (idx << 2) + thumb;
3089+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3090 } else
3091 #endif
3092 {
3093@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3094 } while (thread_flags & _TIF_WORK_MASK);
3095 return 0;
3096 }
3097-
3098-struct page *get_signal_page(void)
3099-{
3100- unsigned long ptr;
3101- unsigned offset;
3102- struct page *page;
3103- void *addr;
3104-
3105- page = alloc_pages(GFP_KERNEL, 0);
3106-
3107- if (!page)
3108- return NULL;
3109-
3110- addr = page_address(page);
3111-
3112- /* Give the signal return code some randomness */
3113- offset = 0x200 + (get_random_int() & 0x7fc);
3114- signal_return_offset = offset;
3115-
3116- /*
3117- * Copy signal return handlers into the vector page, and
3118- * set sigreturn to be a pointer to these.
3119- */
3120- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3121-
3122- ptr = (unsigned long)addr + offset;
3123- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3124-
3125- return page;
3126-}
3127diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3128index 86ef244..c518451 100644
3129--- a/arch/arm/kernel/smp.c
3130+++ b/arch/arm/kernel/smp.c
3131@@ -76,7 +76,7 @@ enum ipi_msg_type {
3132
3133 static DECLARE_COMPLETION(cpu_running);
3134
3135-static struct smp_operations smp_ops;
3136+static struct smp_operations smp_ops __read_only;
3137
3138 void __init smp_set_ops(struct smp_operations *ops)
3139 {
3140diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3141index 7a3be1d..b00c7de 100644
3142--- a/arch/arm/kernel/tcm.c
3143+++ b/arch/arm/kernel/tcm.c
3144@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3145 .virtual = ITCM_OFFSET,
3146 .pfn = __phys_to_pfn(ITCM_OFFSET),
3147 .length = 0,
3148- .type = MT_MEMORY_RWX_ITCM,
3149+ .type = MT_MEMORY_RX_ITCM,
3150 }
3151 };
3152
3153@@ -267,7 +267,9 @@ no_dtcm:
3154 start = &__sitcm_text;
3155 end = &__eitcm_text;
3156 ram = &__itcm_start;
3157+ pax_open_kernel();
3158 memcpy(start, ram, itcm_code_sz);
3159+ pax_close_kernel();
3160 pr_debug("CPU ITCM: copied code from %p - %p\n",
3161 start, end);
3162 itcm_present = true;
3163diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3164index 788e23f..6fa06a1 100644
3165--- a/arch/arm/kernel/traps.c
3166+++ b/arch/arm/kernel/traps.c
3167@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3168 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3169 {
3170 #ifdef CONFIG_KALLSYMS
3171- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3172+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3173 #else
3174 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3175 #endif
3176@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3177 static int die_owner = -1;
3178 static unsigned int die_nest_count;
3179
3180+extern void gr_handle_kernel_exploit(void);
3181+
3182 static unsigned long oops_begin(void)
3183 {
3184 int cpu;
3185@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3186 panic("Fatal exception in interrupt");
3187 if (panic_on_oops)
3188 panic("Fatal exception");
3189+
3190+ gr_handle_kernel_exploit();
3191+
3192 if (signr)
3193 do_exit(signr);
3194 }
3195@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3196 kuser_init(vectors_base);
3197
3198 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3199- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3200+
3201+#ifndef CONFIG_PAX_MEMORY_UDEREF
3202+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3203+#endif
3204+
3205 #else /* ifndef CONFIG_CPU_V7M */
3206 /*
3207 * on V7-M there is no need to copy the vector table to a dedicated
3208diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3209index b31aa73..cc4b7a1 100644
3210--- a/arch/arm/kernel/vmlinux.lds.S
3211+++ b/arch/arm/kernel/vmlinux.lds.S
3212@@ -37,7 +37,7 @@
3213 #endif
3214
3215 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3216- defined(CONFIG_GENERIC_BUG)
3217+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3218 #define ARM_EXIT_KEEP(x) x
3219 #define ARM_EXIT_DISCARD(x)
3220 #else
3221@@ -123,6 +123,8 @@ SECTIONS
3222 #ifdef CONFIG_DEBUG_RODATA
3223 . = ALIGN(1<<SECTION_SHIFT);
3224 #endif
3225+ _etext = .; /* End of text section */
3226+
3227 RO_DATA(PAGE_SIZE)
3228
3229 . = ALIGN(4);
3230@@ -153,8 +155,6 @@ SECTIONS
3231
3232 NOTES
3233
3234- _etext = .; /* End of text and rodata section */
3235-
3236 #ifndef CONFIG_XIP_KERNEL
3237 # ifdef CONFIG_ARM_KERNMEM_PERMS
3238 . = ALIGN(1<<SECTION_SHIFT);
3239diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3240index 0b0d58a..988cb45 100644
3241--- a/arch/arm/kvm/arm.c
3242+++ b/arch/arm/kvm/arm.c
3243@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3244 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3245
3246 /* The VMID used in the VTTBR */
3247-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3248+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3249 static u8 kvm_next_vmid;
3250 static DEFINE_SPINLOCK(kvm_vmid_lock);
3251
3252@@ -351,7 +351,7 @@ void force_vm_exit(const cpumask_t *mask)
3253 */
3254 static bool need_new_vmid_gen(struct kvm *kvm)
3255 {
3256- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3257+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3258 }
3259
3260 /**
3261@@ -384,7 +384,7 @@ static void update_vttbr(struct kvm *kvm)
3262
3263 /* First user of a new VMID generation? */
3264 if (unlikely(kvm_next_vmid == 0)) {
3265- atomic64_inc(&kvm_vmid_gen);
3266+ atomic64_inc_unchecked(&kvm_vmid_gen);
3267 kvm_next_vmid = 1;
3268
3269 /*
3270@@ -401,7 +401,7 @@ static void update_vttbr(struct kvm *kvm)
3271 kvm_call_hyp(__kvm_flush_vm_context);
3272 }
3273
3274- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3275+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3276 kvm->arch.vmid = kvm_next_vmid;
3277 kvm_next_vmid++;
3278
3279@@ -1038,7 +1038,7 @@ static void check_kvm_target_cpu(void *ret)
3280 /**
3281 * Initialize Hyp-mode and memory mappings on all CPUs.
3282 */
3283-int kvm_arch_init(void *opaque)
3284+int kvm_arch_init(const void *opaque)
3285 {
3286 int err;
3287 int ret, cpu;
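[Editor's sketch] The atomic64_unchecked_t conversions in the kvm/arm.c hunk follow the PAX_REFCOUNT convention: ordinary atomics gain overflow detection, so counters that are legitimately allowed to wrap, like this VMID generation counter, are switched to explicitly unchecked variants. A non-atomic userspace stub showing only the API shape; the real implementation is architecture-specific and genuinely atomic:

    #include <stdio.h>

    typedef struct { volatile long long counter; } atomic64_unchecked_t;

    /* stubs: no overflow trap, and (unlike the kernel's) not actually atomic */
    static long long atomic64_read_unchecked(const atomic64_unchecked_t *v) { return v->counter; }
    static void atomic64_inc_unchecked(atomic64_unchecked_t *v) { v->counter++; }

    int main(void)
    {
        atomic64_unchecked_t gen = { 1 };  /* mirrors ATOMIC64_INIT(1) above */
        atomic64_inc_unchecked(&gen);
        printf("generation = %lld\n", atomic64_read_unchecked(&gen));
        return 0;
    }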
3288diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3289index 14a0d98..7771a7d 100644
3290--- a/arch/arm/lib/clear_user.S
3291+++ b/arch/arm/lib/clear_user.S
3292@@ -12,14 +12,14 @@
3293
3294 .text
3295
3296-/* Prototype: int __clear_user(void *addr, size_t sz)
3297+/* Prototype: int ___clear_user(void *addr, size_t sz)
3298 * Purpose : clear some user memory
3299 * Params : addr - user memory address to clear
3300 * : sz - number of bytes to clear
3301 * Returns : number of bytes NOT cleared
3302 */
3303 ENTRY(__clear_user_std)
3304-WEAK(__clear_user)
3305+WEAK(___clear_user)
3306 stmfd sp!, {r1, lr}
3307 mov r2, #0
3308 cmp r1, #4
3309@@ -44,7 +44,7 @@ WEAK(__clear_user)
3310 USER( strnebt r2, [r0])
3311 mov r0, #0
3312 ldmfd sp!, {r1, pc}
3313-ENDPROC(__clear_user)
3314+ENDPROC(___clear_user)
3315 ENDPROC(__clear_user_std)
3316
3317 .pushsection .fixup,"ax"
3318diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3319index 7a235b9..73a0556 100644
3320--- a/arch/arm/lib/copy_from_user.S
3321+++ b/arch/arm/lib/copy_from_user.S
3322@@ -17,7 +17,7 @@
3323 /*
3324 * Prototype:
3325 *
3326- * size_t __copy_from_user(void *to, const void *from, size_t n)
3327+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3328 *
3329 * Purpose:
3330 *
3331@@ -89,11 +89,11 @@
3332
3333 .text
3334
3335-ENTRY(__copy_from_user)
3336+ENTRY(___copy_from_user)
3337
3338 #include "copy_template.S"
3339
3340-ENDPROC(__copy_from_user)
3341+ENDPROC(___copy_from_user)
3342
3343 .pushsection .fixup,"ax"
3344 .align 0
3345diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3346index 6ee2f67..d1cce76 100644
3347--- a/arch/arm/lib/copy_page.S
3348+++ b/arch/arm/lib/copy_page.S
3349@@ -10,6 +10,7 @@
3350 * ASM optimised string functions
3351 */
3352 #include <linux/linkage.h>
3353+#include <linux/const.h>
3354 #include <asm/assembler.h>
3355 #include <asm/asm-offsets.h>
3356 #include <asm/cache.h>
3357diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3358index a9d3db1..164b089 100644
3359--- a/arch/arm/lib/copy_to_user.S
3360+++ b/arch/arm/lib/copy_to_user.S
3361@@ -17,7 +17,7 @@
3362 /*
3363 * Prototype:
3364 *
3365- * size_t __copy_to_user(void *to, const void *from, size_t n)
3366+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3367 *
3368 * Purpose:
3369 *
3370@@ -93,11 +93,11 @@
3371 .text
3372
3373 ENTRY(__copy_to_user_std)
3374-WEAK(__copy_to_user)
3375+WEAK(___copy_to_user)
3376
3377 #include "copy_template.S"
3378
3379-ENDPROC(__copy_to_user)
3380+ENDPROC(___copy_to_user)
3381 ENDPROC(__copy_to_user_std)
3382
3383 .pushsection .fixup,"ax"
3384diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3385index 7d08b43..f7ca7ea 100644
3386--- a/arch/arm/lib/csumpartialcopyuser.S
3387+++ b/arch/arm/lib/csumpartialcopyuser.S
3388@@ -57,8 +57,8 @@
3389 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3390 */
3391
3392-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3393-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3394+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3395+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3396
3397 #include "csumpartialcopygeneric.S"
3398
3399diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3400index 312d43e..21d2322 100644
3401--- a/arch/arm/lib/delay.c
3402+++ b/arch/arm/lib/delay.c
3403@@ -29,7 +29,7 @@
3404 /*
3405 * Default to the loop-based delay implementation.
3406 */
3407-struct arm_delay_ops arm_delay_ops = {
3408+struct arm_delay_ops arm_delay_ops __read_only = {
3409 .delay = __loop_delay,
3410 .const_udelay = __loop_const_udelay,
3411 .udelay = __loop_udelay,
3412diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3413index 3e58d71..029817c 100644
3414--- a/arch/arm/lib/uaccess_with_memcpy.c
3415+++ b/arch/arm/lib/uaccess_with_memcpy.c
3416@@ -136,7 +136,7 @@ out:
3417 }
3418
3419 unsigned long
3420-__copy_to_user(void __user *to, const void *from, unsigned long n)
3421+___copy_to_user(void __user *to, const void *from, unsigned long n)
3422 {
3423 /*
3424 * This test is stubbed out of the main function above to keep
3425@@ -190,7 +190,7 @@ out:
3426 return n;
3427 }
3428
3429-unsigned long __clear_user(void __user *addr, unsigned long n)
3430+unsigned long ___clear_user(void __user *addr, unsigned long n)
3431 {
3432 /* See rationale for this in __copy_to_user() above. */
3433 if (n < 64)
3434diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3435index ce25e85..3dd7850 100644
3436--- a/arch/arm/mach-at91/setup.c
3437+++ b/arch/arm/mach-at91/setup.c
3438@@ -57,7 +57,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3439
3440 desc->pfn = __phys_to_pfn(base);
3441 desc->length = length;
3442- desc->type = MT_MEMORY_RWX_NONCACHED;
3443+ desc->type = MT_MEMORY_RW_NONCACHED;
3444
3445 pr_info("sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3446 base, length, desc->virtual);
3447diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3448index f8e7dcd..17ee921 100644
3449--- a/arch/arm/mach-exynos/suspend.c
3450+++ b/arch/arm/mach-exynos/suspend.c
3451@@ -18,6 +18,7 @@
3452 #include <linux/syscore_ops.h>
3453 #include <linux/cpu_pm.h>
3454 #include <linux/io.h>
3455+#include <linux/irq.h>
3456 #include <linux/irqchip/arm-gic.h>
3457 #include <linux/err.h>
3458 #include <linux/regulator/machine.h>
3459@@ -558,8 +559,10 @@ void __init exynos_pm_init(void)
3460 tmp |= pm_data->wake_disable_mask;
3461 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3462
3463- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3464- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3465+ pax_open_kernel();
3466+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3467+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3468+ pax_close_kernel();
3469
3470 register_syscore_ops(&exynos_pm_syscore_ops);
3471 suspend_set_ops(&exynos_suspend_ops);
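
Once exynos_pm_syscore_ops is subject to grsecurity's constification, its members can no longer be assigned directly: pax_open_kernel() and pax_close_kernel() briefly lift the kernel's write protection, and the cast through void ** sidesteps the const qualifier the constify plugin imposes on ops structures. The same pattern with hypothetical names, as a sketch:

static struct pm_hooks {
	int (*suspend)(void);
	void (*resume)(void);
} hooks __read_only;			/* write-protected after boot */

static void __init install_hooks(int (*s)(void), void (*r)(void))
{
	pax_open_kernel();		/* temporarily allow kernel writes */
	*(void **)&hooks.suspend = s;	/* cast around the enforced constness */
	*(void **)&hooks.resume = r;
	pax_close_kernel();
}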
3472diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3473index 7f352de..6dc0929 100644
3474--- a/arch/arm/mach-keystone/keystone.c
3475+++ b/arch/arm/mach-keystone/keystone.c
3476@@ -27,7 +27,7 @@
3477
3478 #include "keystone.h"
3479
3480-static struct notifier_block platform_nb;
3481+static notifier_block_no_const platform_nb;
3482 static unsigned long keystone_dma_pfn_offset __read_mostly;
3483
3484 static int keystone_platform_notifier(struct notifier_block *nb,
3485diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3486index ccef880..5dfad80 100644
3487--- a/arch/arm/mach-mvebu/coherency.c
3488+++ b/arch/arm/mach-mvebu/coherency.c
3489@@ -164,7 +164,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3490
3491 /*
3492 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3493- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3494+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3495 * is needed as a workaround for a deadlock issue between the PCIe
3496 * interface and the cache controller.
3497 */
3498@@ -177,7 +177,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3499 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3500
3501 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3502- mtype = MT_UNCACHED;
3503+ mtype = MT_UNCACHED_RW;
3504
3505 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3506 }
3507diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3508index b6443a4..20a0b74 100644
3509--- a/arch/arm/mach-omap2/board-n8x0.c
3510+++ b/arch/arm/mach-omap2/board-n8x0.c
3511@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3512 }
3513 #endif
3514
3515-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3516+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3517 .late_init = n8x0_menelaus_late_init,
3518 };
3519
3520diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3521index 79f49d9..70bf184 100644
3522--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3523+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3524@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3525 void (*resume)(void);
3526 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3527 void (*hotplug_restart)(void);
3528-};
3529+} __no_const;
3530
3531 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3532 static struct powerdomain *mpuss_pd;
3533@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3534 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3535 {}
3536
3537-struct cpu_pm_ops omap_pm_ops = {
3538+static struct cpu_pm_ops omap_pm_ops __read_only = {
3539 .finish_suspend = default_finish_suspend,
3540 .resume = dummy_cpu_resume,
3541 .scu_prepare = dummy_scu_prepare,
3542diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3543index 5305ec7..6d74045 100644
3544--- a/arch/arm/mach-omap2/omap-smp.c
3545+++ b/arch/arm/mach-omap2/omap-smp.c
3546@@ -19,6 +19,7 @@
3547 #include <linux/device.h>
3548 #include <linux/smp.h>
3549 #include <linux/io.h>
3550+#include <linux/irq.h>
3551 #include <linux/irqchip/arm-gic.h>
3552
3553 #include <asm/smp_scu.h>
3554diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3555index f961c46..4a453dc 100644
3556--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3557+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3558@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3559 return NOTIFY_OK;
3560 }
3561
3562-static struct notifier_block __refdata irq_hotplug_notifier = {
3563+static struct notifier_block irq_hotplug_notifier = {
3564 .notifier_call = irq_cpu_hotplug_notify,
3565 };
3566
3567diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3568index be9541e..821805f 100644
3569--- a/arch/arm/mach-omap2/omap_device.c
3570+++ b/arch/arm/mach-omap2/omap_device.c
3571@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3572 struct platform_device __init *omap_device_build(const char *pdev_name,
3573 int pdev_id,
3574 struct omap_hwmod *oh,
3575- void *pdata, int pdata_len)
3576+ const void *pdata, int pdata_len)
3577 {
3578 struct omap_hwmod *ohs[] = { oh };
3579
3580@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3581 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3582 int pdev_id,
3583 struct omap_hwmod **ohs,
3584- int oh_cnt, void *pdata,
3585+ int oh_cnt, const void *pdata,
3586 int pdata_len)
3587 {
3588 int ret = -ENOMEM;
3589diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3590index 78c02b3..c94109a 100644
3591--- a/arch/arm/mach-omap2/omap_device.h
3592+++ b/arch/arm/mach-omap2/omap_device.h
3593@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3594 /* Core code interface */
3595
3596 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3597- struct omap_hwmod *oh, void *pdata,
3598+ struct omap_hwmod *oh, const void *pdata,
3599 int pdata_len);
3600
3601 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3602 struct omap_hwmod **oh, int oh_cnt,
3603- void *pdata, int pdata_len);
3604+ const void *pdata, int pdata_len);
3605
3606 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3607 struct omap_hwmod **ohs, int oh_cnt);
3608diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3609index 9025fff..3555702 100644
3610--- a/arch/arm/mach-omap2/omap_hwmod.c
3611+++ b/arch/arm/mach-omap2/omap_hwmod.c
3612@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3613 int (*init_clkdm)(struct omap_hwmod *oh);
3614 void (*update_context_lost)(struct omap_hwmod *oh);
3615 int (*get_context_lost)(struct omap_hwmod *oh);
3616-};
3617+} __no_const;
3618
3619 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3620-static struct omap_hwmod_soc_ops soc_ops;
3621+static struct omap_hwmod_soc_ops soc_ops __read_only;
3622
3623 /* omap_hwmod_list contains all registered struct omap_hwmods */
3624 static LIST_HEAD(omap_hwmod_list);
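
grsecurity's constify gcc plugin implicitly makes any struct consisting only of function pointers const, which would break soc_ops here because, per the file's own comment, it is populated at init time to match the currently-booted SoC. The __no_const annotation opts the type out of that treatment, while marking the one instance __read_only keeps it write-protected anyway, with updates done under pax_open_kernel() as in the surrounding hunks. Sketch of the combination, names hypothetical:

struct soc_hooks {
	int (*enable)(struct device *dev);
	int (*disable)(struct device *dev);
} __no_const;				/* constify plugin: leave the type writable */

static struct soc_hooks soc_hooks __read_only;	/* the instance still ends up RO */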
3625diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3626index 95fee54..cfa9cf1 100644
3627--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3628+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3629@@ -10,6 +10,7 @@
3630
3631 #include <linux/kernel.h>
3632 #include <linux/init.h>
3633+#include <asm/pgtable.h>
3634
3635 #include "powerdomain.h"
3636
3637@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3638
3639 void __init am43xx_powerdomains_init(void)
3640 {
3641- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3642+ pax_open_kernel();
3643+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3644+ pax_close_kernel();
3645 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3646 pwrdm_register_pwrdms(powerdomains_am43xx);
3647 pwrdm_complete_init();
3648diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3649index ff0a68c..b312aa0 100644
3650--- a/arch/arm/mach-omap2/wd_timer.c
3651+++ b/arch/arm/mach-omap2/wd_timer.c
3652@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3653 struct omap_hwmod *oh;
3654 char *oh_name = "wd_timer2";
3655 char *dev_name = "omap_wdt";
3656- struct omap_wd_timer_platform_data pdata;
3657+ static struct omap_wd_timer_platform_data pdata = {
3658+ .read_reset_sources = prm_read_reset_sources
3659+ };
3660
3661 if (!cpu_class_is_omap2() || of_have_populated_dt())
3662 return 0;
3663@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3664 return -EINVAL;
3665 }
3666
3667- pdata.read_reset_sources = prm_read_reset_sources;
3668-
3669 pdev = omap_device_build(dev_name, id, oh, &pdata,
3670 sizeof(struct omap_wd_timer_platform_data));
3671 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3672diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3673index 4f25a7c..a81be85 100644
3674--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3675+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3676@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3677 bool entered_lp2 = false;
3678
3679 if (tegra_pending_sgi())
3680- ACCESS_ONCE(abort_flag) = true;
3681+ ACCESS_ONCE_RW(abort_flag) = true;
3682
3683 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3684
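
grsecurity redefines ACCESS_ONCE() with a const-qualified cast so that a write through it no longer compiles; code that genuinely needs a volatile write, like the abort_flag store above, moves to ACCESS_ONCE_RW(). The two definitions are approximately:

#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))	/* read-only */
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))		/* writable */

This turns accidental writes through the common macro into compile-time errors rather than silent stores into data that may have been moved to a read-only section.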
3685diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3686index ab95f53..4b977a7 100644
3687--- a/arch/arm/mach-tegra/irq.c
3688+++ b/arch/arm/mach-tegra/irq.c
3689@@ -20,6 +20,7 @@
3690 #include <linux/cpu_pm.h>
3691 #include <linux/interrupt.h>
3692 #include <linux/io.h>
3693+#include <linux/irq.h>
3694 #include <linux/irqchip/arm-gic.h>
3695 #include <linux/irq.h>
3696 #include <linux/kernel.h>
3697diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3698index 2cb587b..6ddfebf 100644
3699--- a/arch/arm/mach-ux500/pm.c
3700+++ b/arch/arm/mach-ux500/pm.c
3701@@ -10,6 +10,7 @@
3702 */
3703
3704 #include <linux/kernel.h>
3705+#include <linux/irq.h>
3706 #include <linux/irqchip/arm-gic.h>
3707 #include <linux/delay.h>
3708 #include <linux/io.h>
3709diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3710index 2dea8b5..6499da2 100644
3711--- a/arch/arm/mach-ux500/setup.h
3712+++ b/arch/arm/mach-ux500/setup.h
3713@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3714 .type = MT_DEVICE, \
3715 }
3716
3717-#define __MEM_DEV_DESC(x, sz) { \
3718- .virtual = IO_ADDRESS(x), \
3719- .pfn = __phys_to_pfn(x), \
3720- .length = sz, \
3721- .type = MT_MEMORY_RWX, \
3722-}
3723-
3724 extern struct smp_operations ux500_smp_ops;
3725 extern void ux500_cpu_die(unsigned int cpu);
3726
3727diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3728index 52d768f..5f93180 100644
3729--- a/arch/arm/mach-zynq/platsmp.c
3730+++ b/arch/arm/mach-zynq/platsmp.c
3731@@ -24,6 +24,7 @@
3732 #include <linux/io.h>
3733 #include <asm/cacheflush.h>
3734 #include <asm/smp_scu.h>
3735+#include <linux/irq.h>
3736 #include <linux/irqchip/arm-gic.h>
3737 #include "common.h"
3738
3739diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3740index c43c714..4f8f7b9 100644
3741--- a/arch/arm/mm/Kconfig
3742+++ b/arch/arm/mm/Kconfig
3743@@ -446,6 +446,7 @@ config CPU_32v5
3744
3745 config CPU_32v6
3746 bool
3747+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3748 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3749
3750 config CPU_32v6K
3751@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3752
3753 config CPU_USE_DOMAINS
3754 bool
3755+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3756 help
3757 This option enables or disables the use of domain switching
3758 via the set_fs() function.
3759@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3760
3761 config KUSER_HELPERS
3762 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3763- depends on MMU
3764+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3765 default y
3766 help
3767 Warning: disabling this option may break user programs.
3768@@ -812,7 +814,7 @@ config KUSER_HELPERS
3769 See Documentation/arm/kernel_user_helpers.txt for details.
3770
3771 However, the fixed address nature of these helpers can be used
3772- by ROP (return orientated programming) authors when creating
3773+ by ROP (Return Oriented Programming) authors when creating
3774 exploits.
3775
3776 If all of the binaries and libraries which run on your platform
3777diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3778index 2c0c541..4585df9 100644
3779--- a/arch/arm/mm/alignment.c
3780+++ b/arch/arm/mm/alignment.c
3781@@ -216,10 +216,12 @@ union offset_union {
3782 #define __get16_unaligned_check(ins,val,addr) \
3783 do { \
3784 unsigned int err = 0, v, a = addr; \
3785+ pax_open_userland(); \
3786 __get8_unaligned_check(ins,v,a,err); \
3787 val = v << ((BE) ? 8 : 0); \
3788 __get8_unaligned_check(ins,v,a,err); \
3789 val |= v << ((BE) ? 0 : 8); \
3790+ pax_close_userland(); \
3791 if (err) \
3792 goto fault; \
3793 } while (0)
3794@@ -233,6 +235,7 @@ union offset_union {
3795 #define __get32_unaligned_check(ins,val,addr) \
3796 do { \
3797 unsigned int err = 0, v, a = addr; \
3798+ pax_open_userland(); \
3799 __get8_unaligned_check(ins,v,a,err); \
3800 val = v << ((BE) ? 24 : 0); \
3801 __get8_unaligned_check(ins,v,a,err); \
3802@@ -241,6 +244,7 @@ union offset_union {
3803 val |= v << ((BE) ? 8 : 16); \
3804 __get8_unaligned_check(ins,v,a,err); \
3805 val |= v << ((BE) ? 0 : 24); \
3806+ pax_close_userland(); \
3807 if (err) \
3808 goto fault; \
3809 } while (0)
3810@@ -254,6 +258,7 @@ union offset_union {
3811 #define __put16_unaligned_check(ins,val,addr) \
3812 do { \
3813 unsigned int err = 0, v = val, a = addr; \
3814+ pax_open_userland(); \
3815 __asm__( FIRST_BYTE_16 \
3816 ARM( "1: "ins" %1, [%2], #1\n" ) \
3817 THUMB( "1: "ins" %1, [%2]\n" ) \
3818@@ -273,6 +278,7 @@ union offset_union {
3819 " .popsection\n" \
3820 : "=r" (err), "=&r" (v), "=&r" (a) \
3821 : "0" (err), "1" (v), "2" (a)); \
3822+ pax_close_userland(); \
3823 if (err) \
3824 goto fault; \
3825 } while (0)
3826@@ -286,6 +292,7 @@ union offset_union {
3827 #define __put32_unaligned_check(ins,val,addr) \
3828 do { \
3829 unsigned int err = 0, v = val, a = addr; \
3830+ pax_open_userland(); \
3831 __asm__( FIRST_BYTE_32 \
3832 ARM( "1: "ins" %1, [%2], #1\n" ) \
3833 THUMB( "1: "ins" %1, [%2]\n" ) \
3834@@ -315,6 +322,7 @@ union offset_union {
3835 " .popsection\n" \
3836 : "=r" (err), "=&r" (v), "=&r" (a) \
3837 : "0" (err), "1" (v), "2" (a)); \
3838+ pax_close_userland(); \
3839 if (err) \
3840 goto fault; \
3841 } while (0)
3842diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3843index 5e65ca8..879e7b3 100644
3844--- a/arch/arm/mm/cache-l2x0.c
3845+++ b/arch/arm/mm/cache-l2x0.c
3846@@ -42,7 +42,7 @@ struct l2c_init_data {
3847 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
3848 void (*save)(void __iomem *);
3849 struct outer_cache_fns outer_cache;
3850-};
3851+} __do_const;
3852
3853 #define CACHE_LINE_SIZE 32
3854
3855diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3856index 845769e..4278fd7 100644
3857--- a/arch/arm/mm/context.c
3858+++ b/arch/arm/mm/context.c
3859@@ -43,7 +43,7 @@
3860 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3861
3862 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3863-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3864+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3865 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3866
3867 static DEFINE_PER_CPU(atomic64_t, active_asids);
3868@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3869 {
3870 static u32 cur_idx = 1;
3871 u64 asid = atomic64_read(&mm->context.id);
3872- u64 generation = atomic64_read(&asid_generation);
3873+ u64 generation = atomic64_read_unchecked(&asid_generation);
3874
3875 if (asid != 0) {
3876 /*
3877@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3878 */
3879 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3880 if (asid == NUM_USER_ASIDS) {
3881- generation = atomic64_add_return(ASID_FIRST_VERSION,
3882+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3883 &asid_generation);
3884 flush_context(cpu);
3885 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3886@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3887 cpu_set_reserved_ttbr0();
3888
3889 asid = atomic64_read(&mm->context.id);
3890- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3891+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3892 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3893 goto switch_mm_fastpath;
3894
3895 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3896 /* Check that our ASID belongs to the current generation. */
3897 asid = atomic64_read(&mm->context.id);
3898- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3899+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3900 asid = new_context(mm, cpu);
3901 atomic64_set(&mm->context.id, asid);
3902 }
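
asid_generation is a counter that wraps by design, which is exactly what PAX_REFCOUNT's instrumented atomics are built to trap: under that option the regular atomic64 operations detect signed overflow and report it. The _unchecked type and helpers opt this counter out. On architectures without the instrumentation they are plain aliases (see the frv atomic.h hunk later in this patch); conceptually:

typedef struct {
	long long counter;
} atomic64_unchecked_t;			/* same layout, no overflow trap */

static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
{
	return v->counter;		/* uninstrumented access */
}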
3903diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3904index a982dc3..2d9f5f7 100644
3905--- a/arch/arm/mm/fault.c
3906+++ b/arch/arm/mm/fault.c
3907@@ -25,6 +25,7 @@
3908 #include <asm/system_misc.h>
3909 #include <asm/system_info.h>
3910 #include <asm/tlbflush.h>
3911+#include <asm/sections.h>
3912
3913 #include "fault.h"
3914
3915@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3916 if (fixup_exception(regs))
3917 return;
3918
3919+#ifdef CONFIG_PAX_MEMORY_UDEREF
3920+ if (addr < TASK_SIZE) {
3921+ if (current->signal->curr_ip)
3922+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3923+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3924+ else
3925+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3926+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3927+ }
3928+#endif
3929+
3930+#ifdef CONFIG_PAX_KERNEXEC
3931+ if ((fsr & FSR_WRITE) &&
3932+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3933+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3934+ {
3935+ if (current->signal->curr_ip)
3936+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3937+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3938+ else
3939+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3940+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3941+ }
3942+#endif
3943+
3944 /*
3945 * No handler, we'll have to terminate things with extreme prejudice.
3946 */
3947@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3948 }
3949 #endif
3950
3951+#ifdef CONFIG_PAX_PAGEEXEC
3952+ if (fsr & FSR_LNX_PF) {
3953+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3954+ do_group_exit(SIGKILL);
3955+ }
3956+#endif
3957+
3958 tsk->thread.address = addr;
3959 tsk->thread.error_code = fsr;
3960 tsk->thread.trap_no = 14;
3961@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3962 }
3963 #endif /* CONFIG_MMU */
3964
3965+#ifdef CONFIG_PAX_PAGEEXEC
3966+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3967+{
3968+ long i;
3969+
3970+ printk(KERN_ERR "PAX: bytes at PC: ");
3971+ for (i = 0; i < 20; i++) {
3972+ unsigned char c;
3973+ if (get_user(c, (__force unsigned char __user *)pc+i))
3974+ printk(KERN_CONT "?? ");
3975+ else
3976+ printk(KERN_CONT "%02x ", c);
3977+ }
3978+ printk("\n");
3979+
3980+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3981+ for (i = -1; i < 20; i++) {
3982+ unsigned long c;
3983+ if (get_user(c, (__force unsigned long __user *)sp+i))
3984+ printk(KERN_CONT "???????? ");
3985+ else
3986+ printk(KERN_CONT "%08lx ", c);
3987+ }
3988+ printk("\n");
3989+}
3990+#endif
3991+
3992 /*
3993 * First Level Translation Fault Handler
3994 *
3995@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3996 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3997 struct siginfo info;
3998
3999+#ifdef CONFIG_PAX_MEMORY_UDEREF
4000+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4001+ if (current->signal->curr_ip)
4002+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4003+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4004+ else
4005+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4006+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4007+ goto die;
4008+ }
4009+#endif
4010+
4011 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4012 return;
4013
4014+die:
4015 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4016 inf->name, fsr, addr);
4017
4018@@ -573,15 +646,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4019 ifsr_info[nr].name = name;
4020 }
4021
4022+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4023+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4024+
4025 asmlinkage void __exception
4026 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4027 {
4028 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4029 struct siginfo info;
4030+ unsigned long pc = instruction_pointer(regs);
4031+
4032+ if (user_mode(regs)) {
4033+ unsigned long sigpage = current->mm->context.sigpage;
4034+
4035+ if (sigpage <= pc && pc < sigpage + 7*4) {
4036+ if (pc < sigpage + 3*4)
4037+ sys_sigreturn(regs);
4038+ else
4039+ sys_rt_sigreturn(regs);
4040+ return;
4041+ }
4042+ if (pc == 0xffff0f60UL) {
4043+ /*
4044+ * PaX: __kuser_cmpxchg64 emulation
4045+ */
4046+ // TODO
4047+ //regs->ARM_pc = regs->ARM_lr;
4048+ //return;
4049+ }
4050+ if (pc == 0xffff0fa0UL) {
4051+ /*
4052+ * PaX: __kuser_memory_barrier emulation
4053+ */
4054+ // dmb(); implied by the exception
4055+ regs->ARM_pc = regs->ARM_lr;
4056+ return;
4057+ }
4058+ if (pc == 0xffff0fc0UL) {
4059+ /*
4060+ * PaX: __kuser_cmpxchg emulation
4061+ */
4062+ // TODO
4063+ //long new;
4064+ //int op;
4065+
4066+ //op = FUTEX_OP_SET << 28;
4067+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4068+ //regs->ARM_r0 = old != new;
4069+ //regs->ARM_pc = regs->ARM_lr;
4070+ //return;
4071+ }
4072+ if (pc == 0xffff0fe0UL) {
4073+ /*
4074+ * PaX: __kuser_get_tls emulation
4075+ */
4076+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4077+ regs->ARM_pc = regs->ARM_lr;
4078+ return;
4079+ }
4080+ }
4081+
4082+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4083+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4084+ if (current->signal->curr_ip)
4085+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4086+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4087+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4088+ else
4089+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4090+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4091+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4092+ goto die;
4093+ }
4094+#endif
4095+
4096+#ifdef CONFIG_PAX_REFCOUNT
4097+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4098+#ifdef CONFIG_THUMB2_KERNEL
4099+ unsigned short bkpt;
4100+
4101+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4102+#else
4103+ unsigned int bkpt;
4104+
4105+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4106+#endif
4107+ current->thread.error_code = ifsr;
4108+ current->thread.trap_no = 0;
4109+ pax_report_refcount_overflow(regs);
4110+ fixup_exception(regs);
4111+ return;
4112+ }
4113+ }
4114+#endif
4115
4116 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4117 return;
4118
4119+die:
4120 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4121 inf->name, ifsr, addr);
4122
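
The magic PC values in the do_PrefetchAbort() hunk are the fixed kuser helper entry points from Documentation/arm/kernel_user_helpers.txt:

  0xffff0f60  __kuser_cmpxchg64       (emulation left as a TODO above)
  0xffff0fa0  __kuser_memory_barrier  (the abort itself implies the barrier)
  0xffff0fc0  __kuser_cmpxchg         (TODO)
  0xffff0fe0  __kuser_get_tls         (answered from tp_value[0])

With KERNEXEC/UDEREF the vector page is no longer executable from userland, so legacy binaries that call these helpers trap into the prefetch abort handler and are emulated by comparing the faulting PC, instead of running code out of the vector page.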
4123diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4124index cf08bdf..772656c 100644
4125--- a/arch/arm/mm/fault.h
4126+++ b/arch/arm/mm/fault.h
4127@@ -3,6 +3,7 @@
4128
4129 /*
4130 * Fault status register encodings. We steal bit 31 for our own purposes.
4131+ * Set when the FSR value is from an instruction fault.
4132 */
4133 #define FSR_LNX_PF (1 << 31)
4134 #define FSR_WRITE (1 << 11)
4135@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4136 }
4137 #endif
4138
4139+/* valid for LPAE and !LPAE */
4140+static inline int is_xn_fault(unsigned int fsr)
4141+{
4142+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4143+}
4144+
4145+static inline int is_domain_fault(unsigned int fsr)
4146+{
4147+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4148+}
4149+
4150 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4151 unsigned long search_exception_table(unsigned long addr);
4152
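
Decoding the two masks against the FSR fault-status tables (hedged; the ARM ARM is authoritative): fs & 0xD == 0x9 matches fs = 0x9 (domain fault, section) and fs = 0xB (domain fault, page), folding the section and page variants together. fs & 0x3C == 0xC matches fs = 0xC..0xF, which covers the short-descriptor permission faults (0xD section, 0xF page) and the LPAE 0b0011LL permission faults at any lookup level, hence the "valid for LPAE and !LPAE" comment.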
4153diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4154index 2495c8c..415b7fc 100644
4155--- a/arch/arm/mm/init.c
4156+++ b/arch/arm/mm/init.c
4157@@ -758,7 +758,46 @@ void free_tcmmem(void)
4158 {
4159 #ifdef CONFIG_HAVE_TCM
4160 extern char __tcm_start, __tcm_end;
4161+#endif
4162
4163+#ifdef CONFIG_PAX_KERNEXEC
4164+ unsigned long addr;
4165+ pgd_t *pgd;
4166+ pud_t *pud;
4167+ pmd_t *pmd;
4168+ int cpu_arch = cpu_architecture();
4169+ unsigned int cr = get_cr();
4170+
4171+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4172+ /* make page tables, etc. before .text NX */
4173+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4174+ pgd = pgd_offset_k(addr);
4175+ pud = pud_offset(pgd, addr);
4176+ pmd = pmd_offset(pud, addr);
4177+ __section_update(pmd, addr, PMD_SECT_XN);
4178+ }
4179+ /* make init NX */
4180+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4181+ pgd = pgd_offset_k(addr);
4182+ pud = pud_offset(pgd, addr);
4183+ pmd = pmd_offset(pud, addr);
4184+ __section_update(pmd, addr, PMD_SECT_XN);
4185+ }
4186+ /* make kernel code/rodata RX */
4187+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4188+ pgd = pgd_offset_k(addr);
4189+ pud = pud_offset(pgd, addr);
4190+ pmd = pmd_offset(pud, addr);
4191+#ifdef CONFIG_ARM_LPAE
4192+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4193+#else
4194+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4195+#endif
4196+ }
4197+ }
4198+#endif
4199+
4200+#ifdef CONFIG_HAVE_TCM
4201 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4202 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4203 #endif
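
The net effect of the KERNEXEC block added to free_tcmmem(), applied one SECTION_SIZE step at a time on ARMv6+ cores with CR_XP set:

  PAGE_OFFSET .. _stext      becomes XN  (early page tables and other pre-text data must never execute)
  __init_begin .. _sdata     becomes XN  (init text has finished running by this point)
  _stext .. __init_begin     becomes RO  (text/rodata end up RX; LPAE uses PMD_SECT_RDONLY, classic page tables the APX/AP_WRITE bits)

After this pass the kernel image is mapped W^X: no region is both writable and executable.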
4204diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4205index d1e5ad7..84dcbf2 100644
4206--- a/arch/arm/mm/ioremap.c
4207+++ b/arch/arm/mm/ioremap.c
4208@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4209 unsigned int mtype;
4210
4211 if (cached)
4212- mtype = MT_MEMORY_RWX;
4213+ mtype = MT_MEMORY_RX;
4214 else
4215- mtype = MT_MEMORY_RWX_NONCACHED;
4216+ mtype = MT_MEMORY_RX_NONCACHED;
4217
4218 return __arm_ioremap_caller(phys_addr, size, mtype,
4219 __builtin_return_address(0));
4220diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4221index 5e85ed3..b10a7ed 100644
4222--- a/arch/arm/mm/mmap.c
4223+++ b/arch/arm/mm/mmap.c
4224@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4225 struct vm_area_struct *vma;
4226 int do_align = 0;
4227 int aliasing = cache_is_vipt_aliasing();
4228+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4229 struct vm_unmapped_area_info info;
4230
4231 /*
4232@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4233 if (len > TASK_SIZE)
4234 return -ENOMEM;
4235
4236+#ifdef CONFIG_PAX_RANDMMAP
4237+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4238+#endif
4239+
4240 if (addr) {
4241 if (do_align)
4242 addr = COLOUR_ALIGN(addr, pgoff);
4243@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4244 addr = PAGE_ALIGN(addr);
4245
4246 vma = find_vma(mm, addr);
4247- if (TASK_SIZE - len >= addr &&
4248- (!vma || addr + len <= vma->vm_start))
4249+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4250 return addr;
4251 }
4252
4253@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4254 info.high_limit = TASK_SIZE;
4255 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4256 info.align_offset = pgoff << PAGE_SHIFT;
4257+ info.threadstack_offset = offset;
4258 return vm_unmapped_area(&info);
4259 }
4260
4261@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4262 unsigned long addr = addr0;
4263 int do_align = 0;
4264 int aliasing = cache_is_vipt_aliasing();
4265+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4266 struct vm_unmapped_area_info info;
4267
4268 /*
4269@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4270 return addr;
4271 }
4272
4273+#ifdef CONFIG_PAX_RANDMMAP
4274+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4275+#endif
4276+
4277 /* requesting a specific address */
4278 if (addr) {
4279 if (do_align)
4280@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4281 else
4282 addr = PAGE_ALIGN(addr);
4283 vma = find_vma(mm, addr);
4284- if (TASK_SIZE - len >= addr &&
4285- (!vma || addr + len <= vma->vm_start))
4286+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4287 return addr;
4288 }
4289
4290@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4291 info.high_limit = mm->mmap_base;
4292 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4293 info.align_offset = pgoff << PAGE_SHIFT;
4294+ info.threadstack_offset = offset;
4295 addr = vm_unmapped_area(&info);
4296
4297 /*
4298@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4299 {
4300 unsigned long random_factor = 0UL;
4301
4302+#ifdef CONFIG_PAX_RANDMMAP
4303+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4304+#endif
4305+
4306 /* 8 bits of randomness in 20 address space bits */
4307 if ((current->flags & PF_RANDOMIZE) &&
4308 !(current->personality & ADDR_NO_RANDOMIZE))
4309@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4310
4311 if (mmap_is_legacy()) {
4312 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4313+
4314+#ifdef CONFIG_PAX_RANDMMAP
4315+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4316+ mm->mmap_base += mm->delta_mmap;
4317+#endif
4318+
4319 mm->get_unmapped_area = arch_get_unmapped_area;
4320 } else {
4321 mm->mmap_base = mmap_base(random_factor);
4322+
4323+#ifdef CONFIG_PAX_RANDMMAP
4324+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4325+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4326+#endif
4327+
4328 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4329 }
4330 }
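
Two other things happen in this mmap.c hunk: with MF_PAX_RANDMMAP set, the #ifdef'd guards make the allocator skip caller-supplied address hints entirely, and mmap_base is shifted by the per-exec delta_mmap/delta_stack randomisation. The fit test itself is funnelled through check_heap_stack_gap(), which also receives a randomised offset from gr_rand_threadstack_offset(). The real helper lives in the common-code part of the patch; a hedged sketch of the kind of test it performs, assuming a sysctl-tunable guard gap below stack VMAs:

static int check_gap_sketch(const struct vm_area_struct *vma,
			    unsigned long addr, unsigned long len,
			    unsigned long offset)
{
	unsigned long limit;

	if (!vma)
		return 1;		/* no mapping above the candidate */
	limit = vma->vm_start;
	if (vma->vm_flags & VM_GROWSDOWN)
		limit -= sysctl_heap_stack_gap + offset; /* stay clear of stacks */
	return addr + len <= limit;
}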
4331diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4332index 4e6ef89..21c27f2 100644
4333--- a/arch/arm/mm/mmu.c
4334+++ b/arch/arm/mm/mmu.c
4335@@ -41,6 +41,22 @@
4336 #include "mm.h"
4337 #include "tcm.h"
4338
4339+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4340+void modify_domain(unsigned int dom, unsigned int type)
4341+{
4342+ struct thread_info *thread = current_thread_info();
4343+ unsigned int domain = thread->cpu_domain;
4344+ /*
4345+ * DOMAIN_MANAGER might be defined to some other value,
4346+ * use the arch-defined constant
4347+ */
4348+ domain &= ~domain_val(dom, 3);
4349+ thread->cpu_domain = domain | domain_val(dom, type);
4350+ set_domain(thread->cpu_domain);
4351+}
4352+EXPORT_SYMBOL(modify_domain);
4353+#endif
4354+
4355 /*
4356 * empty_zero_page is a special page that is used for
4357 * zero-initialized data and COW.
4358@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4359 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4360 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4361
4362-static struct mem_type mem_types[] = {
4363+#ifdef CONFIG_PAX_KERNEXEC
4364+#define L_PTE_KERNEXEC L_PTE_RDONLY
4365+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4366+#else
4367+#define L_PTE_KERNEXEC L_PTE_DIRTY
4368+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4369+#endif
4370+
4371+static struct mem_type mem_types[] __read_only = {
4372 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4373 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4374 L_PTE_SHARED,
4375@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4376 .prot_sect = PROT_SECT_DEVICE,
4377 .domain = DOMAIN_IO,
4378 },
4379- [MT_UNCACHED] = {
4380+ [MT_UNCACHED_RW] = {
4381 .prot_pte = PROT_PTE_DEVICE,
4382 .prot_l1 = PMD_TYPE_TABLE,
4383 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4384 .domain = DOMAIN_IO,
4385 },
4386- [MT_CACHECLEAN] = {
4387- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4388+ [MT_CACHECLEAN_RO] = {
4389+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4390 .domain = DOMAIN_KERNEL,
4391 },
4392 #ifndef CONFIG_ARM_LPAE
4393- [MT_MINICLEAN] = {
4394- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4395+ [MT_MINICLEAN_RO] = {
4396+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4397 .domain = DOMAIN_KERNEL,
4398 },
4399 #endif
4400@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4401 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4402 L_PTE_RDONLY,
4403 .prot_l1 = PMD_TYPE_TABLE,
4404- .domain = DOMAIN_USER,
4405+ .domain = DOMAIN_VECTORS,
4406 },
4407 [MT_HIGH_VECTORS] = {
4408 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4409 L_PTE_USER | L_PTE_RDONLY,
4410 .prot_l1 = PMD_TYPE_TABLE,
4411- .domain = DOMAIN_USER,
4412+ .domain = DOMAIN_VECTORS,
4413 },
4414- [MT_MEMORY_RWX] = {
4415+ [__MT_MEMORY_RWX] = {
4416 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4417 .prot_l1 = PMD_TYPE_TABLE,
4418 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4419@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4420 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4421 .domain = DOMAIN_KERNEL,
4422 },
4423- [MT_ROM] = {
4424- .prot_sect = PMD_TYPE_SECT,
4425+ [MT_MEMORY_RX] = {
4426+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4427+ .prot_l1 = PMD_TYPE_TABLE,
4428+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4429+ .domain = DOMAIN_KERNEL,
4430+ },
4431+ [MT_ROM_RX] = {
4432+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4433 .domain = DOMAIN_KERNEL,
4434 },
4435- [MT_MEMORY_RWX_NONCACHED] = {
4436+ [MT_MEMORY_RW_NONCACHED] = {
4437 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4438 L_PTE_MT_BUFFERABLE,
4439 .prot_l1 = PMD_TYPE_TABLE,
4440 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4441 .domain = DOMAIN_KERNEL,
4442 },
4443+ [MT_MEMORY_RX_NONCACHED] = {
4444+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4445+ L_PTE_MT_BUFFERABLE,
4446+ .prot_l1 = PMD_TYPE_TABLE,
4447+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4448+ .domain = DOMAIN_KERNEL,
4449+ },
4450 [MT_MEMORY_RW_DTCM] = {
4451 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4452 L_PTE_XN,
4453@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4454 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4455 .domain = DOMAIN_KERNEL,
4456 },
4457- [MT_MEMORY_RWX_ITCM] = {
4458- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4459+ [MT_MEMORY_RX_ITCM] = {
4460+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4461 .prot_l1 = PMD_TYPE_TABLE,
4462+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4463 .domain = DOMAIN_KERNEL,
4464 },
4465 [MT_MEMORY_RW_SO] = {
4466@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4467 * Mark cache clean areas and XIP ROM read only
4468 * from SVC mode and no access from userspace.
4469 */
4470- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4471- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4472- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4473+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4474+#ifdef CONFIG_PAX_KERNEXEC
4475+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4476+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4477+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4478+#endif
4479+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4480+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4481 #endif
4482
4483 /*
4484@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4485 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4486 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4487 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4488- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4489- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4490+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4491+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4492 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4493 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4494+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4495+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4496 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4497- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4498- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4499+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4500+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4501+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4502+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4503 }
4504 }
4505
4506@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4507 if (cpu_arch >= CPU_ARCH_ARMv6) {
4508 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4509 /* Non-cacheable Normal is XCB = 001 */
4510- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4511+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4512+ PMD_SECT_BUFFERED;
4513+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4514 PMD_SECT_BUFFERED;
4515 } else {
4516 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4517- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4518+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4519+ PMD_SECT_TEX(1);
4520+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4521 PMD_SECT_TEX(1);
4522 }
4523 } else {
4524- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4525+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4526+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4527 }
4528
4529 #ifdef CONFIG_ARM_LPAE
4530@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4531 user_pgprot |= PTE_EXT_PXN;
4532 #endif
4533
4534+ user_pgprot |= __supported_pte_mask;
4535+
4536 for (i = 0; i < 16; i++) {
4537 pteval_t v = pgprot_val(protection_map[i]);
4538 protection_map[i] = __pgprot(v | user_pgprot);
4539@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4540
4541 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4542 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4543- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4544- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4545+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4546+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4547 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4548 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4549+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4550+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4551 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4552- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4553- mem_types[MT_ROM].prot_sect |= cp->pmd;
4554+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4555+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4556+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4557
4558 switch (cp->pmd) {
4559 case PMD_SECT_WT:
4560- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4561+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4562 break;
4563 case PMD_SECT_WB:
4564 case PMD_SECT_WBWA:
4565- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4566+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4567 break;
4568 }
4569 pr_info("Memory policy: %sData cache %s\n",
4570@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4571 return;
4572 }
4573
4574- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4575+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4576 md->virtual >= PAGE_OFFSET &&
4577 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4578 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4579@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4580 * called function. This means you can't use any function or debugging
4581 * method which may touch any device, otherwise the kernel _will_ crash.
4582 */
4583+
4584+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4585+
4586 static void __init devicemaps_init(const struct machine_desc *mdesc)
4587 {
4588 struct map_desc map;
4589 unsigned long addr;
4590- void *vectors;
4591
4592- /*
4593- * Allocate the vector page early.
4594- */
4595- vectors = early_alloc(PAGE_SIZE * 2);
4596-
4597- early_trap_init(vectors);
4598+ early_trap_init(&vectors);
4599
4600 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4601 pmd_clear(pmd_off_k(addr));
4602@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4603 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4604 map.virtual = MODULES_VADDR;
4605 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4606- map.type = MT_ROM;
4607+ map.type = MT_ROM_RX;
4608 create_mapping(&map);
4609 #endif
4610
4611@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4612 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4613 map.virtual = FLUSH_BASE;
4614 map.length = SZ_1M;
4615- map.type = MT_CACHECLEAN;
4616+ map.type = MT_CACHECLEAN_RO;
4617 create_mapping(&map);
4618 #endif
4619 #ifdef FLUSH_BASE_MINICACHE
4620 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4621 map.virtual = FLUSH_BASE_MINICACHE;
4622 map.length = SZ_1M;
4623- map.type = MT_MINICLEAN;
4624+ map.type = MT_MINICLEAN_RO;
4625 create_mapping(&map);
4626 #endif
4627
4628@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4629 * location (0xffff0000). If we aren't using high-vectors, also
4630 * create a mapping at the low-vectors virtual address.
4631 */
4632- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4633+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4634 map.virtual = 0xffff0000;
4635 map.length = PAGE_SIZE;
4636 #ifdef CONFIG_KUSER_HELPERS
4637@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4638 static void __init map_lowmem(void)
4639 {
4640 struct memblock_region *reg;
4641+#ifndef CONFIG_PAX_KERNEXEC
4642 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4643 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4644+#endif
4645
4646 /* Map all the lowmem memory banks. */
4647 for_each_memblock(memory, reg) {
4648@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4649 if (start >= end)
4650 break;
4651
4652+#ifdef CONFIG_PAX_KERNEXEC
4653+ map.pfn = __phys_to_pfn(start);
4654+ map.virtual = __phys_to_virt(start);
4655+ map.length = end - start;
4656+
4657+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4658+ struct map_desc kernel;
4659+ struct map_desc initmap;
4660+
4661+ /* when freeing initmem we will make this RW */
4662+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4663+ initmap.virtual = (unsigned long)__init_begin;
4664+ initmap.length = _sdata - __init_begin;
4665+ initmap.type = __MT_MEMORY_RWX;
4666+ create_mapping(&initmap);
4667+
4668+ /* when freeing initmem we will make this RX */
4669+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4670+ kernel.virtual = (unsigned long)_stext;
4671+ kernel.length = __init_begin - _stext;
4672+ kernel.type = __MT_MEMORY_RWX;
4673+ create_mapping(&kernel);
4674+
4675+ if (map.virtual < (unsigned long)_stext) {
4676+ map.length = (unsigned long)_stext - map.virtual;
4677+ map.type = __MT_MEMORY_RWX;
4678+ create_mapping(&map);
4679+ }
4680+
4681+ map.pfn = __phys_to_pfn(__pa(_sdata));
4682+ map.virtual = (unsigned long)_sdata;
4683+ map.length = end - __pa(_sdata);
4684+ }
4685+
4686+ map.type = MT_MEMORY_RW;
4687+ create_mapping(&map);
4688+#else
4689 if (end < kernel_x_start) {
4690 map.pfn = __phys_to_pfn(start);
4691 map.virtual = __phys_to_virt(start);
4692 map.length = end - start;
4693- map.type = MT_MEMORY_RWX;
4694+ map.type = __MT_MEMORY_RWX;
4695
4696 create_mapping(&map);
4697 } else if (start >= kernel_x_end) {
4698@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4699 map.pfn = __phys_to_pfn(kernel_x_start);
4700 map.virtual = __phys_to_virt(kernel_x_start);
4701 map.length = kernel_x_end - kernel_x_start;
4702- map.type = MT_MEMORY_RWX;
4703+ map.type = __MT_MEMORY_RWX;
4704
4705 create_mapping(&map);
4706
4707@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4708 create_mapping(&map);
4709 }
4710 }
4711+#endif
4712 }
4713 }
4714
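
Taken together, the mmu.c hunks split every writable-and-executable memory type so that no mapping stays W|X:

  MT_UNCACHED              -> MT_UNCACHED_RW
  MT_CACHECLEAN            -> MT_CACHECLEAN_RO   (gains PMD_SECT_RDONLY)
  MT_MINICLEAN             -> MT_MINICLEAN_RO    (gains PMD_SECT_RDONLY)
  MT_ROM                   -> MT_ROM_RX          (gains PMD_SECT_RDONLY)
  MT_MEMORY_RWX            -> __MT_MEMORY_RWX    (transitional: covers text/init until the init.c KERNEXEC pass tightens it)
  MT_MEMORY_RWX_NONCACHED  -> MT_MEMORY_RW_NONCACHED, plus a new MT_MEMORY_RX_NONCACHED for executable users
  MT_MEMORY_RWX_ITCM       -> MT_MEMORY_RX_ITCM

A new MT_MEMORY_RX backs __arm_ioremap_exec(), the vector pages move into their own DOMAIN_VECTORS domain, and the vector page itself becomes a statically allocated __read_only array instead of an early_alloc() page, so it is covered by the same read-only protection as the rest of the kernel image.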
4715diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4716index e1268f9..a9755a7 100644
4717--- a/arch/arm/net/bpf_jit_32.c
4718+++ b/arch/arm/net/bpf_jit_32.c
4719@@ -20,6 +20,7 @@
4720 #include <asm/cacheflush.h>
4721 #include <asm/hwcap.h>
4722 #include <asm/opcodes.h>
4723+#include <asm/pgtable.h>
4724
4725 #include "bpf_jit_32.h"
4726
4727@@ -71,7 +72,11 @@ struct jit_ctx {
4728 #endif
4729 };
4730
4731+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4732+int bpf_jit_enable __read_only;
4733+#else
4734 int bpf_jit_enable __read_mostly;
4735+#endif
4736
4737 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4738 {
4739@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4740 {
4741 u32 *ptr;
4742 /* We are guaranteed to have aligned memory. */
4743+ pax_open_kernel();
4744 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4745 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4746+ pax_close_kernel();
4747 }
4748
4749 static void build_prologue(struct jit_ctx *ctx)
4750diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4751index 5b217f4..c23f40e 100644
4752--- a/arch/arm/plat-iop/setup.c
4753+++ b/arch/arm/plat-iop/setup.c
4754@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4755 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4756 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4757 .length = IOP3XX_PERIPHERAL_SIZE,
4758- .type = MT_UNCACHED,
4759+ .type = MT_UNCACHED_RW,
4760 },
4761 };
4762
4763diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4764index a5bc92d..0bb4730 100644
4765--- a/arch/arm/plat-omap/sram.c
4766+++ b/arch/arm/plat-omap/sram.c
4767@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4768 * Looks like we need to preserve some bootloader code at the
4769 * beginning of SRAM for jumping to flash for reboot to work...
4770 */
4771+ pax_open_kernel();
4772 memset_io(omap_sram_base + omap_sram_skip, 0,
4773 omap_sram_size - omap_sram_skip);
4774+ pax_close_kernel();
4775 }
4776diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4777index ce6d763..cfea917 100644
4778--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4779+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4780@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4781 int (*started)(unsigned ch);
4782 int (*flush)(unsigned ch);
4783 int (*stop)(unsigned ch);
4784-};
4785+} __no_const;
4786
4787 extern void *samsung_dmadev_get_ops(void);
4788 extern void *s3c_dma_get_ops(void);
4789diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4790index a5abb00..9cbca9a 100644
4791--- a/arch/arm64/include/asm/barrier.h
4792+++ b/arch/arm64/include/asm/barrier.h
4793@@ -44,7 +44,7 @@
4794 do { \
4795 compiletime_assert_atomic_type(*p); \
4796 barrier(); \
4797- ACCESS_ONCE(*p) = (v); \
4798+ ACCESS_ONCE_RW(*p) = (v); \
4799 } while (0)
4800
4801 #define smp_load_acquire(p) \
4802diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4803index 09da25b..3ea0d64 100644
4804--- a/arch/arm64/include/asm/percpu.h
4805+++ b/arch/arm64/include/asm/percpu.h
4806@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4807 {
4808 switch (size) {
4809 case 1:
4810- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4811+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4812 break;
4813 case 2:
4814- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4815+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4816 break;
4817 case 4:
4818- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4819+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4820 break;
4821 case 8:
4822- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4823+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4824 break;
4825 default:
4826 BUILD_BUG();
4827diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4828index 3bf8f4e..5dd5491 100644
4829--- a/arch/arm64/include/asm/uaccess.h
4830+++ b/arch/arm64/include/asm/uaccess.h
4831@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4832 flag; \
4833 })
4834
4835+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4836 #define access_ok(type, addr, size) __range_ok(addr, size)
4837 #define user_addr_max get_fs
4838
4839diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4840index c3a58a1..78fbf54 100644
4841--- a/arch/avr32/include/asm/cache.h
4842+++ b/arch/avr32/include/asm/cache.h
4843@@ -1,8 +1,10 @@
4844 #ifndef __ASM_AVR32_CACHE_H
4845 #define __ASM_AVR32_CACHE_H
4846
4847+#include <linux/const.h>
4848+
4849 #define L1_CACHE_SHIFT 5
4850-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4851+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4852
4853 /*
4854 * Memory returned by kmalloc() may be used for DMA, so we must make
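
The repeated cache.h change across architectures looks cosmetic but is not: _AC(1,UL) makes L1_CACHE_BYTES an unsigned long in C while still expanding to a bare 1 in assembly. linux/const.h defines it essentially as:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* no type suffixes in asm */
#else
#define __AC(X, Y)	(X##Y)		/* paste the suffix on in C */
#define _AC(X, Y)	__AC(X, Y)
#endif

An unsigned constant matters to grsecurity's size_overflow plugin, which instruments signed arithmetic for overflow; presumably that is why every architecture's L1_CACHE_BYTES gets the same treatment in this patch.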
4855diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4856index d232888..87c8df1 100644
4857--- a/arch/avr32/include/asm/elf.h
4858+++ b/arch/avr32/include/asm/elf.h
4859@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4860 the loader. We need to make sure that it is out of the way of the program
4861 that it will "exec", and that there is sufficient room for the brk. */
4862
4863-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4864+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4865
4866+#ifdef CONFIG_PAX_ASLR
4867+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4868+
4869+#define PAX_DELTA_MMAP_LEN 15
4870+#define PAX_DELTA_STACK_LEN 15
4871+#endif
4872
4873 /* This yields a mask that user programs can use to figure out what
4874 instruction set this CPU supports. This could be done in user space,
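
Worked numbers for the avr32 values above: PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN of 15 feed 15 random bits into mm->delta_mmap and mm->delta_stack, the fields consumed by arch_pick_mmap_layout() in the earlier ARM mmap.c hunk. Assuming the usual PaX construction of (random & ((1UL << LEN) - 1)) << PAGE_SHIFT, each base can shift by up to (2^15 - 1) * 4 KiB, just under 128 MiB with 4 KiB pages, while the 0x00001000 PAX_ELF_ET_DYN_BASE keeps PIE binaries clear of the NULL page.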
4875diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4876index 479330b..53717a8 100644
4877--- a/arch/avr32/include/asm/kmap_types.h
4878+++ b/arch/avr32/include/asm/kmap_types.h
4879@@ -2,9 +2,9 @@
4880 #define __ASM_AVR32_KMAP_TYPES_H
4881
4882 #ifdef CONFIG_DEBUG_HIGHMEM
4883-# define KM_TYPE_NR 29
4884+# define KM_TYPE_NR 30
4885 #else
4886-# define KM_TYPE_NR 14
4887+# define KM_TYPE_NR 15
4888 #endif
4889
4890 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4891diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4892index d223a8b..69c5210 100644
4893--- a/arch/avr32/mm/fault.c
4894+++ b/arch/avr32/mm/fault.c
4895@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4896
4897 int exception_trace = 1;
4898
4899+#ifdef CONFIG_PAX_PAGEEXEC
4900+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4901+{
4902+ unsigned long i;
4903+
4904+ printk(KERN_ERR "PAX: bytes at PC: ");
4905+ for (i = 0; i < 20; i++) {
4906+ unsigned char c;
4907+ if (get_user(c, (unsigned char *)pc+i))
4908+ printk(KERN_CONT "???????? ");
4909+ else
4910+ printk(KERN_CONT "%02x ", c);
4911+ }
4912+ printk("\n");
4913+}
4914+#endif
4915+
4916 /*
4917 * This routine handles page faults. It determines the address and the
4918 * problem, and then passes it off to one of the appropriate routines.
4919@@ -178,6 +195,16 @@ bad_area:
4920 up_read(&mm->mmap_sem);
4921
4922 if (user_mode(regs)) {
4923+
4924+#ifdef CONFIG_PAX_PAGEEXEC
4925+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4926+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4927+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4928+ do_group_exit(SIGKILL);
4929+ }
4930+ }
4931+#endif
4932+
4933 if (exception_trace && printk_ratelimit())
4934 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4935 "sp %08lx ecr %lu\n",
4936diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4937index 568885a..f8008df 100644
4938--- a/arch/blackfin/include/asm/cache.h
4939+++ b/arch/blackfin/include/asm/cache.h
4940@@ -7,6 +7,7 @@
4941 #ifndef __ARCH_BLACKFIN_CACHE_H
4942 #define __ARCH_BLACKFIN_CACHE_H
4943
4944+#include <linux/const.h>
4945 #include <linux/linkage.h> /* for asmlinkage */
4946
4947 /*
4948@@ -14,7 +15,7 @@
4949 * Blackfin loads 32 bytes for cache
4950 */
4951 #define L1_CACHE_SHIFT 5
4952-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4953+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4954 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4955
4956 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4957diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4958index aea2718..3639a60 100644
4959--- a/arch/cris/include/arch-v10/arch/cache.h
4960+++ b/arch/cris/include/arch-v10/arch/cache.h
4961@@ -1,8 +1,9 @@
4962 #ifndef _ASM_ARCH_CACHE_H
4963 #define _ASM_ARCH_CACHE_H
4964
4965+#include <linux/const.h>
4966 /* Etrax 100LX have 32-byte cache-lines. */
4967-#define L1_CACHE_BYTES 32
4968 #define L1_CACHE_SHIFT 5
4969+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4970
4971 #endif /* _ASM_ARCH_CACHE_H */
4972diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4973index 7caf25d..ee65ac5 100644
4974--- a/arch/cris/include/arch-v32/arch/cache.h
4975+++ b/arch/cris/include/arch-v32/arch/cache.h
4976@@ -1,11 +1,12 @@
4977 #ifndef _ASM_CRIS_ARCH_CACHE_H
4978 #define _ASM_CRIS_ARCH_CACHE_H
4979
4980+#include <linux/const.h>
4981 #include <arch/hwregs/dma.h>
4982
4983 /* A cache-line is 32 bytes. */
4984-#define L1_CACHE_BYTES 32
4985 #define L1_CACHE_SHIFT 5
4986+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4987
4988 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4989
4990diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4991index 102190a..5334cea 100644
4992--- a/arch/frv/include/asm/atomic.h
4993+++ b/arch/frv/include/asm/atomic.h
4994@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
4995 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4996 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4997
4998+#define atomic64_read_unchecked(v) atomic64_read(v)
4999+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5000+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5001+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5002+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5003+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5004+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5005+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5006+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5007+
5008 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5009 {
5010 int c, old;
5011diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5012index 2797163..c2a401df9 100644
5013--- a/arch/frv/include/asm/cache.h
5014+++ b/arch/frv/include/asm/cache.h
5015@@ -12,10 +12,11 @@
5016 #ifndef __ASM_CACHE_H
5017 #define __ASM_CACHE_H
5018
5019+#include <linux/const.h>
5020
5021 /* bytes per L1 cache line */
5022 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5023-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5024+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5025
5026 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5027 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5028diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5029index 43901f2..0d8b865 100644
5030--- a/arch/frv/include/asm/kmap_types.h
5031+++ b/arch/frv/include/asm/kmap_types.h
5032@@ -2,6 +2,6 @@
5033 #ifndef _ASM_KMAP_TYPES_H
5034 #define _ASM_KMAP_TYPES_H
5035
5036-#define KM_TYPE_NR 17
5037+#define KM_TYPE_NR 18
5038
5039 #endif
5040diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5041index 836f147..4cf23f5 100644
5042--- a/arch/frv/mm/elf-fdpic.c
5043+++ b/arch/frv/mm/elf-fdpic.c
5044@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5045 {
5046 struct vm_area_struct *vma;
5047 struct vm_unmapped_area_info info;
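+ /* grsecurity: random extra gap enforced between thread stacks and other
+ mappings, so their relative placement is not guessable. */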
5048+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5049
5050 if (len > TASK_SIZE)
5051 return -ENOMEM;
5052@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5053 if (addr) {
5054 addr = PAGE_ALIGN(addr);
5055 vma = find_vma(current->mm, addr);
5056- if (TASK_SIZE - len >= addr &&
5057- (!vma || addr + len <= vma->vm_start))
5058+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5059 goto success;
5060 }
5061
5062@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5063 info.high_limit = (current->mm->start_stack - 0x00200000);
5064 info.align_mask = 0;
5065 info.align_offset = 0;
5066+ info.threadstack_offset = offset;
5067 addr = vm_unmapped_area(&info);
5068 if (!(addr & ~PAGE_MASK))
5069 goto success;
5070diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5071index 69952c1..4fa2908 100644
5072--- a/arch/hexagon/include/asm/cache.h
5073+++ b/arch/hexagon/include/asm/cache.h
5074@@ -21,9 +21,11 @@
5075 #ifndef __ASM_CACHE_H
5076 #define __ASM_CACHE_H
5077
5078+#include <linux/const.h>
5079+
5080 /* Bytes per L1 cache line */
5081-#define L1_CACHE_SHIFT (5)
5082-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5083+#define L1_CACHE_SHIFT 5
5084+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5085
5086 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5087
5088diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5089index 074e52b..76afdac 100644
5090--- a/arch/ia64/Kconfig
5091+++ b/arch/ia64/Kconfig
5092@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5093 config KEXEC
5094 bool "kexec system call"
5095 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5096+ depends on !GRKERNSEC_KMEM
5097 help
5098 kexec is a system call that implements the ability to shutdown your
5099 current kernel, and to start another kernel. It is like a reboot
5100diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5101index 970d0bd..e750b9b 100644
5102--- a/arch/ia64/Makefile
5103+++ b/arch/ia64/Makefile
5104@@ -98,5 +98,6 @@ endef
5105 archprepare: make_nr_irqs_h FORCE
5106 PHONY += make_nr_irqs_h FORCE
5107
5108+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5109 make_nr_irqs_h: FORCE
5110 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5111diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5112index 0bf0350..2ad1957 100644
5113--- a/arch/ia64/include/asm/atomic.h
5114+++ b/arch/ia64/include/asm/atomic.h
5115@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5116 #define atomic64_inc(v) atomic64_add(1, (v))
5117 #define atomic64_dec(v) atomic64_sub(1, (v))
5118
5119+#define atomic64_read_unchecked(v) atomic64_read(v)
5120+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5121+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5122+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5123+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5124+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5125+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5126+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5127+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5128+
5129 #endif /* _ASM_IA64_ATOMIC_H */
5130diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5131index f6769eb..1cdb590 100644
5132--- a/arch/ia64/include/asm/barrier.h
5133+++ b/arch/ia64/include/asm/barrier.h
5134@@ -66,7 +66,7 @@
5135 do { \
5136 compiletime_assert_atomic_type(*p); \
5137 barrier(); \
5138- ACCESS_ONCE(*p) = (v); \
5139+ ACCESS_ONCE_RW(*p) = (v); \
5140 } while (0)
5141
5142 #define smp_load_acquire(p) \
5143diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5144index 988254a..e1ee885 100644
5145--- a/arch/ia64/include/asm/cache.h
5146+++ b/arch/ia64/include/asm/cache.h
5147@@ -1,6 +1,7 @@
5148 #ifndef _ASM_IA64_CACHE_H
5149 #define _ASM_IA64_CACHE_H
5150
5151+#include <linux/const.h>
5152
5153 /*
5154 * Copyright (C) 1998-2000 Hewlett-Packard Co
5155@@ -9,7 +10,7 @@
5156
5157 /* Bytes per L1 (data) cache line. */
5158 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5159-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5160+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5161
5162 #ifdef CONFIG_SMP
5163 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5164diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5165index 5a83c5c..4d7f553 100644
5166--- a/arch/ia64/include/asm/elf.h
5167+++ b/arch/ia64/include/asm/elf.h
5168@@ -42,6 +42,13 @@
5169 */
5170 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5171
5172+#ifdef CONFIG_PAX_ASLR
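+/* PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN are the number of random bits PaX
+   applies to the mmap and stack bases under ASLR. */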
5173+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5174+
5175+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5176+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5177+#endif
5178+
5179 #define PT_IA_64_UNWIND 0x70000001
5180
5181 /* IA-64 relocations: */
5182diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5183index 5767cdf..7462574 100644
5184--- a/arch/ia64/include/asm/pgalloc.h
5185+++ b/arch/ia64/include/asm/pgalloc.h
5186@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5187 pgd_val(*pgd_entry) = __pa(pud);
5188 }
5189
5190+static inline void
5191+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5192+{
5193+ pgd_populate(mm, pgd_entry, pud);
5194+}
5195+
5196 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5197 {
5198 return quicklist_alloc(0, GFP_KERNEL, NULL);
5199@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5200 pud_val(*pud_entry) = __pa(pmd);
5201 }
5202
5203+static inline void
5204+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5205+{
5206+ pud_populate(mm, pud_entry, pmd);
5207+}
5208+
5209 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5210 {
5211 return quicklist_alloc(0, GFP_KERNEL, NULL);
5212diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5213index 7935115..c0eca6a 100644
5214--- a/arch/ia64/include/asm/pgtable.h
5215+++ b/arch/ia64/include/asm/pgtable.h
5216@@ -12,7 +12,7 @@
5217 * David Mosberger-Tang <davidm@hpl.hp.com>
5218 */
5219
5220-
5221+#include <linux/const.h>
5222 #include <asm/mman.h>
5223 #include <asm/page.h>
5224 #include <asm/processor.h>
5225@@ -142,6 +142,17 @@
5226 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5227 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5228 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5229+
5230+#ifdef CONFIG_PAX_PAGEEXEC
5231+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5232+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5233+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5234+#else
5235+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5236+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5237+# define PAGE_COPY_NOEXEC PAGE_COPY
5238+#endif
5239+
5240 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5241 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5242 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5243diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5244index 45698cd..e8e2dbc 100644
5245--- a/arch/ia64/include/asm/spinlock.h
5246+++ b/arch/ia64/include/asm/spinlock.h
5247@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5248 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5249
5250 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5251- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5252+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5253 }
5254
5255 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5256diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5257index 103bedc..0210597 100644
5258--- a/arch/ia64/include/asm/uaccess.h
5259+++ b/arch/ia64/include/asm/uaccess.h
5260@@ -70,6 +70,7 @@
5261 && ((segment).seg == KERNEL_DS.seg \
5262 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5263 })
5264+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5265 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5266
5267 /*
5268@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5269 static inline unsigned long
5270 __copy_to_user (void __user *to, const void *from, unsigned long count)
5271 {
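+	/* PaX USERCOPY: refuse absurd lengths outright and, for sizes not known
+	   at compile time, let check_object_size() validate the kernel object's bounds. */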
5272+ if (count > INT_MAX)
5273+ return count;
5274+
5275+ if (!__builtin_constant_p(count))
5276+ check_object_size(from, count, true);
5277+
5278 return __copy_user(to, (__force void __user *) from, count);
5279 }
5280
5281 static inline unsigned long
5282 __copy_from_user (void *to, const void __user *from, unsigned long count)
5283 {
5284+ if (count > INT_MAX)
5285+ return count;
5286+
5287+ if (!__builtin_constant_p(count))
5288+ check_object_size(to, count, false);
5289+
5290 return __copy_user((__force void __user *) to, from, count);
5291 }
5292
5293@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5294 ({ \
5295 void __user *__cu_to = (to); \
5296 const void *__cu_from = (from); \
5297- long __cu_len = (n); \
5298+ unsigned long __cu_len = (n); \
5299 \
5300- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5301+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5302+ if (!__builtin_constant_p(n)) \
5303+ check_object_size(__cu_from, __cu_len, true); \
5304 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5305+ } \
5306 __cu_len; \
5307 })
5308
5309@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5310 ({ \
5311 void *__cu_to = (to); \
5312 const void __user *__cu_from = (from); \
5313- long __cu_len = (n); \
5314+ unsigned long __cu_len = (n); \
5315 \
5316 __chk_user_ptr(__cu_from); \
5317- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5318+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5319+ if (!__builtin_constant_p(n)) \
5320+ check_object_size(__cu_to, __cu_len, false); \
5321 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5322+ } \
5323 __cu_len; \
5324 })
5325
5326diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5327index 29754aa..06d2838 100644
5328--- a/arch/ia64/kernel/module.c
5329+++ b/arch/ia64/kernel/module.c
5330@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5331 }
5332
5333 static inline int
5334+in_init_rx (const struct module *mod, uint64_t addr)
5335+{
5336+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5337+}
5338+
5339+static inline int
5340+in_init_rw (const struct module *mod, uint64_t addr)
5341+{
5342+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5343+}
5344+
5345+static inline int
5346 in_init (const struct module *mod, uint64_t addr)
5347 {
5348- return addr - (uint64_t) mod->module_init < mod->init_size;
5349+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5350+}
5351+
5352+static inline int
5353+in_core_rx (const struct module *mod, uint64_t addr)
5354+{
5355+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5356+}
5357+
5358+static inline int
5359+in_core_rw (const struct module *mod, uint64_t addr)
5360+{
5361+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5362 }
5363
5364 static inline int
5365 in_core (const struct module *mod, uint64_t addr)
5366 {
5367- return addr - (uint64_t) mod->module_core < mod->core_size;
5368+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5369 }
5370
5371 static inline int
5372@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5373 break;
5374
5375 case RV_BDREL:
5376- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5377+ if (in_init_rx(mod, val))
5378+ val -= (uint64_t) mod->module_init_rx;
5379+ else if (in_init_rw(mod, val))
5380+ val -= (uint64_t) mod->module_init_rw;
5381+ else if (in_core_rx(mod, val))
5382+ val -= (uint64_t) mod->module_core_rx;
5383+ else if (in_core_rw(mod, val))
5384+ val -= (uint64_t) mod->module_core_rw;
5385 break;
5386
5387 case RV_LTV:
5388@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5389 * addresses have been selected...
5390 */
5391 uint64_t gp;
5392- if (mod->core_size > MAX_LTOFF)
5393+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5394 /*
5395 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5396 * at the end of the module.
5397 */
5398- gp = mod->core_size - MAX_LTOFF / 2;
5399+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5400 else
5401- gp = mod->core_size / 2;
5402- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5403+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5404+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5405 mod->arch.gp = gp;
5406 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5407 }
5408diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5409index c39c3cd..3c77738 100644
5410--- a/arch/ia64/kernel/palinfo.c
5411+++ b/arch/ia64/kernel/palinfo.c
5412@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5413 return NOTIFY_OK;
5414 }
5415
5416-static struct notifier_block __refdata palinfo_cpu_notifier =
5417+static struct notifier_block palinfo_cpu_notifier =
5418 {
5419 .notifier_call = palinfo_cpu_callback,
5420 .priority = 0,
5421diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5422index 41e33f8..65180b2a 100644
5423--- a/arch/ia64/kernel/sys_ia64.c
5424+++ b/arch/ia64/kernel/sys_ia64.c
5425@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5426 unsigned long align_mask = 0;
5427 struct mm_struct *mm = current->mm;
5428 struct vm_unmapped_area_info info;
5429+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5430
5431 if (len > RGN_MAP_LIMIT)
5432 return -ENOMEM;
5433@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5434 if (REGION_NUMBER(addr) == RGN_HPAGE)
5435 addr = 0;
5436 #endif
5437+
5438+#ifdef CONFIG_PAX_RANDMMAP
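+ /* Under RANDMMAP start the unmapped-area search at the randomized
+ free_area_cache instead of the fixed TASK_UNMAPPED_BASE. */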
5439+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5440+ addr = mm->free_area_cache;
5441+ else
5442+#endif
5443+
5444 if (!addr)
5445 addr = TASK_UNMAPPED_BASE;
5446
5447@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5448 info.high_limit = TASK_SIZE;
5449 info.align_mask = align_mask;
5450 info.align_offset = 0;
5451+ info.threadstack_offset = offset;
5452 return vm_unmapped_area(&info);
5453 }
5454
5455diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5456index 84f8a52..7c76178 100644
5457--- a/arch/ia64/kernel/vmlinux.lds.S
5458+++ b/arch/ia64/kernel/vmlinux.lds.S
5459@@ -192,7 +192,7 @@ SECTIONS {
5460 /* Per-cpu data: */
5461 . = ALIGN(PERCPU_PAGE_SIZE);
5462 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5463- __phys_per_cpu_start = __per_cpu_load;
5464+ __phys_per_cpu_start = per_cpu_load;
5465 /*
5466 * ensure percpu data fits
5467 * into percpu page size
5468diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5469index ba5ba7a..36e9d3a 100644
5470--- a/arch/ia64/mm/fault.c
5471+++ b/arch/ia64/mm/fault.c
5472@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5473 return pte_present(pte);
5474 }
5475
5476+#ifdef CONFIG_PAX_PAGEEXEC
5477+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5478+{
5479+ unsigned long i;
5480+
5481+ printk(KERN_ERR "PAX: bytes at PC: ");
5482+ for (i = 0; i < 8; i++) {
5483+ unsigned int c;
5484+ if (get_user(c, (unsigned int *)pc+i))
5485+ printk(KERN_CONT "???????? ");
5486+ else
5487+ printk(KERN_CONT "%08x ", c);
5488+ }
5489+ printk("\n");
5490+}
5491+#endif
5492+
5493 # define VM_READ_BIT 0
5494 # define VM_WRITE_BIT 1
5495 # define VM_EXEC_BIT 2
5496@@ -151,8 +168,21 @@ retry:
5497 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5498 goto bad_area;
5499
5500- if ((vma->vm_flags & mask) != mask)
5501+ if ((vma->vm_flags & mask) != mask) {
5502+
5503+#ifdef CONFIG_PAX_PAGEEXEC
5504+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5505+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5506+ goto bad_area;
5507+
5508+ up_read(&mm->mmap_sem);
5509+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5510+ do_group_exit(SIGKILL);
5511+ }
5512+#endif
5513+
5514 goto bad_area;
5515+ }
5516
5517 /*
5518 * If for any reason at all we couldn't handle the fault, make
5519diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5520index 76069c1..c2aa816 100644
5521--- a/arch/ia64/mm/hugetlbpage.c
5522+++ b/arch/ia64/mm/hugetlbpage.c
5523@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5524 unsigned long pgoff, unsigned long flags)
5525 {
5526 struct vm_unmapped_area_info info;
5527+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5528
5529 if (len > RGN_MAP_LIMIT)
5530 return -ENOMEM;
5531@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5532 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5533 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5534 info.align_offset = 0;
5535+ info.threadstack_offset = offset;
5536 return vm_unmapped_area(&info);
5537 }
5538
5539diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5540index 6b33457..88b5124 100644
5541--- a/arch/ia64/mm/init.c
5542+++ b/arch/ia64/mm/init.c
5543@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5544 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5545 vma->vm_end = vma->vm_start + PAGE_SIZE;
5546 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5547+
5548+#ifdef CONFIG_PAX_PAGEEXEC
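+ /* PAGEEXEC: the initial register backing store must not be executable;
+ with MPROTECT, executability can never be regained either. */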
5549+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5550+ vma->vm_flags &= ~VM_EXEC;
5551+
5552+#ifdef CONFIG_PAX_MPROTECT
5553+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5554+ vma->vm_flags &= ~VM_MAYEXEC;
5555+#endif
5556+
5557+ }
5558+#endif
5559+
5560 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5561 down_write(&current->mm->mmap_sem);
5562 if (insert_vm_struct(current->mm, vma)) {
5563@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5564 gate_vma.vm_start = FIXADDR_USER_START;
5565 gate_vma.vm_end = FIXADDR_USER_END;
5566 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5567- gate_vma.vm_page_prot = __P101;
5568+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5569
5570 return 0;
5571 }
5572diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5573index 40b3ee98..8c2c112 100644
5574--- a/arch/m32r/include/asm/cache.h
5575+++ b/arch/m32r/include/asm/cache.h
5576@@ -1,8 +1,10 @@
5577 #ifndef _ASM_M32R_CACHE_H
5578 #define _ASM_M32R_CACHE_H
5579
5580+#include <linux/const.h>
5581+
5582 /* L1 cache line size */
5583 #define L1_CACHE_SHIFT 4
5584-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5585+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5586
5587 #endif /* _ASM_M32R_CACHE_H */
5588diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5589index 82abd15..d95ae5d 100644
5590--- a/arch/m32r/lib/usercopy.c
5591+++ b/arch/m32r/lib/usercopy.c
5592@@ -14,6 +14,9 @@
5593 unsigned long
5594 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5595 {
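+ /* A length with the sign bit set is almost certainly an underflowed size
+ calculation; refuse the copy rather than act on a huge bogus size. */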
5596+ if ((long)n < 0)
5597+ return n;
5598+
5599 prefetch(from);
5600 if (access_ok(VERIFY_WRITE, to, n))
5601 __copy_user(to,from,n);
5602@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5603 unsigned long
5604 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5605 {
5606+ if ((long)n < 0)
5607+ return n;
5608+
5609 prefetchw(to);
5610 if (access_ok(VERIFY_READ, from, n))
5611 __copy_user_zeroing(to,from,n);
5612diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5613index 0395c51..5f26031 100644
5614--- a/arch/m68k/include/asm/cache.h
5615+++ b/arch/m68k/include/asm/cache.h
5616@@ -4,9 +4,11 @@
5617 #ifndef __ARCH_M68K_CACHE_H
5618 #define __ARCH_M68K_CACHE_H
5619
5620+#include <linux/const.h>
5621+
5622 /* bytes per L1 cache line */
5623 #define L1_CACHE_SHIFT 4
5624-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5625+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5626
5627 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5628
5629diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5630index d703d8e..a8e2d70 100644
5631--- a/arch/metag/include/asm/barrier.h
5632+++ b/arch/metag/include/asm/barrier.h
5633@@ -90,7 +90,7 @@ static inline void fence(void)
5634 do { \
5635 compiletime_assert_atomic_type(*p); \
5636 smp_mb(); \
5637- ACCESS_ONCE(*p) = (v); \
5638+ ACCESS_ONCE_RW(*p) = (v); \
5639 } while (0)
5640
5641 #define smp_load_acquire(p) \
5642diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5643index 3c32075..ae0ae75 100644
5644--- a/arch/metag/mm/hugetlbpage.c
5645+++ b/arch/metag/mm/hugetlbpage.c
5646@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5647 info.high_limit = TASK_SIZE;
5648 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5649 info.align_offset = 0;
5650+ info.threadstack_offset = 0;
5651 return vm_unmapped_area(&info);
5652 }
5653
5654diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5655index 4efe96a..60e8699 100644
5656--- a/arch/microblaze/include/asm/cache.h
5657+++ b/arch/microblaze/include/asm/cache.h
5658@@ -13,11 +13,12 @@
5659 #ifndef _ASM_MICROBLAZE_CACHE_H
5660 #define _ASM_MICROBLAZE_CACHE_H
5661
5662+#include <linux/const.h>
5663 #include <asm/registers.h>
5664
5665 #define L1_CACHE_SHIFT 5
5666 /* word-granular cache in microblaze */
5667-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5668+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5669
5670 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5671
5672diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5673index 843713c..b6a87b9 100644
5674--- a/arch/mips/Kconfig
5675+++ b/arch/mips/Kconfig
5676@@ -2439,6 +2439,7 @@ source "kernel/Kconfig.preempt"
5677
5678 config KEXEC
5679 bool "Kexec system call"
5680+ depends on !GRKERNSEC_KMEM
5681 help
5682 kexec is a system call that implements the ability to shutdown your
5683 current kernel, and to start another kernel. It is like a reboot
5684diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5685index 3778655..1dff0a9 100644
5686--- a/arch/mips/cavium-octeon/dma-octeon.c
5687+++ b/arch/mips/cavium-octeon/dma-octeon.c
5688@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5689 if (dma_release_from_coherent(dev, order, vaddr))
5690 return;
5691
5692- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5693+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5694 }
5695
5696 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5697diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5698index 857da84..3f4458b 100644
5699--- a/arch/mips/include/asm/atomic.h
5700+++ b/arch/mips/include/asm/atomic.h
5701@@ -22,15 +22,39 @@
5702 #include <asm/cmpxchg.h>
5703 #include <asm/war.h>
5704
5705+#ifdef CONFIG_GENERIC_ATOMIC64
5706+#include <asm-generic/atomic64.h>
5707+#endif
5708+
5709 #define ATOMIC_INIT(i) { (i) }
5710
5711+#ifdef CONFIG_64BIT
5712+#define _ASM_EXTABLE(from, to) \
5713+" .section __ex_table,\"a\"\n" \
5714+" .dword " #from ", " #to"\n" \
5715+" .previous\n"
5716+#else
5717+#define _ASM_EXTABLE(from, to) \
5718+" .section __ex_table,\"a\"\n" \
5719+" .word " #from ", " #to"\n" \
5720+" .previous\n"
5721+#endif
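+/* Each exception table entry pairs the address of a potentially faulting
+   instruction with its fixup address; the entry width (.word vs .dword)
+   must match the pointer size of the build. */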
5722+
5723 /*
5724 * atomic_read - read atomic variable
5725 * @v: pointer of type atomic_t
5726 *
5727 * Atomically reads the value of @v.
5728 */
5729-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5730+static inline int atomic_read(const atomic_t *v)
5731+{
5732+ return ACCESS_ONCE(v->counter);
5733+}
5734+
5735+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5736+{
5737+ return ACCESS_ONCE(v->counter);
5738+}
5739
5740 /*
5741 * atomic_set - set atomic variable
5742@@ -39,47 +63,77 @@
5743 *
5744 * Atomically sets the value of @v to @i.
5745 */
5746-#define atomic_set(v, i) ((v)->counter = (i))
5747+static inline void atomic_set(atomic_t *v, int i)
5748+{
5749+ v->counter = i;
5750+}
5751
5752-#define ATOMIC_OP(op, c_op, asm_op) \
5753-static __inline__ void atomic_##op(int i, atomic_t * v) \
5754+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5755+{
5756+ v->counter = i;
5757+}
5758+
5759+#ifdef CONFIG_PAX_REFCOUNT
5760+#define __OVERFLOW_POST \
5761+ " b 4f \n" \
5762+ " .set noreorder \n" \
5763+ "3: b 5f \n" \
5764+ " move %0, %1 \n" \
5765+ " .set reorder \n"
5766+#define __OVERFLOW_EXTABLE \
5767+ "3:\n" \
5768+ _ASM_EXTABLE(2b, 3b)
5769+#else
5770+#define __OVERFLOW_POST
5771+#define __OVERFLOW_EXTABLE
5772+#endif
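+/*
+ * With PAX_REFCOUNT the checked atomics are built from the trapping signed
+ * add/sub; a minimal sketch of the generated loop (labels as used below):
+ *
+ *	1:	ll	temp, v		# load-linked the counter
+ *	2:	add	temp, i		# traps on signed overflow
+ *		sc	temp, v		# store-conditional
+ *		beqz	temp, 1b	# retry if the store failed
+ *	3:				# overflow fixup lands here, past the
+ *					# sc, so the counter stays unmodified
+ *
+ * _ASM_EXTABLE(2b, 3b) routes the overflow trap taken at 2 to label 3.
+ */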
5773+
5774+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5775+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5776 { \
5777 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5778 int temp; \
5779 \
5780 __asm__ __volatile__( \
5781- " .set arch=r4000 \n" \
5782- "1: ll %0, %1 # atomic_" #op " \n" \
5783- " " #asm_op " %0, %2 \n" \
5784+ " .set mips3 \n" \
5785+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5786+ "2: " #asm_op " %0, %2 \n" \
5787 " sc %0, %1 \n" \
5788 " beqzl %0, 1b \n" \
5789+ extable \
5790 " .set mips0 \n" \
5791 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5792 : "Ir" (i)); \
5793 } else if (kernel_uses_llsc) { \
5794 int temp; \
5795 \
5796- do { \
5797- __asm__ __volatile__( \
5798- " .set arch=r4000 \n" \
5799- " ll %0, %1 # atomic_" #op "\n" \
5800- " " #asm_op " %0, %2 \n" \
5801- " sc %0, %1 \n" \
5802- " .set mips0 \n" \
5803- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5804- : "Ir" (i)); \
5805- } while (unlikely(!temp)); \
5806+ __asm__ __volatile__( \
5807+ " .set mips3 \n" \
5808+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5809+ "2: " #asm_op " %0, %2 \n" \
5810+ " sc %0, %1 \n" \
5811+ " beqz %0, 1b \n" \
5812+ extable \
5813+ " .set mips0 \n" \
5814+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5815+ : "Ir" (i)); \
5816 } else { \
5817 unsigned long flags; \
5818 \
5819 raw_local_irq_save(flags); \
5820- v->counter c_op i; \
5821+ __asm__ __volatile__( \
5822+ "2: " #asm_op " %0, %1 \n" \
5823+ extable \
5824+ : "+r" (v->counter) : "Ir" (i)); \
5825 raw_local_irq_restore(flags); \
5826 } \
5827 }
5828
5829-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5830-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5831+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE) \
5832+ __ATOMIC_OP(op, _unchecked, asm_op##u, )
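+/* The plain atomic_##op (the checked flavour) is instantiated with the
+   trapping signed opcode plus the overflow extable; atomic_##op##_unchecked
+   keeps the original non-trapping ##u opcode. */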
5833+
5834+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5835+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5836 { \
5837 int result; \
5838 \
5839@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5840 int temp; \
5841 \
5842 __asm__ __volatile__( \
5843- " .set arch=r4000 \n" \
5844- "1: ll %1, %2 # atomic_" #op "_return \n" \
5845- " " #asm_op " %0, %1, %3 \n" \
5846+ " .set mips3 \n" \
5847+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5848+ "2: " #asm_op " %0, %1, %3 \n" \
5849 " sc %0, %2 \n" \
5850 " beqzl %0, 1b \n" \
5851- " " #asm_op " %0, %1, %3 \n" \
5852+ post_op \
5853+ extable \
5854+ "4: " #asm_op " %0, %1, %3 \n" \
5855+ "5: \n" \
5856 " .set mips0 \n" \
5857 : "=&r" (result), "=&r" (temp), \
5858 "+" GCC_OFF12_ASM() (v->counter) \
5859@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5860 } else if (kernel_uses_llsc) { \
5861 int temp; \
5862 \
5863- do { \
5864- __asm__ __volatile__( \
5865- " .set arch=r4000 \n" \
5866- " ll %1, %2 # atomic_" #op "_return \n" \
5867- " " #asm_op " %0, %1, %3 \n" \
5868- " sc %0, %2 \n" \
5869- " .set mips0 \n" \
5870- : "=&r" (result), "=&r" (temp), \
5871- "+" GCC_OFF12_ASM() (v->counter) \
5872- : "Ir" (i)); \
5873- } while (unlikely(!result)); \
5874+ __asm__ __volatile__( \
5875+ " .set mips3 \n" \
5876+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5877+ "2: " #asm_op " %0, %1, %3 \n" \
5878+ " sc %0, %2 \n" \
5879+ post_op \
5880+ extable \
5881+ "4: " #asm_op " %0, %1, %3 \n" \
5882+ "5: \n" \
5883+ " .set mips0 \n" \
5884+ : "=&r" (result), "=&r" (temp), \
5885+ "+" GCC_OFF12_ASM() (v->counter) \
5886+ : "Ir" (i)); \
5887 \
5888 result = temp; result c_op i; \
5889 } else { \
5890 unsigned long flags; \
5891 \
5892 raw_local_irq_save(flags); \
5893- result = v->counter; \
5894- result c_op i; \
5895- v->counter = result; \
5896+ __asm__ __volatile__( \
5897+ " lw %0, %1 \n" \
5898+ "2: " #asm_op " %0, %1, %2 \n" \
5899+ " sw %0, %1 \n" \
5900+ "3: \n" \
5901+ extable \
5902+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
5903+ : "Ir" (i)); \
5904 raw_local_irq_restore(flags); \
5905 } \
5906 \
5907@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5908 return result; \
5909 }
5910
5911-#define ATOMIC_OPS(op, c_op, asm_op) \
5912- ATOMIC_OP(op, c_op, asm_op) \
5913- ATOMIC_OP_RETURN(op, c_op, asm_op)
5914+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE) \
5915+ __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , )
5916
5917-ATOMIC_OPS(add, +=, addu)
5918-ATOMIC_OPS(sub, -=, subu)
5919+#define ATOMIC_OPS(op, asm_op) \
5920+ ATOMIC_OP(op, asm_op) \
5921+ ATOMIC_OP_RETURN(op, asm_op)
5922+
5923+ATOMIC_OPS(add, add)
5924+ATOMIC_OPS(sub, sub)
5925
5926 #undef ATOMIC_OPS
5927 #undef ATOMIC_OP_RETURN
5928+#undef __ATOMIC_OP_RETURN
5929 #undef ATOMIC_OP
5930+#undef __ATOMIC_OP
5931
5932 /*
5933 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5934@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5935 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5936 * The function returns the old value of @v minus @i.
5937 */
5938-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5939+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5940 {
5941 int result;
5942
5943@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5944 return result;
5945 }
5946
5947-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5948-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5949+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5950+{
5951+ return cmpxchg(&v->counter, old, new);
5952+}
5953+
5954+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5955+ int new)
5956+{
5957+ return cmpxchg(&(v->counter), old, new);
5958+}
5959+
5960+static inline int atomic_xchg(atomic_t *v, int new)
5961+{
5962+ return xchg(&v->counter, new);
5963+}
5964+
5965+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5966+{
5967+ return xchg(&(v->counter), new);
5968+}
5969
5970 /**
5971 * __atomic_add_unless - add unless the number is a given value
5972@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5973
5974 #define atomic_dec_return(v) atomic_sub_return(1, (v))
5975 #define atomic_inc_return(v) atomic_add_return(1, (v))
5976+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5977+{
5978+ return atomic_add_return_unchecked(1, v);
5979+}
5980
5981 /*
5982 * atomic_sub_and_test - subtract value from variable and test result
5983@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5984 * other cases.
5985 */
5986 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5987+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5988+{
5989+ return atomic_add_return_unchecked(1, v) == 0;
5990+}
5991
5992 /*
5993 * atomic_dec_and_test - decrement by 1 and test
5994@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5995 * Atomically increments @v by 1.
5996 */
5997 #define atomic_inc(v) atomic_add(1, (v))
5998+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
5999+{
6000+ atomic_add_unchecked(1, v);
6001+}
6002
6003 /*
6004 * atomic_dec - decrement and test
6005@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6006 * Atomically decrements @v by 1.
6007 */
6008 #define atomic_dec(v) atomic_sub(1, (v))
6009+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6010+{
6011+ atomic_sub_unchecked(1, v);
6012+}
6013
6014 /*
6015 * atomic_add_negative - add and test if negative
6016@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6017 * @v: pointer of type atomic64_t
6018 *
6019 */
6020-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6021+static inline long atomic64_read(const atomic64_t *v)
6022+{
6023+ return ACCESS_ONCE(v->counter);
6024+}
6025+
6026+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6027+{
6028+ return ACCESS_ONCE(v->counter);
6029+}
6030
6031 /*
6032 * atomic64_set - set atomic variable
6033 * @v: pointer of type atomic64_t
6034 * @i: required value
6035 */
6036-#define atomic64_set(v, i) ((v)->counter = (i))
6037+static inline void atomic64_set(atomic64_t *v, long i)
6038+{
6039+ v->counter = i;
6040+}
6041
6042-#define ATOMIC64_OP(op, c_op, asm_op) \
6043-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6044+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6045+{
6046+ v->counter = i;
6047+}
6048+
6049+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6050+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6051 { \
6052 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6053 long temp; \
6054 \
6055 __asm__ __volatile__( \
6056- " .set arch=r4000 \n" \
6057- "1: lld %0, %1 # atomic64_" #op " \n" \
6058- " " #asm_op " %0, %2 \n" \
6059+ " .set mips3 \n" \
6060+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6061+ "2: " #asm_op " %0, %2 \n" \
6062 " scd %0, %1 \n" \
6063 " beqzl %0, 1b \n" \
6064+ extable \
6065 " .set mips0 \n" \
6066 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6067 : "Ir" (i)); \
6068 } else if (kernel_uses_llsc) { \
6069 long temp; \
6070 \
6071- do { \
6072- __asm__ __volatile__( \
6073- " .set arch=r4000 \n" \
6074- " lld %0, %1 # atomic64_" #op "\n" \
6075- " " #asm_op " %0, %2 \n" \
6076- " scd %0, %1 \n" \
6077- " .set mips0 \n" \
6078- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6079- : "Ir" (i)); \
6080- } while (unlikely(!temp)); \
6081+ __asm__ __volatile__( \
6082+ " .set mips3 \n" \
6083+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6084+ "2: " #asm_op " %0, %2 \n" \
6085+ " scd %0, %1 \n" \
6086+ " beqz %0, 1b \n" \
6087+ extable \
6088+ " .set mips0 \n" \
6089+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6090+ : "Ir" (i)); \
6091 } else { \
6092 unsigned long flags; \
6093 \
6094 raw_local_irq_save(flags); \
6095- v->counter c_op i; \
6096+ __asm__ __volatile__( \
6097+ "2: " #asm_op " %0, %1 \n" \
6098+ extable \
6099+ : "+" GCC_OFF12_ASM() (v->counter) : "Ir" (i)); \
6100 raw_local_irq_restore(flags); \
6101 } \
6102 }
6103
6104-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6105-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6106+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE) \
6107+ __ATOMIC64_OP(op, _unchecked, asm_op##u, )
6108+
6109+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6110+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6111 { \
6112 long result; \
6113 \
6114@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6115 long temp; \
6116 \
6117 __asm__ __volatile__( \
6118- " .set arch=r4000 \n" \
6119+ " .set mips3 \n" \
6120 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6121- " " #asm_op " %0, %1, %3 \n" \
6122+ "2: " #asm_op " %0, %1, %3 \n" \
6123 " scd %0, %2 \n" \
6124 " beqzl %0, 1b \n" \
6125- " " #asm_op " %0, %1, %3 \n" \
6126+ post_op \
6127+ extable \
6128+ "4: " #asm_op " %0, %1, %3 \n" \
6129+ "5: \n" \
6130 " .set mips0 \n" \
6131 : "=&r" (result), "=&r" (temp), \
6132 "+" GCC_OFF12_ASM() (v->counter) \
6133@@ -381,27 +510,34 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6134 } else if (kernel_uses_llsc) { \
6135 long temp; \
6136 \
6137- do { \
6138- __asm__ __volatile__( \
6139- " .set arch=r4000 \n" \
6140- " lld %1, %2 # atomic64_" #op "_return\n" \
6141- " " #asm_op " %0, %1, %3 \n" \
6142- " scd %0, %2 \n" \
6143- " .set mips0 \n" \
6144- : "=&r" (result), "=&r" (temp), \
6145- "=" GCC_OFF12_ASM() (v->counter) \
6146- : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6147- : "memory"); \
6148- } while (unlikely(!result)); \
6149+ __asm__ __volatile__( \
6150+ " .set mips3 \n" \
6151+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6152+ "2: " #asm_op " %0, %1, %3 \n" \
6153+ " scd %0, %2 \n" \
6154+ " beqz %0, 1b \n" \
6155+ post_op \
6156+ extable \
6157+ "4: " #asm_op " %0, %1, %3 \n" \
6158+ "5: \n" \
6159+ " .set mips0 \n" \
6160+ : "=&r" (result), "=&r" (temp), \
6161+ "=" GCC_OFF12_ASM() (v->counter) \
6162+ : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6163+ : "memory"); \
6164 \
6165 result = temp; result c_op i; \
6166 } else { \
6167 unsigned long flags; \
6168 \
6169 raw_local_irq_save(flags); \
6170- result = v->counter; \
6171- result c_op i; \
6172- v->counter = result; \
6173+ __asm__ __volatile__( \
6174+ " ld %0, %1 \n" \
6175+ "2: " #asm_op " %0, %1, %2 \n" \
6176+ " sd %0, %1 \n" \
6177+ "3: \n" \
6178+ extable \
6179+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
6180+ : "Ir" (i)); \
6181 raw_local_irq_restore(flags); \
6182 } \
6183 \
6184@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6185 return result; \
6186 }
6187
6188-#define ATOMIC64_OPS(op, c_op, asm_op) \
6189- ATOMIC64_OP(op, c_op, asm_op) \
6190- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6191+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE) \
6192+ __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , )
6193
6194-ATOMIC64_OPS(add, +=, daddu)
6195-ATOMIC64_OPS(sub, -=, dsubu)
6196+#define ATOMIC64_OPS(op, asm_op) \
6197+ ATOMIC64_OP(op, asm_op) \
6198+ ATOMIC64_OP_RETURN(op, asm_op)
6199+
6200+ATOMIC64_OPS(add, dadd)
6201+ATOMIC64_OPS(sub, dsub)
6202
6203 #undef ATOMIC64_OPS
6204 #undef ATOMIC64_OP_RETURN
6205+#undef __ATOMIC64_OP_RETURN
6206 #undef ATOMIC64_OP
6207+#undef __ATOMIC64_OP
6208+#undef __OVERFLOW_EXTABLE
6209+#undef __OVERFLOW_POST
6210
6211 /*
6212 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6213@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6214 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6215 * The function returns the old value of @v minus @i.
6216 */
6217-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6218+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6219 {
6220 long result;
6221
6222@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6223 return result;
6224 }
6225
6226-#define atomic64_cmpxchg(v, o, n) \
6227- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6228-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6229+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6230+{
6231+ return cmpxchg(&v->counter, old, new);
6232+}
6233+
6234+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6235+ long new)
6236+{
6237+ return cmpxchg(&(v->counter), old, new);
6238+}
6239+
6240+static inline long atomic64_xchg(atomic64_t *v, long new)
6241+{
6242+ return xchg(&v->counter, new);
6243+}
6244+
6245+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6246+{
6247+ return xchg(&(v->counter), new);
6248+}
6249
6250 /**
6251 * atomic64_add_unless - add unless the number is a given value
6252@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6253
6254 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6255 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6256+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6257
6258 /*
6259 * atomic64_sub_and_test - subtract value from variable and test result
6260@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6261 * other cases.
6262 */
6263 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6264+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6265
6266 /*
6267 * atomic64_dec_and_test - decrement by 1 and test
6268@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6269 * Atomically increments @v by 1.
6270 */
6271 #define atomic64_inc(v) atomic64_add(1, (v))
6272+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6273
6274 /*
6275 * atomic64_dec - decrement and test
6276@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6277 * Atomically decrements @v by 1.
6278 */
6279 #define atomic64_dec(v) atomic64_sub(1, (v))
6280+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6281
6282 /*
6283 * atomic64_add_negative - add and test if negative
6284diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6285index 2b8bbbc..4556df6 100644
6286--- a/arch/mips/include/asm/barrier.h
6287+++ b/arch/mips/include/asm/barrier.h
6288@@ -133,7 +133,7 @@
6289 do { \
6290 compiletime_assert_atomic_type(*p); \
6291 smp_mb(); \
6292- ACCESS_ONCE(*p) = (v); \
6293+ ACCESS_ONCE_RW(*p) = (v); \
6294 } while (0)
6295
6296 #define smp_load_acquire(p) \
6297diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6298index b4db69f..8f3b093 100644
6299--- a/arch/mips/include/asm/cache.h
6300+++ b/arch/mips/include/asm/cache.h
6301@@ -9,10 +9,11 @@
6302 #ifndef _ASM_CACHE_H
6303 #define _ASM_CACHE_H
6304
6305+#include <linux/const.h>
6306 #include <kmalloc.h>
6307
6308 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6309-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6310+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6311
6312 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6313 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6314diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6315index eb4d95d..f2f7f93 100644
6316--- a/arch/mips/include/asm/elf.h
6317+++ b/arch/mips/include/asm/elf.h
6318@@ -405,15 +405,18 @@ extern const char *__elf_platform;
6319 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6320 #endif
6321
6322+#ifdef CONFIG_PAX_ASLR
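+/* Number of random bits PaX applies to the mmap and stack bases; the
+   0x00400000UL ELF base is used for both 32-bit and 64-bit tasks. */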
6323+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6324+
6325+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6326+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6327+#endif
6328+
6329 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6330 struct linux_binprm;
6331 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6332 int uses_interp);
6333
6334-struct mm_struct;
6335-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6336-#define arch_randomize_brk arch_randomize_brk
6337-
6338 struct arch_elf_state {
6339 int fp_abi;
6340 int interp_fp_abi;
6341diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6342index c1f6afa..38cc6e9 100644
6343--- a/arch/mips/include/asm/exec.h
6344+++ b/arch/mips/include/asm/exec.h
6345@@ -12,6 +12,6 @@
6346 #ifndef _ASM_EXEC_H
6347 #define _ASM_EXEC_H
6348
6349-extern unsigned long arch_align_stack(unsigned long sp);
6350+#define arch_align_stack(x) ((x) & ~0xfUL)
6351
6352 #endif /* _ASM_EXEC_H */
6353diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6354index 9e8ef59..1139d6b 100644
6355--- a/arch/mips/include/asm/hw_irq.h
6356+++ b/arch/mips/include/asm/hw_irq.h
6357@@ -10,7 +10,7 @@
6358
6359 #include <linux/atomic.h>
6360
6361-extern atomic_t irq_err_count;
6362+extern atomic_unchecked_t irq_err_count;
6363
6364 /*
6365 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6366diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6367index 46dfc3c..a16b13a 100644
6368--- a/arch/mips/include/asm/local.h
6369+++ b/arch/mips/include/asm/local.h
6370@@ -12,15 +12,25 @@ typedef struct
6371 atomic_long_t a;
6372 } local_t;
6373
6374+typedef struct {
6375+ atomic_long_unchecked_t a;
6376+} local_unchecked_t;
6377+
6378 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6379
6380 #define local_read(l) atomic_long_read(&(l)->a)
6381+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6382 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6383+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6384
6385 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6386+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6387 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6388+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6389 #define local_inc(l) atomic_long_inc(&(l)->a)
6390+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6391 #define local_dec(l) atomic_long_dec(&(l)->a)
6392+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6393
6394 /*
6395 * Same as above, but return the result value
6396@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6397 return result;
6398 }
6399
6400+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6401+{
6402+ unsigned long result;
6403+
6404+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6405+ unsigned long temp;
6406+
6407+ __asm__ __volatile__(
6408+ " .set mips3 \n"
6409+ "1:" __LL "%1, %2 # local_add_return \n"
6410+ " addu %0, %1, %3 \n"
6411+ __SC "%0, %2 \n"
6412+ " beqzl %0, 1b \n"
6413+ " addu %0, %1, %3 \n"
6414+ " .set mips0 \n"
6415+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6416+ : "Ir" (i), "m" (l->a.counter)
6417+ : "memory");
6418+ } else if (kernel_uses_llsc) {
6419+ unsigned long temp;
6420+
6421+ __asm__ __volatile__(
6422+ " .set mips3 \n"
6423+ "1:" __LL "%1, %2 # local_add_return \n"
6424+ " addu %0, %1, %3 \n"
6425+ __SC "%0, %2 \n"
6426+ " beqz %0, 1b \n"
6427+ " addu %0, %1, %3 \n"
6428+ " .set mips0 \n"
6429+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6430+ : "Ir" (i), "m" (l->a.counter)
6431+ : "memory");
6432+ } else {
6433+ unsigned long flags;
6434+
6435+ local_irq_save(flags);
6436+ result = l->a.counter;
6437+ result += i;
6438+ l->a.counter = result;
6439+ local_irq_restore(flags);
6440+ }
6441+
6442+ return result;
6443+}
6444+
6445 static __inline__ long local_sub_return(long i, local_t * l)
6446 {
6447 unsigned long result;
6448@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6449
6450 #define local_cmpxchg(l, o, n) \
6451 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6452+#define local_cmpxchg_unchecked(l, o, n) \
6453+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6454 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6455
6456 /**
6457diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6458index 154b70a..426ae3d 100644
6459--- a/arch/mips/include/asm/page.h
6460+++ b/arch/mips/include/asm/page.h
6461@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6462 #ifdef CONFIG_CPU_MIPS32
6463 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6464 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6465- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6466+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6467 #else
6468 typedef struct { unsigned long long pte; } pte_t;
6469 #define pte_val(x) ((x).pte)
6470diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6471index b336037..5b874cc 100644
6472--- a/arch/mips/include/asm/pgalloc.h
6473+++ b/arch/mips/include/asm/pgalloc.h
6474@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6475 {
6476 set_pud(pud, __pud((unsigned long)pmd));
6477 }
6478+
6479+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6480+{
6481+ pud_populate(mm, pud, pmd);
6482+}
6483 #endif
6484
6485 /*
6486diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6487index 845016d..3303268 100644
6488--- a/arch/mips/include/asm/pgtable.h
6489+++ b/arch/mips/include/asm/pgtable.h
6490@@ -20,6 +20,9 @@
6491 #include <asm/io.h>
6492 #include <asm/pgtable-bits.h>
6493
6494+#define ktla_ktva(addr) (addr)
6495+#define ktva_ktla(addr) (addr)
6496+
6497 struct mm_struct;
6498 struct vm_area_struct;
6499
6500diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6501index e4440f9..8fb0005 100644
6502--- a/arch/mips/include/asm/thread_info.h
6503+++ b/arch/mips/include/asm/thread_info.h
6504@@ -106,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
6505 #define TIF_SECCOMP 4 /* secure computing */
6506 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6507 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6508+/* li takes a 32bit immediate */
6509+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6510+
6511 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6512 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6513 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6514@@ -141,14 +144,16 @@ static inline struct thread_info *current_thread_info(void)
6515 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6516 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6517 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6518+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6519
6520 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6521 _TIF_SYSCALL_AUDIT | \
6522- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6523+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6524+ _TIF_GRSEC_SETXID)
6525
6526 /* work to do in syscall_trace_leave() */
6527 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6528- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6529+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6530
6531 /* work to do on interrupt/exception return */
6532 #define _TIF_WORK_MASK \
6533@@ -156,7 +161,7 @@ static inline struct thread_info *current_thread_info(void)
6534 /* work to do on any return to u-space */
6535 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6536 _TIF_WORK_SYSCALL_EXIT | \
6537- _TIF_SYSCALL_TRACEPOINT)
6538+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6539
6540 /*
6541 * We stash processor id into a COP0 register to retrieve it fast
6542diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6543index bf8b324..cec5705 100644
6544--- a/arch/mips/include/asm/uaccess.h
6545+++ b/arch/mips/include/asm/uaccess.h
6546@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6547 __ok == 0; \
6548 })
6549
6550+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6551 #define access_ok(type, addr, size) \
6552 likely(__access_ok((addr), (size), __access_mask))
6553
6554diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6555index 1188e00..41cf144 100644
6556--- a/arch/mips/kernel/binfmt_elfn32.c
6557+++ b/arch/mips/kernel/binfmt_elfn32.c
6558@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6559 #undef ELF_ET_DYN_BASE
6560 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6561
6562+#ifdef CONFIG_PAX_ASLR
6563+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6564+
6565+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6566+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6567+#endif
6568+
6569 #include <asm/processor.h>
6570 #include <linux/module.h>
6571 #include <linux/elfcore.h>
6572diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6573index 9287678..f870e47 100644
6574--- a/arch/mips/kernel/binfmt_elfo32.c
6575+++ b/arch/mips/kernel/binfmt_elfo32.c
6576@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6577 #undef ELF_ET_DYN_BASE
6578 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6579
6580+#ifdef CONFIG_PAX_ASLR
6581+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6582+
6583+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6584+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6585+#endif
6586+
6587 #include <asm/processor.h>
6588
6589 #include <linux/module.h>
6590diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6591index a74ec3a..4f06f18 100644
6592--- a/arch/mips/kernel/i8259.c
6593+++ b/arch/mips/kernel/i8259.c
6594@@ -202,7 +202,7 @@ spurious_8259A_irq:
6595 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6596 spurious_irq_mask |= irqmask;
6597 }
6598- atomic_inc(&irq_err_count);
6599+ atomic_inc_unchecked(&irq_err_count);
6600 /*
6601 * Theoretically we do not have to handle this IRQ,
6602 * but in Linux this does not cause problems and is
6603diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6604index 44a1f79..2bd6aa3 100644
6605--- a/arch/mips/kernel/irq-gt641xx.c
6606+++ b/arch/mips/kernel/irq-gt641xx.c
6607@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6608 }
6609 }
6610
6611- atomic_inc(&irq_err_count);
6612+ atomic_inc_unchecked(&irq_err_count);
6613 }
6614
6615 void __init gt641xx_irq_init(void)
6616diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6617index d2bfbc2..a8eacd2 100644
6618--- a/arch/mips/kernel/irq.c
6619+++ b/arch/mips/kernel/irq.c
6620@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6621 printk("unexpected IRQ # %d\n", irq);
6622 }
6623
6624-atomic_t irq_err_count;
6625+atomic_unchecked_t irq_err_count;
6626
6627 int arch_show_interrupts(struct seq_file *p, int prec)
6628 {
6629- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6630+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6631 return 0;
6632 }
6633
6634 asmlinkage void spurious_interrupt(void)
6635 {
6636- atomic_inc(&irq_err_count);
6637+ atomic_inc_unchecked(&irq_err_count);
6638 }
6639
6640 void __init init_IRQ(void)
6641@@ -109,7 +109,10 @@ void __init init_IRQ(void)
6642 #endif
6643 }
6644
6645+
6646 #ifdef DEBUG_STACKOVERFLOW
6647+extern void gr_handle_kernel_exploit(void);
6648+
6649 static inline void check_stack_overflow(void)
6650 {
6651 unsigned long sp;
6652@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
6653 printk("do_IRQ: stack overflow: %ld\n",
6654 sp - sizeof(struct thread_info));
6655 dump_stack();
6656+ gr_handle_kernel_exploit();
6657 }
6658 }
6659 #else
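Annotation: check_stack_overflow() masks the live stack pointer down to its offset within the THREAD_SIZE-aligned stack and warns when little headroom remains; the grsecurity addition escalates that condition to gr_handle_kernel_exploit(). A user-space sketch of the masking idea, with illustrative sizes and addresses:

#include <stdio.h>

#define THREAD_SIZE (16UL * 1024)   /* illustrative kernel stack size */
#define WARN_MARGIN 512UL           /* headroom treated as "overflow" */

/* Offset of sp from the base of its (THREAD_SIZE-aligned) stack. */
static unsigned long stack_offset(unsigned long sp)
{
    return sp & (THREAD_SIZE - 1);
}

int main(void)
{
    unsigned long healthy = 0x8000f800UL;  /* well above the base */
    unsigned long tight   = 0x8000c100UL;  /* 0x100 bytes above base */

    printf("healthy sp offset: %#lx\n", stack_offset(healthy));
    if (stack_offset(tight) < WARN_MARGIN)
        printf("tight sp offset %#lx: stack overflow imminent\n",
               stack_offset(tight));
    return 0;
}

The masking works because kernel stacks are allocated THREAD_SIZE-aligned, so the low bits of sp are exactly the distance from the stack base.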
6660diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6661index 0614717..002fa43 100644
6662--- a/arch/mips/kernel/pm-cps.c
6663+++ b/arch/mips/kernel/pm-cps.c
6664@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6665 nc_core_ready_count = nc_addr;
6666
6667 /* Ensure ready_count is zero-initialised before the assembly runs */
6668- ACCESS_ONCE(*nc_core_ready_count) = 0;
6669+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6670 coupled_barrier(&per_cpu(pm_barrier, core), online);
6671
6672 /* Run the generated entry code */
6673diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6674index 85bff5d..39bc202 100644
6675--- a/arch/mips/kernel/process.c
6676+++ b/arch/mips/kernel/process.c
6677@@ -534,18 +534,6 @@ out:
6678 return pc;
6679 }
6680
6681-/*
6682- * Don't forget that the stack pointer must be aligned on a 8 bytes
6683- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6684- */
6685-unsigned long arch_align_stack(unsigned long sp)
6686-{
6687- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6688- sp -= get_random_int() & ~PAGE_MASK;
6689-
6690- return sp & ALMASK;
6691-}
6692-
6693 static void arch_dump_stack(void *info)
6694 {
6695 struct pt_regs *regs;
6696diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6697index 5104528..950bbdc 100644
6698--- a/arch/mips/kernel/ptrace.c
6699+++ b/arch/mips/kernel/ptrace.c
6700@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6701 return ret;
6702 }
6703
6704+#ifdef CONFIG_GRKERNSEC_SETXID
6705+extern void gr_delayed_cred_worker(void);
6706+#endif
6707+
6708 /*
6709 * Notification of system call entry/exit
6710 * - triggered by current->work.syscall_trace
6711@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6712 tracehook_report_syscall_entry(regs))
6713 ret = -1;
6714
6715+#ifdef CONFIG_GRKERNSEC_SETXID
6716+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6717+ gr_delayed_cred_worker();
6718+#endif
6719+
6720 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6721 trace_sys_enter(regs, regs->regs[2]);
6722
6723diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6724index 07fc524..b9d7f28 100644
6725--- a/arch/mips/kernel/reset.c
6726+++ b/arch/mips/kernel/reset.c
6727@@ -13,6 +13,7 @@
6728 #include <linux/reboot.h>
6729
6730 #include <asm/reboot.h>
6731+#include <asm/bug.h>
6732
6733 /*
6734 * Urgs ... Too many MIPS machines to handle this in a generic way.
6735@@ -29,16 +30,19 @@ void machine_restart(char *command)
6736 {
6737 if (_machine_restart)
6738 _machine_restart(command);
6739+ BUG();
6740 }
6741
6742 void machine_halt(void)
6743 {
6744 if (_machine_halt)
6745 _machine_halt();
6746+ BUG();
6747 }
6748
6749 void machine_power_off(void)
6750 {
6751 if (pm_power_off)
6752 pm_power_off();
6753+ BUG();
6754 }
6755diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6756index 2242bdd..b284048 100644
6757--- a/arch/mips/kernel/sync-r4k.c
6758+++ b/arch/mips/kernel/sync-r4k.c
6759@@ -18,8 +18,8 @@
6760 #include <asm/mipsregs.h>
6761
6762 static atomic_t count_start_flag = ATOMIC_INIT(0);
6763-static atomic_t count_count_start = ATOMIC_INIT(0);
6764-static atomic_t count_count_stop = ATOMIC_INIT(0);
6765+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6766+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6767 static atomic_t count_reference = ATOMIC_INIT(0);
6768
6769 #define COUNTON 100
6770@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6771
6772 for (i = 0; i < NR_LOOPS; i++) {
6773 /* slaves loop on '!= 2' */
6774- while (atomic_read(&count_count_start) != 1)
6775+ while (atomic_read_unchecked(&count_count_start) != 1)
6776 mb();
6777- atomic_set(&count_count_stop, 0);
6778+ atomic_set_unchecked(&count_count_stop, 0);
6779 smp_wmb();
6780
6781 /* this lets the slaves write their count register */
6782- atomic_inc(&count_count_start);
6783+ atomic_inc_unchecked(&count_count_start);
6784
6785 /*
6786 * Everyone initialises count in the last loop:
6787@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6788 /*
6789 * Wait for all slaves to leave the synchronization point:
6790 */
6791- while (atomic_read(&count_count_stop) != 1)
6792+ while (atomic_read_unchecked(&count_count_stop) != 1)
6793 mb();
6794- atomic_set(&count_count_start, 0);
6795+ atomic_set_unchecked(&count_count_start, 0);
6796 smp_wmb();
6797- atomic_inc(&count_count_stop);
6798+ atomic_inc_unchecked(&count_count_stop);
6799 }
6800 /* Arrange for an interrupt in a short while */
6801 write_c0_compare(read_c0_count() + COUNTON);
6802@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6803 initcount = atomic_read(&count_reference);
6804
6805 for (i = 0; i < NR_LOOPS; i++) {
6806- atomic_inc(&count_count_start);
6807- while (atomic_read(&count_count_start) != 2)
6808+ atomic_inc_unchecked(&count_count_start);
6809+ while (atomic_read_unchecked(&count_count_start) != 2)
6810 mb();
6811
6812 /*
6813@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6814 if (i == NR_LOOPS-1)
6815 write_c0_count(initcount);
6816
6817- atomic_inc(&count_count_stop);
6818- while (atomic_read(&count_count_stop) != 2)
6819+ atomic_inc_unchecked(&count_count_stop);
6820+ while (atomic_read_unchecked(&count_count_stop) != 2)
6821 mb();
6822 }
6823 /* Arrange for an interrupt in a short while */
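Annotation: the count_count_start/stop conversions above follow the usual PAX_REFCOUNT pattern: counters whose wrapping is harmless (handshake counters, statistics) become atomic_unchecked_t, so the overflow-trapping arithmetic reserved for reference counts never fires on them. A rough model of the two-type scheme; these are not the kernel's definitions, and the real checked path is trapping asm rather than a branch:

#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;            /* checked under PAX_REFCOUNT */
typedef struct { volatile int counter; } atomic_unchecked_t;  /* plain wrapping arithmetic */

/* Checked inc: the real patch does this with trapping asm + __ex_table. */
static void atomic_inc(atomic_t *v)
{
    if (v->counter == INT_MAX) {
        fprintf(stderr, "refcount overflow caught\n");
        return;                       /* kernel would report and saturate */
    }
    v->counter++;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    v->counter++;                     /* free to wrap: stats, sync counters */
}

int main(void)
{
    atomic_t ref = { INT_MAX };
    atomic_unchecked_t stat = { 0 };

    atomic_inc(&ref);                 /* trips the check */
    atomic_inc_unchecked(&stat);
    printf("ref=%d stat=%d\n", ref.counter, stat.counter);
    return 0;
}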
6824diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6825index c3b41e2..46c32e9 100644
6826--- a/arch/mips/kernel/traps.c
6827+++ b/arch/mips/kernel/traps.c
6828@@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6829 siginfo_t info;
6830
6831 prev_state = exception_enter();
6832- die_if_kernel("Integer overflow", regs);
6833+ if (unlikely(!user_mode(regs))) {
6834+
6835+#ifdef CONFIG_PAX_REFCOUNT
6836+ if (fixup_exception(regs)) {
6837+ pax_report_refcount_overflow(regs);
6838+ exception_exit(prev_state);
6839+ return;
6840+ }
6841+#endif
6842+
6843+ die("Integer overflow", regs);
6844+ }
6845
6846 info.si_code = FPE_INTOVF;
6847 info.si_signo = SIGFPE;
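Annotation: the do_ov() change routes kernel-mode integer-overflow traps through fixup_exception(): the PAX_REFCOUNT atomics register the trapping instruction's address in the exception table, so the handler can log via pax_report_refcount_overflow() and resume instead of dying. A schematic of that table lookup, with the structures simplified and a linear search standing in for the kernel's sorted binary search:

#include <stddef.h>
#include <stdio.h>

struct exception_table_entry {
    unsigned long insn;   /* address of the instruction that may trap */
    unsigned long fixup;  /* address to resume at */
};

/* Illustrative table; the kernel sorts and binary-searches __ex_table. */
static const struct exception_table_entry ex_table[] = {
    { 0x1000, 0x1010 },
    { 0x2040, 0x2050 },
};

static unsigned long search_fixup(unsigned long trap_pc)
{
    for (size_t i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++)
        if (ex_table[i].insn == trap_pc)
            return ex_table[i].fixup;
    return 0;   /* no fixup: genuinely fatal, die() */
}

int main(void)
{
    printf("fixup for pc 0x2040: %#lx\n", search_fixup(0x2040));
    printf("fixup for pc 0x3000: %#lx\n", search_fixup(0x3000));
    return 0;
}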
6848diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6849index 270bbd4..c01932a 100644
6850--- a/arch/mips/kvm/mips.c
6851+++ b/arch/mips/kvm/mips.c
6852@@ -815,7 +815,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6853 return r;
6854 }
6855
6856-int kvm_arch_init(void *opaque)
6857+int kvm_arch_init(const void *opaque)
6858 {
6859 if (kvm_mips_callbacks) {
6860 kvm_err("kvm: module already exists\n");
6861diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6862index 70ab5d6..62940fe 100644
6863--- a/arch/mips/mm/fault.c
6864+++ b/arch/mips/mm/fault.c
6865@@ -28,6 +28,23 @@
6866 #include <asm/highmem.h> /* For VMALLOC_END */
6867 #include <linux/kdebug.h>
6868
6869+#ifdef CONFIG_PAX_PAGEEXEC
6870+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6871+{
6872+ unsigned long i;
6873+
6874+ printk(KERN_ERR "PAX: bytes at PC: ");
6875+ for (i = 0; i < 5; i++) {
6876+ unsigned int c;
6877+ if (get_user(c, (unsigned int *)pc+i))
6878+ printk(KERN_CONT "???????? ");
6879+ else
6880+ printk(KERN_CONT "%08x ", c);
6881+ }
6882+ printk("\n");
6883+}
6884+#endif
6885+
6886 /*
6887 * This routine handles page faults. It determines the address,
6888 * and the problem, and then passes it off to one of the appropriate
6889@@ -201,6 +218,14 @@ bad_area:
6890 bad_area_nosemaphore:
6891 /* User mode accesses just cause a SIGSEGV */
6892 if (user_mode(regs)) {
6893+
6894+#ifdef CONFIG_PAX_PAGEEXEC
6895+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6896+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6897+ do_group_exit(SIGKILL);
6898+ }
6899+#endif
6900+
6901 tsk->thread.cp0_badvaddr = address;
6902 tsk->thread.error_code = write;
6903 #if 0
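Annotation: pax_report_insns() above dumps the five instruction words at the faulting PC with get_user(), printing "????????" for anything unreadable. The same loop shape in plain C over an in-process buffer, with get_user() replaced by a bounds check since this sketch has no user/kernel split:

#include <stdio.h>

/* Stand-in for get_user(): fails outside a fake "mapped" window. */
static int fetch_word(const unsigned int *buf, size_t len, size_t idx,
                      unsigned int *out)
{
    if (idx >= len)
        return -1;      /* unreadable, like get_user() returning -EFAULT */
    *out = buf[idx];
    return 0;
}

int main(void)
{
    unsigned int text[3] = { 0x3c040000, 0x24840010, 0x0c000000 };

    printf("PAX: bytes at PC: ");
    for (size_t i = 0; i < 5; i++) {
        unsigned int c;
        if (fetch_word(text, 3, i, &c))
            printf("???????? ");
        else
            printf("%08x ", c);
    }
    printf("\n");
    return 0;
}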
6904diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6905index f1baadd..5472dca 100644
6906--- a/arch/mips/mm/mmap.c
6907+++ b/arch/mips/mm/mmap.c
6908@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6909 struct vm_area_struct *vma;
6910 unsigned long addr = addr0;
6911 int do_color_align;
6912+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6913 struct vm_unmapped_area_info info;
6914
6915 if (unlikely(len > TASK_SIZE))
6916@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6917 do_color_align = 1;
6918
6919 /* requesting a specific address */
6920+
6921+#ifdef CONFIG_PAX_RANDMMAP
6922+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6923+#endif
6924+
6925 if (addr) {
6926 if (do_color_align)
6927 addr = COLOUR_ALIGN(addr, pgoff);
6928@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6929 addr = PAGE_ALIGN(addr);
6930
6931 vma = find_vma(mm, addr);
6932- if (TASK_SIZE - len >= addr &&
6933- (!vma || addr + len <= vma->vm_start))
6934+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6935 return addr;
6936 }
6937
6938 info.length = len;
6939 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6940 info.align_offset = pgoff << PAGE_SHIFT;
6941+ info.threadstack_offset = offset;
6942
6943 if (dir == DOWN) {
6944 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6945@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6946 {
6947 unsigned long random_factor = 0UL;
6948
6949+#ifdef CONFIG_PAX_RANDMMAP
6950+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6951+#endif
6952+
6953 if (current->flags & PF_RANDOMIZE) {
6954 random_factor = get_random_int();
6955 random_factor = random_factor << PAGE_SHIFT;
6956@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6957
6958 if (mmap_is_legacy()) {
6959 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6960+
6961+#ifdef CONFIG_PAX_RANDMMAP
6962+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6963+ mm->mmap_base += mm->delta_mmap;
6964+#endif
6965+
6966 mm->get_unmapped_area = arch_get_unmapped_area;
6967 } else {
6968 mm->mmap_base = mmap_base(random_factor);
6969+
6970+#ifdef CONFIG_PAX_RANDMMAP
6971+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6972+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6973+#endif
6974+
6975 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6976 }
6977 }
6978
6979-static inline unsigned long brk_rnd(void)
6980-{
6981- unsigned long rnd = get_random_int();
6982-
6983- rnd = rnd << PAGE_SHIFT;
6984- /* 8MB for 32bit, 256MB for 64bit */
6985- if (TASK_IS_32BIT_ADDR)
6986- rnd = rnd & 0x7ffffful;
6987- else
6988- rnd = rnd & 0xffffffful;
6989-
6990- return rnd;
6991-}
6992-
6993-unsigned long arch_randomize_brk(struct mm_struct *mm)
6994-{
6995- unsigned long base = mm->brk;
6996- unsigned long ret;
6997-
6998- ret = PAGE_ALIGN(base + brk_rnd());
6999-
7000- if (ret < mm->brk)
7001- return mm->brk;
7002-
7003- return ret;
7004-}
7005-
7006 int __virt_addr_valid(const volatile void *kaddr)
7007 {
7008 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
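Annotation: arch_pick_mmap_layout() above biases the mmap base by mm->delta_mmap in the legacy bottom-up layout, and by -(delta_mmap + delta_stack) in the top-down layout, on top of the stock random_factor. A toy version of that decision with made-up constants; it only approximates the real base computation:

#include <stdio.h>

#define TASK_UNMAPPED_BASE 0x2aaa8000UL   /* illustrative */
#define TASK_SIZE          0x80000000UL   /* illustrative */

static unsigned long pick_base(int legacy, unsigned long random_factor,
                               unsigned long delta_mmap,
                               unsigned long delta_stack)
{
    if (legacy)                       /* bottom-up: push the base up */
        return TASK_UNMAPPED_BASE + random_factor + delta_mmap;
    /* top-down: pull the base down, keeping room under the stack */
    return TASK_SIZE - random_factor - delta_mmap - delta_stack;
}

int main(void)
{
    printf("legacy base:  %#lx\n", pick_base(1, 0x400000, 0x1000000, 0));
    printf("topdown base: %#lx\n",
           pick_base(0, 0x400000, 0x1000000, 0x800000));
    return 0;
}

The same hunk also swaps the bare vma gap test for check_heap_stack_gap() and threads the gr_rand_threadstack_offset() value into vm_unmapped_area(), so thread stacks get per-thread gap randomization.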
7009diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7010index d07e041..bedb72b 100644
7011--- a/arch/mips/pci/pci-octeon.c
7012+++ b/arch/mips/pci/pci-octeon.c
7013@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7014
7015
7016 static struct pci_ops octeon_pci_ops = {
7017- octeon_read_config,
7018- octeon_write_config,
7019+ .read = octeon_read_config,
7020+ .write = octeon_write_config,
7021 };
7022
7023 static struct resource octeon_pci_mem_resource = {
7024diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7025index 5e36c33..eb4a17b 100644
7026--- a/arch/mips/pci/pcie-octeon.c
7027+++ b/arch/mips/pci/pcie-octeon.c
7028@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7029 }
7030
7031 static struct pci_ops octeon_pcie0_ops = {
7032- octeon_pcie0_read_config,
7033- octeon_pcie0_write_config,
7034+ .read = octeon_pcie0_read_config,
7035+ .write = octeon_pcie0_write_config,
7036 };
7037
7038 static struct resource octeon_pcie0_mem_resource = {
7039@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7040 };
7041
7042 static struct pci_ops octeon_pcie1_ops = {
7043- octeon_pcie1_read_config,
7044- octeon_pcie1_write_config,
7045+ .read = octeon_pcie1_read_config,
7046+ .write = octeon_pcie1_write_config,
7047 };
7048
7049 static struct resource octeon_pcie1_mem_resource = {
7050@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7051 };
7052
7053 static struct pci_ops octeon_dummy_ops = {
7054- octeon_dummy_read_config,
7055- octeon_dummy_write_config,
7056+ .read = octeon_dummy_read_config,
7057+ .write = octeon_dummy_write_config,
7058 };
7059
7060 static struct resource octeon_dummy_mem_resource = {
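Annotation: the pci_ops hunks here and in pci-octeon.c replace positional initializers with designated ones (.read = ..., .write = ...). The positional form silently mis-binds callbacks if a field is ever added to or reordered in struct pci_ops; the designated form ties each pointer to its field by name. A minimal demonstration with a stand-in ops struct:

#include <stdio.h>

struct ops {
    int (*read)(int reg);
    int (*write)(int reg, int val);
};

static int my_read(int reg)           { return reg; }
static int my_write(int reg, int val) { return reg + val; }

/* Positional: correct only while the field order never changes. */
static struct ops positional = { my_read, my_write };

/* Designated: each pointer is tied to its field by name. */
static struct ops designated = {
    .read  = my_read,
    .write = my_write,
};

int main(void)
{
    printf("%d %d\n", positional.read(1), designated.write(1, 2));
    return 0;
}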
7061diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7062index a2358b4..7cead4f 100644
7063--- a/arch/mips/sgi-ip27/ip27-nmi.c
7064+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7065@@ -187,9 +187,9 @@ void
7066 cont_nmi_dump(void)
7067 {
7068 #ifndef REAL_NMI_SIGNAL
7069- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7070+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7071
7072- atomic_inc(&nmied_cpus);
7073+ atomic_inc_unchecked(&nmied_cpus);
7074 #endif
7075 /*
7076 * Only allow 1 cpu to proceed
7077@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7078 udelay(10000);
7079 }
7080 #else
7081- while (atomic_read(&nmied_cpus) != num_online_cpus());
7082+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7083 #endif
7084
7085 /*
7086diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7087index a046b30..6799527 100644
7088--- a/arch/mips/sni/rm200.c
7089+++ b/arch/mips/sni/rm200.c
7090@@ -270,7 +270,7 @@ spurious_8259A_irq:
7091 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7092 spurious_irq_mask |= irqmask;
7093 }
7094- atomic_inc(&irq_err_count);
7095+ atomic_inc_unchecked(&irq_err_count);
7096 /*
7097 * Theoretically we do not have to handle this IRQ,
7098 * but in Linux this does not cause problems and is
7099diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7100index 41e873b..34d33a7 100644
7101--- a/arch/mips/vr41xx/common/icu.c
7102+++ b/arch/mips/vr41xx/common/icu.c
7103@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7104
7105 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7106
7107- atomic_inc(&irq_err_count);
7108+ atomic_inc_unchecked(&irq_err_count);
7109
7110 return -1;
7111 }
7112diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7113index ae0e4ee..e8f0692 100644
7114--- a/arch/mips/vr41xx/common/irq.c
7115+++ b/arch/mips/vr41xx/common/irq.c
7116@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7117 irq_cascade_t *cascade;
7118
7119 if (irq >= NR_IRQS) {
7120- atomic_inc(&irq_err_count);
7121+ atomic_inc_unchecked(&irq_err_count);
7122 return;
7123 }
7124
7125@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7126 ret = cascade->get_irq(irq);
7127 irq = ret;
7128 if (ret < 0)
7129- atomic_inc(&irq_err_count);
7130+ atomic_inc_unchecked(&irq_err_count);
7131 else
7132 irq_dispatch(irq);
7133 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7134diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7135index 967d144..db12197 100644
7136--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7137+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7138@@ -11,12 +11,14 @@
7139 #ifndef _ASM_PROC_CACHE_H
7140 #define _ASM_PROC_CACHE_H
7141
7142+#include <linux/const.h>
7143+
7144 /* L1 cache */
7145
7146 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7147 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7148-#define L1_CACHE_BYTES 16 /* bytes per entry */
7149 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7150+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7151 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7152
7153 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7154diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7155index bcb5df2..84fabd2 100644
7156--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7157+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7158@@ -16,13 +16,15 @@
7159 #ifndef _ASM_PROC_CACHE_H
7160 #define _ASM_PROC_CACHE_H
7161
7162+#include <linux/const.h>
7163+
7164 /*
7165 * L1 cache
7166 */
7167 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7168 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7169-#define L1_CACHE_BYTES 32 /* bytes per entry */
7170 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7171+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7172 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7173
7174 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7175diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7176index 4ce7a01..449202a 100644
7177--- a/arch/openrisc/include/asm/cache.h
7178+++ b/arch/openrisc/include/asm/cache.h
7179@@ -19,11 +19,13 @@
7180 #ifndef __ASM_OPENRISC_CACHE_H
7181 #define __ASM_OPENRISC_CACHE_H
7182
7183+#include <linux/const.h>
7184+
7185 /* FIXME: How can we replace these with values from the CPU...
7186 * they shouldn't be hard-coded!
7187 */
7188
7189-#define L1_CACHE_BYTES 16
7190 #define L1_CACHE_SHIFT 4
7191+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7192
7193 #endif /* __ASM_OPENRISC_CACHE_H */
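Annotation: the cache.h hunks derive L1_CACHE_BYTES from L1_CACHE_SHIFT via _AC(1,UL) << SHIFT instead of keeping a second hand-written constant, so the two can never drift apart, and _AC() drops the UL suffix when the header is pulled into assembly. A sketch of the _AC() idiom, simplified from the kernel's <uapi/linux/const.h>:

#ifdef __ASSEMBLY__
#define _AC(X, Y) X            /* assembler: bare constant, no suffix */
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y) __AC(X, Y)   /* C: constant with type suffix glued on */
#endif

#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)   /* 32, as unsigned long */

#include <stdio.h>
int main(void)
{
    printf("L1_CACHE_BYTES = %lu\n", L1_CACHE_BYTES);
    return 0;
}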
7194diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7195index 226f8ca..9d9b87d 100644
7196--- a/arch/parisc/include/asm/atomic.h
7197+++ b/arch/parisc/include/asm/atomic.h
7198@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7199 return dec;
7200 }
7201
7202+#define atomic64_read_unchecked(v) atomic64_read(v)
7203+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7204+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7205+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7206+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7207+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7208+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7209+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7210+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7211+
7212 #endif /* !CONFIG_64BIT */
7213
7214
7215diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7216index 47f11c7..3420df2 100644
7217--- a/arch/parisc/include/asm/cache.h
7218+++ b/arch/parisc/include/asm/cache.h
7219@@ -5,6 +5,7 @@
7220 #ifndef __ARCH_PARISC_CACHE_H
7221 #define __ARCH_PARISC_CACHE_H
7222
7223+#include <linux/const.h>
7224
7225 /*
7226 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7227@@ -15,13 +16,13 @@
7228 * just ruin performance.
7229 */
7230 #ifdef CONFIG_PA20
7231-#define L1_CACHE_BYTES 64
7232 #define L1_CACHE_SHIFT 6
7233 #else
7234-#define L1_CACHE_BYTES 32
7235 #define L1_CACHE_SHIFT 5
7236 #endif
7237
7238+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7239+
7240 #ifndef __ASSEMBLY__
7241
7242 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7243diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7244index 3391d06..c23a2cc 100644
7245--- a/arch/parisc/include/asm/elf.h
7246+++ b/arch/parisc/include/asm/elf.h
7247@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7248
7249 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7250
7251+#ifdef CONFIG_PAX_ASLR
7252+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7253+
7254+#define PAX_DELTA_MMAP_LEN 16
7255+#define PAX_DELTA_STACK_LEN 16
7256+#endif
7257+
7258 /* This yields a mask that user programs can use to figure out what
7259 instruction set this CPU supports. This could be done in user space,
7260 but it's not easy, and we've already done it here. */
7261diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7262index f213f5b..0af3e8e 100644
7263--- a/arch/parisc/include/asm/pgalloc.h
7264+++ b/arch/parisc/include/asm/pgalloc.h
7265@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7266 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7267 }
7268
7269+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7270+{
7271+ pgd_populate(mm, pgd, pmd);
7272+}
7273+
7274 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7275 {
7276 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7277@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7278 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7279 #define pmd_free(mm, x) do { } while (0)
7280 #define pgd_populate(mm, pmd, pte) BUG()
7281+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7282
7283 #endif
7284
7285diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7286index 22b89d1..ce34230 100644
7287--- a/arch/parisc/include/asm/pgtable.h
7288+++ b/arch/parisc/include/asm/pgtable.h
7289@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7290 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7291 #define PAGE_COPY PAGE_EXECREAD
7292 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7293+
7294+#ifdef CONFIG_PAX_PAGEEXEC
7295+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7296+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7297+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7298+#else
7299+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7300+# define PAGE_COPY_NOEXEC PAGE_COPY
7301+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7302+#endif
7303+
7304 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7305 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7306 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7307diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7308index a5cb070..8604ddc 100644
7309--- a/arch/parisc/include/asm/uaccess.h
7310+++ b/arch/parisc/include/asm/uaccess.h
7311@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7312 const void __user *from,
7313 unsigned long n)
7314 {
7315- int sz = __compiletime_object_size(to);
7316+ size_t sz = __compiletime_object_size(to);
7317 int ret = -EFAULT;
7318
7319- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7320+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7321 ret = __copy_from_user(to, from, n);
7322 else
7323 copy_from_user_overflow();
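Annotation: the uaccess.h fix matters because __compiletime_object_size() reports "unknown" as (size_t)-1; storing that in a signed int keeps the sentinel comparison working only by accident of integer promotion, and truncates any genuine object size above INT_MAX. A compact illustration of the trap, with values chosen to show the failure rather than taken from the kernel:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t n = 64;                     /* requested copy length */

    int    sz_old = -1;                /* old: int sentinel for "unknown" */
    size_t sz_new = (size_t)-1;        /* fixed: size_t sentinel */

    /* The -1 sentinel survives by accident via integer promotion... */
    printf("old sentinel check: %d\n", sz_old == -1 || (size_t)sz_old >= n);

    /* ...but a real object size above INT_MAX no longer fits in int. */
    size_t obj = 3UL * 1024 * 1024 * 1024;   /* 3 GiB, assumes 64-bit */
    int truncated = (int)obj;                /* implementation-defined */
    printf("3 GiB truncated to int: %d\n", truncated);

    printf("new sentinel check: %d\n", sz_new == (size_t)-1 || sz_new >= n);
    return 0;
}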
7324diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7325index 5822e8e..bc5e638 100644
7326--- a/arch/parisc/kernel/module.c
7327+++ b/arch/parisc/kernel/module.c
7328@@ -98,16 +98,38 @@
7329
7330 /* three functions to determine where in the module core
7331 * or init pieces the location is */
7332+static inline int in_init_rx(struct module *me, void *loc)
7333+{
7334+ return (loc >= me->module_init_rx &&
7335+ loc < (me->module_init_rx + me->init_size_rx));
7336+}
7337+
7338+static inline int in_init_rw(struct module *me, void *loc)
7339+{
7340+ return (loc >= me->module_init_rw &&
7341+ loc < (me->module_init_rw + me->init_size_rw));
7342+}
7343+
7344 static inline int in_init(struct module *me, void *loc)
7345 {
7346- return (loc >= me->module_init &&
7347- loc <= (me->module_init + me->init_size));
7348+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7349+}
7350+
7351+static inline int in_core_rx(struct module *me, void *loc)
7352+{
7353+ return (loc >= me->module_core_rx &&
7354+ loc < (me->module_core_rx + me->core_size_rx));
7355+}
7356+
7357+static inline int in_core_rw(struct module *me, void *loc)
7358+{
7359+ return (loc >= me->module_core_rw &&
7360+ loc < (me->module_core_rw + me->core_size_rw));
7361 }
7362
7363 static inline int in_core(struct module *me, void *loc)
7364 {
7365- return (loc >= me->module_core &&
7366- loc <= (me->module_core + me->core_size));
7367+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7368 }
7369
7370 static inline int in_local(struct module *me, void *loc)
7371@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7372 }
7373
7374 /* align things a bit */
7375- me->core_size = ALIGN(me->core_size, 16);
7376- me->arch.got_offset = me->core_size;
7377- me->core_size += gots * sizeof(struct got_entry);
7378+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7379+ me->arch.got_offset = me->core_size_rw;
7380+ me->core_size_rw += gots * sizeof(struct got_entry);
7381
7382- me->core_size = ALIGN(me->core_size, 16);
7383- me->arch.fdesc_offset = me->core_size;
7384- me->core_size += fdescs * sizeof(Elf_Fdesc);
7385+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7386+ me->arch.fdesc_offset = me->core_size_rw;
7387+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7388
7389 me->arch.got_max = gots;
7390 me->arch.fdesc_max = fdescs;
7391@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7392
7393 BUG_ON(value == 0);
7394
7395- got = me->module_core + me->arch.got_offset;
7396+ got = me->module_core_rw + me->arch.got_offset;
7397 for (i = 0; got[i].addr; i++)
7398 if (got[i].addr == value)
7399 goto out;
7400@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7401 #ifdef CONFIG_64BIT
7402 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7403 {
7404- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7405+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7406
7407 if (!value) {
7408 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7409@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7410
7411 /* Create new one */
7412 fdesc->addr = value;
7413- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7414+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7415 return (Elf_Addr)fdesc;
7416 }
7417 #endif /* CONFIG_64BIT */
7418@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7419
7420 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7421 end = table + sechdrs[me->arch.unwind_section].sh_size;
7422- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7423+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7424
7425 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7426 me->arch.unwind_section, table, end, gp);
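Annotation: with the module core split into module_core_rx (text, read-execute) and module_core_rw (data, GOT, function descriptors, read-write), in_core() becomes the union of two range checks and everything GOT-related is rebased onto the RW half. Note the rewrite also tightens the old "loc <= base + size" tests, which accepted one byte past the end, to strict half-open "<" comparisons. A distilled version of the range test:

#include <stdio.h>

struct module_layout {
    char *core_rx;  unsigned long size_rx;   /* text: R-X */
    char *core_rw;  unsigned long size_rw;   /* data, GOT: RW- */
};

static int in_range(void *loc, char *base, unsigned long size)
{
    char *p = loc;
    return p >= base && p < base + size;   /* half-open, as in the patch */
}

static int in_core(struct module_layout *m, void *loc)
{
    return in_range(loc, m->core_rx, m->size_rx) ||
           in_range(loc, m->core_rw, m->size_rw);
}

int main(void)
{
    static char backing[128];
    struct module_layout m = { backing, 64, backing + 64, 64 };

    printf("%d %d %d\n",
           in_core(&m, backing + 8),     /* in rx: 1 */
           in_core(&m, backing + 72),    /* in rw: 1 */
           in_core(&m, backing + 128));  /* one past the end: 0 */
    return 0;
}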
7427diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7428index e1ffea2..46ed66e 100644
7429--- a/arch/parisc/kernel/sys_parisc.c
7430+++ b/arch/parisc/kernel/sys_parisc.c
7431@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7432 unsigned long task_size = TASK_SIZE;
7433 int do_color_align, last_mmap;
7434 struct vm_unmapped_area_info info;
7435+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7436
7437 if (len > task_size)
7438 return -ENOMEM;
7439@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7440 goto found_addr;
7441 }
7442
7443+#ifdef CONFIG_PAX_RANDMMAP
7444+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7445+#endif
7446+
7447 if (addr) {
7448 if (do_color_align && last_mmap)
7449 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7450@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7451 info.high_limit = mmap_upper_limit();
7452 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7453 info.align_offset = shared_align_offset(last_mmap, pgoff);
7454+ info.threadstack_offset = offset;
7455 addr = vm_unmapped_area(&info);
7456
7457 found_addr:
7458@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7459 unsigned long addr = addr0;
7460 int do_color_align, last_mmap;
7461 struct vm_unmapped_area_info info;
7462+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7463
7464 #ifdef CONFIG_64BIT
7465 /* This should only ever run for 32-bit processes. */
7466@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7467 }
7468
7469 /* requesting a specific address */
7470+#ifdef CONFIG_PAX_RANDMMAP
7471+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7472+#endif
7473+
7474 if (addr) {
7475 if (do_color_align && last_mmap)
7476 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7477@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7478 info.high_limit = mm->mmap_base;
7479 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7480 info.align_offset = shared_align_offset(last_mmap, pgoff);
7481+ info.threadstack_offset = offset;
7482 addr = vm_unmapped_area(&info);
7483 if (!(addr & ~PAGE_MASK))
7484 goto found_addr;
7485@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7486 mm->mmap_legacy_base = mmap_legacy_base();
7487 mm->mmap_base = mmap_upper_limit();
7488
7489+#ifdef CONFIG_PAX_RANDMMAP
7490+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7491+ mm->mmap_legacy_base += mm->delta_mmap;
7492+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7493+ }
7494+#endif
7495+
7496 if (mmap_is_legacy()) {
7497 mm->mmap_base = mm->mmap_legacy_base;
7498 mm->get_unmapped_area = arch_get_unmapped_area;
7499diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7500index 47ee620..1107387 100644
7501--- a/arch/parisc/kernel/traps.c
7502+++ b/arch/parisc/kernel/traps.c
7503@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7504
7505 down_read(&current->mm->mmap_sem);
7506 vma = find_vma(current->mm,regs->iaoq[0]);
7507- if (vma && (regs->iaoq[0] >= vma->vm_start)
7508- && (vma->vm_flags & VM_EXEC)) {
7509-
7510+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7511 fault_address = regs->iaoq[0];
7512 fault_space = regs->iasq[0];
7513
7514diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7515index e5120e6..8ddb5cc 100644
7516--- a/arch/parisc/mm/fault.c
7517+++ b/arch/parisc/mm/fault.c
7518@@ -15,6 +15,7 @@
7519 #include <linux/sched.h>
7520 #include <linux/interrupt.h>
7521 #include <linux/module.h>
7522+#include <linux/unistd.h>
7523
7524 #include <asm/uaccess.h>
7525 #include <asm/traps.h>
7526@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7527 static unsigned long
7528 parisc_acctyp(unsigned long code, unsigned int inst)
7529 {
7530- if (code == 6 || code == 16)
7531+ if (code == 6 || code == 7 || code == 16)
7532 return VM_EXEC;
7533
7534 switch (inst & 0xf0000000) {
7535@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7536 }
7537 #endif
7538
7539+#ifdef CONFIG_PAX_PAGEEXEC
7540+/*
7541+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7542+ *
7543+ * returns 1 when task should be killed
7544+ * 2 when rt_sigreturn trampoline was detected
7545+ * 3 when unpatched PLT trampoline was detected
7546+ */
7547+static int pax_handle_fetch_fault(struct pt_regs *regs)
7548+{
7549+
7550+#ifdef CONFIG_PAX_EMUPLT
7551+ int err;
7552+
7553+ do { /* PaX: unpatched PLT emulation */
7554+ unsigned int bl, depwi;
7555+
7556+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7557+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7558+
7559+ if (err)
7560+ break;
7561+
7562+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7563+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7564+
7565+ err = get_user(ldw, (unsigned int *)addr);
7566+ err |= get_user(bv, (unsigned int *)(addr+4));
7567+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7568+
7569+ if (err)
7570+ break;
7571+
7572+ if (ldw == 0x0E801096U &&
7573+ bv == 0xEAC0C000U &&
7574+ ldw2 == 0x0E881095U)
7575+ {
7576+ unsigned int resolver, map;
7577+
7578+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7579+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7580+ if (err)
7581+ break;
7582+
7583+ regs->gr[20] = instruction_pointer(regs)+8;
7584+ regs->gr[21] = map;
7585+ regs->gr[22] = resolver;
7586+ regs->iaoq[0] = resolver | 3UL;
7587+ regs->iaoq[1] = regs->iaoq[0] + 4;
7588+ return 3;
7589+ }
7590+ }
7591+ } while (0);
7592+#endif
7593+
7594+#ifdef CONFIG_PAX_EMUTRAMP
7595+
7596+#ifndef CONFIG_PAX_EMUSIGRT
7597+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7598+ return 1;
7599+#endif
7600+
7601+ do { /* PaX: rt_sigreturn emulation */
7602+ unsigned int ldi1, ldi2, bel, nop;
7603+
7604+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7605+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7606+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7607+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7608+
7609+ if (err)
7610+ break;
7611+
7612+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7613+ ldi2 == 0x3414015AU &&
7614+ bel == 0xE4008200U &&
7615+ nop == 0x08000240U)
7616+ {
7617+ regs->gr[25] = (ldi1 & 2) >> 1;
7618+ regs->gr[20] = __NR_rt_sigreturn;
7619+ regs->gr[31] = regs->iaoq[1] + 16;
7620+ regs->sr[0] = regs->iasq[1];
7621+ regs->iaoq[0] = 0x100UL;
7622+ regs->iaoq[1] = regs->iaoq[0] + 4;
7623+ regs->iasq[0] = regs->sr[2];
7624+ regs->iasq[1] = regs->sr[2];
7625+ return 2;
7626+ }
7627+ } while (0);
7628+#endif
7629+
7630+ return 1;
7631+}
7632+
7633+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7634+{
7635+ unsigned long i;
7636+
7637+ printk(KERN_ERR "PAX: bytes at PC: ");
7638+ for (i = 0; i < 5; i++) {
7639+ unsigned int c;
7640+ if (get_user(c, (unsigned int *)pc+i))
7641+ printk(KERN_CONT "???????? ");
7642+ else
7643+ printk(KERN_CONT "%08x ", c);
7644+ }
7645+ printk("\n");
7646+}
7647+#endif
7648+
7649 int fixup_exception(struct pt_regs *regs)
7650 {
7651 const struct exception_table_entry *fix;
7652@@ -234,8 +345,33 @@ retry:
7653
7654 good_area:
7655
7656- if ((vma->vm_flags & acc_type) != acc_type)
7657+ if ((vma->vm_flags & acc_type) != acc_type) {
7658+
7659+#ifdef CONFIG_PAX_PAGEEXEC
7660+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7661+ (address & ~3UL) == instruction_pointer(regs))
7662+ {
7663+ up_read(&mm->mmap_sem);
7664+ switch (pax_handle_fetch_fault(regs)) {
7665+
7666+#ifdef CONFIG_PAX_EMUPLT
7667+ case 3:
7668+ return;
7669+#endif
7670+
7671+#ifdef CONFIG_PAX_EMUTRAMP
7672+ case 2:
7673+ return;
7674+#endif
7675+
7676+ }
7677+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7678+ do_group_exit(SIGKILL);
7679+ }
7680+#endif
7681+
7682 goto bad_area;
7683+ }
7684
7685 /*
7686 * If for any reason at all we couldn't handle the fault, make
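Annotation: pax_handle_fetch_fault() above recognizes the unpatched-PLT and rt_sigreturn trampolines purely by fetching instruction words with get_user() and comparing them against fixed parisc encodings, then simulates the stub's effect on the register file. A skeleton of the matching logic, with the opcode constants carried over from the hunk, the register writes reduced to prints, and the fetch helper standing in for get_user():

#include <stdio.h>

/* parisc opcode encodings matched by the patch's EMUPLT path. */
#define BL_PLT    0xEA9F1FDDU
#define DEPWI_PLT 0xD6801C1EU

struct fake_regs { unsigned int pc; };

/* Stand-in instruction fetch; the kernel uses get_user() here. */
static int fetch(const unsigned int *text, unsigned int pc,
                 unsigned int *out)
{
    *out = text[pc / 4];
    return 0;
}

static int handle_fetch_fault(const unsigned int *text,
                              struct fake_regs *regs)
{
    unsigned int bl, depwi;

    if (fetch(text, regs->pc, &bl) ||
        fetch(text, regs->pc + 4, &depwi))
        return 1;                       /* unreadable: kill the task */

    if (bl == BL_PLT && depwi == DEPWI_PLT) {
        printf("unpatched PLT stub at pc=%u: emulate resolver call\n",
               regs->pc);
        return 3;                       /* verdict code, as in the patch */
    }
    return 1;
}

int main(void)
{
    unsigned int text[] = { BL_PLT, DEPWI_PLT };
    struct fake_regs regs = { 0 };
    printf("verdict: %d\n", handle_fetch_fault(text, &regs));
    return 0;
}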
7687diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7688index a2a168e..e484682 100644
7689--- a/arch/powerpc/Kconfig
7690+++ b/arch/powerpc/Kconfig
7691@@ -408,6 +408,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7692 config KEXEC
7693 bool "kexec system call"
7694 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7695+ depends on !GRKERNSEC_KMEM
7696 help
7697 kexec is a system call that implements the ability to shutdown your
7698 current kernel, and to start another kernel. It is like a reboot
7699diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7700index 512d278..d31fadd 100644
7701--- a/arch/powerpc/include/asm/atomic.h
7702+++ b/arch/powerpc/include/asm/atomic.h
7703@@ -12,6 +12,11 @@
7704
7705 #define ATOMIC_INIT(i) { (i) }
7706
7707+#define _ASM_EXTABLE(from, to) \
7708+" .section __ex_table,\"a\"\n" \
7709+ PPC_LONG" " #from ", " #to"\n" \
7710+" .previous\n"
7711+
7712 static __inline__ int atomic_read(const atomic_t *v)
7713 {
7714 int t;
7715@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7716 return t;
7717 }
7718
7719+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7720+{
7721+ int t;
7722+
7723+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7724+
7725+ return t;
7726+}
7727+
7728 static __inline__ void atomic_set(atomic_t *v, int i)
7729 {
7730 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7731 }
7732
7733-#define ATOMIC_OP(op, asm_op) \
7734-static __inline__ void atomic_##op(int a, atomic_t *v) \
7735+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7736+{
7737+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7738+}
7739+
7740+#ifdef CONFIG_PAX_REFCOUNT
7741+#define __REFCOUNT_OP(op) op##o.
7742+#define __OVERFLOW_PRE \
7743+ " mcrxr cr0\n"
7744+#define __OVERFLOW_POST \
7745+ " bf 4*cr0+so, 3f\n" \
7746+ "2: .long 0x00c00b00\n" \
7747+ "3:\n"
7748+#define __OVERFLOW_EXTABLE \
7749+ "\n4:\n" \
7750+ _ASM_EXTABLE(2b, 4b)
7751+#else
7752+#define __REFCOUNT_OP(op) op
7753+#define __OVERFLOW_PRE
7754+#define __OVERFLOW_POST
7755+#define __OVERFLOW_EXTABLE
7756+#endif
7757+
7758+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7759+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7760 { \
7761 int t; \
7762 \
7763 __asm__ __volatile__( \
7764-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7765+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7766+ pre_op \
7767 #asm_op " %0,%2,%0\n" \
7768+ post_op \
7769 PPC405_ERR77(0,%3) \
7770 " stwcx. %0,0,%3 \n" \
7771 " bne- 1b\n" \
7772+ extable \
7773 : "=&r" (t), "+m" (v->counter) \
7774 : "r" (a), "r" (&v->counter) \
7775 : "cc"); \
7776 } \
7777
7778-#define ATOMIC_OP_RETURN(op, asm_op) \
7779-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7780+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7781+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7782+
7783+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7784+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7785 { \
7786 int t; \
7787 \
7788 __asm__ __volatile__( \
7789 PPC_ATOMIC_ENTRY_BARRIER \
7790-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7791+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7792+ pre_op \
7793 #asm_op " %0,%1,%0\n" \
7794+ post_op \
7795 PPC405_ERR77(0,%2) \
7796 " stwcx. %0,0,%2 \n" \
7797 " bne- 1b\n" \
7798+ extable \
7799 PPC_ATOMIC_EXIT_BARRIER \
7800 : "=&r" (t) \
7801 : "r" (a), "r" (&v->counter) \
7802@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7803 return t; \
7804 }
7805
7806+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7807+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7808+
7809 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7810
7811 ATOMIC_OPS(add, add)
7812@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7813
7814 #undef ATOMIC_OPS
7815 #undef ATOMIC_OP_RETURN
7816+#undef __ATOMIC_OP_RETURN
7817 #undef ATOMIC_OP
7818+#undef __ATOMIC_OP
7819
7820 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7821
7822-static __inline__ void atomic_inc(atomic_t *v)
7823-{
7824- int t;
7825+/*
7826+ * atomic_inc - increment atomic variable
7827+ * @v: pointer of type atomic_t
7828+ *
7829+ * Atomically increments @v by 1
7830+ */
7831+#define atomic_inc(v) atomic_add(1, (v))
7832+#define atomic_inc_return(v) atomic_add_return(1, (v))
7833
7834- __asm__ __volatile__(
7835-"1: lwarx %0,0,%2 # atomic_inc\n\
7836- addic %0,%0,1\n"
7837- PPC405_ERR77(0,%2)
7838-" stwcx. %0,0,%2 \n\
7839- bne- 1b"
7840- : "=&r" (t), "+m" (v->counter)
7841- : "r" (&v->counter)
7842- : "cc", "xer");
7843+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7844+{
7845+ atomic_add_unchecked(1, v);
7846 }
7847
7848-static __inline__ int atomic_inc_return(atomic_t *v)
7849+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7850 {
7851- int t;
7852-
7853- __asm__ __volatile__(
7854- PPC_ATOMIC_ENTRY_BARRIER
7855-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7856- addic %0,%0,1\n"
7857- PPC405_ERR77(0,%1)
7858-" stwcx. %0,0,%1 \n\
7859- bne- 1b"
7860- PPC_ATOMIC_EXIT_BARRIER
7861- : "=&r" (t)
7862- : "r" (&v->counter)
7863- : "cc", "xer", "memory");
7864-
7865- return t;
7866+ return atomic_add_return_unchecked(1, v);
7867 }
7868
7869 /*
7870@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7871 */
7872 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7873
7874-static __inline__ void atomic_dec(atomic_t *v)
7875+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7876 {
7877- int t;
7878-
7879- __asm__ __volatile__(
7880-"1: lwarx %0,0,%2 # atomic_dec\n\
7881- addic %0,%0,-1\n"
7882- PPC405_ERR77(0,%2)\
7883-" stwcx. %0,0,%2\n\
7884- bne- 1b"
7885- : "=&r" (t), "+m" (v->counter)
7886- : "r" (&v->counter)
7887- : "cc", "xer");
7888+ return atomic_add_return_unchecked(1, v) == 0;
7889 }
7890
7891-static __inline__ int atomic_dec_return(atomic_t *v)
7892+/*
7893+ * atomic_dec - decrement atomic variable
7894+ * @v: pointer of type atomic_t
7895+ *
7896+ * Atomically decrements @v by 1
7897+ */
7898+#define atomic_dec(v) atomic_sub(1, (v))
7899+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7900+
7901+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7902 {
7903- int t;
7904-
7905- __asm__ __volatile__(
7906- PPC_ATOMIC_ENTRY_BARRIER
7907-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7908- addic %0,%0,-1\n"
7909- PPC405_ERR77(0,%1)
7910-" stwcx. %0,0,%1\n\
7911- bne- 1b"
7912- PPC_ATOMIC_EXIT_BARRIER
7913- : "=&r" (t)
7914- : "r" (&v->counter)
7915- : "cc", "xer", "memory");
7916-
7917- return t;
7918+ atomic_sub_unchecked(1, v);
7919 }
7920
7921 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7922 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7923
7924+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7925+{
7926+ return cmpxchg(&(v->counter), old, new);
7927+}
7928+
7929+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7930+{
7931+ return xchg(&(v->counter), new);
7932+}
7933+
7934 /**
7935 * __atomic_add_unless - add unless the number is a given value
7936 * @v: pointer of type atomic_t
7937@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7938 PPC_ATOMIC_ENTRY_BARRIER
7939 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7940 cmpw 0,%0,%3 \n\
7941- beq- 2f \n\
7942- add %0,%2,%0 \n"
7943+ beq- 2f \n"
7944+
7945+#ifdef CONFIG_PAX_REFCOUNT
7946+" mcrxr cr0\n"
7947+" addo. %0,%2,%0\n"
7948+" bf 4*cr0+so, 4f\n"
7949+"3:.long " "0x00c00b00""\n"
7950+"4:\n"
7951+#else
7952+ "add %0,%2,%0 \n"
7953+#endif
7954+
7955 PPC405_ERR77(0,%2)
7956 " stwcx. %0,0,%1 \n\
7957 bne- 1b \n"
7958+"5:"
7959+
7960+#ifdef CONFIG_PAX_REFCOUNT
7961+ _ASM_EXTABLE(3b, 5b)
7962+#endif
7963+
7964 PPC_ATOMIC_EXIT_BARRIER
7965 " subf %0,%2,%0 \n\
7966 2:"
7967@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
7968 }
7969 #define atomic_dec_if_positive atomic_dec_if_positive
7970
7971+#define smp_mb__before_atomic_dec() smp_mb()
7972+#define smp_mb__after_atomic_dec() smp_mb()
7973+#define smp_mb__before_atomic_inc() smp_mb()
7974+#define smp_mb__after_atomic_inc() smp_mb()
7975+
7976 #ifdef __powerpc64__
7977
7978 #define ATOMIC64_INIT(i) { (i) }
7979@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
7980 return t;
7981 }
7982
7983+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7984+{
7985+ long t;
7986+
7987+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7988+
7989+ return t;
7990+}
7991+
7992 static __inline__ void atomic64_set(atomic64_t *v, long i)
7993 {
7994 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7995 }
7996
7997-#define ATOMIC64_OP(op, asm_op) \
7998-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
7999+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8000+{
8001+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8002+}
8003+
8004+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
8005+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
8006 { \
8007 long t; \
8008 \
8009 __asm__ __volatile__( \
8010 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8011+ pre_op \
8012 #asm_op " %0,%2,%0\n" \
8013+ post_op \
8014 " stdcx. %0,0,%3 \n" \
8015 " bne- 1b\n" \
8016+ extable \
8017 : "=&r" (t), "+m" (v->counter) \
8018 : "r" (a), "r" (&v->counter) \
8019 : "cc"); \
8020 }
8021
8022-#define ATOMIC64_OP_RETURN(op, asm_op) \
8023-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8024+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8025+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8026+
8027+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8028+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8029 { \
8030 long t; \
8031 \
8032 __asm__ __volatile__( \
8033 PPC_ATOMIC_ENTRY_BARRIER \
8034 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8035+ pre_op \
8036 #asm_op " %0,%1,%0\n" \
8037+ post_op \
8038 " stdcx. %0,0,%2 \n" \
8039 " bne- 1b\n" \
8040+ extable \
8041 PPC_ATOMIC_EXIT_BARRIER \
8042 : "=&r" (t) \
8043 : "r" (a), "r" (&v->counter) \
8044@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8045 return t; \
8046 }
8047
8048+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8049+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8050+
8051 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8052
8053 ATOMIC64_OPS(add, add)
8054@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8055
8056 #undef ATOMIC64_OPS
8057 #undef ATOMIC64_OP_RETURN
8058+#undef __ATOMIC64_OP_RETURN
8059 #undef ATOMIC64_OP
8060+#undef __ATOMIC64_OP
8061+#undef __OVERFLOW_EXTABLE
8062+#undef __OVERFLOW_POST
8063+#undef __OVERFLOW_PRE
8064+#undef __REFCOUNT_OP
8065
8066 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8067
8068-static __inline__ void atomic64_inc(atomic64_t *v)
8069-{
8070- long t;
8071+/*
8072+ * atomic64_inc - increment atomic variable
8073+ * @v: pointer of type atomic64_t
8074+ *
8075+ * Automatically increments @v by 1
8076+ */
8077+#define atomic64_inc(v) atomic64_add(1, (v))
8078+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8079
8080- __asm__ __volatile__(
8081-"1: ldarx %0,0,%2 # atomic64_inc\n\
8082- addic %0,%0,1\n\
8083- stdcx. %0,0,%2 \n\
8084- bne- 1b"
8085- : "=&r" (t), "+m" (v->counter)
8086- : "r" (&v->counter)
8087- : "cc", "xer");
8088+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8089+{
8090+ atomic64_add_unchecked(1, v);
8091 }
8092
8093-static __inline__ long atomic64_inc_return(atomic64_t *v)
8094+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8095 {
8096- long t;
8097-
8098- __asm__ __volatile__(
8099- PPC_ATOMIC_ENTRY_BARRIER
8100-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8101- addic %0,%0,1\n\
8102- stdcx. %0,0,%1 \n\
8103- bne- 1b"
8104- PPC_ATOMIC_EXIT_BARRIER
8105- : "=&r" (t)
8106- : "r" (&v->counter)
8107- : "cc", "xer", "memory");
8108-
8109- return t;
8110+ return atomic64_add_return_unchecked(1, v);
8111 }
8112
8113 /*
8114@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8115 */
8116 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8117
8118-static __inline__ void atomic64_dec(atomic64_t *v)
8119+/*
8120+ * atomic64_dec - decrement atomic variable
8121+ * @v: pointer of type atomic64_t
8122+ *
8123+ * Atomically decrements @v by 1
8124+ */
8125+#define atomic64_dec(v) atomic64_sub(1, (v))
8126+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8127+
8128+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8129 {
8130- long t;
8131-
8132- __asm__ __volatile__(
8133-"1: ldarx %0,0,%2 # atomic64_dec\n\
8134- addic %0,%0,-1\n\
8135- stdcx. %0,0,%2\n\
8136- bne- 1b"
8137- : "=&r" (t), "+m" (v->counter)
8138- : "r" (&v->counter)
8139- : "cc", "xer");
8140-}
8141-
8142-static __inline__ long atomic64_dec_return(atomic64_t *v)
8143-{
8144- long t;
8145-
8146- __asm__ __volatile__(
8147- PPC_ATOMIC_ENTRY_BARRIER
8148-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8149- addic %0,%0,-1\n\
8150- stdcx. %0,0,%1\n\
8151- bne- 1b"
8152- PPC_ATOMIC_EXIT_BARRIER
8153- : "=&r" (t)
8154- : "r" (&v->counter)
8155- : "cc", "xer", "memory");
8156-
8157- return t;
8158+ atomic64_sub_unchecked(1, v);
8159 }
8160
8161 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8162@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8163 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8164 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8165
8166+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8167+{
8168+ return cmpxchg(&(v->counter), old, new);
8169+}
8170+
8171+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8172+{
8173+ return xchg(&(v->counter), new);
8174+}
8175+
8176 /**
8177 * atomic64_add_unless - add unless the number is a given value
8178 * @v: pointer of type atomic64_t
8179@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8180
8181 __asm__ __volatile__ (
8182 PPC_ATOMIC_ENTRY_BARRIER
8183-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8184+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8185 cmpd 0,%0,%3 \n\
8186- beq- 2f \n\
8187- add %0,%2,%0 \n"
8188+ beq- 2f \n"
8189+
8190+#ifdef CONFIG_PAX_REFCOUNT
8191+" mcrxr cr0\n"
8192+" addo. %0,%2,%0\n"
8193+" bf 4*cr0+so, 4f\n"
8194+"3:.long " "0x00c00b00""\n"
8195+"4:\n"
8196+#else
8197+ "add %0,%2,%0 \n"
8198+#endif
8199+
8200 " stdcx. %0,0,%1 \n\
8201 bne- 1b \n"
8202 PPC_ATOMIC_EXIT_BARRIER
8203+"5:"
8204+
8205+#ifdef CONFIG_PAX_REFCOUNT
8206+ _ASM_EXTABLE(3b, 5b)
8207+#endif
8208+
8209 " subf %0,%2,%0 \n\
8210 2:"
8211 : "=&r" (t)
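Annotation: the powerpc REFCOUNT machinery reads as follows: mcrxr clears XER's sticky summary-overflow bit into cr0, addo. performs the add while recording overflow, bf 4*cr0+so skips the trap when no overflow is flagged, and the .long 0x00c00b00 word is an invalid instruction whose address is wired into __ex_table so the trap handler can report and resume. Since that sequence only runs on PPC, here is a portable C analogue of "add, detect signed overflow, trap", assuming a GCC/Clang builtin:

#include <stdio.h>
#include <limits.h>

/* Portable stand-in for the patch's mcrxr/addo./bf sequence. */
static int checked_add(int a, int b)
{
    int sum;
    if (__builtin_add_overflow(a, b, &sum)) {
        /* kernel: execute 0x00c00b00, land in the exception table */
        fprintf(stderr, "refcount overflow: %d + %d\n", a, b);
        __builtin_trap();
    }
    return sum;
}

int main(void)
{
    printf("ok: %d\n", checked_add(1, 2));
    printf("about to overflow...\n");
    checked_add(INT_MAX, 1);   /* traps */
    return 0;
}

The _unchecked macro variants expand the same templates with the pre/post/extable arguments empty, which is how one macro body serves both the trapping and the wrapping flavor.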
8212diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8213index a3bf5be..e03ba81 100644
8214--- a/arch/powerpc/include/asm/barrier.h
8215+++ b/arch/powerpc/include/asm/barrier.h
8216@@ -76,7 +76,7 @@
8217 do { \
8218 compiletime_assert_atomic_type(*p); \
8219 smp_lwsync(); \
8220- ACCESS_ONCE(*p) = (v); \
8221+ ACCESS_ONCE_RW(*p) = (v); \
8222 } while (0)
8223
8224 #define smp_load_acquire(p) \
8225diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8226index 34a05a1..a1f2c67 100644
8227--- a/arch/powerpc/include/asm/cache.h
8228+++ b/arch/powerpc/include/asm/cache.h
8229@@ -4,6 +4,7 @@
8230 #ifdef __KERNEL__
8231
8232 #include <asm/reg.h>
8233+#include <linux/const.h>
8234
8235 /* bytes per L1 cache line */
8236 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8237@@ -23,7 +24,7 @@
8238 #define L1_CACHE_SHIFT 7
8239 #endif
8240
8241-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8242+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8243
8244 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8245
8246diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8247index 57d289a..b36c98c 100644
8248--- a/arch/powerpc/include/asm/elf.h
8249+++ b/arch/powerpc/include/asm/elf.h
8250@@ -30,6 +30,18 @@
8251
8252 #define ELF_ET_DYN_BASE 0x20000000
8253
8254+#ifdef CONFIG_PAX_ASLR
8255+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8256+
8257+#ifdef __powerpc64__
8258+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8259+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8260+#else
8261+#define PAX_DELTA_MMAP_LEN 15
8262+#define PAX_DELTA_STACK_LEN 15
8263+#endif
8264+#endif
8265+
8266 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8267
8268 /*
8269@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8270 (0x7ff >> (PAGE_SHIFT - 12)) : \
8271 (0x3ffff >> (PAGE_SHIFT - 12)))
8272
8273-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8274-#define arch_randomize_brk arch_randomize_brk
8275-
8276-
8277 #ifdef CONFIG_SPU_BASE
8278 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8279 #define NT_SPU 1
8280diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8281index 8196e9c..d83a9f3 100644
8282--- a/arch/powerpc/include/asm/exec.h
8283+++ b/arch/powerpc/include/asm/exec.h
8284@@ -4,6 +4,6 @@
8285 #ifndef _ASM_POWERPC_EXEC_H
8286 #define _ASM_POWERPC_EXEC_H
8287
8288-extern unsigned long arch_align_stack(unsigned long sp);
8289+#define arch_align_stack(x) ((x) & ~0xfUL)
8290
8291 #endif /* _ASM_POWERPC_EXEC_H */
8292diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8293index 5acabbd..7ea14fa 100644
8294--- a/arch/powerpc/include/asm/kmap_types.h
8295+++ b/arch/powerpc/include/asm/kmap_types.h
8296@@ -10,7 +10,7 @@
8297 * 2 of the License, or (at your option) any later version.
8298 */
8299
8300-#define KM_TYPE_NR 16
8301+#define KM_TYPE_NR 17
8302
8303 #endif /* __KERNEL__ */
8304 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8305diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8306index b8da913..c02b593 100644
8307--- a/arch/powerpc/include/asm/local.h
8308+++ b/arch/powerpc/include/asm/local.h
8309@@ -9,21 +9,65 @@ typedef struct
8310 atomic_long_t a;
8311 } local_t;
8312
8313+typedef struct
8314+{
8315+ atomic_long_unchecked_t a;
8316+} local_unchecked_t;
8317+
8318 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8319
8320 #define local_read(l) atomic_long_read(&(l)->a)
8321+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8322 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8323+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8324
8325 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8326+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8327 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8328+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8329 #define local_inc(l) atomic_long_inc(&(l)->a)
8330+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8331 #define local_dec(l) atomic_long_dec(&(l)->a)
8332+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8333
8334 static __inline__ long local_add_return(long a, local_t *l)
8335 {
8336 long t;
8337
8338 __asm__ __volatile__(
8339+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8340+
8341+#ifdef CONFIG_PAX_REFCOUNT
8342+" mcrxr cr0\n"
8343+" addo. %0,%1,%0\n"
8344+" bf 4*cr0+so, 3f\n"
8345+"2:.long " "0x00c00b00""\n"
8346+#else
8347+" add %0,%1,%0\n"
8348+#endif
8349+
8350+"3:\n"
8351+ PPC405_ERR77(0,%2)
8352+ PPC_STLCX "%0,0,%2 \n\
8353+ bne- 1b"
8354+
8355+#ifdef CONFIG_PAX_REFCOUNT
8356+"\n4:\n"
8357+ _ASM_EXTABLE(2b, 4b)
8358+#endif
8359+
8360+ : "=&r" (t)
8361+ : "r" (a), "r" (&(l->a.counter))
8362+ : "cc", "memory");
8363+
8364+ return t;
8365+}
8366+
8367+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8368+{
8369+ long t;
8370+
8371+ __asm__ __volatile__(
8372 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8373 add %0,%1,%0\n"
8374 PPC405_ERR77(0,%2)
8375@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8376
8377 #define local_cmpxchg(l, o, n) \
8378 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8379+#define local_cmpxchg_unchecked(l, o, n) \
8380+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8381 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8382
8383 /**
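
The REFCOUNT variant of local_add_return() above uses addo. to perform the add with overflow recording enabled, bf 4*cr0+so to skip ahead when the summary-overflow bit is clear, and the reserved word 0x00c00b00 as a trap that the program_check_exception() hunk later in this patch decodes and fixes up via the exception table. A portable C sketch of the same policy, detect the wrap and refuse it, using the GCC/Clang checked-add builtin in place of the trapping instruction:

#include <limits.h>
#include <stdio.h>

/* Sketch of the PAX_REFCOUNT idea in plain C: detect signed overflow
 * on the add and refuse to wrap, rather than letting a reference
 * counter overflow into a use-after-free. The kernel version does
 * this inside one ll/sc sequence with a trapping instruction. */
static long checked_add_return(long a, long *counter)
{
    long old = *counter, sum;

    if (__builtin_add_overflow(old, a, &sum)) {
        /* kernel path: trap -> pax_report_refcount_overflow();
         * here we just saturate and report */
        fprintf(stderr, "refcount overflow detected\n");
        return old;
    }
    *counter = sum;
    return sum;
}

int main(void)
{
    long c = LONG_MAX - 1;
    printf("%ld\n", checked_add_return(1, &c));  /* ok: LONG_MAX */
    printf("%ld\n", checked_add_return(1, &c));  /* trips the check */
    return 0;
}
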
8384diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8385index 8565c25..2865190 100644
8386--- a/arch/powerpc/include/asm/mman.h
8387+++ b/arch/powerpc/include/asm/mman.h
8388@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8389 }
8390 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8391
8392-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8393+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8394 {
8395 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8396 }
8397diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8398index 69c0598..2c56964 100644
8399--- a/arch/powerpc/include/asm/page.h
8400+++ b/arch/powerpc/include/asm/page.h
8401@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8402 * and needs to be executable. This means the whole heap ends
8403 * up being executable.
8404 */
8405-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8406- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8407+#define VM_DATA_DEFAULT_FLAGS32 \
8408+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8409+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8410
8411 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8412 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8413@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8414 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8415 #endif
8416
8417+#define ktla_ktva(addr) (addr)
8418+#define ktva_ktla(addr) (addr)
8419+
8420 #ifndef CONFIG_PPC_BOOK3S_64
8421 /*
8422 * Use the top bit of the higher-level page table entries to indicate whether
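
With this hunk VM_EXEC enters the default data-segment flags only when the ELF loader set READ_IMPLIES_EXEC in the personality (i.e. for binaries lacking a PT_GNU_STACK header), rather than unconditionally, which is what lets PAGEEXEC keep heap pages non-executable for modern binaries. A sketch of the flag computation with the usual VM_* encodings stubbed in:

#include <stdio.h>

/* Usual kernel encodings, stubbed for the sketch */
#define VM_READ     0x0001UL
#define VM_WRITE    0x0002UL
#define VM_EXEC     0x0004UL
#define VM_MAYREAD  0x0010UL
#define VM_MAYWRITE 0x0020UL
#define VM_MAYEXEC  0x0040UL

#define READ_IMPLIES_EXEC 0x0400000   /* personality bit */

static unsigned long vm_data_default_flags(unsigned int personality)
{
    return ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |
           VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
}

int main(void)
{
    printf("modern binary: %#lx (no VM_EXEC)\n", vm_data_default_flags(0));
    printf("legacy binary: %#lx (VM_EXEC set)\n",
           vm_data_default_flags(READ_IMPLIES_EXEC));
    return 0;
}
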
8423diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8424index d908a46..3753f71 100644
8425--- a/arch/powerpc/include/asm/page_64.h
8426+++ b/arch/powerpc/include/asm/page_64.h
8427@@ -172,15 +172,18 @@ do { \
8428 * stack by default, so in the absence of a PT_GNU_STACK program header
8429 * we turn execute permission off.
8430 */
8431-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8432- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8433+#define VM_STACK_DEFAULT_FLAGS32 \
8434+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8435+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8436
8437 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8438 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8439
8440+#ifndef CONFIG_PAX_PAGEEXEC
8441 #define VM_STACK_DEFAULT_FLAGS \
8442 (is_32bit_task() ? \
8443 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8444+#endif
8445
8446 #include <asm-generic/getorder.h>
8447
8448diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8449index 4b0be20..c15a27d 100644
8450--- a/arch/powerpc/include/asm/pgalloc-64.h
8451+++ b/arch/powerpc/include/asm/pgalloc-64.h
8452@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8453 #ifndef CONFIG_PPC_64K_PAGES
8454
8455 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8456+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8457
8458 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8459 {
8460@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8461 pud_set(pud, (unsigned long)pmd);
8462 }
8463
8464+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8465+{
8466+ pud_populate(mm, pud, pmd);
8467+}
8468+
8469 #define pmd_populate(mm, pmd, pte_page) \
8470 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8471 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8472@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8473 #endif
8474
8475 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8476+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8477
8478 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8479 pte_t *pte)
8480diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8481index a8805fe..6d69617 100644
8482--- a/arch/powerpc/include/asm/pgtable.h
8483+++ b/arch/powerpc/include/asm/pgtable.h
8484@@ -2,6 +2,7 @@
8485 #define _ASM_POWERPC_PGTABLE_H
8486 #ifdef __KERNEL__
8487
8488+#include <linux/const.h>
8489 #ifndef __ASSEMBLY__
8490 #include <linux/mmdebug.h>
8491 #include <linux/mmzone.h>
8492diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8493index 4aad413..85d86bf 100644
8494--- a/arch/powerpc/include/asm/pte-hash32.h
8495+++ b/arch/powerpc/include/asm/pte-hash32.h
8496@@ -21,6 +21,7 @@
8497 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8498 #define _PAGE_USER 0x004 /* usermode access allowed */
8499 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8500+#define _PAGE_EXEC _PAGE_GUARDED
8501 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8502 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8503 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8504diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8505index 1c874fb..e8480a4 100644
8506--- a/arch/powerpc/include/asm/reg.h
8507+++ b/arch/powerpc/include/asm/reg.h
8508@@ -253,6 +253,7 @@
8509 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8510 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8511 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8512+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8513 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8514 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8515 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8516diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8517index 5a6614a..d89995d1 100644
8518--- a/arch/powerpc/include/asm/smp.h
8519+++ b/arch/powerpc/include/asm/smp.h
8520@@ -51,7 +51,7 @@ struct smp_ops_t {
8521 int (*cpu_disable)(void);
8522 void (*cpu_die)(unsigned int nr);
8523 int (*cpu_bootable)(unsigned int nr);
8524-};
8525+} __no_const;
8526
8527 extern void smp_send_debugger_break(void);
8528 extern void start_secondary_resume(void);
8529diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8530index 4dbe072..b803275 100644
8531--- a/arch/powerpc/include/asm/spinlock.h
8532+++ b/arch/powerpc/include/asm/spinlock.h
8533@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8534 __asm__ __volatile__(
8535 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8536 __DO_SIGN_EXTEND
8537-" addic. %0,%0,1\n\
8538- ble- 2f\n"
8539+
8540+#ifdef CONFIG_PAX_REFCOUNT
8541+" mcrxr cr0\n"
8542+" addico. %0,%0,1\n"
8543+" bf 4*cr0+so, 3f\n"
8544+"2:.long " "0x00c00b00""\n"
8545+#else
8546+" addic. %0,%0,1\n"
8547+#endif
8548+
8549+"3:\n"
8550+ "ble- 4f\n"
8551 PPC405_ERR77(0,%1)
8552 " stwcx. %0,0,%1\n\
8553 bne- 1b\n"
8554 PPC_ACQUIRE_BARRIER
8555-"2:" : "=&r" (tmp)
8556+"4:"
8557+
8558+#ifdef CONFIG_PAX_REFCOUNT
8559+ _ASM_EXTABLE(2b,4b)
8560+#endif
8561+
8562+ : "=&r" (tmp)
8563 : "r" (&rw->lock)
8564 : "cr0", "xer", "memory");
8565
8566@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8567 __asm__ __volatile__(
8568 "# read_unlock\n\t"
8569 PPC_RELEASE_BARRIER
8570-"1: lwarx %0,0,%1\n\
8571- addic %0,%0,-1\n"
8572+"1: lwarx %0,0,%1\n"
8573+
8574+#ifdef CONFIG_PAX_REFCOUNT
8575+" mcrxr cr0\n"
8576+" addico. %0,%0,-1\n"
8577+" bf 4*cr0+so, 3f\n"
8578+"2:.long " "0x00c00b00""\n"
8579+#else
8580+" addic. %0,%0,-1\n"
8581+#endif
8582+
8583+"3:\n"
8584 PPC405_ERR77(0,%1)
8585 " stwcx. %0,0,%1\n\
8586 bne- 1b"
8587+
8588+#ifdef CONFIG_PAX_REFCOUNT
8589+"\n4:\n"
8590+ _ASM_EXTABLE(2b, 4b)
8591+#endif
8592+
8593 : "=&r"(tmp)
8594 : "r"(&rw->lock)
8595 : "cr0", "xer", "memory");
8596diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8597index 0be6c68..9c3c6ee 100644
8598--- a/arch/powerpc/include/asm/thread_info.h
8599+++ b/arch/powerpc/include/asm/thread_info.h
8600@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
8601 #if defined(CONFIG_PPC64)
8602 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8603 #endif
8604+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8605+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8606
8607 /* as above, but as bit values */
8608 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8609@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
8610 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8611 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8612 #define _TIF_NOHZ (1<<TIF_NOHZ)
8613+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8614 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8615 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8616- _TIF_NOHZ)
8617+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8618
8619 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8620 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
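
TIF_GRSEC_SETXID is parked at bit 6, as the new comment notes, so that every flag mask tested on the PowerPC syscall entry path stays within the 16-bit unsigned immediate that andi. accepts. A quick check of that constraint:

#include <stdio.h>

#define TIF_GRSEC_SETXID 6

int main(void)
{
    unsigned long mask = 1UL << TIF_GRSEC_SETXID;

    /* andi. takes a 16-bit unsigned immediate, so every mask tested
     * on the fast path must fit in the low 16 bits */
    printf("_TIF_GRSEC_SETXID = %#lx, fits andi. immediate: %s\n",
           mask, mask <= 0xffffUL ? "yes" : "no");
    return 0;
}
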
8621diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8622index a0c071d..49cdc7f 100644
8623--- a/arch/powerpc/include/asm/uaccess.h
8624+++ b/arch/powerpc/include/asm/uaccess.h
8625@@ -58,6 +58,7 @@
8626
8627 #endif
8628
8629+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8630 #define access_ok(type, addr, size) \
8631 (__chk_user_ptr(addr), \
8632 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8633@@ -318,52 +319,6 @@ do { \
8634 extern unsigned long __copy_tofrom_user(void __user *to,
8635 const void __user *from, unsigned long size);
8636
8637-#ifndef __powerpc64__
8638-
8639-static inline unsigned long copy_from_user(void *to,
8640- const void __user *from, unsigned long n)
8641-{
8642- unsigned long over;
8643-
8644- if (access_ok(VERIFY_READ, from, n))
8645- return __copy_tofrom_user((__force void __user *)to, from, n);
8646- if ((unsigned long)from < TASK_SIZE) {
8647- over = (unsigned long)from + n - TASK_SIZE;
8648- return __copy_tofrom_user((__force void __user *)to, from,
8649- n - over) + over;
8650- }
8651- return n;
8652-}
8653-
8654-static inline unsigned long copy_to_user(void __user *to,
8655- const void *from, unsigned long n)
8656-{
8657- unsigned long over;
8658-
8659- if (access_ok(VERIFY_WRITE, to, n))
8660- return __copy_tofrom_user(to, (__force void __user *)from, n);
8661- if ((unsigned long)to < TASK_SIZE) {
8662- over = (unsigned long)to + n - TASK_SIZE;
8663- return __copy_tofrom_user(to, (__force void __user *)from,
8664- n - over) + over;
8665- }
8666- return n;
8667-}
8668-
8669-#else /* __powerpc64__ */
8670-
8671-#define __copy_in_user(to, from, size) \
8672- __copy_tofrom_user((to), (from), (size))
8673-
8674-extern unsigned long copy_from_user(void *to, const void __user *from,
8675- unsigned long n);
8676-extern unsigned long copy_to_user(void __user *to, const void *from,
8677- unsigned long n);
8678-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8679- unsigned long n);
8680-
8681-#endif /* __powerpc64__ */
8682-
8683 static inline unsigned long __copy_from_user_inatomic(void *to,
8684 const void __user *from, unsigned long n)
8685 {
8686@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8687 if (ret == 0)
8688 return 0;
8689 }
8690+
8691+ if (!__builtin_constant_p(n))
8692+ check_object_size(to, n, false);
8693+
8694 return __copy_tofrom_user((__force void __user *)to, from, n);
8695 }
8696
8697@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8698 if (ret == 0)
8699 return 0;
8700 }
8701+
8702+ if (!__builtin_constant_p(n))
8703+ check_object_size(from, n, true);
8704+
8705 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8706 }
8707
8708@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8709 return __copy_to_user_inatomic(to, from, size);
8710 }
8711
8712+#ifndef __powerpc64__
8713+
8714+static inline unsigned long __must_check copy_from_user(void *to,
8715+ const void __user *from, unsigned long n)
8716+{
8717+ unsigned long over;
8718+
8719+ if ((long)n < 0)
8720+ return n;
8721+
8722+ if (access_ok(VERIFY_READ, from, n)) {
8723+ if (!__builtin_constant_p(n))
8724+ check_object_size(to, n, false);
8725+ return __copy_tofrom_user((__force void __user *)to, from, n);
8726+ }
8727+ if ((unsigned long)from < TASK_SIZE) {
8728+ over = (unsigned long)from + n - TASK_SIZE;
8729+ if (!__builtin_constant_p(n - over))
8730+ check_object_size(to, n - over, false);
8731+ return __copy_tofrom_user((__force void __user *)to, from,
8732+ n - over) + over;
8733+ }
8734+ return n;
8735+}
8736+
8737+static inline unsigned long __must_check copy_to_user(void __user *to,
8738+ const void *from, unsigned long n)
8739+{
8740+ unsigned long over;
8741+
8742+ if ((long)n < 0)
8743+ return n;
8744+
8745+ if (access_ok(VERIFY_WRITE, to, n)) {
8746+ if (!__builtin_constant_p(n))
8747+ check_object_size(from, n, true);
8748+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8749+ }
8750+ if ((unsigned long)to < TASK_SIZE) {
8751+ over = (unsigned long)to + n - TASK_SIZE;
8752+ if (!__builtin_constant_p(n))
8753+ check_object_size(from, n - over, true);
8754+ return __copy_tofrom_user(to, (__force void __user *)from,
8755+ n - over) + over;
8756+ }
8757+ return n;
8758+}
8759+
8760+#else /* __powerpc64__ */
8761+
8762+#define __copy_in_user(to, from, size) \
8763+ __copy_tofrom_user((to), (from), (size))
8764+
8765+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8766+{
8767+ if ((long)n < 0 || n > INT_MAX)
8768+ return n;
8769+
8770+ if (!__builtin_constant_p(n))
8771+ check_object_size(to, n, false);
8772+
8773+ if (likely(access_ok(VERIFY_READ, from, n)))
8774+ n = __copy_from_user(to, from, n);
8775+ else
8776+ memset(to, 0, n);
8777+ return n;
8778+}
8779+
8780+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8781+{
8782+ if ((long)n < 0 || n > INT_MAX)
8783+ return n;
8784+
8785+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8786+ if (!__builtin_constant_p(n))
8787+ check_object_size(from, n, true);
8788+ n = __copy_to_user(to, from, n);
8789+ }
8790+ return n;
8791+}
8792+
8793+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8794+ unsigned long n);
8795+
8796+#endif /* __powerpc64__ */
8797+
8798 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8799
8800 static inline unsigned long clear_user(void __user *addr, unsigned long size)
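
The copy routines moved here gain three hardening layers over the versions deleted above: a (long)n < 0 sanity test that catches sizes produced by signedness bugs, check_object_size() (the usercopy slab/stack bounds check) for non-constant lengths, and on ppc64 a memset() of the destination when access_ok() fails so callers never consume uninitialized kernel memory. A user-space sketch of the same shape, with access_ok() and check_object_size() mocked out:

#include <stdio.h>
#include <string.h>

/* Mocked helpers: in the kernel these are access_ok() and the
 * PAX_USERCOPY check_object_size() bounds check. */
static int access_ok_mock(const void *uaddr, unsigned long n) { (void)n; return uaddr != NULL; }
static void check_object_size_mock(const void *p, unsigned long n, int to_user) { (void)p; (void)n; (void)to_user; }

static unsigned long copy_from_user_sketch(void *to, const void *from, unsigned long n)
{
    if ((long)n < 0)          /* negative length smuggled in as a huge size_t */
        return n;

    if (!__builtin_constant_p(n))
        check_object_size_mock(to, n, 0);

    if (!access_ok_mock(from, n)) {
        memset(to, 0, n);     /* never leak uninitialized kernel memory */
        return n;
    }
    memcpy(to, from, n);      /* stand-in for __copy_tofrom_user() */
    return 0;
}

int main(void)
{
    char buf[8] = "XXXXXXX";
    unsigned long left = copy_from_user_sketch(buf, NULL, sizeof(buf));
    printf("left=%lu, buf zeroed: %s\n", left, buf[0] == 0 ? "yes" : "no");
    return 0;
}
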
8801diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8802index 502cf69..53936a1 100644
8803--- a/arch/powerpc/kernel/Makefile
8804+++ b/arch/powerpc/kernel/Makefile
8805@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8806 CFLAGS_btext.o += -fPIC
8807 endif
8808
8809+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8810+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8811+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8812+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8813+
8814 ifdef CONFIG_FUNCTION_TRACER
8815 # Do not trace early boot code
8816 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8817@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8818 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8819 endif
8820
8821+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8822+
8823 obj-y := cputable.o ptrace.o syscalls.o \
8824 irq.o align.o signal_32.o pmc.o vdso.o \
8825 process.o systbl.o idle.o \
8826diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8827index 3e68d1c..72a5ee6 100644
8828--- a/arch/powerpc/kernel/exceptions-64e.S
8829+++ b/arch/powerpc/kernel/exceptions-64e.S
8830@@ -1010,6 +1010,7 @@ storage_fault_common:
8831 std r14,_DAR(r1)
8832 std r15,_DSISR(r1)
8833 addi r3,r1,STACK_FRAME_OVERHEAD
8834+ bl save_nvgprs
8835 mr r4,r14
8836 mr r5,r15
8837 ld r14,PACA_EXGEN+EX_R14(r13)
8838@@ -1018,8 +1019,7 @@ storage_fault_common:
8839 cmpdi r3,0
8840 bne- 1f
8841 b ret_from_except_lite
8842-1: bl save_nvgprs
8843- mr r5,r3
8844+1: mr r5,r3
8845 addi r3,r1,STACK_FRAME_OVERHEAD
8846 ld r4,_DAR(r1)
8847 bl bad_page_fault
8848diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8849index c2df815..bae3d12 100644
8850--- a/arch/powerpc/kernel/exceptions-64s.S
8851+++ b/arch/powerpc/kernel/exceptions-64s.S
8852@@ -1599,10 +1599,10 @@ handle_page_fault:
8853 11: ld r4,_DAR(r1)
8854 ld r5,_DSISR(r1)
8855 addi r3,r1,STACK_FRAME_OVERHEAD
8856+ bl save_nvgprs
8857 bl do_page_fault
8858 cmpdi r3,0
8859 beq+ 12f
8860- bl save_nvgprs
8861 mr r5,r3
8862 addi r3,r1,STACK_FRAME_OVERHEAD
8863 lwz r4,_DAR(r1)
8864diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8865index 4509603..cdb491f 100644
8866--- a/arch/powerpc/kernel/irq.c
8867+++ b/arch/powerpc/kernel/irq.c
8868@@ -460,6 +460,8 @@ void migrate_irqs(void)
8869 }
8870 #endif
8871
8872+extern void gr_handle_kernel_exploit(void);
8873+
8874 static inline void check_stack_overflow(void)
8875 {
8876 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8877@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8878 pr_err("do_IRQ: stack overflow: %ld\n",
8879 sp - sizeof(struct thread_info));
8880 dump_stack();
8881+ gr_handle_kernel_exploit();
8882 }
8883 #endif
8884 }
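
gr_handle_kernel_exploit() is grsecurity's central "an exploit attempt just fired" hook, wired here into the IRQ stack-overflow check and, further down in this patch, into oops_end(); with CONFIG_GRKERNSEC_KERN_LOCKOUT it terminates the offender and locks the triggering user out. The sketch below is hypothetical and heavily simplified; the names and policy are illustrative, not grsecurity's actual code:

#include <stdio.h>

/* Hypothetical sketch only: the real gr_handle_kernel_exploit() lives
 * in grsecurity/grsec_sig.c and implements KERN_LOCKOUT. */
struct task_sketch { const char *comm; unsigned uid; };

static void lockout_user(unsigned uid)
{
    /* stand-in for banning the uid and killing its processes */
    fprintf(stderr, "grsec: locking out uid %u after exploit attempt\n", uid);
}

static void gr_handle_kernel_exploit_sketch(const struct task_sketch *tsk)
{
    fprintf(stderr, "grsec: banning process %s\n", tsk->comm);
    if (tsk->uid != 0)   /* assumed policy: never auto-ban root */
        lockout_user(tsk->uid);
}

int main(void)
{
    const struct task_sketch t = { "exploit", 1000 };
    gr_handle_kernel_exploit_sketch(&t);
    return 0;
}
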
8885diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8886index c94d2e0..992a9ce 100644
8887--- a/arch/powerpc/kernel/module_32.c
8888+++ b/arch/powerpc/kernel/module_32.c
8889@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8890 me->arch.core_plt_section = i;
8891 }
8892 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8893- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8894+ pr_err("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
8895 return -ENOEXEC;
8896 }
8897
8898@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8899
8900 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8901 /* Init, or core PLT? */
8902- if (location >= mod->module_core
8903- && location < mod->module_core + mod->core_size)
8904+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8905+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8906 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8907- else
8908+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8909+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8910 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8911+ else {
8912+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8913+ return ~0UL;
8914+ }
8915
8916 /* Find this entry, or if that fails, the next avail. entry */
8917 while (entry->jump[0]) {
8918@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8919 }
8920 #ifdef CONFIG_DYNAMIC_FTRACE
8921 module->arch.tramp =
8922- do_plt_call(module->module_core,
8923+ do_plt_call(module->module_core_rx,
8924 (unsigned long)ftrace_caller,
8925 sechdrs, module);
8926 #endif
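
Under the KERNEXEC/MPROTECT split used throughout this patch, a module's core and init regions each become an executable read-only (_rx) half and a writable non-executable (_rw) half, so do_plt_call() now has four ranges to test and must reject a location in none of them instead of assuming "not core means init". A sketch of the core-range test the hunk introduces:

#include <stdio.h>

struct module_sketch {
    char *module_core_rx, *module_core_rw;
    unsigned long core_size_rx, core_size_rw;
};

/* 1 if location falls in the module's RX text or RW data region */
static int in_core(const struct module_sketch *mod, const char *location)
{
    return (location >= mod->module_core_rx &&
            location <  mod->module_core_rx + mod->core_size_rx) ||
           (location >= mod->module_core_rw &&
            location <  mod->module_core_rw + mod->core_size_rw);
}

int main(void)
{
    static char rx[64], rw[64], stray[8];
    struct module_sketch m = { rx, rw, sizeof(rx), sizeof(rw) };

    printf("text ptr:  %d\n", in_core(&m, rx + 8));   /* 1 */
    printf("data ptr:  %d\n", in_core(&m, rw + 8));   /* 1 */
    printf("stray ptr: %d\n", in_core(&m, stray));    /* 0: the new else branch rejects it */
    return 0;
}
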
8927diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8928index b4cc7be..1fe8bb3 100644
8929--- a/arch/powerpc/kernel/process.c
8930+++ b/arch/powerpc/kernel/process.c
8931@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8932 * Lookup NIP late so we have the best chance of getting the
8933 * above info out without failing
8934 */
8935- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8936- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8937+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8938+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8939 #endif
8940 show_stack(current, (unsigned long *) regs->gpr[1]);
8941 if (!user_mode(regs))
8942@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8943 newsp = stack[0];
8944 ip = stack[STACK_FRAME_LR_SAVE];
8945 if (!firstframe || ip != lr) {
8946- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8947+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8948 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8949 if ((ip == rth) && curr_frame >= 0) {
8950- printk(" (%pS)",
8951+ printk(" (%pA)",
8952 (void *)current->ret_stack[curr_frame].ret);
8953 curr_frame--;
8954 }
8955@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8956 struct pt_regs *regs = (struct pt_regs *)
8957 (sp + STACK_FRAME_OVERHEAD);
8958 lr = regs->link;
8959- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
8960+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
8961 regs->trap, (void *)regs->nip, (void *)lr);
8962 firstframe = 1;
8963 }
8964@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
8965 mtspr(SPRN_CTRLT, ctrl);
8966 }
8967 #endif /* CONFIG_PPC64 */
8968-
8969-unsigned long arch_align_stack(unsigned long sp)
8970-{
8971- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8972- sp -= get_random_int() & ~PAGE_MASK;
8973- return sp & ~0xf;
8974-}
8975-
8976-static inline unsigned long brk_rnd(void)
8977-{
8978- unsigned long rnd = 0;
8979-
8980- /* 8MB for 32bit, 1GB for 64bit */
8981- if (is_32bit_task())
8982- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8983- else
8984- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8985-
8986- return rnd << PAGE_SHIFT;
8987-}
8988-
8989-unsigned long arch_randomize_brk(struct mm_struct *mm)
8990-{
8991- unsigned long base = mm->brk;
8992- unsigned long ret;
8993-
8994-#ifdef CONFIG_PPC_STD_MMU_64
8995- /*
8996- * If we are using 1TB segments and we are allowed to randomise
8997- * the heap, we can put it above 1TB so it is backed by a 1TB
8998- * segment. Otherwise the heap will be in the bottom 1TB
8999- * which always uses 256MB segments and this may result in a
9000- * performance penalty.
9001- */
9002- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9003- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9004-#endif
9005-
9006- ret = PAGE_ALIGN(base + brk_rnd());
9007-
9008- if (ret < mm->brk)
9009- return mm->brk;
9010-
9011- return ret;
9012-}
9013-
9014diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9015index f21897b..28c0428 100644
9016--- a/arch/powerpc/kernel/ptrace.c
9017+++ b/arch/powerpc/kernel/ptrace.c
9018@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9019 return ret;
9020 }
9021
9022+#ifdef CONFIG_GRKERNSEC_SETXID
9023+extern void gr_delayed_cred_worker(void);
9024+#endif
9025+
9026 /*
9027 * We must return the syscall number to actually look up in the table.
9028 * This can be -1L to skip running any syscall at all.
9029@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9030
9031 secure_computing_strict(regs->gpr[0]);
9032
9033+#ifdef CONFIG_GRKERNSEC_SETXID
9034+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9035+ gr_delayed_cred_worker();
9036+#endif
9037+
9038 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9039 tracehook_report_syscall_entry(regs))
9040 /*
9041@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9042 {
9043 int step;
9044
9045+#ifdef CONFIG_GRKERNSEC_SETXID
9046+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9047+ gr_delayed_cred_worker();
9048+#endif
9049+
9050 audit_syscall_exit(regs);
9051
9052 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9053diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9054index b171001..4ac7ac5 100644
9055--- a/arch/powerpc/kernel/signal_32.c
9056+++ b/arch/powerpc/kernel/signal_32.c
9057@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9058 /* Save user registers on the stack */
9059 frame = &rt_sf->uc.uc_mcontext;
9060 addr = frame;
9061- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9062+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9063 sigret = 0;
9064 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9065 } else {
9066diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9067index 2cb0c94..c0c0bc9 100644
9068--- a/arch/powerpc/kernel/signal_64.c
9069+++ b/arch/powerpc/kernel/signal_64.c
9070@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9071 current->thread.fp_state.fpscr = 0;
9072
9073 /* Set up to return from userspace. */
9074- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9075+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9076 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9077 } else {
9078 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9079diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9080index e6595b7..24bde6e 100644
9081--- a/arch/powerpc/kernel/traps.c
9082+++ b/arch/powerpc/kernel/traps.c
9083@@ -36,6 +36,7 @@
9084 #include <linux/debugfs.h>
9085 #include <linux/ratelimit.h>
9086 #include <linux/context_tracking.h>
9087+#include <linux/uaccess.h>
9088
9089 #include <asm/emulated_ops.h>
9090 #include <asm/pgtable.h>
9091@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9092 return flags;
9093 }
9094
9095+extern void gr_handle_kernel_exploit(void);
9096+
9097 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9098 int signr)
9099 {
9100@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9101 panic("Fatal exception in interrupt");
9102 if (panic_on_oops)
9103 panic("Fatal exception");
9104+
9105+ gr_handle_kernel_exploit();
9106+
9107 do_exit(signr);
9108 }
9109
9110@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9111 enum ctx_state prev_state = exception_enter();
9112 unsigned int reason = get_reason(regs);
9113
9114+#ifdef CONFIG_PAX_REFCOUNT
9115+ unsigned int bkpt;
9116+ const struct exception_table_entry *entry;
9117+
9118+ if (reason & REASON_ILLEGAL) {
9119+ /* Check if PaX bad instruction */
9120+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9121+ current->thread.trap_nr = 0;
9122+ pax_report_refcount_overflow(regs);
9123+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9124+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9125+ regs->nip = entry->fixup;
9126+ return;
9127+ }
9128+ /* fixup_exception() could not handle */
9129+ goto bail;
9130+ }
9131+ }
9132+#endif
9133+
9134 /* We can now get here via a FP Unavailable exception if the core
9135 * has no FPU, in that case the reason flags will be 0 */
9136
9137diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9138index 305eb0d..accc5b40 100644
9139--- a/arch/powerpc/kernel/vdso.c
9140+++ b/arch/powerpc/kernel/vdso.c
9141@@ -34,6 +34,7 @@
9142 #include <asm/vdso.h>
9143 #include <asm/vdso_datapage.h>
9144 #include <asm/setup.h>
9145+#include <asm/mman.h>
9146
9147 #undef DEBUG
9148
9149@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9150 vdso_base = VDSO32_MBASE;
9151 #endif
9152
9153- current->mm->context.vdso_base = 0;
9154+ current->mm->context.vdso_base = ~0UL;
9155
9156 /* vDSO has a problem and was disabled, just don't "enable" it for the
9157 * process
9158@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9159 vdso_base = get_unmapped_area(NULL, vdso_base,
9160 (vdso_pages << PAGE_SHIFT) +
9161 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9162- 0, 0);
9163+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9164 if (IS_ERR_VALUE(vdso_base)) {
9165 rc = vdso_base;
9166 goto fail_mmapsem;
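
This vdso.c change is the other half of the signal_32.c/signal_64.c hunks above: the "no vDSO" sentinel moves from 0 to ~0UL, because once the base can be randomized, 0 is a legitimate mapping address and truthiness tests on vdso_base become wrong. A sketch of the before/after check:

#include <stdio.h>

#define VDSO_UNMAPPED (~0UL)   /* new sentinel; 0 is a valid address */

static const char *pick_sigtramp(unsigned long vdso_base)
{
    /* the old test was `if (vdso_base)`, which misfires if the
     * vDSO really was mapped at address 0 */
    if (vdso_base != VDSO_UNMAPPED)
        return "vdso trampoline";
    return "stack trampoline";
}

int main(void)
{
    printf("vdso at 0: %s\n", pick_sigtramp(0));             /* vdso */
    printf("no vdso:   %s\n", pick_sigtramp(VDSO_UNMAPPED)); /* stack */
    return 0;
}
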
9167diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9168index c45eaab..5f41b57 100644
9169--- a/arch/powerpc/kvm/powerpc.c
9170+++ b/arch/powerpc/kvm/powerpc.c
9171@@ -1403,7 +1403,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9172 }
9173 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9174
9175-int kvm_arch_init(void *opaque)
9176+int kvm_arch_init(const void *opaque)
9177 {
9178 return 0;
9179 }
9180diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9181index 5eea6f3..5d10396 100644
9182--- a/arch/powerpc/lib/usercopy_64.c
9183+++ b/arch/powerpc/lib/usercopy_64.c
9184@@ -9,22 +9,6 @@
9185 #include <linux/module.h>
9186 #include <asm/uaccess.h>
9187
9188-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9189-{
9190- if (likely(access_ok(VERIFY_READ, from, n)))
9191- n = __copy_from_user(to, from, n);
9192- else
9193- memset(to, 0, n);
9194- return n;
9195-}
9196-
9197-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9198-{
9199- if (likely(access_ok(VERIFY_WRITE, to, n)))
9200- n = __copy_to_user(to, from, n);
9201- return n;
9202-}
9203-
9204 unsigned long copy_in_user(void __user *to, const void __user *from,
9205 unsigned long n)
9206 {
9207@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9208 return n;
9209 }
9210
9211-EXPORT_SYMBOL(copy_from_user);
9212-EXPORT_SYMBOL(copy_to_user);
9213 EXPORT_SYMBOL(copy_in_user);
9214
9215diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9216index 6154b0a..4de2b19 100644
9217--- a/arch/powerpc/mm/fault.c
9218+++ b/arch/powerpc/mm/fault.c
9219@@ -33,6 +33,10 @@
9220 #include <linux/ratelimit.h>
9221 #include <linux/context_tracking.h>
9222 #include <linux/hugetlb.h>
9223+#include <linux/slab.h>
9224+#include <linux/pagemap.h>
9225+#include <linux/compiler.h>
9226+#include <linux/unistd.h>
9227
9228 #include <asm/firmware.h>
9229 #include <asm/page.h>
9230@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9231 }
9232 #endif
9233
9234+#ifdef CONFIG_PAX_PAGEEXEC
9235+/*
9236+ * PaX: decide what to do with offenders (regs->nip = fault address)
9237+ *
9238+ * returns 1 when task should be killed
9239+ */
9240+static int pax_handle_fetch_fault(struct pt_regs *regs)
9241+{
9242+ return 1;
9243+}
9244+
9245+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9246+{
9247+ unsigned long i;
9248+
9249+ printk(KERN_ERR "PAX: bytes at PC: ");
9250+ for (i = 0; i < 5; i++) {
9251+ unsigned int c;
9252+ if (get_user(c, (unsigned int __user *)pc+i))
9253+ printk(KERN_CONT "???????? ");
9254+ else
9255+ printk(KERN_CONT "%08x ", c);
9256+ }
9257+ printk("\n");
9258+}
9259+#endif
9260+
9261 /*
9262 * Check whether the instruction at regs->nip is a store using
9263 * an update addressing form which will update r1.
9264@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9265 * indicate errors in DSISR but can validly be set in SRR1.
9266 */
9267 if (trap == 0x400)
9268- error_code &= 0x48200000;
9269+ error_code &= 0x58200000;
9270 else
9271 is_write = error_code & DSISR_ISSTORE;
9272 #else
9273@@ -383,7 +414,7 @@ good_area:
9274 * "undefined". Of those that can be set, this is the only
9275 * one which seems bad.
9276 */
9277- if (error_code & 0x10000000)
9278+ if (error_code & DSISR_GUARDED)
9279 /* Guarded storage error. */
9280 goto bad_area;
9281 #endif /* CONFIG_8xx */
9282@@ -398,7 +429,7 @@ good_area:
9283 * processors use the same I/D cache coherency mechanism
9284 * as embedded.
9285 */
9286- if (error_code & DSISR_PROTFAULT)
9287+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9288 goto bad_area;
9289 #endif /* CONFIG_PPC_STD_MMU */
9290
9291@@ -490,6 +521,23 @@ bad_area:
9292 bad_area_nosemaphore:
9293 /* User mode accesses cause a SIGSEGV */
9294 if (user_mode(regs)) {
9295+
9296+#ifdef CONFIG_PAX_PAGEEXEC
9297+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9298+#ifdef CONFIG_PPC_STD_MMU
9299+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9300+#else
9301+ if (is_exec && regs->nip == address) {
9302+#endif
9303+ switch (pax_handle_fetch_fault(regs)) {
9304+ }
9305+
9306+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9307+ do_group_exit(SIGKILL);
9308+ }
9309+ }
9310+#endif
9311+
9312 _exception(SIGSEGV, regs, code, address);
9313 goto bail;
9314 }
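
This fault-handler hunk closes the loop opened by the _PAGE_EXEC = _PAGE_GUARDED alias and the DSISR_GUARDED definition earlier in the patch: on hash-MMU parts without hardware no-execute, PaX marks non-executable pages guarded, an instruction fetch from them faults with the guarded bit set in the error code (hence the mask widened from 0x48200000 to 0x58200000), and the handler treats that as a PAGEEXEC violation and kills the task. A sketch of the classification logic:

#include <stdio.h>

#define DSISR_NOHPTE    0x40000000UL
#define DSISR_GUARDED   0x10000000UL   /* fetch from guarded storage */
#define DSISR_PROTFAULT 0x08000000UL

static const char *classify(unsigned long error_code, int is_exec)
{
    if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED)))
        return "PAGEEXEC violation -> SIGKILL";
    if (error_code & DSISR_NOHPTE)
        return "normal demand fault";
    return "other";
}

int main(void)
{
    printf("%s\n", classify(DSISR_GUARDED, 1));   /* exec from NX page */
    printf("%s\n", classify(DSISR_NOHPTE, 0));    /* regular page fault */
    return 0;
}
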
9315diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9316index cb8bdbe..cde4bc7 100644
9317--- a/arch/powerpc/mm/mmap.c
9318+++ b/arch/powerpc/mm/mmap.c
9319@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9320 return sysctl_legacy_va_layout;
9321 }
9322
9323-static unsigned long mmap_rnd(void)
9324+static unsigned long mmap_rnd(struct mm_struct *mm)
9325 {
9326 unsigned long rnd = 0;
9327
9328+#ifdef CONFIG_PAX_RANDMMAP
9329+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9330+#endif
9331+
9332 if (current->flags & PF_RANDOMIZE) {
9333 /* 8MB for 32bit, 1GB for 64bit */
9334 if (is_32bit_task())
9335@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9336 return rnd << PAGE_SHIFT;
9337 }
9338
9339-static inline unsigned long mmap_base(void)
9340+static inline unsigned long mmap_base(struct mm_struct *mm)
9341 {
9342 unsigned long gap = rlimit(RLIMIT_STACK);
9343
9344@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9345 else if (gap > MAX_GAP)
9346 gap = MAX_GAP;
9347
9348- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9349+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9350 }
9351
9352 /*
9353@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9354 */
9355 if (mmap_is_legacy()) {
9356 mm->mmap_base = TASK_UNMAPPED_BASE;
9357+
9358+#ifdef CONFIG_PAX_RANDMMAP
9359+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9360+ mm->mmap_base += mm->delta_mmap;
9361+#endif
9362+
9363 mm->get_unmapped_area = arch_get_unmapped_area;
9364 } else {
9365- mm->mmap_base = mmap_base();
9366+ mm->mmap_base = mmap_base(mm);
9367+
9368+#ifdef CONFIG_PAX_RANDMMAP
9369+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9370+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9371+#endif
9372+
9373 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9374 }
9375 }
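
arch_pick_mmap_layout() now threads the mm into mmap_rnd() so PAX_RANDMMAP can suppress the stock randomization and apply its own page-granular deltas instead: the legacy bottom-up base slides up by delta_mmap, while the top-down base drops by delta_mmap + delta_stack. A sketch of the top-down computation, folding the delta shift into the usual RLIMIT_STACK gap clamp (constants are illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define TASK_SIZE (0x0000400000000000UL)   /* illustrative */
#define MIN_GAP   (128UL << 20)
#define MAX_GAP   (TASK_SIZE / 6 * 5)

static unsigned long mmap_base_sketch(unsigned long stack_rlimit,
                                      unsigned long delta_mmap,
                                      unsigned long delta_stack)
{
    unsigned long gap = stack_rlimit;

    if (gap < MIN_GAP)
        gap = MIN_GAP;
    else if (gap > MAX_GAP)
        gap = MAX_GAP;

    /* the stock kernel subtracts mmap_rnd() here; under RANDMMAP that
     * returns 0 and the PaX deltas are subtracted afterwards */
    return PAGE_ALIGN(TASK_SIZE - gap) - (delta_mmap + delta_stack);
}

int main(void)
{
    /* 8 MiB stack rlimit, arbitrary page-aligned deltas */
    printf("mmap_base = %#lx\n",
           mmap_base_sketch(8UL << 20, 123 * PAGE_SIZE, 45 * PAGE_SIZE));
    return 0;
}
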
9376diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9377index ded0ea1..f213a9b 100644
9378--- a/arch/powerpc/mm/slice.c
9379+++ b/arch/powerpc/mm/slice.c
9380@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9381 if ((mm->task_size - len) < addr)
9382 return 0;
9383 vma = find_vma(mm, addr);
9384- return (!vma || (addr + len) <= vma->vm_start);
9385+ return check_heap_stack_gap(vma, addr, len, 0);
9386 }
9387
9388 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9389@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9390 info.align_offset = 0;
9391
9392 addr = TASK_UNMAPPED_BASE;
9393+
9394+#ifdef CONFIG_PAX_RANDMMAP
9395+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9396+ addr += mm->delta_mmap;
9397+#endif
9398+
9399 while (addr < TASK_SIZE) {
9400 info.low_limit = addr;
9401 if (!slice_scan_available(addr, available, 1, &addr))
9402@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9403 if (fixed && addr > (mm->task_size - len))
9404 return -ENOMEM;
9405
9406+#ifdef CONFIG_PAX_RANDMMAP
9407+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9408+ addr = 0;
9409+#endif
9410+
9411 /* If hint, make sure it matches our alignment restrictions */
9412 if (!fixed && addr) {
9413 addr = _ALIGN_UP(addr, 1ul << pshift);
9414diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9415index f223875..94170e4 100644
9416--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9417+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9418@@ -399,8 +399,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
9419 }
9420
9421 static struct pci_ops scc_pciex_pci_ops = {
9422- scc_pciex_read_config,
9423- scc_pciex_write_config,
9424+ .read = scc_pciex_read_config,
9425+ .write = scc_pciex_write_config,
9426 };
9427
9428 static void pciex_clear_intr_all(unsigned int __iomem *base)
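
Converting struct pci_ops from positional to designated initializers is a recurring cleanup in this patch: once structure layouts may be constified or randomized (RANDSTRUCT), positional initializers silently bind to the wrong members, while designated ones survive any reordering. A minimal illustration:

#include <stdio.h>

struct ops {
    int (*read)(void);
    int (*write)(void);
};

static int my_read(void)  { return 1; }
static int my_write(void) { return 2; }

/* stays correct under any reordering of struct ops' members */
static const struct ops pci_like_ops = {
    .read  = my_read,
    .write = my_write,
};

int main(void)
{
    printf("%d %d\n", pci_like_ops.read(), pci_like_ops.write());
    return 0;
}
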
9429diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9430index d966bbe..372124a 100644
9431--- a/arch/powerpc/platforms/cell/spufs/file.c
9432+++ b/arch/powerpc/platforms/cell/spufs/file.c
9433@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9434 return VM_FAULT_NOPAGE;
9435 }
9436
9437-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9438+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9439 unsigned long address,
9440- void *buf, int len, int write)
9441+ void *buf, size_t len, int write)
9442 {
9443 struct spu_context *ctx = vma->vm_file->private_data;
9444 unsigned long offset = address - vma->vm_start;
9445diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9446index fa934fe..c296056 100644
9447--- a/arch/s390/include/asm/atomic.h
9448+++ b/arch/s390/include/asm/atomic.h
9449@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9450 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9451 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9452
9453+#define atomic64_read_unchecked(v) atomic64_read(v)
9454+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9455+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9456+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9457+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9458+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9459+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9460+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9461+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9462+
9463 #endif /* __ARCH_S390_ATOMIC__ */
9464diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9465index 8d72471..5322500 100644
9466--- a/arch/s390/include/asm/barrier.h
9467+++ b/arch/s390/include/asm/barrier.h
9468@@ -42,7 +42,7 @@
9469 do { \
9470 compiletime_assert_atomic_type(*p); \
9471 barrier(); \
9472- ACCESS_ONCE(*p) = (v); \
9473+ ACCESS_ONCE_RW(*p) = (v); \
9474 } while (0)
9475
9476 #define smp_load_acquire(p) \
9477diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9478index 4d7ccac..d03d0ad 100644
9479--- a/arch/s390/include/asm/cache.h
9480+++ b/arch/s390/include/asm/cache.h
9481@@ -9,8 +9,10 @@
9482 #ifndef __ARCH_S390_CACHE_H
9483 #define __ARCH_S390_CACHE_H
9484
9485-#define L1_CACHE_BYTES 256
9486+#include <linux/const.h>
9487+
9488 #define L1_CACHE_SHIFT 8
9489+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9490 #define NET_SKB_PAD 32
9491
9492 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9493diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9494index f6e43d3..5f57681 100644
9495--- a/arch/s390/include/asm/elf.h
9496+++ b/arch/s390/include/asm/elf.h
9497@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9498 the loader. We need to make sure that it is out of the way of the program
9499 that it will "exec", and that there is sufficient room for the brk. */
9500
9501-extern unsigned long randomize_et_dyn(unsigned long base);
9502-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9503+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9504+
9505+#ifdef CONFIG_PAX_ASLR
9506+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9507+
9508+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9509+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9510+#endif
9511
9512 /* This yields a mask that user programs can use to figure out what
9513 instruction set this CPU supports. */
9514@@ -223,9 +229,6 @@ struct linux_binprm;
9515 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9516 int arch_setup_additional_pages(struct linux_binprm *, int);
9517
9518-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9519-#define arch_randomize_brk arch_randomize_brk
9520-
9521 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9522
9523 #endif
9524diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9525index c4a93d6..4d2a9b4 100644
9526--- a/arch/s390/include/asm/exec.h
9527+++ b/arch/s390/include/asm/exec.h
9528@@ -7,6 +7,6 @@
9529 #ifndef __ASM_EXEC_H
9530 #define __ASM_EXEC_H
9531
9532-extern unsigned long arch_align_stack(unsigned long sp);
9533+#define arch_align_stack(x) ((x) & ~0xfUL)
9534
9535 #endif /* __ASM_EXEC_H */
9536diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9537index cd4c68e..6764641 100644
9538--- a/arch/s390/include/asm/uaccess.h
9539+++ b/arch/s390/include/asm/uaccess.h
9540@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9541 __range_ok((unsigned long)(addr), (size)); \
9542 })
9543
9544+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9545 #define access_ok(type, addr, size) __access_ok(addr, size)
9546
9547 /*
9548@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9549 copy_to_user(void __user *to, const void *from, unsigned long n)
9550 {
9551 might_fault();
9552+
9553+ if ((long)n < 0)
9554+ return n;
9555+
9556 return __copy_to_user(to, from, n);
9557 }
9558
9559@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9560 static inline unsigned long __must_check
9561 copy_from_user(void *to, const void __user *from, unsigned long n)
9562 {
9563- unsigned int sz = __compiletime_object_size(to);
9564+ size_t sz = __compiletime_object_size(to);
9565
9566 might_fault();
9567- if (unlikely(sz != -1 && sz < n)) {
9568+
9569+ if ((long)n < 0)
9570+ return n;
9571+
9572+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9573 copy_from_user_overflow();
9574 return n;
9575 }
9576diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9577index 409d152..d90d368 100644
9578--- a/arch/s390/kernel/module.c
9579+++ b/arch/s390/kernel/module.c
9580@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9581
9582 /* Increase core size by size of got & plt and set start
9583 offsets for got and plt. */
9584- me->core_size = ALIGN(me->core_size, 4);
9585- me->arch.got_offset = me->core_size;
9586- me->core_size += me->arch.got_size;
9587- me->arch.plt_offset = me->core_size;
9588- me->core_size += me->arch.plt_size;
9589+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9590+ me->arch.got_offset = me->core_size_rw;
9591+ me->core_size_rw += me->arch.got_size;
9592+ me->arch.plt_offset = me->core_size_rx;
9593+ me->core_size_rx += me->arch.plt_size;
9594 return 0;
9595 }
9596
9597@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9598 if (info->got_initialized == 0) {
9599 Elf_Addr *gotent;
9600
9601- gotent = me->module_core + me->arch.got_offset +
9602+ gotent = me->module_core_rw + me->arch.got_offset +
9603 info->got_offset;
9604 *gotent = val;
9605 info->got_initialized = 1;
9606@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9607 rc = apply_rela_bits(loc, val, 0, 64, 0);
9608 else if (r_type == R_390_GOTENT ||
9609 r_type == R_390_GOTPLTENT) {
9610- val += (Elf_Addr) me->module_core - loc;
9611+ val += (Elf_Addr) me->module_core_rw - loc;
9612 rc = apply_rela_bits(loc, val, 1, 32, 1);
9613 }
9614 break;
9615@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9616 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9617 if (info->plt_initialized == 0) {
9618 unsigned int *ip;
9619- ip = me->module_core + me->arch.plt_offset +
9620+ ip = me->module_core_rx + me->arch.plt_offset +
9621 info->plt_offset;
9622 #ifndef CONFIG_64BIT
9623 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9624@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9625 val - loc + 0xffffUL < 0x1ffffeUL) ||
9626 (r_type == R_390_PLT32DBL &&
9627 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9628- val = (Elf_Addr) me->module_core +
9629+ val = (Elf_Addr) me->module_core_rx +
9630 me->arch.plt_offset +
9631 info->plt_offset;
9632 val += rela->r_addend - loc;
9633@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9634 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9635 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9636 val = val + rela->r_addend -
9637- ((Elf_Addr) me->module_core + me->arch.got_offset);
9638+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9639 if (r_type == R_390_GOTOFF16)
9640 rc = apply_rela_bits(loc, val, 0, 16, 0);
9641 else if (r_type == R_390_GOTOFF32)
9642@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9643 break;
9644 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9645 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9646- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9647+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9648 rela->r_addend - loc;
9649 if (r_type == R_390_GOTPC)
9650 rc = apply_rela_bits(loc, val, 1, 32, 0);
9651diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9652index aa7a839..6c2a916 100644
9653--- a/arch/s390/kernel/process.c
9654+++ b/arch/s390/kernel/process.c
9655@@ -219,37 +219,3 @@ unsigned long get_wchan(struct task_struct *p)
9656 }
9657 return 0;
9658 }
9659-
9660-unsigned long arch_align_stack(unsigned long sp)
9661-{
9662- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9663- sp -= get_random_int() & ~PAGE_MASK;
9664- return sp & ~0xf;
9665-}
9666-
9667-static inline unsigned long brk_rnd(void)
9668-{
9669- /* 8MB for 32bit, 1GB for 64bit */
9670- if (is_32bit_task())
9671- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9672- else
9673- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9674-}
9675-
9676-unsigned long arch_randomize_brk(struct mm_struct *mm)
9677-{
9678- unsigned long ret;
9679-
9680- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9681- return (ret > mm->brk) ? ret : mm->brk;
9682-}
9683-
9684-unsigned long randomize_et_dyn(unsigned long base)
9685-{
9686- unsigned long ret;
9687-
9688- if (!(current->flags & PF_RANDOMIZE))
9689- return base;
9690- ret = PAGE_ALIGN(base + brk_rnd());
9691- return (ret > base) ? ret : base;
9692-}
9693diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9694index 9b436c2..54fbf0a 100644
9695--- a/arch/s390/mm/mmap.c
9696+++ b/arch/s390/mm/mmap.c
9697@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9698 */
9699 if (mmap_is_legacy()) {
9700 mm->mmap_base = mmap_base_legacy();
9701+
9702+#ifdef CONFIG_PAX_RANDMMAP
9703+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9704+ mm->mmap_base += mm->delta_mmap;
9705+#endif
9706+
9707 mm->get_unmapped_area = arch_get_unmapped_area;
9708 } else {
9709 mm->mmap_base = mmap_base();
9710+
9711+#ifdef CONFIG_PAX_RANDMMAP
9712+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9713+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9714+#endif
9715+
9716 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9717 }
9718 }
9719@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9720 */
9721 if (mmap_is_legacy()) {
9722 mm->mmap_base = mmap_base_legacy();
9723+
9724+#ifdef CONFIG_PAX_RANDMMAP
9725+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9726+ mm->mmap_base += mm->delta_mmap;
9727+#endif
9728+
9729 mm->get_unmapped_area = s390_get_unmapped_area;
9730 } else {
9731 mm->mmap_base = mmap_base();
9732+
9733+#ifdef CONFIG_PAX_RANDMMAP
9734+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9735+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9736+#endif
9737+
9738 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9739 }
9740 }
9741diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9742index ae3d59f..f65f075 100644
9743--- a/arch/score/include/asm/cache.h
9744+++ b/arch/score/include/asm/cache.h
9745@@ -1,7 +1,9 @@
9746 #ifndef _ASM_SCORE_CACHE_H
9747 #define _ASM_SCORE_CACHE_H
9748
9749+#include <linux/const.h>
9750+
9751 #define L1_CACHE_SHIFT 4
9752-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9753+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9754
9755 #endif /* _ASM_SCORE_CACHE_H */
9756diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9757index f9f3cd5..58ff438 100644
9758--- a/arch/score/include/asm/exec.h
9759+++ b/arch/score/include/asm/exec.h
9760@@ -1,6 +1,6 @@
9761 #ifndef _ASM_SCORE_EXEC_H
9762 #define _ASM_SCORE_EXEC_H
9763
9764-extern unsigned long arch_align_stack(unsigned long sp);
9765+#define arch_align_stack(x) (x)
9766
9767 #endif /* _ASM_SCORE_EXEC_H */
9768diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9769index a1519ad3..e8ac1ff 100644
9770--- a/arch/score/kernel/process.c
9771+++ b/arch/score/kernel/process.c
9772@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9773
9774 return task_pt_regs(task)->cp0_epc;
9775 }
9776-
9777-unsigned long arch_align_stack(unsigned long sp)
9778-{
9779- return sp;
9780-}
9781diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9782index ef9e555..331bd29 100644
9783--- a/arch/sh/include/asm/cache.h
9784+++ b/arch/sh/include/asm/cache.h
9785@@ -9,10 +9,11 @@
9786 #define __ASM_SH_CACHE_H
9787 #ifdef __KERNEL__
9788
9789+#include <linux/const.h>
9790 #include <linux/init.h>
9791 #include <cpu/cache.h>
9792
9793-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9794+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9795
9796 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9797
9798diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9799index 6777177..cb5e44f 100644
9800--- a/arch/sh/mm/mmap.c
9801+++ b/arch/sh/mm/mmap.c
9802@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9803 struct mm_struct *mm = current->mm;
9804 struct vm_area_struct *vma;
9805 int do_colour_align;
9806+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9807 struct vm_unmapped_area_info info;
9808
9809 if (flags & MAP_FIXED) {
9810@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9811 if (filp || (flags & MAP_SHARED))
9812 do_colour_align = 1;
9813
9814+#ifdef CONFIG_PAX_RANDMMAP
9815+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9816+#endif
9817+
9818 if (addr) {
9819 if (do_colour_align)
9820 addr = COLOUR_ALIGN(addr, pgoff);
9821@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9822 addr = PAGE_ALIGN(addr);
9823
9824 vma = find_vma(mm, addr);
9825- if (TASK_SIZE - len >= addr &&
9826- (!vma || addr + len <= vma->vm_start))
9827+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9828 return addr;
9829 }
9830
9831 info.flags = 0;
9832 info.length = len;
9833- info.low_limit = TASK_UNMAPPED_BASE;
9834+ info.low_limit = mm->mmap_base;
9835 info.high_limit = TASK_SIZE;
9836 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9837 info.align_offset = pgoff << PAGE_SHIFT;
9838@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9839 struct mm_struct *mm = current->mm;
9840 unsigned long addr = addr0;
9841 int do_colour_align;
9842+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9843 struct vm_unmapped_area_info info;
9844
9845 if (flags & MAP_FIXED) {
9846@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9847 if (filp || (flags & MAP_SHARED))
9848 do_colour_align = 1;
9849
9850+#ifdef CONFIG_PAX_RANDMMAP
9851+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9852+#endif
9853+
9854 /* requesting a specific address */
9855 if (addr) {
9856 if (do_colour_align)
9857@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9858 addr = PAGE_ALIGN(addr);
9859
9860 vma = find_vma(mm, addr);
9861- if (TASK_SIZE - len >= addr &&
9862- (!vma || addr + len <= vma->vm_start))
9863+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9864 return addr;
9865 }
9866
9867@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9868 VM_BUG_ON(addr != -ENOMEM);
9869 info.flags = 0;
9870 info.low_limit = TASK_UNMAPPED_BASE;
9871+
9872+#ifdef CONFIG_PAX_RANDMMAP
9873+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9874+ info.low_limit += mm->delta_mmap;
9875+#endif
9876+
9877 info.high_limit = TASK_SIZE;
9878 addr = vm_unmapped_area(&info);
9879 }
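
Both sh hunks swap the open-coded `!vma || addr + len <= vma->vm_start` test for check_heap_stack_gap(vma, addr, len, offset), grsecurity's helper that additionally keeps new mappings a guard gap away from stack VMAs and honors the per-thread offset gathered by gr_rand_threadstack_offset() above. A sketch of the kind of policy such a check enforces (the gap size and field names are simplifying assumptions, not the exact grsecurity code):

#include <stdio.h>

#define GUARD_GAP (4096UL * 256)   /* assumed guard gap below stacks */
#define VM_GROWSDOWN 0x0100UL

struct vma_sketch { unsigned long vm_start, vm_flags; };

static int check_heap_stack_gap_sketch(const struct vma_sketch *vma,
                                       unsigned long addr, unsigned long len,
                                       unsigned long offset)
{
    if (!vma)
        return 1;                          /* nothing above: fits */
    if (addr + len > vma->vm_start)
        return 0;                          /* plain overlap */
    if (vma->vm_flags & VM_GROWSDOWN)      /* keep clear of stacks */
        return addr + len + GUARD_GAP + offset <= vma->vm_start;
    return 1;
}

int main(void)
{
    struct vma_sketch stack = { 0x7f0000000000UL, VM_GROWSDOWN };
    printf("flush against stack: %d\n",
           check_heap_stack_gap_sketch(&stack, stack.vm_start - 4096, 4096, 0));
    printf("well below stack:    %d\n",
           check_heap_stack_gap_sketch(&stack, 0x10000000UL, 4096, 0));
    return 0;
}
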
9880diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9881index 4082749..fd97781 100644
9882--- a/arch/sparc/include/asm/atomic_64.h
9883+++ b/arch/sparc/include/asm/atomic_64.h
9884@@ -15,18 +15,38 @@
9885 #define ATOMIC64_INIT(i) { (i) }
9886
9887 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9888+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9889+{
9890+ return ACCESS_ONCE(v->counter);
9891+}
9892 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9893+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9894+{
9895+ return ACCESS_ONCE(v->counter);
9896+}
9897
9898 #define atomic_set(v, i) (((v)->counter) = i)
9899+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9900+{
9901+ v->counter = i;
9902+}
9903 #define atomic64_set(v, i) (((v)->counter) = i)
9904+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9905+{
9906+ v->counter = i;
9907+}
9908
9909-#define ATOMIC_OP(op) \
9910-void atomic_##op(int, atomic_t *); \
9911-void atomic64_##op(long, atomic64_t *);
9912+#define __ATOMIC_OP(op, suffix) \
9913+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9914+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9915
9916-#define ATOMIC_OP_RETURN(op) \
9917-int atomic_##op##_return(int, atomic_t *); \
9918-long atomic64_##op##_return(long, atomic64_t *);
9919+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9920+
9921+#define __ATOMIC_OP_RETURN(op, suffix) \
9922+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9923+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9924+
9925+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9926
9927 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9928
9929@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9930
9931 #undef ATOMIC_OPS
9932 #undef ATOMIC_OP_RETURN
9933+#undef __ATOMIC_OP_RETURN
9934 #undef ATOMIC_OP
9935+#undef __ATOMIC_OP
9936
9937 #define atomic_dec_return(v) atomic_sub_return(1, v)
9938 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9939
9940 #define atomic_inc_return(v) atomic_add_return(1, v)
9941+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9942+{
9943+ return atomic_add_return_unchecked(1, v);
9944+}
9945 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9946+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9947+{
9948+ return atomic64_add_return_unchecked(1, v);
9949+}
9950
9951 /*
9952 * atomic_inc_and_test - increment and test
9953@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
9954 * other cases.
9955 */
9956 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9957+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9958+{
9959+ return atomic_inc_return_unchecked(v) == 0;
9960+}
9961 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9962
9963 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
9964@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
9965 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
9966
9967 #define atomic_inc(v) atomic_add(1, v)
9968+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9969+{
9970+ atomic_add_unchecked(1, v);
9971+}
9972 #define atomic64_inc(v) atomic64_add(1, v)
9973+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9974+{
9975+ atomic64_add_unchecked(1, v);
9976+}
9977
9978 #define atomic_dec(v) atomic_sub(1, v)
9979+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9980+{
9981+ atomic_sub_unchecked(1, v);
9982+}
9983 #define atomic64_dec(v) atomic64_sub(1, v)
9984+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9985+{
9986+ atomic64_sub_unchecked(1, v);
9987+}
9988
9989 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
9990 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
9991
9992 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9993+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9994+{
9995+ return cmpxchg(&v->counter, old, new);
9996+}
9997 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9998+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9999+{
10000+ return xchg(&v->counter, new);
10001+}
10002
10003 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10004 {
10005- int c, old;
10006+ int c, old, new;
10007 c = atomic_read(v);
10008 for (;;) {
10009- if (unlikely(c == (u)))
10010+ if (unlikely(c == u))
10011 break;
10012- old = atomic_cmpxchg((v), c, c + (a));
10013+
10014+ asm volatile("addcc %2, %0, %0\n"
10015+
10016+#ifdef CONFIG_PAX_REFCOUNT
10017+ "tvs %%icc, 6\n"
10018+#endif
10019+
10020+ : "=r" (new)
10021+ : "0" (c), "ir" (a)
10022+ : "cc");
10023+
10024+ old = atomic_cmpxchg(v, c, new);
10025 if (likely(old == c))
10026 break;
10027 c = old;
10028@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10029 #define atomic64_cmpxchg(v, o, n) \
10030 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10031 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10032+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10033+{
10034+ return xchg(&v->counter, new);
10035+}
10036
10037 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10038 {
10039- long c, old;
10040+ long c, old, new;
10041 c = atomic64_read(v);
10042 for (;;) {
10043- if (unlikely(c == (u)))
10044+ if (unlikely(c == u))
10045 break;
10046- old = atomic64_cmpxchg((v), c, c + (a));
10047+
10048+ asm volatile("addcc %2, %0, %0\n"
10049+
10050+#ifdef CONFIG_PAX_REFCOUNT
10051+ "tvs %%xcc, 6\n"
10052+#endif
10053+
10054+ : "=r" (new)
10055+ : "0" (c), "ir" (a)
10056+ : "cc");
10057+
10058+ old = atomic64_cmpxchg(v, c, new);
10059 if (likely(old == c))
10060 break;
10061 c = old;
10062 }
10063- return c != (u);
10064+ return c != u;
10065 }
10066
10067 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
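
The atomic_64.h rework splits every operation into a checked default and an _unchecked twin: the checked forms use addcc/subcc followed by "tvs %icc, 6" (trap into software trap 6 if the overflow flag is set), while the _unchecked forms keep plain wrap-around for counters where wrapping is intentional, such as statistics. A portable C sketch of the same policy, substituting __builtin_add_overflow for the trap instruction:

#include <stdio.h>
#include <stdlib.h>

/* Sketch only: the kernel raises trap 6, which bad_trap() routes to
 * pax_report_refcount_overflow(); here we simply abort(). */
static int checked_add(int v, int a)
{
	int r;
	if (__builtin_add_overflow(v, a, &r))
		abort();
	return r;
}

static int unchecked_add(int v, int a)
{
	return (int)((unsigned int)v + (unsigned int)a); /* wraps by design */
}

int main(void)
{
	printf("%d\n", unchecked_add(0x7fffffff, 1)); /* wraps to INT_MIN */
	printf("%d\n", checked_add(1, 2));            /* fine */
	return 0;
}
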
10068diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10069index 7664894..45a974b 100644
10070--- a/arch/sparc/include/asm/barrier_64.h
10071+++ b/arch/sparc/include/asm/barrier_64.h
10072@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10073 do { \
10074 compiletime_assert_atomic_type(*p); \
10075 barrier(); \
10076- ACCESS_ONCE(*p) = (v); \
10077+ ACCESS_ONCE_RW(*p) = (v); \
10078 } while (0)
10079
10080 #define smp_load_acquire(p) \
10081diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10082index 5bb6991..5c2132e 100644
10083--- a/arch/sparc/include/asm/cache.h
10084+++ b/arch/sparc/include/asm/cache.h
10085@@ -7,10 +7,12 @@
10086 #ifndef _SPARC_CACHE_H
10087 #define _SPARC_CACHE_H
10088
10089+#include <linux/const.h>
10090+
10091 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10092
10093 #define L1_CACHE_SHIFT 5
10094-#define L1_CACHE_BYTES 32
10095+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10096
10097 #ifdef CONFIG_SPARC32
10098 #define SMP_CACHE_BYTES_SHIFT 5
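
L1_CACHE_BYTES is rewritten as a shift of _AC(1,UL) so the constant is typed unsigned long in C yet remains usable from assembly, where <linux/const.h> drops the suffix. A self-contained sketch of the _AC idiom (the __ASSEMBLY__ branch omitted):

#include <stdio.h>

/* Simplified _AC from <uapi/linux/const.h>: in C it pastes the type suffix;
 * under __ASSEMBLY__ the real header defines _AC(X, Y) as just X. */
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)

int main(void)
{
	printf("L1_CACHE_BYTES = %lu (an unsigned long)\n", L1_CACHE_BYTES);
	return 0;
}
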
10099diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10100index a24e41f..47677ff 100644
10101--- a/arch/sparc/include/asm/elf_32.h
10102+++ b/arch/sparc/include/asm/elf_32.h
10103@@ -114,6 +114,13 @@ typedef struct {
10104
10105 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10106
10107+#ifdef CONFIG_PAX_ASLR
10108+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10109+
10110+#define PAX_DELTA_MMAP_LEN 16
10111+#define PAX_DELTA_STACK_LEN 16
10112+#endif
10113+
10114 /* This yields a mask that user programs can use to figure out what
10115 instruction set this cpu supports. This can NOT be done in userspace
10116 on Sparc. */
10117diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10118index 370ca1e..d4f4a98 100644
10119--- a/arch/sparc/include/asm/elf_64.h
10120+++ b/arch/sparc/include/asm/elf_64.h
10121@@ -189,6 +189,13 @@ typedef struct {
10122 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10123 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10124
10125+#ifdef CONFIG_PAX_ASLR
10126+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10127+
10128+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10129+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10130+#endif
10131+
10132 extern unsigned long sparc64_elf_hwcap;
10133 #define ELF_HWCAP sparc64_elf_hwcap
10134
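
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits mixed into the mmap and stack deltas: 14/15 bits for 32-bit tasks, 28/29 for 64-bit ones. Assuming the usual PaX construction of delta = (random & ((1UL << len) - 1)) << PAGE_SHIFT, the reachable span follows directly; PAGE_SHIFT 13 (8K pages on sparc64) and 64-bit unsigned long are assumptions in this sketch:

#include <stdio.h>

#define PAGE_SHIFT 13	/* sparc64 uses 8K pages */

static unsigned long span(unsigned int len_bits)
{
	/* assumed: delta = (random & ((1UL << len_bits) - 1)) << PAGE_SHIFT */
	return ((1UL << len_bits) - 1) << PAGE_SHIFT;
}

int main(void)
{
	printf("32-bit task mmap delta span: up to %#lx\n", span(14));
	printf("64-bit task mmap delta span: up to %#lx\n", span(28));
	return 0;
}
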
10135diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10136index a3890da..f6a408e 100644
10137--- a/arch/sparc/include/asm/pgalloc_32.h
10138+++ b/arch/sparc/include/asm/pgalloc_32.h
10139@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10140 }
10141
10142 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10143+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10144
10145 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10146 unsigned long address)
10147diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10148index 5e31871..13469c6 100644
10149--- a/arch/sparc/include/asm/pgalloc_64.h
10150+++ b/arch/sparc/include/asm/pgalloc_64.h
10151@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10152 }
10153
10154 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10155+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10156
10157 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10158 {
10159@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10160 }
10161
10162 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10163+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10164
10165 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10166 {
10167diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10168index 59ba6f6..4518128 100644
10169--- a/arch/sparc/include/asm/pgtable.h
10170+++ b/arch/sparc/include/asm/pgtable.h
10171@@ -5,4 +5,8 @@
10172 #else
10173 #include <asm/pgtable_32.h>
10174 #endif
10175+
10176+#define ktla_ktva(addr) (addr)
10177+#define ktva_ktla(addr) (addr)
10178+
10179 #endif
10180diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10181index b9b91ae..950b91e 100644
10182--- a/arch/sparc/include/asm/pgtable_32.h
10183+++ b/arch/sparc/include/asm/pgtable_32.h
10184@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10185 #define PAGE_SHARED SRMMU_PAGE_SHARED
10186 #define PAGE_COPY SRMMU_PAGE_COPY
10187 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10188+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10189+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10190+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10191 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10192
10193 /* Top-level page directory - dummy used by init-mm.
10194@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10195
10196 /* xwr */
10197 #define __P000 PAGE_NONE
10198-#define __P001 PAGE_READONLY
10199-#define __P010 PAGE_COPY
10200-#define __P011 PAGE_COPY
10201+#define __P001 PAGE_READONLY_NOEXEC
10202+#define __P010 PAGE_COPY_NOEXEC
10203+#define __P011 PAGE_COPY_NOEXEC
10204 #define __P100 PAGE_READONLY
10205 #define __P101 PAGE_READONLY
10206 #define __P110 PAGE_COPY
10207 #define __P111 PAGE_COPY
10208
10209 #define __S000 PAGE_NONE
10210-#define __S001 PAGE_READONLY
10211-#define __S010 PAGE_SHARED
10212-#define __S011 PAGE_SHARED
10213+#define __S001 PAGE_READONLY_NOEXEC
10214+#define __S010 PAGE_SHARED_NOEXEC
10215+#define __S011 PAGE_SHARED_NOEXEC
10216 #define __S100 PAGE_READONLY
10217 #define __S101 PAGE_READONLY
10218 #define __S110 PAGE_SHARED
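
The __Pxxx/__Sxxx entries form the protection map indexed by the xwr bits noted in the comment (__P for private, __S for shared mappings). The change strips the implied execute right only from rows where PROT_EXEC was not requested, so PROT_READ|PROT_WRITE data mappings become non-executable under SRMMU while exec mappings are untouched. A reduced sketch of the lookup, with the pgprot values replaced by strings:

#include <stdio.h>
#include <sys/mman.h>

static const char *prot_map_private[8] = {
	/* index bits: PROT_EXEC(4) | PROT_WRITE(2) | PROT_READ(1) */
	[0] = "PAGE_NONE",
	[1] = "PAGE_READONLY_NOEXEC",
	[2] = "PAGE_COPY_NOEXEC",
	[3] = "PAGE_COPY_NOEXEC",
	[4] = "PAGE_READONLY",
	[5] = "PAGE_READONLY",
	[6] = "PAGE_COPY",
	[7] = "PAGE_COPY",
};

int main(void)
{
	int prot = PROT_READ | PROT_WRITE;	/* no PROT_EXEC requested */

	/* the kernel indexes its protection map with these low flag bits */
	printf("%s\n", prot_map_private[prot & 7]);
	return 0;
}
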
10219diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10220index 79da178..c2eede8 100644
10221--- a/arch/sparc/include/asm/pgtsrmmu.h
10222+++ b/arch/sparc/include/asm/pgtsrmmu.h
10223@@ -115,6 +115,11 @@
10224 SRMMU_EXEC | SRMMU_REF)
10225 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10226 SRMMU_EXEC | SRMMU_REF)
10227+
10228+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10229+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10230+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10231+
10232 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10233 SRMMU_DIRTY | SRMMU_REF)
10234
10235diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10236index 29d64b1..4272fe8 100644
10237--- a/arch/sparc/include/asm/setup.h
10238+++ b/arch/sparc/include/asm/setup.h
10239@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10240 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10241
10242 /* init_64.c */
10243-extern atomic_t dcpage_flushes;
10244-extern atomic_t dcpage_flushes_xcall;
10245+extern atomic_unchecked_t dcpage_flushes;
10246+extern atomic_unchecked_t dcpage_flushes_xcall;
10247
10248 extern int sysctl_tsb_ratio;
10249 #endif
10250diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10251index 9689176..63c18ea 100644
10252--- a/arch/sparc/include/asm/spinlock_64.h
10253+++ b/arch/sparc/include/asm/spinlock_64.h
10254@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10255
10256 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10257
10258-static void inline arch_read_lock(arch_rwlock_t *lock)
10259+static inline void arch_read_lock(arch_rwlock_t *lock)
10260 {
10261 unsigned long tmp1, tmp2;
10262
10263 __asm__ __volatile__ (
10264 "1: ldsw [%2], %0\n"
10265 " brlz,pn %0, 2f\n"
10266-"4: add %0, 1, %1\n"
10267+"4: addcc %0, 1, %1\n"
10268+
10269+#ifdef CONFIG_PAX_REFCOUNT
10270+" tvs %%icc, 6\n"
10271+#endif
10272+
10273 " cas [%2], %0, %1\n"
10274 " cmp %0, %1\n"
10275 " bne,pn %%icc, 1b\n"
10276@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10277 " .previous"
10278 : "=&r" (tmp1), "=&r" (tmp2)
10279 : "r" (lock)
10280- : "memory");
10281+ : "memory", "cc");
10282 }
10283
10284-static int inline arch_read_trylock(arch_rwlock_t *lock)
10285+static inline int arch_read_trylock(arch_rwlock_t *lock)
10286 {
10287 int tmp1, tmp2;
10288
10289@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10290 "1: ldsw [%2], %0\n"
10291 " brlz,a,pn %0, 2f\n"
10292 " mov 0, %0\n"
10293-" add %0, 1, %1\n"
10294+" addcc %0, 1, %1\n"
10295+
10296+#ifdef CONFIG_PAX_REFCOUNT
10297+" tvs %%icc, 6\n"
10298+#endif
10299+
10300 " cas [%2], %0, %1\n"
10301 " cmp %0, %1\n"
10302 " bne,pn %%icc, 1b\n"
10303@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10304 return tmp1;
10305 }
10306
10307-static void inline arch_read_unlock(arch_rwlock_t *lock)
10308+static inline void arch_read_unlock(arch_rwlock_t *lock)
10309 {
10310 unsigned long tmp1, tmp2;
10311
10312 __asm__ __volatile__(
10313 "1: lduw [%2], %0\n"
10314-" sub %0, 1, %1\n"
10315+" subcc %0, 1, %1\n"
10316+
10317+#ifdef CONFIG_PAX_REFCOUNT
10318+" tvs %%icc, 6\n"
10319+#endif
10320+
10321 " cas [%2], %0, %1\n"
10322 " cmp %0, %1\n"
10323 " bne,pn %%xcc, 1b\n"
10324@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10325 : "memory");
10326 }
10327
10328-static void inline arch_write_lock(arch_rwlock_t *lock)
10329+static inline void arch_write_lock(arch_rwlock_t *lock)
10330 {
10331 unsigned long mask, tmp1, tmp2;
10332
10333@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10334 : "memory");
10335 }
10336
10337-static void inline arch_write_unlock(arch_rwlock_t *lock)
10338+static inline void arch_write_unlock(arch_rwlock_t *lock)
10339 {
10340 __asm__ __volatile__(
10341 " stw %%g0, [%0]"
10342@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10343 : "memory");
10344 }
10345
10346-static int inline arch_write_trylock(arch_rwlock_t *lock)
10347+static inline int arch_write_trylock(arch_rwlock_t *lock)
10348 {
10349 unsigned long mask, tmp1, tmp2, result;
10350
10351diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10352index 025c984..a216504 100644
10353--- a/arch/sparc/include/asm/thread_info_32.h
10354+++ b/arch/sparc/include/asm/thread_info_32.h
10355@@ -49,6 +49,8 @@ struct thread_info {
10356 unsigned long w_saved;
10357
10358 struct restart_block restart_block;
10359+
10360+ unsigned long lowest_stack;
10361 };
10362
10363 /*
10364diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10365index 798f027..b009941 100644
10366--- a/arch/sparc/include/asm/thread_info_64.h
10367+++ b/arch/sparc/include/asm/thread_info_64.h
10368@@ -63,6 +63,8 @@ struct thread_info {
10369 struct pt_regs *kern_una_regs;
10370 unsigned int kern_una_insn;
10371
10372+ unsigned long lowest_stack;
10373+
10374 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10375 __attribute__ ((aligned(64)));
10376 };
10377@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10378 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10379 /* flag bit 4 is available */
10380 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10381-/* flag bit 6 is available */
10382+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10383 #define TIF_32BIT 7 /* 32-bit binary */
10384 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10385 #define TIF_SECCOMP 9 /* secure computing */
10386 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10387 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10388+
10389 /* NOTE: Thread flags >= 12 should be ones we have no interest
10390 * in using in assembly, else we can't use the mask as
10391 * an immediate value in instructions such as andcc.
10392@@ -215,12 +218,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10393 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10394 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10395 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10396+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10397
10398 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10399 _TIF_DO_NOTIFY_RESUME_MASK | \
10400 _TIF_NEED_RESCHED)
10401 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10402
10403+#define _TIF_WORK_SYSCALL \
10404+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10405+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10406+
10407 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10408
10409 /*
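
_TIF_WORK_SYSCALL bundles every syscall-entry work bit, now including TIF_GRSEC_SETXID at the previously free bit 6, so syscalls.S can test one mask with a single andcc. The note about flags >= 12 exists because SPARC instruction immediates are 13-bit signed values (-4096..4095); a quick check that the combined mask still fits (TIF_SYSCALL_TRACE assumed at bit 0, as upstream):

#include <stdio.h>

#define TIF_SYSCALL_TRACE	0	/* assumed bit 0, as upstream */
#define TIF_GRSEC_SETXID	6
#define TIF_NOHZ		8
#define TIF_SECCOMP		9
#define TIF_SYSCALL_AUDIT	10
#define TIF_SYSCALL_TRACEPOINT	11

#define _TIF_WORK_SYSCALL \
	((1 << TIF_SYSCALL_TRACE) | (1 << TIF_SECCOMP) | (1 << TIF_SYSCALL_AUDIT) | \
	 (1 << TIF_SYSCALL_TRACEPOINT) | (1 << TIF_NOHZ) | (1 << TIF_GRSEC_SETXID))

int main(void)
{
	/* a SPARC simm13 immediate covers -4096..4095 */
	printf("mask = %#x, fits in simm13: %s\n", _TIF_WORK_SYSCALL,
	       _TIF_WORK_SYSCALL <= 4095 ? "yes" : "no");
	return 0;
}
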
10410diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10411index bd56c28..4b63d83 100644
10412--- a/arch/sparc/include/asm/uaccess.h
10413+++ b/arch/sparc/include/asm/uaccess.h
10414@@ -1,5 +1,6 @@
10415 #ifndef ___ASM_SPARC_UACCESS_H
10416 #define ___ASM_SPARC_UACCESS_H
10417+
10418 #if defined(__sparc__) && defined(__arch64__)
10419 #include <asm/uaccess_64.h>
10420 #else
10421diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10422index 9634d08..f55fe4f 100644
10423--- a/arch/sparc/include/asm/uaccess_32.h
10424+++ b/arch/sparc/include/asm/uaccess_32.h
10425@@ -250,27 +250,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10426
10427 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10428 {
10429- if (n && __access_ok((unsigned long) to, n))
10430+ if ((long)n < 0)
10431+ return n;
10432+
10433+ if (n && __access_ok((unsigned long) to, n)) {
10434+ if (!__builtin_constant_p(n))
10435+ check_object_size(from, n, true);
10436 return __copy_user(to, (__force void __user *) from, n);
10437- else
10438+ } else
10439 return n;
10440 }
10441
10442 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10443 {
10444+ if ((long)n < 0)
10445+ return n;
10446+
10447+ if (!__builtin_constant_p(n))
10448+ check_object_size(from, n, true);
10449+
10450 return __copy_user(to, (__force void __user *) from, n);
10451 }
10452
10453 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10454 {
10455- if (n && __access_ok((unsigned long) from, n))
10456+ if ((long)n < 0)
10457+ return n;
10458+
10459+ if (n && __access_ok((unsigned long) from, n)) {
10460+ if (!__builtin_constant_p(n))
10461+ check_object_size(to, n, false);
10462 return __copy_user((__force void __user *) to, from, n);
10463- else
10464+ } else
10465 return n;
10466 }
10467
10468 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10469 {
10470+ if ((long)n < 0)
10471+ return n;
10472+
10473 return __copy_user((__force void __user *) to, from, n);
10474 }
10475
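
Each copy helper above now rejects a length whose sign bit is set before touching userspace: a negative int that leaks into the unsigned size parameter would otherwise look like a near-SIZE_MAX copy. A userspace illustration of the failure mode the (long)n < 0 guard catches:

#include <stdio.h>

static unsigned long guarded_copy(unsigned long n)
{
	if ((long)n < 0)
		return n;	/* report everything as uncopied, as the patch does */
	/* ... __copy_user(to, from, n) would run here ... */
	return 0;
}

int main(void)
{
	int broken_len = -1;	/* e.g. an error code used as a length */
	unsigned long n = (unsigned long)broken_len;

	printf("n = %#lx, bytes left uncopied: %#lx\n", n, guarded_copy(n));
	return 0;
}
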
10476diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10477index c990a5e..f17b9c1 100644
10478--- a/arch/sparc/include/asm/uaccess_64.h
10479+++ b/arch/sparc/include/asm/uaccess_64.h
10480@@ -10,6 +10,7 @@
10481 #include <linux/compiler.h>
10482 #include <linux/string.h>
10483 #include <linux/thread_info.h>
10484+#include <linux/kernel.h>
10485 #include <asm/asi.h>
10486 #include <asm/spitfire.h>
10487 #include <asm-generic/uaccess-unaligned.h>
10488@@ -214,8 +215,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10489 static inline unsigned long __must_check
10490 copy_from_user(void *to, const void __user *from, unsigned long size)
10491 {
10492- unsigned long ret = ___copy_from_user(to, from, size);
10493+ unsigned long ret;
10494
10495+ if ((long)size < 0 || size > INT_MAX)
10496+ return size;
10497+
10498+ if (!__builtin_constant_p(size))
10499+ check_object_size(to, size, false);
10500+
10501+ ret = ___copy_from_user(to, from, size);
10502 if (unlikely(ret))
10503 ret = copy_from_user_fixup(to, from, size);
10504
10505@@ -231,8 +239,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10506 static inline unsigned long __must_check
10507 copy_to_user(void __user *to, const void *from, unsigned long size)
10508 {
10509- unsigned long ret = ___copy_to_user(to, from, size);
10510+ unsigned long ret;
10511
10512+ if ((long)size < 0 || size > INT_MAX)
10513+ return size;
10514+
10515+ if (!__builtin_constant_p(size))
10516+ check_object_size(from, size, true);
10517+
10518+ ret = ___copy_to_user(to, from, size);
10519 if (unlikely(ret))
10520 ret = copy_to_user_fixup(to, from, size);
10521 return ret;
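
The 64-bit variants add an INT_MAX upper bound on top of the sign check, and both widths gate check_object_size() behind !__builtin_constant_p(size), presumably because compile-time-constant sizes can be validated statically. A small sketch of that gating pattern, where maybe_check is a hypothetical stand-in for the runtime object-size check:

#include <stdio.h>

#define maybe_check(ptr, n)						\
	do {								\
		if (!__builtin_constant_p(n))				\
			printf("runtime check of %zu bytes at %p\n",	\
			       (size_t)(n), (void *)(ptr));		\
	} while (0)

int main(int argc, char **argv)
{
	char buf[16];
	size_t n = (size_t)argc;	/* runtime value: check runs */

	maybe_check(buf, sizeof(buf));	/* integer constant: check skipped */
	maybe_check(buf, n);
	(void)argv;
	return 0;
}
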
10522diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10523index 7cf9c6e..6206648 100644
10524--- a/arch/sparc/kernel/Makefile
10525+++ b/arch/sparc/kernel/Makefile
10526@@ -4,7 +4,7 @@
10527 #
10528
10529 asflags-y := -ansi
10530-ccflags-y := -Werror
10531+#ccflags-y := -Werror
10532
10533 extra-y := head_$(BITS).o
10534
10535diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10536index 50e7b62..79fae35 100644
10537--- a/arch/sparc/kernel/process_32.c
10538+++ b/arch/sparc/kernel/process_32.c
10539@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10540
10541 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10542 r->psr, r->pc, r->npc, r->y, print_tainted());
10543- printk("PC: <%pS>\n", (void *) r->pc);
10544+ printk("PC: <%pA>\n", (void *) r->pc);
10545 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10546 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10547 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10548 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10549 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10550 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10551- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10552+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10553
10554 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10555 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10556@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10557 rw = (struct reg_window32 *) fp;
10558 pc = rw->ins[7];
10559 printk("[%08lx : ", pc);
10560- printk("%pS ] ", (void *) pc);
10561+ printk("%pA ] ", (void *) pc);
10562 fp = rw->ins[6];
10563 } while (++count < 16);
10564 printk("\n");
10565diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10566index 0be7bf9..2b1cba8 100644
10567--- a/arch/sparc/kernel/process_64.c
10568+++ b/arch/sparc/kernel/process_64.c
10569@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10570 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10571 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10572 if (regs->tstate & TSTATE_PRIV)
10573- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10574+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10575 }
10576
10577 void show_regs(struct pt_regs *regs)
10578@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10579
10580 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10581 regs->tpc, regs->tnpc, regs->y, print_tainted());
10582- printk("TPC: <%pS>\n", (void *) regs->tpc);
10583+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10584 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10585 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10586 regs->u_regs[3]);
10587@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10588 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10589 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10590 regs->u_regs[15]);
10591- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10592+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10593 show_regwindow(regs);
10594 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10595 }
10596@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10597 ((tp && tp->task) ? tp->task->pid : -1));
10598
10599 if (gp->tstate & TSTATE_PRIV) {
10600- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10601+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10602 (void *) gp->tpc,
10603 (void *) gp->o7,
10604 (void *) gp->i7,
10605diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10606index 79cc0d1..ec62734 100644
10607--- a/arch/sparc/kernel/prom_common.c
10608+++ b/arch/sparc/kernel/prom_common.c
10609@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10610
10611 unsigned int prom_early_allocated __initdata;
10612
10613-static struct of_pdt_ops prom_sparc_ops __initdata = {
10614+static struct of_pdt_ops prom_sparc_ops __initconst = {
10615 .nextprop = prom_common_nextprop,
10616 .getproplen = prom_getproplen,
10617 .getproperty = prom_getproperty,
10618diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10619index 9ddc492..27a5619 100644
10620--- a/arch/sparc/kernel/ptrace_64.c
10621+++ b/arch/sparc/kernel/ptrace_64.c
10622@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10623 return ret;
10624 }
10625
10626+#ifdef CONFIG_GRKERNSEC_SETXID
10627+extern void gr_delayed_cred_worker(void);
10628+#endif
10629+
10630 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10631 {
10632 int ret = 0;
10633@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10634 if (test_thread_flag(TIF_NOHZ))
10635 user_exit();
10636
10637+#ifdef CONFIG_GRKERNSEC_SETXID
10638+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10639+ gr_delayed_cred_worker();
10640+#endif
10641+
10642 if (test_thread_flag(TIF_SYSCALL_TRACE))
10643 ret = tracehook_report_syscall_entry(regs);
10644
10645@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10646 if (test_thread_flag(TIF_NOHZ))
10647 user_exit();
10648
10649+#ifdef CONFIG_GRKERNSEC_SETXID
10650+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10651+ gr_delayed_cred_worker();
10652+#endif
10653+
10654 audit_syscall_exit(regs);
10655
10656 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
10657diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10658index da6f1a7..e5dea8f 100644
10659--- a/arch/sparc/kernel/smp_64.c
10660+++ b/arch/sparc/kernel/smp_64.c
10661@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10662 return;
10663
10664 #ifdef CONFIG_DEBUG_DCFLUSH
10665- atomic_inc(&dcpage_flushes);
10666+ atomic_inc_unchecked(&dcpage_flushes);
10667 #endif
10668
10669 this_cpu = get_cpu();
10670@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10671 xcall_deliver(data0, __pa(pg_addr),
10672 (u64) pg_addr, cpumask_of(cpu));
10673 #ifdef CONFIG_DEBUG_DCFLUSH
10674- atomic_inc(&dcpage_flushes_xcall);
10675+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10676 #endif
10677 }
10678 }
10679@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10680 preempt_disable();
10681
10682 #ifdef CONFIG_DEBUG_DCFLUSH
10683- atomic_inc(&dcpage_flushes);
10684+ atomic_inc_unchecked(&dcpage_flushes);
10685 #endif
10686 data0 = 0;
10687 pg_addr = page_address(page);
10688@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10689 xcall_deliver(data0, __pa(pg_addr),
10690 (u64) pg_addr, cpu_online_mask);
10691 #ifdef CONFIG_DEBUG_DCFLUSH
10692- atomic_inc(&dcpage_flushes_xcall);
10693+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10694 #endif
10695 }
10696 __local_flush_dcache_page(page);
10697diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10698index 646988d..b88905f 100644
10699--- a/arch/sparc/kernel/sys_sparc_32.c
10700+++ b/arch/sparc/kernel/sys_sparc_32.c
10701@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10702 if (len > TASK_SIZE - PAGE_SIZE)
10703 return -ENOMEM;
10704 if (!addr)
10705- addr = TASK_UNMAPPED_BASE;
10706+ addr = current->mm->mmap_base;
10707
10708 info.flags = 0;
10709 info.length = len;
10710diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10711index c85403d..6af95c9 100644
10712--- a/arch/sparc/kernel/sys_sparc_64.c
10713+++ b/arch/sparc/kernel/sys_sparc_64.c
10714@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10715 struct vm_area_struct * vma;
10716 unsigned long task_size = TASK_SIZE;
10717 int do_color_align;
10718+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10719 struct vm_unmapped_area_info info;
10720
10721 if (flags & MAP_FIXED) {
10722 /* We do not accept a shared mapping if it would violate
10723 * cache aliasing constraints.
10724 */
10725- if ((flags & MAP_SHARED) &&
10726+ if ((filp || (flags & MAP_SHARED)) &&
10727 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10728 return -EINVAL;
10729 return addr;
10730@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10731 if (filp || (flags & MAP_SHARED))
10732 do_color_align = 1;
10733
10734+#ifdef CONFIG_PAX_RANDMMAP
10735+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10736+#endif
10737+
10738 if (addr) {
10739 if (do_color_align)
10740 addr = COLOR_ALIGN(addr, pgoff);
10741@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10742 addr = PAGE_ALIGN(addr);
10743
10744 vma = find_vma(mm, addr);
10745- if (task_size - len >= addr &&
10746- (!vma || addr + len <= vma->vm_start))
10747+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10748 return addr;
10749 }
10750
10751 info.flags = 0;
10752 info.length = len;
10753- info.low_limit = TASK_UNMAPPED_BASE;
10754+ info.low_limit = mm->mmap_base;
10755 info.high_limit = min(task_size, VA_EXCLUDE_START);
10756 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10757 info.align_offset = pgoff << PAGE_SHIFT;
10758+ info.threadstack_offset = offset;
10759 addr = vm_unmapped_area(&info);
10760
10761 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10762 VM_BUG_ON(addr != -ENOMEM);
10763 info.low_limit = VA_EXCLUDE_END;
10764+
10765+#ifdef CONFIG_PAX_RANDMMAP
10766+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10767+ info.low_limit += mm->delta_mmap;
10768+#endif
10769+
10770 info.high_limit = task_size;
10771 addr = vm_unmapped_area(&info);
10772 }
10773@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10774 unsigned long task_size = STACK_TOP32;
10775 unsigned long addr = addr0;
10776 int do_color_align;
10777+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10778 struct vm_unmapped_area_info info;
10779
10780 /* This should only ever run for 32-bit processes. */
10781@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10782 /* We do not accept a shared mapping if it would violate
10783 * cache aliasing constraints.
10784 */
10785- if ((flags & MAP_SHARED) &&
10786+ if ((filp || (flags & MAP_SHARED)) &&
10787 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10788 return -EINVAL;
10789 return addr;
10790@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10791 if (filp || (flags & MAP_SHARED))
10792 do_color_align = 1;
10793
10794+#ifdef CONFIG_PAX_RANDMMAP
10795+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10796+#endif
10797+
10798 /* requesting a specific address */
10799 if (addr) {
10800 if (do_color_align)
10801@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10802 addr = PAGE_ALIGN(addr);
10803
10804 vma = find_vma(mm, addr);
10805- if (task_size - len >= addr &&
10806- (!vma || addr + len <= vma->vm_start))
10807+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10808 return addr;
10809 }
10810
10811@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10812 info.high_limit = mm->mmap_base;
10813 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10814 info.align_offset = pgoff << PAGE_SHIFT;
10815+ info.threadstack_offset = offset;
10816 addr = vm_unmapped_area(&info);
10817
10818 /*
10819@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10820 VM_BUG_ON(addr != -ENOMEM);
10821 info.flags = 0;
10822 info.low_limit = TASK_UNMAPPED_BASE;
10823+
10824+#ifdef CONFIG_PAX_RANDMMAP
10825+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10826+ info.low_limit += mm->delta_mmap;
10827+#endif
10828+
10829 info.high_limit = STACK_TOP32;
10830 addr = vm_unmapped_area(&info);
10831 }
10832@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10833 EXPORT_SYMBOL(get_fb_unmapped_area);
10834
10835 /* Essentially the same as PowerPC. */
10836-static unsigned long mmap_rnd(void)
10837+static unsigned long mmap_rnd(struct mm_struct *mm)
10838 {
10839 unsigned long rnd = 0UL;
10840
10841+#ifdef CONFIG_PAX_RANDMMAP
10842+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10843+#endif
10844+
10845 if (current->flags & PF_RANDOMIZE) {
10846 unsigned long val = get_random_int();
10847 if (test_thread_flag(TIF_32BIT))
10848@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10849
10850 void arch_pick_mmap_layout(struct mm_struct *mm)
10851 {
10852- unsigned long random_factor = mmap_rnd();
10853+ unsigned long random_factor = mmap_rnd(mm);
10854 unsigned long gap;
10855
10856 /*
10857@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10858 gap == RLIM_INFINITY ||
10859 sysctl_legacy_va_layout) {
10860 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10861+
10862+#ifdef CONFIG_PAX_RANDMMAP
10863+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10864+ mm->mmap_base += mm->delta_mmap;
10865+#endif
10866+
10867 mm->get_unmapped_area = arch_get_unmapped_area;
10868 } else {
10869 /* We know it's 32-bit */
10870@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10871 gap = (task_size / 6 * 5);
10872
10873 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10874+
10875+#ifdef CONFIG_PAX_RANDMMAP
10876+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10877+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10878+#endif
10879+
10880 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10881 }
10882 }
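
mmap_rnd() gains the mm argument so it can return no randomization when MF_PAX_RANDMMAP is active, since the PaX deltas are applied to mmap_base separately further down in arch_pick_mmap_layout(). On the non-PaX path it masks get_random_int() to an architecture-dependent width and page-aligns the result; the 23- and 30-bit bounds in this sketch are modeled on the sparc64 source and should be treated as illustrative:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 13	/* sparc64 */

/* Illustrative stand-in for the kernel's mmap_rnd(). */
static unsigned long mmap_rnd(int is_32bit, unsigned int val)
{
	unsigned long rnd;

	if (is_32bit)
		rnd = val % (1UL << (23 - PAGE_SHIFT));
	else
		rnd = val % (1UL << (30 - PAGE_SHIFT));
	return rnd << PAGE_SHIFT;	/* page-aligned jitter */
}

int main(void)
{
	srand(1);
	printf("64-bit task base jitter: %#lx\n", mmap_rnd(0, (unsigned)rand()));
	printf("32-bit task base jitter: %#lx\n", mmap_rnd(1, (unsigned)rand()));
	return 0;
}
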
10883diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10884index bb00089..e0ea580 100644
10885--- a/arch/sparc/kernel/syscalls.S
10886+++ b/arch/sparc/kernel/syscalls.S
10887@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10888 #endif
10889 .align 32
10890 1: ldx [%g6 + TI_FLAGS], %l5
10891- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10892+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10893 be,pt %icc, rtrap
10894 nop
10895 call syscall_trace_leave
10896@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10897
10898 srl %i3, 0, %o3 ! IEU0
10899 srl %i2, 0, %o2 ! IEU0 Group
10900- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10901+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10902 bne,pn %icc, linux_syscall_trace32 ! CTI
10903 mov %i0, %l5 ! IEU1
10904 5: call %l7 ! CTI Group brk forced
10905@@ -218,7 +218,7 @@ linux_sparc_syscall:
10906
10907 mov %i3, %o3 ! IEU1
10908 mov %i4, %o4 ! IEU0 Group
10909- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10910+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10911 bne,pn %icc, linux_syscall_trace ! CTI Group
10912 mov %i0, %l5 ! IEU0
10913 2: call %l7 ! CTI Group brk forced
10914@@ -233,7 +233,7 @@ ret_sys_call:
10915
10916 cmp %o0, -ERESTART_RESTARTBLOCK
10917 bgeu,pn %xcc, 1f
10918- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10919+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10920 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10921
10922 2:
10923diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10924index 6fd386c5..6907d81 100644
10925--- a/arch/sparc/kernel/traps_32.c
10926+++ b/arch/sparc/kernel/traps_32.c
10927@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10928 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10929 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10930
10931+extern void gr_handle_kernel_exploit(void);
10932+
10933 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10934 {
10935 static int die_counter;
10936@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10937 count++ < 30 &&
10938 (((unsigned long) rw) >= PAGE_OFFSET) &&
10939 !(((unsigned long) rw) & 0x7)) {
10940- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10941+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10942 (void *) rw->ins[7]);
10943 rw = (struct reg_window32 *)rw->ins[6];
10944 }
10945 }
10946 printk("Instruction DUMP:");
10947 instruction_dump ((unsigned long *) regs->pc);
10948- if(regs->psr & PSR_PS)
10949+ if(regs->psr & PSR_PS) {
10950+ gr_handle_kernel_exploit();
10951 do_exit(SIGKILL);
10952+ }
10953 do_exit(SIGSEGV);
10954 }
10955
10956diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10957index 981a769..d906eda 100644
10958--- a/arch/sparc/kernel/traps_64.c
10959+++ b/arch/sparc/kernel/traps_64.c
10960@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10961 i + 1,
10962 p->trapstack[i].tstate, p->trapstack[i].tpc,
10963 p->trapstack[i].tnpc, p->trapstack[i].tt);
10964- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10965+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10966 }
10967 }
10968
10969@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10970
10971 lvl -= 0x100;
10972 if (regs->tstate & TSTATE_PRIV) {
10973+
10974+#ifdef CONFIG_PAX_REFCOUNT
10975+ if (lvl == 6)
10976+ pax_report_refcount_overflow(regs);
10977+#endif
10978+
10979 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10980 die_if_kernel(buffer, regs);
10981 }
10982@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10983 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10984 {
10985 char buffer[32];
10986-
10987+
10988 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10989 0, lvl, SIGTRAP) == NOTIFY_STOP)
10990 return;
10991
10992+#ifdef CONFIG_PAX_REFCOUNT
10993+ if (lvl == 6)
10994+ pax_report_refcount_overflow(regs);
10995+#endif
10996+
10997 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10998
10999 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11000@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11001 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11002 printk("%s" "ERROR(%d): ",
11003 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11004- printk("TPC<%pS>\n", (void *) regs->tpc);
11005+ printk("TPC<%pA>\n", (void *) regs->tpc);
11006 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11007 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11008 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11009@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11010 smp_processor_id(),
11011 (type & 0x1) ? 'I' : 'D',
11012 regs->tpc);
11013- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11014+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11015 panic("Irrecoverable Cheetah+ parity error.");
11016 }
11017
11018@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11019 smp_processor_id(),
11020 (type & 0x1) ? 'I' : 'D',
11021 regs->tpc);
11022- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11023+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11024 }
11025
11026 struct sun4v_error_entry {
11027@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11028 /*0x38*/u64 reserved_5;
11029 };
11030
11031-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11032-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11033+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11034+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11035
11036 static const char *sun4v_err_type_to_str(u8 type)
11037 {
11038@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11039 }
11040
11041 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11042- int cpu, const char *pfx, atomic_t *ocnt)
11043+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11044 {
11045 u64 *raw_ptr = (u64 *) ent;
11046 u32 attrs;
11047@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11048
11049 show_regs(regs);
11050
11051- if ((cnt = atomic_read(ocnt)) != 0) {
11052- atomic_set(ocnt, 0);
11053+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11054+ atomic_set_unchecked(ocnt, 0);
11055 wmb();
11056 printk("%s: Queue overflowed %d times.\n",
11057 pfx, cnt);
11058@@ -2048,7 +2059,7 @@ out:
11059 */
11060 void sun4v_resum_overflow(struct pt_regs *regs)
11061 {
11062- atomic_inc(&sun4v_resum_oflow_cnt);
11063+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11064 }
11065
11066 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11067@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11068 /* XXX Actually even this can make not that much sense. Perhaps
11069 * XXX we should just pull the plug and panic directly from here?
11070 */
11071- atomic_inc(&sun4v_nonresum_oflow_cnt);
11072+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11073 }
11074
11075 static void sun4v_tlb_error(struct pt_regs *regs)
11076@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11077
11078 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11079 regs->tpc, tl);
11080- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11081+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11082 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11083- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11084+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11085 (void *) regs->u_regs[UREG_I7]);
11086 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11087 "pte[%lx] error[%lx]\n",
11088@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11089
11090 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11091 regs->tpc, tl);
11092- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11093+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11094 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11095- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11096+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11097 (void *) regs->u_regs[UREG_I7]);
11098 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11099 "pte[%lx] error[%lx]\n",
11100@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11101 fp = (unsigned long)sf->fp + STACK_BIAS;
11102 }
11103
11104- printk(" [%016lx] %pS\n", pc, (void *) pc);
11105+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11106 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11107 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11108 int index = tsk->curr_ret_stack;
11109 if (tsk->ret_stack && index >= graph) {
11110 pc = tsk->ret_stack[index - graph].ret;
11111- printk(" [%016lx] %pS\n", pc, (void *) pc);
11112+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11113 graph++;
11114 }
11115 }
11116@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11117 return (struct reg_window *) (fp + STACK_BIAS);
11118 }
11119
11120+extern void gr_handle_kernel_exploit(void);
11121+
11122 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11123 {
11124 static int die_counter;
11125@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11126 while (rw &&
11127 count++ < 30 &&
11128 kstack_valid(tp, (unsigned long) rw)) {
11129- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11130+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11131 (void *) rw->ins[7]);
11132
11133 rw = kernel_stack_up(rw);
11134@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11135 }
11136 user_instruction_dump ((unsigned int __user *) regs->tpc);
11137 }
11138- if (regs->tstate & TSTATE_PRIV)
11139+ if (regs->tstate & TSTATE_PRIV) {
11140+ gr_handle_kernel_exploit();
11141 do_exit(SIGKILL);
11142+ }
11143 do_exit(SIGSEGV);
11144 }
11145 EXPORT_SYMBOL(die_if_kernel);
11146diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11147index 62098a8..547ab2c 100644
11148--- a/arch/sparc/kernel/unaligned_64.c
11149+++ b/arch/sparc/kernel/unaligned_64.c
11150@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11151 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11152
11153 if (__ratelimit(&ratelimit)) {
11154- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11155+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11156 regs->tpc, (void *) regs->tpc);
11157 }
11158 }
11159diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11160index 3269b02..64f5231 100644
11161--- a/arch/sparc/lib/Makefile
11162+++ b/arch/sparc/lib/Makefile
11163@@ -2,7 +2,7 @@
11164 #
11165
11166 asflags-y := -ansi -DST_DIV0=0x02
11167-ccflags-y := -Werror
11168+#ccflags-y := -Werror
11169
11170 lib-$(CONFIG_SPARC32) += ashrdi3.o
11171 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11172diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11173index 05dac43..76f8ed4 100644
11174--- a/arch/sparc/lib/atomic_64.S
11175+++ b/arch/sparc/lib/atomic_64.S
11176@@ -15,11 +15,22 @@
11177 * a value and does the barriers.
11178 */
11179
11180-#define ATOMIC_OP(op) \
11181-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11182+#ifdef CONFIG_PAX_REFCOUNT
11183+#define __REFCOUNT_OP(op) op##cc
11184+#define __OVERFLOW_IOP tvs %icc, 6;
11185+#define __OVERFLOW_XOP tvs %xcc, 6;
11186+#else
11187+#define __REFCOUNT_OP(op) op
11188+#define __OVERFLOW_IOP
11189+#define __OVERFLOW_XOP
11190+#endif
11191+
11192+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11193+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11194 BACKOFF_SETUP(%o2); \
11195 1: lduw [%o1], %g1; \
11196- op %g1, %o0, %g7; \
11197+ asm_op %g1, %o0, %g7; \
11198+ post_op \
11199 cas [%o1], %g1, %g7; \
11200 cmp %g1, %g7; \
11201 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11202@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11203 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11204 ENDPROC(atomic_##op); \
11205
11206-#define ATOMIC_OP_RETURN(op) \
11207-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11208+#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
11209+ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11210+
11211+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11212+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11213 BACKOFF_SETUP(%o2); \
11214 1: lduw [%o1], %g1; \
11215- op %g1, %o0, %g7; \
11216+ asm_op %g1, %o0, %g7; \
11217+ post_op \
11218 cas [%o1], %g1, %g7; \
11219 cmp %g1, %g7; \
11220 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11221@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11222 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11223 ENDPROC(atomic_##op##_return);
11224
11225+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
11226+ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11227+
11228 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11229
11230 ATOMIC_OPS(add)
11231@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11232
11233 #undef ATOMIC_OPS
11234 #undef ATOMIC_OP_RETURN
11235+#undef __ATOMIC_OP_RETURN
11236 #undef ATOMIC_OP
11237+#undef __ATOMIC_OP
11238
11239-#define ATOMIC64_OP(op) \
11240-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11241+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11242+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11243 BACKOFF_SETUP(%o2); \
11244 1: ldx [%o1], %g1; \
11245- op %g1, %o0, %g7; \
11246+ asm_op %g1, %o0, %g7; \
11247+ post_op \
11248 casx [%o1], %g1, %g7; \
11249 cmp %g1, %g7; \
11250 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11251@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11252 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11253 ENDPROC(atomic64_##op); \
11254
11255-#define ATOMIC64_OP_RETURN(op) \
11256-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11257+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
11258+ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11259+
11260+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11261+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11262 BACKOFF_SETUP(%o2); \
11263 1: ldx [%o1], %g1; \
11264- op %g1, %o0, %g7; \
11265+ asm_op %g1, %o0, %g7; \
11266+ post_op \
11267 casx [%o1], %g1, %g7; \
11268 cmp %g1, %g7; \
11269 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11270@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11271 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11272 ENDPROC(atomic64_##op##_return);
11273
11274+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
11275+	__ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11276+
11277 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11278
11279 ATOMIC64_OPS(add)
11280@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11281
11282 #undef ATOMIC64_OPS
11283 #undef ATOMIC64_OP_RETURN
11284+#undef __ATOMIC64_OP_RETURN
11285 #undef ATOMIC64_OP
11286+#undef __ATOMIC64_OP
11287+#undef __OVERFLOW_XOP
11288+#undef __OVERFLOW_IOP
11289+#undef __REFCOUNT_OP
11290
11291 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11292 BACKOFF_SETUP(%o2)
11293diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11294index 1d649a9..fbc5bfc 100644
11295--- a/arch/sparc/lib/ksyms.c
11296+++ b/arch/sparc/lib/ksyms.c
11297@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11298 /* Atomic counter implementation. */
11299 #define ATOMIC_OP(op) \
11300 EXPORT_SYMBOL(atomic_##op); \
11301-EXPORT_SYMBOL(atomic64_##op);
11302+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11303+EXPORT_SYMBOL(atomic64_##op); \
11304+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11305
11306 #define ATOMIC_OP_RETURN(op) \
11307 EXPORT_SYMBOL(atomic_##op##_return); \
11308@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11309 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11310
11311 ATOMIC_OPS(add)
11312+EXPORT_SYMBOL(atomic_add_return_unchecked);
11313+EXPORT_SYMBOL(atomic64_add_return_unchecked);
11314 ATOMIC_OPS(sub)
11315
11316 #undef ATOMIC_OPS
11317diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11318index 30c3ecc..736f015 100644
11319--- a/arch/sparc/mm/Makefile
11320+++ b/arch/sparc/mm/Makefile
11321@@ -2,7 +2,7 @@
11322 #
11323
11324 asflags-y := -ansi
11325-ccflags-y := -Werror
11326+#ccflags-y := -Werror
11327
11328 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11329 obj-y += fault_$(BITS).o
11330diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11331index 70d8171..274c6c0 100644
11332--- a/arch/sparc/mm/fault_32.c
11333+++ b/arch/sparc/mm/fault_32.c
11334@@ -21,6 +21,9 @@
11335 #include <linux/perf_event.h>
11336 #include <linux/interrupt.h>
11337 #include <linux/kdebug.h>
11338+#include <linux/slab.h>
11339+#include <linux/pagemap.h>
11340+#include <linux/compiler.h>
11341
11342 #include <asm/page.h>
11343 #include <asm/pgtable.h>
11344@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11345 return safe_compute_effective_address(regs, insn);
11346 }
11347
11348+#ifdef CONFIG_PAX_PAGEEXEC
11349+#ifdef CONFIG_PAX_DLRESOLVE
11350+static void pax_emuplt_close(struct vm_area_struct *vma)
11351+{
11352+ vma->vm_mm->call_dl_resolve = 0UL;
11353+}
11354+
11355+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11356+{
11357+ unsigned int *kaddr;
11358+
11359+ vmf->page = alloc_page(GFP_HIGHUSER);
11360+ if (!vmf->page)
11361+ return VM_FAULT_OOM;
11362+
11363+ kaddr = kmap(vmf->page);
11364+ memset(kaddr, 0, PAGE_SIZE);
11365+ kaddr[0] = 0x9DE3BFA8U; /* save */
11366+ flush_dcache_page(vmf->page);
11367+ kunmap(vmf->page);
11368+ return VM_FAULT_MAJOR;
11369+}
11370+
11371+static const struct vm_operations_struct pax_vm_ops = {
11372+ .close = pax_emuplt_close,
11373+ .fault = pax_emuplt_fault
11374+};
11375+
11376+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11377+{
11378+ int ret;
11379+
11380+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11381+ vma->vm_mm = current->mm;
11382+ vma->vm_start = addr;
11383+ vma->vm_end = addr + PAGE_SIZE;
11384+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11385+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11386+ vma->vm_ops = &pax_vm_ops;
11387+
11388+ ret = insert_vm_struct(current->mm, vma);
11389+ if (ret)
11390+ return ret;
11391+
11392+ ++current->mm->total_vm;
11393+ return 0;
11394+}
11395+#endif
11396+
11397+/*
11398+ * PaX: decide what to do with offenders (regs->pc = fault address)
11399+ *
11400+ * returns 1 when task should be killed
11401+ * 2 when patched PLT trampoline was detected
11402+ * 3 when unpatched PLT trampoline was detected
11403+ */
11404+static int pax_handle_fetch_fault(struct pt_regs *regs)
11405+{
11406+
11407+#ifdef CONFIG_PAX_EMUPLT
11408+ int err;
11409+
11410+ do { /* PaX: patched PLT emulation #1 */
11411+ unsigned int sethi1, sethi2, jmpl;
11412+
11413+ err = get_user(sethi1, (unsigned int *)regs->pc);
11414+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11415+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11416+
11417+ if (err)
11418+ break;
11419+
11420+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11421+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11422+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11423+ {
11424+ unsigned int addr;
11425+
11426+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11427+ addr = regs->u_regs[UREG_G1];
11428+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11429+ regs->pc = addr;
11430+ regs->npc = addr+4;
11431+ return 2;
11432+ }
11433+ } while (0);
11434+
11435+ do { /* PaX: patched PLT emulation #2 */
11436+ unsigned int ba;
11437+
11438+ err = get_user(ba, (unsigned int *)regs->pc);
11439+
11440+ if (err)
11441+ break;
11442+
11443+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11444+ unsigned int addr;
11445+
11446+ if ((ba & 0xFFC00000U) == 0x30800000U)
11447+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11448+ else
11449+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11450+ regs->pc = addr;
11451+ regs->npc = addr+4;
11452+ return 2;
11453+ }
11454+ } while (0);
11455+
11456+ do { /* PaX: patched PLT emulation #3 */
11457+ unsigned int sethi, bajmpl, nop;
11458+
11459+ err = get_user(sethi, (unsigned int *)regs->pc);
11460+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11461+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11462+
11463+ if (err)
11464+ break;
11465+
11466+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11467+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11468+ nop == 0x01000000U)
11469+ {
11470+ unsigned int addr;
11471+
11472+ addr = (sethi & 0x003FFFFFU) << 10;
11473+ regs->u_regs[UREG_G1] = addr;
11474+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11475+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11476+ else
11477+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11478+ regs->pc = addr;
11479+ regs->npc = addr+4;
11480+ return 2;
11481+ }
11482+ } while (0);
11483+
11484+ do { /* PaX: unpatched PLT emulation step 1 */
11485+ unsigned int sethi, ba, nop;
11486+
11487+ err = get_user(sethi, (unsigned int *)regs->pc);
11488+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11489+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11490+
11491+ if (err)
11492+ break;
11493+
11494+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11495+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11496+ nop == 0x01000000U)
11497+ {
11498+ unsigned int addr, save, call;
11499+
11500+ if ((ba & 0xFFC00000U) == 0x30800000U)
11501+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11502+ else
11503+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11504+
11505+ err = get_user(save, (unsigned int *)addr);
11506+ err |= get_user(call, (unsigned int *)(addr+4));
11507+ err |= get_user(nop, (unsigned int *)(addr+8));
11508+ if (err)
11509+ break;
11510+
11511+#ifdef CONFIG_PAX_DLRESOLVE
11512+ if (save == 0x9DE3BFA8U &&
11513+ (call & 0xC0000000U) == 0x40000000U &&
11514+ nop == 0x01000000U)
11515+ {
11516+ struct vm_area_struct *vma;
11517+ unsigned long call_dl_resolve;
11518+
11519+ down_read(&current->mm->mmap_sem);
11520+ call_dl_resolve = current->mm->call_dl_resolve;
11521+ up_read(&current->mm->mmap_sem);
11522+ if (likely(call_dl_resolve))
11523+ goto emulate;
11524+
11525+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11526+
11527+ down_write(&current->mm->mmap_sem);
11528+ if (current->mm->call_dl_resolve) {
11529+ call_dl_resolve = current->mm->call_dl_resolve;
11530+ up_write(&current->mm->mmap_sem);
11531+ if (vma)
11532+ kmem_cache_free(vm_area_cachep, vma);
11533+ goto emulate;
11534+ }
11535+
11536+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11537+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11538+ up_write(&current->mm->mmap_sem);
11539+ if (vma)
11540+ kmem_cache_free(vm_area_cachep, vma);
11541+ return 1;
11542+ }
11543+
11544+ if (pax_insert_vma(vma, call_dl_resolve)) {
11545+ up_write(&current->mm->mmap_sem);
11546+ kmem_cache_free(vm_area_cachep, vma);
11547+ return 1;
11548+ }
11549+
11550+ current->mm->call_dl_resolve = call_dl_resolve;
11551+ up_write(&current->mm->mmap_sem);
11552+
11553+emulate:
11554+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11555+ regs->pc = call_dl_resolve;
11556+ regs->npc = addr+4;
11557+ return 3;
11558+ }
11559+#endif
11560+
11561+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11562+ if ((save & 0xFFC00000U) == 0x05000000U &&
11563+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11564+ nop == 0x01000000U)
11565+ {
11566+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11567+ regs->u_regs[UREG_G2] = addr + 4;
11568+ addr = (save & 0x003FFFFFU) << 10;
11569+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11570+ regs->pc = addr;
11571+ regs->npc = addr+4;
11572+ return 3;
11573+ }
11574+ }
11575+ } while (0);
11576+
11577+ do { /* PaX: unpatched PLT emulation step 2 */
11578+ unsigned int save, call, nop;
11579+
11580+ err = get_user(save, (unsigned int *)(regs->pc-4));
11581+ err |= get_user(call, (unsigned int *)regs->pc);
11582+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11583+ if (err)
11584+ break;
11585+
11586+ if (save == 0x9DE3BFA8U &&
11587+ (call & 0xC0000000U) == 0x40000000U &&
11588+ nop == 0x01000000U)
11589+ {
11590+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11591+
11592+ regs->u_regs[UREG_RETPC] = regs->pc;
11593+ regs->pc = dl_resolve;
11594+ regs->npc = dl_resolve+4;
11595+ return 3;
11596+ }
11597+ } while (0);
11598+#endif
11599+
11600+ return 1;
11601+}
11602+
11603+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11604+{
11605+ unsigned long i;
11606+
11607+ printk(KERN_ERR "PAX: bytes at PC: ");
11608+ for (i = 0; i < 8; i++) {
11609+ unsigned int c;
11610+ if (get_user(c, (unsigned int *)pc+i))
11611+ printk(KERN_CONT "???????? ");
11612+ else
11613+ printk(KERN_CONT "%08x ", c);
11614+ }
11615+ printk("\n");
11616+}
11617+#endif
11618+
11619 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11620 int text_fault)
11621 {
11622@@ -226,6 +500,24 @@ good_area:
11623 if (!(vma->vm_flags & VM_WRITE))
11624 goto bad_area;
11625 } else {
11626+
11627+#ifdef CONFIG_PAX_PAGEEXEC
11628+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11629+ up_read(&mm->mmap_sem);
11630+ switch (pax_handle_fetch_fault(regs)) {
11631+
11632+#ifdef CONFIG_PAX_EMUPLT
11633+ case 2:
11634+ case 3:
11635+ return;
11636+#endif
11637+
11638+ }
11639+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11640+ do_group_exit(SIGKILL);
11641+ }
11642+#endif
11643+
11644 /* Allow reads even for write-only mappings */
11645 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11646 goto bad_area;
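The displacement arithmetic used throughout these decoders, e.g. (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U), is a branch-free sign extension of the instruction's 13-bit immediate: force the bits above the field high, flip the sign bit, then add the sign bit back, so the carry clears the upper bits for non-negative values and leaves them set for negative ones. A minimal standalone check of that claim, in plain C and independent of the kernel:

#include <assert.h>
#include <stdint.h>

/* Sign-extend the low 13 bits of a SPARC instruction word the way the
 * decoders above do: set all bits above the field, flip the sign bit,
 * then add the sign bit back so the carry clears the upper bits for
 * non-negative immediates and leaves them set for negative ones. */
static uint32_t simm13_pax(uint32_t insn)
{
	return ((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
}

/* Reference: conventional shift-based sign extension of bits 12..0
 * (relies on the usual arithmetic right shift of negative values). */
static uint32_t simm13_ref(uint32_t imm)
{
	return (uint32_t)((int32_t)(imm << 19) >> 19);
}

int main(void)
{
	uint32_t imm;

	for (imm = 0; imm < 0x2000U; imm++)
		assert(simm13_pax(0x81C06000U | imm) == simm13_ref(imm));
	return 0;
}

The 64-bit decoders in fault_64.c below repeat the same trick with the masks widened to 64 bits and, for TIF_32BIT tasks, truncate the resulting address to 32 bits.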
11647diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11648index 4798232..f76e3aa 100644
11649--- a/arch/sparc/mm/fault_64.c
11650+++ b/arch/sparc/mm/fault_64.c
11651@@ -22,6 +22,9 @@
11652 #include <linux/kdebug.h>
11653 #include <linux/percpu.h>
11654 #include <linux/context_tracking.h>
11655+#include <linux/slab.h>
11656+#include <linux/pagemap.h>
11657+#include <linux/compiler.h>
11658
11659 #include <asm/page.h>
11660 #include <asm/pgtable.h>
11661@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11662 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11663 regs->tpc);
11664 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11665- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11666+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11667 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11668 dump_stack();
11669 unhandled_fault(regs->tpc, current, regs);
11670@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11671 show_regs(regs);
11672 }
11673
11674+#ifdef CONFIG_PAX_PAGEEXEC
11675+#ifdef CONFIG_PAX_DLRESOLVE
11676+static void pax_emuplt_close(struct vm_area_struct *vma)
11677+{
11678+ vma->vm_mm->call_dl_resolve = 0UL;
11679+}
11680+
11681+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11682+{
11683+ unsigned int *kaddr;
11684+
11685+ vmf->page = alloc_page(GFP_HIGHUSER);
11686+ if (!vmf->page)
11687+ return VM_FAULT_OOM;
11688+
11689+ kaddr = kmap(vmf->page);
11690+ memset(kaddr, 0, PAGE_SIZE);
11691+ kaddr[0] = 0x9DE3BFA8U; /* save */
11692+ flush_dcache_page(vmf->page);
11693+ kunmap(vmf->page);
11694+ return VM_FAULT_MAJOR;
11695+}
11696+
11697+static const struct vm_operations_struct pax_vm_ops = {
11698+ .close = pax_emuplt_close,
11699+ .fault = pax_emuplt_fault
11700+};
11701+
11702+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11703+{
11704+ int ret;
11705+
11706+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11707+ vma->vm_mm = current->mm;
11708+ vma->vm_start = addr;
11709+ vma->vm_end = addr + PAGE_SIZE;
11710+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11711+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11712+ vma->vm_ops = &pax_vm_ops;
11713+
11714+ ret = insert_vm_struct(current->mm, vma);
11715+ if (ret)
11716+ return ret;
11717+
11718+ ++current->mm->total_vm;
11719+ return 0;
11720+}
11721+#endif
11722+
11723+/*
11724+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11725+ *
11726+ * returns 1 when task should be killed
11727+ * 2 when patched PLT trampoline was detected
11728+ * 3 when unpatched PLT trampoline was detected
11729+ */
11730+static int pax_handle_fetch_fault(struct pt_regs *regs)
11731+{
11732+
11733+#ifdef CONFIG_PAX_EMUPLT
11734+ int err;
11735+
11736+ do { /* PaX: patched PLT emulation #1 */
11737+ unsigned int sethi1, sethi2, jmpl;
11738+
11739+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11740+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11741+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11742+
11743+ if (err)
11744+ break;
11745+
11746+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11747+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11748+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11749+ {
11750+ unsigned long addr;
11751+
11752+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11753+ addr = regs->u_regs[UREG_G1];
11754+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11755+
11756+ if (test_thread_flag(TIF_32BIT))
11757+ addr &= 0xFFFFFFFFUL;
11758+
11759+ regs->tpc = addr;
11760+ regs->tnpc = addr+4;
11761+ return 2;
11762+ }
11763+ } while (0);
11764+
11765+ do { /* PaX: patched PLT emulation #2 */
11766+ unsigned int ba;
11767+
11768+ err = get_user(ba, (unsigned int *)regs->tpc);
11769+
11770+ if (err)
11771+ break;
11772+
11773+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11774+ unsigned long addr;
11775+
11776+ if ((ba & 0xFFC00000U) == 0x30800000U)
11777+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11778+ else
11779+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11780+
11781+ if (test_thread_flag(TIF_32BIT))
11782+ addr &= 0xFFFFFFFFUL;
11783+
11784+ regs->tpc = addr;
11785+ regs->tnpc = addr+4;
11786+ return 2;
11787+ }
11788+ } while (0);
11789+
11790+ do { /* PaX: patched PLT emulation #3 */
11791+ unsigned int sethi, bajmpl, nop;
11792+
11793+ err = get_user(sethi, (unsigned int *)regs->tpc);
11794+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11795+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11796+
11797+ if (err)
11798+ break;
11799+
11800+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11801+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11802+ nop == 0x01000000U)
11803+ {
11804+ unsigned long addr;
11805+
11806+ addr = (sethi & 0x003FFFFFU) << 10;
11807+ regs->u_regs[UREG_G1] = addr;
11808+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11809+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11810+ else
11811+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11812+
11813+ if (test_thread_flag(TIF_32BIT))
11814+ addr &= 0xFFFFFFFFUL;
11815+
11816+ regs->tpc = addr;
11817+ regs->tnpc = addr+4;
11818+ return 2;
11819+ }
11820+ } while (0);
11821+
11822+ do { /* PaX: patched PLT emulation #4 */
11823+ unsigned int sethi, mov1, call, mov2;
11824+
11825+ err = get_user(sethi, (unsigned int *)regs->tpc);
11826+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11827+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11828+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11829+
11830+ if (err)
11831+ break;
11832+
11833+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11834+ mov1 == 0x8210000FU &&
11835+ (call & 0xC0000000U) == 0x40000000U &&
11836+ mov2 == 0x9E100001U)
11837+ {
11838+ unsigned long addr;
11839+
11840+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11841+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11842+
11843+ if (test_thread_flag(TIF_32BIT))
11844+ addr &= 0xFFFFFFFFUL;
11845+
11846+ regs->tpc = addr;
11847+ regs->tnpc = addr+4;
11848+ return 2;
11849+ }
11850+ } while (0);
11851+
11852+ do { /* PaX: patched PLT emulation #5 */
11853+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11854+
11855+ err = get_user(sethi, (unsigned int *)regs->tpc);
11856+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11857+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11858+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11859+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11860+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11861+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11862+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11863+
11864+ if (err)
11865+ break;
11866+
11867+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11868+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11869+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11870+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11871+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11872+ sllx == 0x83287020U &&
11873+ jmpl == 0x81C04005U &&
11874+ nop == 0x01000000U)
11875+ {
11876+ unsigned long addr;
11877+
11878+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11879+ regs->u_regs[UREG_G1] <<= 32;
11880+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11881+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11882+ regs->tpc = addr;
11883+ regs->tnpc = addr+4;
11884+ return 2;
11885+ }
11886+ } while (0);
11887+
11888+ do { /* PaX: patched PLT emulation #6 */
11889+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11890+
11891+ err = get_user(sethi, (unsigned int *)regs->tpc);
11892+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11893+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11894+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11895+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11896+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11897+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11898+
11899+ if (err)
11900+ break;
11901+
11902+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11903+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11904+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11905+ sllx == 0x83287020U &&
11906+ (or & 0xFFFFE000U) == 0x8A116000U &&
11907+ jmpl == 0x81C04005U &&
11908+ nop == 0x01000000U)
11909+ {
11910+ unsigned long addr;
11911+
11912+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11913+ regs->u_regs[UREG_G1] <<= 32;
11914+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11915+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11916+ regs->tpc = addr;
11917+ regs->tnpc = addr+4;
11918+ return 2;
11919+ }
11920+ } while (0);
11921+
11922+ do { /* PaX: unpatched PLT emulation step 1 */
11923+ unsigned int sethi, ba, nop;
11924+
11925+ err = get_user(sethi, (unsigned int *)regs->tpc);
11926+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11927+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11928+
11929+ if (err)
11930+ break;
11931+
11932+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11933+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11934+ nop == 0x01000000U)
11935+ {
11936+ unsigned long addr;
11937+ unsigned int save, call;
11938+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11939+
11940+ if ((ba & 0xFFC00000U) == 0x30800000U)
11941+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11942+ else
11943+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11944+
11945+ if (test_thread_flag(TIF_32BIT))
11946+ addr &= 0xFFFFFFFFUL;
11947+
11948+ err = get_user(save, (unsigned int *)addr);
11949+ err |= get_user(call, (unsigned int *)(addr+4));
11950+ err |= get_user(nop, (unsigned int *)(addr+8));
11951+ if (err)
11952+ break;
11953+
11954+#ifdef CONFIG_PAX_DLRESOLVE
11955+ if (save == 0x9DE3BFA8U &&
11956+ (call & 0xC0000000U) == 0x40000000U &&
11957+ nop == 0x01000000U)
11958+ {
11959+ struct vm_area_struct *vma;
11960+ unsigned long call_dl_resolve;
11961+
11962+ down_read(&current->mm->mmap_sem);
11963+ call_dl_resolve = current->mm->call_dl_resolve;
11964+ up_read(&current->mm->mmap_sem);
11965+ if (likely(call_dl_resolve))
11966+ goto emulate;
11967+
11968+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11969+
11970+ down_write(&current->mm->mmap_sem);
11971+ if (current->mm->call_dl_resolve) {
11972+ call_dl_resolve = current->mm->call_dl_resolve;
11973+ up_write(&current->mm->mmap_sem);
11974+ if (vma)
11975+ kmem_cache_free(vm_area_cachep, vma);
11976+ goto emulate;
11977+ }
11978+
11979+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11980+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11981+ up_write(&current->mm->mmap_sem);
11982+ if (vma)
11983+ kmem_cache_free(vm_area_cachep, vma);
11984+ return 1;
11985+ }
11986+
11987+ if (pax_insert_vma(vma, call_dl_resolve)) {
11988+ up_write(&current->mm->mmap_sem);
11989+ kmem_cache_free(vm_area_cachep, vma);
11990+ return 1;
11991+ }
11992+
11993+ current->mm->call_dl_resolve = call_dl_resolve;
11994+ up_write(&current->mm->mmap_sem);
11995+
11996+emulate:
11997+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11998+ regs->tpc = call_dl_resolve;
11999+ regs->tnpc = addr+4;
12000+ return 3;
12001+ }
12002+#endif
12003+
12004+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12005+ if ((save & 0xFFC00000U) == 0x05000000U &&
12006+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12007+ nop == 0x01000000U)
12008+ {
12009+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12010+ regs->u_regs[UREG_G2] = addr + 4;
12011+ addr = (save & 0x003FFFFFU) << 10;
12012+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12013+
12014+ if (test_thread_flag(TIF_32BIT))
12015+ addr &= 0xFFFFFFFFUL;
12016+
12017+ regs->tpc = addr;
12018+ regs->tnpc = addr+4;
12019+ return 3;
12020+ }
12021+
12022+ /* PaX: 64-bit PLT stub */
12023+ err = get_user(sethi1, (unsigned int *)addr);
12024+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12025+ err |= get_user(or1, (unsigned int *)(addr+8));
12026+ err |= get_user(or2, (unsigned int *)(addr+12));
12027+ err |= get_user(sllx, (unsigned int *)(addr+16));
12028+ err |= get_user(add, (unsigned int *)(addr+20));
12029+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12030+ err |= get_user(nop, (unsigned int *)(addr+28));
12031+ if (err)
12032+ break;
12033+
12034+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12035+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12036+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12037+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12038+ sllx == 0x89293020U &&
12039+ add == 0x8A010005U &&
12040+ jmpl == 0x89C14000U &&
12041+ nop == 0x01000000U)
12042+ {
12043+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12044+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12045+ regs->u_regs[UREG_G4] <<= 32;
12046+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12047+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12048+ regs->u_regs[UREG_G4] = addr + 24;
12049+ addr = regs->u_regs[UREG_G5];
12050+ regs->tpc = addr;
12051+ regs->tnpc = addr+4;
12052+ return 3;
12053+ }
12054+ }
12055+ } while (0);
12056+
12057+#ifdef CONFIG_PAX_DLRESOLVE
12058+ do { /* PaX: unpatched PLT emulation step 2 */
12059+ unsigned int save, call, nop;
12060+
12061+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12062+ err |= get_user(call, (unsigned int *)regs->tpc);
12063+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12064+ if (err)
12065+ break;
12066+
12067+ if (save == 0x9DE3BFA8U &&
12068+ (call & 0xC0000000U) == 0x40000000U &&
12069+ nop == 0x01000000U)
12070+ {
12071+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12072+
12073+ if (test_thread_flag(TIF_32BIT))
12074+ dl_resolve &= 0xFFFFFFFFUL;
12075+
12076+ regs->u_regs[UREG_RETPC] = regs->tpc;
12077+ regs->tpc = dl_resolve;
12078+ regs->tnpc = dl_resolve+4;
12079+ return 3;
12080+ }
12081+ } while (0);
12082+#endif
12083+
12084+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12085+ unsigned int sethi, ba, nop;
12086+
12087+ err = get_user(sethi, (unsigned int *)regs->tpc);
12088+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12089+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12090+
12091+ if (err)
12092+ break;
12093+
12094+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12095+ (ba & 0xFFF00000U) == 0x30600000U &&
12096+ nop == 0x01000000U)
12097+ {
12098+ unsigned long addr;
12099+
12100+ addr = (sethi & 0x003FFFFFU) << 10;
12101+ regs->u_regs[UREG_G1] = addr;
12102+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12103+
12104+ if (test_thread_flag(TIF_32BIT))
12105+ addr &= 0xFFFFFFFFUL;
12106+
12107+ regs->tpc = addr;
12108+ regs->tnpc = addr+4;
12109+ return 2;
12110+ }
12111+ } while (0);
12112+
12113+#endif
12114+
12115+ return 1;
12116+}
12117+
12118+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12119+{
12120+ unsigned long i;
12121+
12122+ printk(KERN_ERR "PAX: bytes at PC: ");
12123+ for (i = 0; i < 8; i++) {
12124+ unsigned int c;
12125+ if (get_user(c, (unsigned int *)pc+i))
12126+ printk(KERN_CONT "???????? ");
12127+ else
12128+ printk(KERN_CONT "%08x ", c);
12129+ }
12130+ printk("\n");
12131+}
12132+#endif
12133+
12134 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12135 {
12136 enum ctx_state prev_state = exception_enter();
12137@@ -353,6 +816,29 @@ retry:
12138 if (!vma)
12139 goto bad_area;
12140
12141+#ifdef CONFIG_PAX_PAGEEXEC
12142+ /* PaX: detect ITLB misses on non-exec pages */
12143+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12144+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12145+ {
12146+ if (address != regs->tpc)
12147+ goto good_area;
12148+
12149+ up_read(&mm->mmap_sem);
12150+ switch (pax_handle_fetch_fault(regs)) {
12151+
12152+#ifdef CONFIG_PAX_EMUPLT
12153+ case 2:
12154+ case 3:
12155+ return;
12156+#endif
12157+
12158+ }
12159+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12160+ do_group_exit(SIGKILL);
12161+ }
12162+#endif
12163+
12164 /* Pure DTLB misses do not tell us whether the fault causing
12165 * load/store/atomic was a write or not, it only says that there
12166 * was no match. So in such a case we (carefully) read the
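Each do/while block above recognises one PLT code shape by matching instruction words against (mask, value) pairs; for example, any sethi %hi(imm22), %g1 satisfies (insn & 0xFFC00000U) == 0x03000000U. The same matching restated as a small table-driven classifier; the mnemonic strings are our own annotations of the encodings the handler tests for, and the handler itself hard-codes the comparisons rather than using a table:

#include <stdint.h>
#include <stdio.h>

/* Table-driven restatement of the (mask, value) matching used above. */
struct insn_pattern {
	uint32_t mask, value;
	const char *name;
};

static const struct insn_pattern patterns[] = {
	{ 0xFFFFFFFFU, 0x01000000U, "nop" },
	{ 0xFFFFFFFFU, 0x9DE3BFA8U, "save %sp, -88, %sp" },
	{ 0xFFC00000U, 0x03000000U, "sethi %hi(imm22), %g1" },
	{ 0xFFFFE000U, 0x81C06000U, "jmpl %g1 + simm13, %g0" },
	{ 0xC0000000U, 0x40000000U, "call disp30" },
};

static const char *classify(uint32_t insn)
{
	size_t i;

	for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++)
		if ((insn & patterns[i].mask) == patterns[i].value)
			return patterns[i].name;
	return "unrecognised";
}

int main(void)
{
	printf("%s\n", classify(0x9DE3BFA8U)); /* save */
	printf("%s\n", classify(0x03000000U)); /* sethi 0, %g1 */
	printf("%s\n", classify(0x40000001U)); /* call +4 */
	return 0;
}

Order matters with loose masks, which is why the comment on "patched PLT emulation #7" insists it run after the unpatched-PLT checks: its 0xFFF00000 branch mask also matches the 0x30680000 form that the unpatched step-1 block needs to claim first.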
12167diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12168index d329537..2c3746a 100644
12169--- a/arch/sparc/mm/hugetlbpage.c
12170+++ b/arch/sparc/mm/hugetlbpage.c
12171@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12172 unsigned long addr,
12173 unsigned long len,
12174 unsigned long pgoff,
12175- unsigned long flags)
12176+ unsigned long flags,
12177+ unsigned long offset)
12178 {
12179+ struct mm_struct *mm = current->mm;
12180 unsigned long task_size = TASK_SIZE;
12181 struct vm_unmapped_area_info info;
12182
12183@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12184
12185 info.flags = 0;
12186 info.length = len;
12187- info.low_limit = TASK_UNMAPPED_BASE;
12188+ info.low_limit = mm->mmap_base;
12189 info.high_limit = min(task_size, VA_EXCLUDE_START);
12190 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12191 info.align_offset = 0;
12192+ info.threadstack_offset = offset;
12193 addr = vm_unmapped_area(&info);
12194
12195 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12196 VM_BUG_ON(addr != -ENOMEM);
12197 info.low_limit = VA_EXCLUDE_END;
12198+
12199+#ifdef CONFIG_PAX_RANDMMAP
12200+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12201+ info.low_limit += mm->delta_mmap;
12202+#endif
12203+
12204 info.high_limit = task_size;
12205 addr = vm_unmapped_area(&info);
12206 }
12207@@ -55,7 +64,8 @@ static unsigned long
12208 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12209 const unsigned long len,
12210 const unsigned long pgoff,
12211- const unsigned long flags)
12212+ const unsigned long flags,
12213+ const unsigned long offset)
12214 {
12215 struct mm_struct *mm = current->mm;
12216 unsigned long addr = addr0;
12217@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12218 info.high_limit = mm->mmap_base;
12219 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12220 info.align_offset = 0;
12221+ info.threadstack_offset = offset;
12222 addr = vm_unmapped_area(&info);
12223
12224 /*
12225@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12226 VM_BUG_ON(addr != -ENOMEM);
12227 info.flags = 0;
12228 info.low_limit = TASK_UNMAPPED_BASE;
12229+
12230+#ifdef CONFIG_PAX_RANDMMAP
12231+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12232+ info.low_limit += mm->delta_mmap;
12233+#endif
12234+
12235 info.high_limit = STACK_TOP32;
12236 addr = vm_unmapped_area(&info);
12237 }
12238@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12239 struct mm_struct *mm = current->mm;
12240 struct vm_area_struct *vma;
12241 unsigned long task_size = TASK_SIZE;
12242+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12243
12244 if (test_thread_flag(TIF_32BIT))
12245 task_size = STACK_TOP32;
12246@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12247 return addr;
12248 }
12249
12250+#ifdef CONFIG_PAX_RANDMMAP
12251+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12252+#endif
12253+
12254 if (addr) {
12255 addr = ALIGN(addr, HPAGE_SIZE);
12256 vma = find_vma(mm, addr);
12257- if (task_size - len >= addr &&
12258- (!vma || addr + len <= vma->vm_start))
12259+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12260 return addr;
12261 }
12262 if (mm->get_unmapped_area == arch_get_unmapped_area)
12263 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12264- pgoff, flags);
12265+ pgoff, flags, offset);
12266 else
12267 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12268- pgoff, flags);
12269+ pgoff, flags, offset);
12270 }
12271
12272 pte_t *huge_pte_alloc(struct mm_struct *mm,
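The replaced inline test (!vma || addr + len <= vma->vm_start) only rejected direct overlap with the next mapping; check_heap_stack_gap(), defined elsewhere in this patch, additionally enforces a gap below stack-like mappings, randomised per thread via the offset obtained from gr_rand_threadstack_offset(). A rough sketch of the shape of such a check; the flag bit and gap size are stand-ins, not grsecurity's values:

#include <assert.h>
#include <stdbool.h>

/* Illustrative only: what a heap/stack-gap decision of this kind has
 * to answer for a candidate [addr, addr+len) below the next vma. */
struct vma_sketch {
	unsigned long vm_start;
	unsigned long vm_flags;
};

#define SKETCH_VM_GROWSDOWN	0x0100UL	/* stand-in flag bit */
#define SKETCH_STACK_GAP	(256UL * 4096)	/* hypothetical gap */

static bool gap_ok(const struct vma_sketch *next, unsigned long addr,
		   unsigned long len, unsigned long offset)
{
	if (!next)
		return true;			/* nothing above the range */
	if (addr + len > next->vm_start)
		return false;			/* plain overlap */
	if (next->vm_flags & SKETCH_VM_GROWSDOWN)	/* keep distance */
		return addr + len + SKETCH_STACK_GAP + offset <= next->vm_start;
	return true;
}

int main(void)
{
	struct vma_sketch stack = { 0x700000000000UL, SKETCH_VM_GROWSDOWN };

	assert(gap_ok(NULL, 0x1000, 0x1000, 0));
	/* a mapping ending flush against the stack is refused */
	assert(!gap_ok(&stack, stack.vm_start - 0x1000, 0x1000, 0));
	return 0;
}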
12273diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12274index 3ea267c..93f0659 100644
12275--- a/arch/sparc/mm/init_64.c
12276+++ b/arch/sparc/mm/init_64.c
12277@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12278 int num_kernel_image_mappings;
12279
12280 #ifdef CONFIG_DEBUG_DCFLUSH
12281-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12282+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12283 #ifdef CONFIG_SMP
12284-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12285+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12286 #endif
12287 #endif
12288
12289@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12290 {
12291 BUG_ON(tlb_type == hypervisor);
12292 #ifdef CONFIG_DEBUG_DCFLUSH
12293- atomic_inc(&dcpage_flushes);
12294+ atomic_inc_unchecked(&dcpage_flushes);
12295 #endif
12296
12297 #ifdef DCACHE_ALIASING_POSSIBLE
12298@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12299
12300 #ifdef CONFIG_DEBUG_DCFLUSH
12301 seq_printf(m, "DCPageFlushes\t: %d\n",
12302- atomic_read(&dcpage_flushes));
12303+ atomic_read_unchecked(&dcpage_flushes));
12304 #ifdef CONFIG_SMP
12305 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12306- atomic_read(&dcpage_flushes_xcall));
12307+ atomic_read_unchecked(&dcpage_flushes_xcall));
12308 #endif /* CONFIG_SMP */
12309 #endif /* CONFIG_DEBUG_DCFLUSH */
12310 }
12311diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12312index 7cca418..53fc030 100644
12313--- a/arch/tile/Kconfig
12314+++ b/arch/tile/Kconfig
12315@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12316
12317 config KEXEC
12318 bool "kexec system call"
12319+ depends on !GRKERNSEC_KMEM
12320 ---help---
12321 kexec is a system call that implements the ability to shutdown your
12322 current kernel, and to start another kernel. It is like a reboot
12323diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12324index 7b11c5f..755a026 100644
12325--- a/arch/tile/include/asm/atomic_64.h
12326+++ b/arch/tile/include/asm/atomic_64.h
12327@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12328
12329 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12330
12331+#define atomic64_read_unchecked(v) atomic64_read(v)
12332+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12333+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12334+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12335+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12336+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12337+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12338+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12339+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12340+
12341 /* Define this to indicate that cmpxchg is an efficient operation. */
12342 #define __HAVE_ARCH_CMPXCHG
12343
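The atomic_unchecked_t conversions in init_64.c above, and the *_unchecked aliases just added for tile, exist because PAX_REFCOUNT instruments the ordinary atomic ops to treat signed overflow as a refcount-overflow exploit attempt; pure statistics counters such as dcpage_flushes are switched to the unchecked variants so that harmless wrapping is not flagged. On architectures without the instrumentation the unchecked ops simply alias the plain ones, as the tile macros show. A scalar model of the distinction, assuming a saturate-and-report overflow policy (the real response is a policy choice made atomically in asm or plugin code):

#include <assert.h>
#include <limits.h>

/* Scalar model only: the real code does this atomically. */
static int inc_checked(int v, int *overflowed)
{
	if (v == INT_MAX) {	/* signed overflow imminent: report it */
		*overflowed = 1;
		return v;	/* saturate instead of wrapping */
	}
	return v + 1;
}

static unsigned int inc_unchecked(unsigned int v)
{
	return v + 1;		/* statistics may wrap harmlessly */
}

int main(void)
{
	int of = 0;

	assert(inc_checked(INT_MAX, &of) == INT_MAX && of == 1);
	assert(inc_unchecked(0xFFFFFFFFU) == 0);
	return 0;
}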
12344diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12345index 6160761..00cac88 100644
12346--- a/arch/tile/include/asm/cache.h
12347+++ b/arch/tile/include/asm/cache.h
12348@@ -15,11 +15,12 @@
12349 #ifndef _ASM_TILE_CACHE_H
12350 #define _ASM_TILE_CACHE_H
12351
12352+#include <linux/const.h>
12353 #include <arch/chip.h>
12354
12355 /* bytes per L1 data cache line */
12356 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12357-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12358+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12359
12360 /* bytes per L2 cache line */
12361 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
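_AC(1,UL) comes from <linux/const.h>: in assembly it expands to a bare 1, in C to 1UL, so L1_CACHE_BYTES becomes an unsigned long constant on the C side while staying usable from .S files. The same one-line change recurs below for um and unicore32. A minimal re-creation of the mechanism; the kernel's own header is the authority, this is just its shape:

/* Token-pasting core of <linux/const.h>, re-created for illustration. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X			/* assembler: no C suffixes */
#else
#define __AC(X, Y)	(X##Y)			/* C: paste the suffix on */
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	6
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)

/* On the C side the constant is unsigned long, so shifts and size
 * comparisons involving it never pass through signed arithmetic. */
int main(void)
{
	return sizeof(L1_CACHE_BYTES) == sizeof(unsigned long) ? 0 : 1;
}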
12362diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12363index b6cde32..c0cb736 100644
12364--- a/arch/tile/include/asm/uaccess.h
12365+++ b/arch/tile/include/asm/uaccess.h
12366@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12367 const void __user *from,
12368 unsigned long n)
12369 {
12370- int sz = __compiletime_object_size(to);
12371+ size_t sz = __compiletime_object_size(to);
12372
12373- if (likely(sz == -1 || sz >= n))
12374+ if (likely(sz == (size_t)-1 || sz >= n))
12375 n = _copy_from_user(to, from, n);
12376 else
12377 copy_from_user_overflow();
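__compiletime_object_size() reports sizes as a size_t, with (size_t)-1 meaning "unknown"; funnelling that through an int both narrows large values and buries the sentinel in a signed/unsigned comparison, so the hunk keeps the value as size_t end to end. An admittedly extreme illustration of the narrowing on an LP64 target:

#include <stdio.h>

int main(void)
{
	unsigned long object_sz = 0x100000000UL; /* 4 GiB object, contrived */
	/* conversion of an out-of-range value to int is
	 * implementation-defined; 0 on the common ABIs */
	int narrowed = (int)object_sz;
	unsigned long n = 16;

	/* Through the int, a 16-byte copy into the object looks like an
	 * overflow (0 >= 16 is false); kept as size_t it is allowed. */
	printf("narrowed check passes: %d\n", (unsigned long)narrowed >= n);
	printf("size_t   check passes: %d\n", object_sz >= n);
	return 0;
}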
12378diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12379index 3270e00..a77236e 100644
12380--- a/arch/tile/mm/hugetlbpage.c
12381+++ b/arch/tile/mm/hugetlbpage.c
12382@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12383 info.high_limit = TASK_SIZE;
12384 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12385 info.align_offset = 0;
12386+ info.threadstack_offset = 0;
12387 return vm_unmapped_area(&info);
12388 }
12389
12390@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12391 info.high_limit = current->mm->mmap_base;
12392 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12393 info.align_offset = 0;
12394+ info.threadstack_offset = 0;
12395 addr = vm_unmapped_area(&info);
12396
12397 /*
12398diff --git a/arch/um/Makefile b/arch/um/Makefile
12399index e4b1a96..16162f8 100644
12400--- a/arch/um/Makefile
12401+++ b/arch/um/Makefile
12402@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12403 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12404 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12405
12406+ifdef CONSTIFY_PLUGIN
12407+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12408+endif
12409+
12410 #This will adjust *FLAGS accordingly to the platform.
12411 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12412
12413diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12414index 19e1bdd..3665b77 100644
12415--- a/arch/um/include/asm/cache.h
12416+++ b/arch/um/include/asm/cache.h
12417@@ -1,6 +1,7 @@
12418 #ifndef __UM_CACHE_H
12419 #define __UM_CACHE_H
12420
12421+#include <linux/const.h>
12422
12423 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12424 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12425@@ -12,6 +13,6 @@
12426 # define L1_CACHE_SHIFT 5
12427 #endif
12428
12429-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12430+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12431
12432 #endif
12433diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12434index 2e0a6b1..a64d0f5 100644
12435--- a/arch/um/include/asm/kmap_types.h
12436+++ b/arch/um/include/asm/kmap_types.h
12437@@ -8,6 +8,6 @@
12438
12439 /* No more #include "asm/arch/kmap_types.h" ! */
12440
12441-#define KM_TYPE_NR 14
12442+#define KM_TYPE_NR 15
12443
12444 #endif
12445diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12446index 71c5d13..4c7b9f1 100644
12447--- a/arch/um/include/asm/page.h
12448+++ b/arch/um/include/asm/page.h
12449@@ -14,6 +14,9 @@
12450 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12451 #define PAGE_MASK (~(PAGE_SIZE-1))
12452
12453+#define ktla_ktva(addr) (addr)
12454+#define ktva_ktla(addr) (addr)
12455+
12456 #ifndef __ASSEMBLY__
12457
12458 struct page;
12459diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12460index 0032f92..cd151e0 100644
12461--- a/arch/um/include/asm/pgtable-3level.h
12462+++ b/arch/um/include/asm/pgtable-3level.h
12463@@ -58,6 +58,7 @@
12464 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12465 #define pud_populate(mm, pud, pmd) \
12466 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12467+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12468
12469 #ifdef CONFIG_64BIT
12470 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12471diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12472index f17bca8..48adb87 100644
12473--- a/arch/um/kernel/process.c
12474+++ b/arch/um/kernel/process.c
12475@@ -356,22 +356,6 @@ int singlestepping(void * t)
12476 return 2;
12477 }
12478
12479-/*
12480- * Only x86 and x86_64 have an arch_align_stack().
12481- * All other arches have "#define arch_align_stack(x) (x)"
12482- * in their asm/exec.h
12483- * As this is included in UML from asm-um/system-generic.h,
12484- * we can use it to behave as the subarch does.
12485- */
12486-#ifndef arch_align_stack
12487-unsigned long arch_align_stack(unsigned long sp)
12488-{
12489- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12490- sp -= get_random_int() % 8192;
12491- return sp & ~0xf;
12492-}
12493-#endif
12494-
12495 unsigned long get_wchan(struct task_struct *p)
12496 {
12497 unsigned long stack_page, sp, ip;
12498diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12499index ad8f795..2c7eec6 100644
12500--- a/arch/unicore32/include/asm/cache.h
12501+++ b/arch/unicore32/include/asm/cache.h
12502@@ -12,8 +12,10 @@
12503 #ifndef __UNICORE_CACHE_H__
12504 #define __UNICORE_CACHE_H__
12505
12506-#define L1_CACHE_SHIFT (5)
12507-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12508+#include <linux/const.h>
12509+
12510+#define L1_CACHE_SHIFT 5
12511+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12512
12513 /*
12514 * Memory returned by kmalloc() may be used for DMA, so we must make
12515diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12516index 0dc9d01..98df103 100644
12517--- a/arch/x86/Kconfig
12518+++ b/arch/x86/Kconfig
12519@@ -130,7 +130,7 @@ config X86
12520 select RTC_LIB
12521 select HAVE_DEBUG_STACKOVERFLOW
12522 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12523- select HAVE_CC_STACKPROTECTOR
12524+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12525 select GENERIC_CPU_AUTOPROBE
12526 select HAVE_ARCH_AUDITSYSCALL
12527 select ARCH_SUPPORTS_ATOMIC_RMW
12528@@ -263,7 +263,7 @@ config X86_HT
12529
12530 config X86_32_LAZY_GS
12531 def_bool y
12532- depends on X86_32 && !CC_STACKPROTECTOR
12533+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12534
12535 config ARCH_HWEIGHT_CFLAGS
12536 string
12537@@ -601,6 +601,7 @@ config SCHED_OMIT_FRAME_POINTER
12538
12539 menuconfig HYPERVISOR_GUEST
12540 bool "Linux guest support"
12541+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12542 ---help---
12543 Say Y here to enable options for running Linux under various hyper-
12544 visors. This option enables basic hypervisor detection and platform
12545@@ -978,6 +979,7 @@ config VM86
12546
12547 config X86_16BIT
12548 bool "Enable support for 16-bit segments" if EXPERT
12549+ depends on !GRKERNSEC
12550 default y
12551 ---help---
12552 This option is required by programs like Wine to run 16-bit
12553@@ -1151,6 +1153,7 @@ choice
12554
12555 config NOHIGHMEM
12556 bool "off"
12557+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12558 ---help---
12559 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12560 However, the address space of 32-bit x86 processors is only 4
12561@@ -1187,6 +1190,7 @@ config NOHIGHMEM
12562
12563 config HIGHMEM4G
12564 bool "4GB"
12565+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12566 ---help---
12567 Select this if you have a 32-bit processor and between 1 and 4
12568 gigabytes of physical RAM.
12569@@ -1239,7 +1243,7 @@ config PAGE_OFFSET
12570 hex
12571 default 0xB0000000 if VMSPLIT_3G_OPT
12572 default 0x80000000 if VMSPLIT_2G
12573- default 0x78000000 if VMSPLIT_2G_OPT
12574+ default 0x70000000 if VMSPLIT_2G_OPT
12575 default 0x40000000 if VMSPLIT_1G
12576 default 0xC0000000
12577 depends on X86_32
12578@@ -1680,6 +1684,7 @@ source kernel/Kconfig.hz
12579
12580 config KEXEC
12581 bool "kexec system call"
12582+ depends on !GRKERNSEC_KMEM
12583 ---help---
12584 kexec is a system call that implements the ability to shutdown your
12585 current kernel, and to start another kernel. It is like a reboot
12586@@ -1865,7 +1870,9 @@ config X86_NEED_RELOCS
12587
12588 config PHYSICAL_ALIGN
12589 hex "Alignment value to which kernel should be aligned"
12590- default "0x200000"
12591+ default "0x1000000"
12592+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12593+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12594 range 0x2000 0x1000000 if X86_32
12595 range 0x200000 0x1000000 if X86_64
12596 ---help---
12597@@ -1948,6 +1955,7 @@ config COMPAT_VDSO
12598 def_bool n
12599 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12600 depends on X86_32 || IA32_EMULATION
12601+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12602 ---help---
12603 Certain buggy versions of glibc will crash if they are
12604 presented with a 32-bit vDSO that is not mapped at the address
12605diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12606index 6983314..54ad7e8 100644
12607--- a/arch/x86/Kconfig.cpu
12608+++ b/arch/x86/Kconfig.cpu
12609@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12610
12611 config X86_F00F_BUG
12612 def_bool y
12613- depends on M586MMX || M586TSC || M586 || M486
12614+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12615
12616 config X86_INVD_BUG
12617 def_bool y
12618@@ -327,7 +327,7 @@ config X86_INVD_BUG
12619
12620 config X86_ALIGNMENT_16
12621 def_bool y
12622- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12623+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12624
12625 config X86_INTEL_USERCOPY
12626 def_bool y
12627@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12628 # generates cmov.
12629 config X86_CMOV
12630 def_bool y
12631- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12632+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12633
12634 config X86_MINIMUM_CPU_FAMILY
12635 int
12636diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12637index 61bd2ad..50b625d 100644
12638--- a/arch/x86/Kconfig.debug
12639+++ b/arch/x86/Kconfig.debug
12640@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12641 config DEBUG_RODATA
12642 bool "Write protect kernel read-only data structures"
12643 default y
12644- depends on DEBUG_KERNEL
12645+ depends on DEBUG_KERNEL && BROKEN
12646 ---help---
12647 Mark the kernel read-only data as write-protected in the pagetables,
12648 in order to catch accidental (and incorrect) writes to such const
12649@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12650
12651 config DEBUG_SET_MODULE_RONX
12652 bool "Set loadable kernel module data as NX and text as RO"
12653- depends on MODULES
12654+ depends on MODULES && BROKEN
12655 ---help---
12656 This option helps catch unintended modifications to loadable
12657 kernel module's text and read-only data. It also prevents execution
12658diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12659index 920e616..ac3d4df 100644
12660--- a/arch/x86/Makefile
12661+++ b/arch/x86/Makefile
12662@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12663 # CPU-specific tuning. Anything which can be shared with UML should go here.
12664 include $(srctree)/arch/x86/Makefile_32.cpu
12665 KBUILD_CFLAGS += $(cflags-y)
12666-
12667- # temporary until string.h is fixed
12668- KBUILD_CFLAGS += -ffreestanding
12669 else
12670 BITS := 64
12671 UTS_MACHINE := x86_64
12672@@ -107,6 +104,9 @@ else
12673 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12674 endif
12675
12676+# temporary until string.h is fixed
12677+KBUILD_CFLAGS += -ffreestanding
12678+
12679 # Make sure compiler does not have buggy stack-protector support.
12680 ifdef CONFIG_CC_STACKPROTECTOR
12681 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12682@@ -180,6 +180,7 @@ archheaders:
12683 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12684
12685 archprepare:
12686+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12687 ifeq ($(CONFIG_KEXEC_FILE),y)
12688 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12689 endif
12690@@ -263,3 +264,9 @@ define archhelp
12691 echo ' FDARGS="..." arguments for the booted kernel'
12692 echo ' FDINITRD=file initrd for the booted kernel'
12693 endef
12694+
12695+define OLD_LD
12696+
12697+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12698+*** Please upgrade your binutils to 2.18 or newer
12699+endef
12700diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12701index 3db07f3..9d81d0f 100644
12702--- a/arch/x86/boot/Makefile
12703+++ b/arch/x86/boot/Makefile
12704@@ -56,6 +56,9 @@ clean-files += cpustr.h
12705 # ---------------------------------------------------------------------------
12706
12707 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12708+ifdef CONSTIFY_PLUGIN
12709+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12710+endif
12711 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12712 GCOV_PROFILE := n
12713
12714diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12715index 878e4b9..20537ab 100644
12716--- a/arch/x86/boot/bitops.h
12717+++ b/arch/x86/boot/bitops.h
12718@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12719 u8 v;
12720 const u32 *p = (const u32 *)addr;
12721
12722- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12723+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12724 return v;
12725 }
12726
12727@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12728
12729 static inline void set_bit(int nr, void *addr)
12730 {
12731- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12732+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12733 }
12734
12735 #endif /* BOOT_BITOPS_H */
12736diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12737index bd49ec6..94c7f58 100644
12738--- a/arch/x86/boot/boot.h
12739+++ b/arch/x86/boot/boot.h
12740@@ -84,7 +84,7 @@ static inline void io_delay(void)
12741 static inline u16 ds(void)
12742 {
12743 u16 seg;
12744- asm("movw %%ds,%0" : "=rm" (seg));
12745+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12746 return seg;
12747 }
12748
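All of the boot-time asm statements in this area gain volatile for the same reason: an asm with outputs but no inputs is otherwise treated as a pure function of nothing, so the compiler may CSE it, hoist it, or drop it if the result looks unused. That is wrong for rdmsr/wrmsr pairs with side effects and for segment-register reads whose value can change between executions. A contrived x86-only sketch of the difference, in gcc/clang inline-asm syntax:

#include <stdio.h>

static inline unsigned short ds_pure(void)
{
	unsigned short seg;
	asm("movw %%ds,%0" : "=rm" (seg));		/* may be CSE'd */
	return seg;
}

static inline unsigned short ds_volatile(void)
{
	unsigned short seg;
	asm volatile("movw %%ds,%0" : "=rm" (seg));	/* always emitted */
	return seg;
}

int main(void)
{
	/* If %ds were reloaded between the two reads, only the volatile
	 * pair is guaranteed to observe the change. */
	printf("%u %u\n", ds_pure(), ds_pure());
	printf("%u %u\n", ds_volatile(), ds_volatile());
	return 0;
}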
12749diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12750index 8bd44e8..6b111e9 100644
12751--- a/arch/x86/boot/compressed/Makefile
12752+++ b/arch/x86/boot/compressed/Makefile
12753@@ -28,6 +28,9 @@ KBUILD_CFLAGS += $(cflags-y)
12754 KBUILD_CFLAGS += -mno-mmx -mno-sse
12755 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12756 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12757+ifdef CONSTIFY_PLUGIN
12758+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12759+endif
12760
12761 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12762 GCOV_PROFILE := n
12763diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12764index a53440e..c3dbf1e 100644
12765--- a/arch/x86/boot/compressed/efi_stub_32.S
12766+++ b/arch/x86/boot/compressed/efi_stub_32.S
12767@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12768 * parameter 2, ..., param n. To make things easy, we save the return
12769 * address of efi_call_phys in a global variable.
12770 */
12771- popl %ecx
12772- movl %ecx, saved_return_addr(%edx)
12773- /* get the function pointer into ECX*/
12774- popl %ecx
12775- movl %ecx, efi_rt_function_ptr(%edx)
12776+ popl saved_return_addr(%edx)
12777+ popl efi_rt_function_ptr(%edx)
12778
12779 /*
12780 * 3. Call the physical function.
12781 */
12782- call *%ecx
12783+ call *efi_rt_function_ptr(%edx)
12784
12785 /*
12786 * 4. Balance the stack. And because EAX contains the return value,
12787@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12788 1: popl %edx
12789 subl $1b, %edx
12790
12791- movl efi_rt_function_ptr(%edx), %ecx
12792- pushl %ecx
12793+ pushl efi_rt_function_ptr(%edx)
12794
12795 /*
12796 * 10. Push the saved return address onto the stack and return.
12797 */
12798- movl saved_return_addr(%edx), %ecx
12799- pushl %ecx
12800- ret
12801+ jmpl *saved_return_addr(%edx)
12802 ENDPROC(efi_call_phys)
12803 .previous
12804
12805diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12806index 630384a..278e788 100644
12807--- a/arch/x86/boot/compressed/efi_thunk_64.S
12808+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12809@@ -189,8 +189,8 @@ efi_gdt64:
12810 .long 0 /* Filled out by user */
12811 .word 0
12812 .quad 0x0000000000000000 /* NULL descriptor */
12813- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12814- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12815+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12816+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12817 .quad 0x0080890000000000 /* TS descriptor */
12818 .quad 0x0000000000000000 /* TS continued */
12819 efi_gdt64_end:
12820diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12821index 1d7fbbc..36ecd58 100644
12822--- a/arch/x86/boot/compressed/head_32.S
12823+++ b/arch/x86/boot/compressed/head_32.S
12824@@ -140,10 +140,10 @@ preferred_addr:
12825 addl %eax, %ebx
12826 notl %eax
12827 andl %eax, %ebx
12828- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12829+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12830 jge 1f
12831 #endif
12832- movl $LOAD_PHYSICAL_ADDR, %ebx
12833+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12834 1:
12835
12836 /* Target address to relocate to for decompression */
12837diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12838index 6b1766c..ad465c9 100644
12839--- a/arch/x86/boot/compressed/head_64.S
12840+++ b/arch/x86/boot/compressed/head_64.S
12841@@ -94,10 +94,10 @@ ENTRY(startup_32)
12842 addl %eax, %ebx
12843 notl %eax
12844 andl %eax, %ebx
12845- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12846+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12847 jge 1f
12848 #endif
12849- movl $LOAD_PHYSICAL_ADDR, %ebx
12850+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12851 1:
12852
12853 /* Target address to relocate to for decompression */
12854@@ -322,10 +322,10 @@ preferred_addr:
12855 addq %rax, %rbp
12856 notq %rax
12857 andq %rax, %rbp
12858- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12859+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12860 jge 1f
12861 #endif
12862- movq $LOAD_PHYSICAL_ADDR, %rbp
12863+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12864 1:
12865
12866 /* Target address to relocate to for decompression */
12867@@ -434,8 +434,8 @@ gdt:
12868 .long gdt
12869 .word 0
12870 .quad 0x0000000000000000 /* NULL descriptor */
12871- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12872- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12873+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12874+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12875 .quad 0x0080890000000000 /* TS descriptor */
12876 .quad 0x0000000000000000 /* TS continued */
12877 gdt_end:
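The GDT constants change only bit 40: 0x00af9a.../0x00cf92... become 0x00af9b.../0x00cf93..., setting the descriptor's "accessed" bit, which the CPU would otherwise set itself, via a memory write into the descriptor table, the first time the selector is loaded. Pre-setting it avoids that write, which matters once KERNEXEC-style hardening wants such tables kept read-only. A quick check of the bit position, with the layout per the Intel SDM, vol. 3:

#include <assert.h>
#include <stdint.h>

/* The type field of a segment descriptor sits in bits 40-43; bit 40 is
 * the accessed bit for code/data segments. */
#define DESC_ACCESSED	(1ULL << 40)

int main(void)
{
	uint64_t cs_old = 0x00af9a000000ffffULL;	/* __KERNEL_CS before */
	uint64_t cs_new = 0x00af9b000000ffffULL;	/* __KERNEL_CS after */

	assert(!(cs_old & DESC_ACCESSED));
	assert(cs_new == (cs_old | DESC_ACCESSED));
	return 0;
}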
12878diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12879index a950864..c710239 100644
12880--- a/arch/x86/boot/compressed/misc.c
12881+++ b/arch/x86/boot/compressed/misc.c
12882@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12883 * Calculate the delta between where vmlinux was linked to load
12884 * and where it was actually loaded.
12885 */
12886- delta = min_addr - LOAD_PHYSICAL_ADDR;
12887+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12888 if (!delta) {
12889 debug_putstr("No relocation needed... ");
12890 return;
12891@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12892 Elf32_Ehdr ehdr;
12893 Elf32_Phdr *phdrs, *phdr;
12894 #endif
12895- void *dest;
12896+ void *dest, *prev;
12897 int i;
12898
12899 memcpy(&ehdr, output, sizeof(ehdr));
12900@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12901 case PT_LOAD:
12902 #ifdef CONFIG_RELOCATABLE
12903 dest = output;
12904- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12905+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12906 #else
12907 dest = (void *)(phdr->p_paddr);
12908 #endif
12909 memcpy(dest,
12910 output + phdr->p_offset,
12911 phdr->p_filesz);
12912+ if (i)
12913+ memset(prev, 0xff, dest - prev);
12914+ prev = dest + phdr->p_filesz;
12915 break;
12916 default: /* Ignore other PT_* */ break;
12917 }
12918@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12919 error("Destination address too large");
12920 #endif
12921 #ifndef CONFIG_RELOCATABLE
12922- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12923+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12924 error("Wrong destination address");
12925 #endif
12926
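The parse_elf() hunk remembers where the previous PT_LOAD segment ended and fills the hole up to the next one with 0xff bytes rather than leaving whatever the in-place decompression buffer happened to contain there, presumably so inter-segment padding holds a known poison value instead of stale, potentially executable leftovers. A scalar model of the loop, with offsets and sizes invented for the test:

#include <assert.h>
#include <string.h>

/* Copy each PT_LOAD into place and poison the gap since the previous
 * segment with 0xff, mirroring the hunk above. */
struct seg { size_t dst, off, len; };

static void load_segs(unsigned char *out, const unsigned char *in,
		      const struct seg *s, int n)
{
	unsigned char *prev_end = NULL;
	int i;

	for (i = 0; i < n; i++) {
		unsigned char *dest = out + s[i].dst;

		memcpy(dest, in + s[i].off, s[i].len);
		if (i)
			memset(prev_end, 0xff, dest - prev_end);
		prev_end = dest + s[i].len;
	}
}

int main(void)
{
	unsigned char in[32] = "AAAABBBB", out[32] = { 0 };
	struct seg s[2] = { { 0, 0, 4 }, { 8, 4, 4 } };

	load_segs(out, in, s, 2);
	assert(out[4] == 0xff && out[7] == 0xff);	/* gap poisoned */
	assert(memcmp(out + 8, "BBBB", 4) == 0);
	return 0;
}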
12927diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12928index 1fd7d57..0f7d096 100644
12929--- a/arch/x86/boot/cpucheck.c
12930+++ b/arch/x86/boot/cpucheck.c
12931@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12932 u32 ecx = MSR_K7_HWCR;
12933 u32 eax, edx;
12934
12935- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12936+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12937 eax &= ~(1 << 15);
12938- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12939+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12940
12941 get_cpuflags(); /* Make sure it really did something */
12942 err = check_cpuflags();
12943@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12944 u32 ecx = MSR_VIA_FCR;
12945 u32 eax, edx;
12946
12947- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12948+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12949 eax |= (1<<1)|(1<<7);
12950- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12951+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12952
12953 set_bit(X86_FEATURE_CX8, cpu.flags);
12954 err = check_cpuflags();
12955@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12956 u32 eax, edx;
12957 u32 level = 1;
12958
12959- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12960- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12961- asm("cpuid"
12962+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12963+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12964+ asm volatile("cpuid"
12965 : "+a" (level), "=d" (cpu.flags[0])
12966 : : "ecx", "ebx");
12967- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12968+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12969
12970 err = check_cpuflags();
12971 } else if (err == 0x01 &&
12972diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12973index 16ef025..91e033b 100644
12974--- a/arch/x86/boot/header.S
12975+++ b/arch/x86/boot/header.S
12976@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12977 # single linked list of
12978 # struct setup_data
12979
12980-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12981+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12982
12983 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12984+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12985+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12986+#else
12987 #define VO_INIT_SIZE (VO__end - VO__text)
12988+#endif
12989 #if ZO_INIT_SIZE > VO_INIT_SIZE
12990 #define INIT_SIZE ZO_INIT_SIZE
12991 #else
12992diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12993index db75d07..8e6d0af 100644
12994--- a/arch/x86/boot/memory.c
12995+++ b/arch/x86/boot/memory.c
12996@@ -19,7 +19,7 @@
12997
12998 static int detect_memory_e820(void)
12999 {
13000- int count = 0;
13001+ unsigned int count = 0;
13002 struct biosregs ireg, oreg;
13003 struct e820entry *desc = boot_params.e820_map;
13004 static struct e820entry buf; /* static so it is zeroed */
13005diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13006index ba3e100..6501b8f 100644
13007--- a/arch/x86/boot/video-vesa.c
13008+++ b/arch/x86/boot/video-vesa.c
13009@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13010
13011 boot_params.screen_info.vesapm_seg = oreg.es;
13012 boot_params.screen_info.vesapm_off = oreg.di;
13013+ boot_params.screen_info.vesapm_size = oreg.cx;
13014 }
13015
13016 /*
13017diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13018index 43eda28..5ab5fdb 100644
13019--- a/arch/x86/boot/video.c
13020+++ b/arch/x86/boot/video.c
13021@@ -96,7 +96,7 @@ static void store_mode_params(void)
13022 static unsigned int get_entry(void)
13023 {
13024 char entry_buf[4];
13025- int i, len = 0;
13026+ unsigned int i, len = 0;
13027 int key;
13028 unsigned int v;
13029
13030diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13031index 9105655..41779c1 100644
13032--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13033+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13034@@ -8,6 +8,8 @@
13035 * including this sentence is retained in full.
13036 */
13037
13038+#include <asm/alternative-asm.h>
13039+
13040 .extern crypto_ft_tab
13041 .extern crypto_it_tab
13042 .extern crypto_fl_tab
13043@@ -70,6 +72,8 @@
13044 je B192; \
13045 leaq 32(r9),r9;
13046
13047+#define ret pax_force_retaddr; ret
13048+
13049 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13050 movq r1,r2; \
13051 movq r3,r4; \
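Because .S files pass through the C preprocessor, the line "#define ret pax_force_retaddr; ret" rewrites every literal ret in this hand-written assembly to emit the KERNEXEC return-address fixup first; pax_force_retaddr itself comes from the newly included asm/alternative-asm.h, and its expansion depends on which KERNEXEC method is configured. The %r12 to %r15 renaming in the aesni code that follows fits the same theme: as far as we can tell, the register-mask KERNEXEC variant reserves %r12, so hand-written asm must stop clobbering it. A runnable miniature of the redefinition trick itself, where trace() stands in for pax_force_retaddr and real_work() for a ret site:

#include <stdio.h>

/* #define an existing name so every textual use of it picks up an
 * extra action first. The self-reference is safe: a macro is not
 * re-expanded inside its own expansion. */
static void trace(void)
{
	puts("fixup runs before the original call");
}

static int real_work(int x)
{
	return x * 2;
}

#define real_work(x) (trace(), real_work(x))

int main(void)
{
	printf("%d\n", real_work(21));	/* trace line, then 42 */
	return 0;
}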
13052diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13053index 477e9d7..c92c7d8 100644
13054--- a/arch/x86/crypto/aesni-intel_asm.S
13055+++ b/arch/x86/crypto/aesni-intel_asm.S
13056@@ -31,6 +31,7 @@
13057
13058 #include <linux/linkage.h>
13059 #include <asm/inst.h>
13060+#include <asm/alternative-asm.h>
13061
13062 #ifdef __x86_64__
13063 .data
13064@@ -205,7 +206,7 @@ enc: .octa 0x2
13065 * num_initial_blocks = b mod 4
13066 * encrypt the initial num_initial_blocks blocks and apply ghash on
13067 * the ciphertext
13068-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13069+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13070 * are clobbered
13071 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13072 */
13073@@ -214,8 +215,8 @@ enc: .octa 0x2
13074 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13075 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13076 mov arg7, %r10 # %r10 = AAD
13077- mov arg8, %r12 # %r12 = aadLen
13078- mov %r12, %r11
13079+ mov arg8, %r15 # %r15 = aadLen
13080+ mov %r15, %r11
13081 pxor %xmm\i, %xmm\i
13082 _get_AAD_loop\num_initial_blocks\operation:
13083 movd (%r10), \TMP1
13084@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13085 psrldq $4, %xmm\i
13086 pxor \TMP1, %xmm\i
13087 add $4, %r10
13088- sub $4, %r12
13089+ sub $4, %r15
13090 jne _get_AAD_loop\num_initial_blocks\operation
13091 cmp $16, %r11
13092 je _get_AAD_loop2_done\num_initial_blocks\operation
13093- mov $16, %r12
13094+ mov $16, %r15
13095 _get_AAD_loop2\num_initial_blocks\operation:
13096 psrldq $4, %xmm\i
13097- sub $4, %r12
13098- cmp %r11, %r12
13099+ sub $4, %r15
13100+ cmp %r11, %r15
13101 jne _get_AAD_loop2\num_initial_blocks\operation
13102 _get_AAD_loop2_done\num_initial_blocks\operation:
13103 movdqa SHUF_MASK(%rip), %xmm14
13104@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13105 * num_initial_blocks = b mod 4
13106 * encrypt the initial num_initial_blocks blocks and apply ghash on
13107 * the ciphertext
13108-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13109+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13110 * are clobbered
13111 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13112 */
13113@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13114 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13115 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13116 mov arg7, %r10 # %r10 = AAD
13117- mov arg8, %r12 # %r12 = aadLen
13118- mov %r12, %r11
13119+ mov arg8, %r15 # %r15 = aadLen
13120+ mov %r15, %r11
13121 pxor %xmm\i, %xmm\i
13122 _get_AAD_loop\num_initial_blocks\operation:
13123 movd (%r10), \TMP1
13124@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13125 psrldq $4, %xmm\i
13126 pxor \TMP1, %xmm\i
13127 add $4, %r10
13128- sub $4, %r12
13129+ sub $4, %r15
13130 jne _get_AAD_loop\num_initial_blocks\operation
13131 cmp $16, %r11
13132 je _get_AAD_loop2_done\num_initial_blocks\operation
13133- mov $16, %r12
13134+ mov $16, %r15
13135 _get_AAD_loop2\num_initial_blocks\operation:
13136 psrldq $4, %xmm\i
13137- sub $4, %r12
13138- cmp %r11, %r12
13139+ sub $4, %r15
13140+ cmp %r11, %r15
13141 jne _get_AAD_loop2\num_initial_blocks\operation
13142 _get_AAD_loop2_done\num_initial_blocks\operation:
13143 movdqa SHUF_MASK(%rip), %xmm14
13144@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13145 *
13146 *****************************************************************************/
13147 ENTRY(aesni_gcm_dec)
13148- push %r12
13149+ push %r15
13150 push %r13
13151 push %r14
13152 mov %rsp, %r14
13153@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13154 */
13155 sub $VARIABLE_OFFSET, %rsp
13156 and $~63, %rsp # align rsp to 64 bytes
13157- mov %arg6, %r12
13158- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13159+ mov %arg6, %r15
13160+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13161 movdqa SHUF_MASK(%rip), %xmm2
13162 PSHUFB_XMM %xmm2, %xmm13
13163
13164@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13165 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13166 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13167 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13168- mov %r13, %r12
13169- and $(3<<4), %r12
13170+ mov %r13, %r15
13171+ and $(3<<4), %r15
13172 jz _initial_num_blocks_is_0_decrypt
13173- cmp $(2<<4), %r12
13174+ cmp $(2<<4), %r15
13175 jb _initial_num_blocks_is_1_decrypt
13176 je _initial_num_blocks_is_2_decrypt
13177 _initial_num_blocks_is_3_decrypt:
13178@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13179 sub $16, %r11
13180 add %r13, %r11
13181 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13182- lea SHIFT_MASK+16(%rip), %r12
13183- sub %r13, %r12
13184+ lea SHIFT_MASK+16(%rip), %r15
13185+ sub %r13, %r15
13186 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13187 # (%r13 is the number of bytes in plaintext mod 16)
13188- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13189+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13190 	PSHUFB_XMM %xmm2, %xmm1		# right shift 16-%r13 bytes
13191
13192 movdqa %xmm1, %xmm2
13193 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13194- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13195+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13196 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13197 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13198 pand %xmm1, %xmm2
13199@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13200 sub $1, %r13
13201 jne _less_than_8_bytes_left_decrypt
13202 _multiple_of_16_bytes_decrypt:
13203- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13204- shl $3, %r12 # convert into number of bits
13205- movd %r12d, %xmm15 # len(A) in %xmm15
13206+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13207+ shl $3, %r15 # convert into number of bits
13208+ movd %r15d, %xmm15 # len(A) in %xmm15
13209 shl $3, %arg4 # len(C) in bits (*128)
13210 MOVQ_R64_XMM %arg4, %xmm1
13211 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13212@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13213 mov %r14, %rsp
13214 pop %r14
13215 pop %r13
13216- pop %r12
13217+ pop %r15
13218+ pax_force_retaddr
13219 ret
13220 ENDPROC(aesni_gcm_dec)
13221
13222@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13223 * poly = x^128 + x^127 + x^126 + x^121 + 1
13224 ***************************************************************************/
13225 ENTRY(aesni_gcm_enc)
13226- push %r12
13227+ push %r15
13228 push %r13
13229 push %r14
13230 mov %rsp, %r14
13231@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13232 #
13233 sub $VARIABLE_OFFSET, %rsp
13234 and $~63, %rsp
13235- mov %arg6, %r12
13236- movdqu (%r12), %xmm13
13237+ mov %arg6, %r15
13238+ movdqu (%r15), %xmm13
13239 movdqa SHUF_MASK(%rip), %xmm2
13240 PSHUFB_XMM %xmm2, %xmm13
13241
13242@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13243 movdqa %xmm13, HashKey(%rsp)
13244 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13245 and $-16, %r13
13246- mov %r13, %r12
13247+ mov %r13, %r15
13248
13249 # Encrypt first few blocks
13250
13251- and $(3<<4), %r12
13252+ and $(3<<4), %r15
13253 jz _initial_num_blocks_is_0_encrypt
13254- cmp $(2<<4), %r12
13255+ cmp $(2<<4), %r15
13256 jb _initial_num_blocks_is_1_encrypt
13257 je _initial_num_blocks_is_2_encrypt
13258 _initial_num_blocks_is_3_encrypt:
13259@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13260 sub $16, %r11
13261 add %r13, %r11
13262 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13263- lea SHIFT_MASK+16(%rip), %r12
13264- sub %r13, %r12
13265+ lea SHIFT_MASK+16(%rip), %r15
13266+ sub %r13, %r15
13267 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13268 # (%r13 is the number of bytes in plaintext mod 16)
13269- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13270+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13271 	PSHUFB_XMM %xmm2, %xmm1		# shift right 16-r13 bytes
13272 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13273- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13274+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13275 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13276 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13277 movdqa SHUF_MASK(%rip), %xmm10
13278@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13279 sub $1, %r13
13280 jne _less_than_8_bytes_left_encrypt
13281 _multiple_of_16_bytes_encrypt:
13282- mov arg8, %r12 # %r12 = addLen (number of bytes)
13283- shl $3, %r12
13284- movd %r12d, %xmm15 # len(A) in %xmm15
13285+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13286+ shl $3, %r15
13287+ movd %r15d, %xmm15 # len(A) in %xmm15
13288 shl $3, %arg4 # len(C) in bits (*128)
13289 MOVQ_R64_XMM %arg4, %xmm1
13290 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13291@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13292 mov %r14, %rsp
13293 pop %r14
13294 pop %r13
13295- pop %r12
13296+ pop %r15
13297+ pax_force_retaddr
13298 ret
13299 ENDPROC(aesni_gcm_enc)
13300
13301@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13302 pxor %xmm1, %xmm0
13303 movaps %xmm0, (TKEYP)
13304 add $0x10, TKEYP
13305+ pax_force_retaddr
13306 ret
13307 ENDPROC(_key_expansion_128)
13308 ENDPROC(_key_expansion_256a)
13309@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13310 shufps $0b01001110, %xmm2, %xmm1
13311 movaps %xmm1, 0x10(TKEYP)
13312 add $0x20, TKEYP
13313+ pax_force_retaddr
13314 ret
13315 ENDPROC(_key_expansion_192a)
13316
13317@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13318
13319 movaps %xmm0, (TKEYP)
13320 add $0x10, TKEYP
13321+ pax_force_retaddr
13322 ret
13323 ENDPROC(_key_expansion_192b)
13324
13325@@ -1781,6 +1787,7 @@ _key_expansion_256b:
13326 pxor %xmm1, %xmm2
13327 movaps %xmm2, (TKEYP)
13328 add $0x10, TKEYP
13329+ pax_force_retaddr
13330 ret
13331 ENDPROC(_key_expansion_256b)
13332
13333@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13334 #ifndef __x86_64__
13335 popl KEYP
13336 #endif
13337+ pax_force_retaddr
13338 ret
13339 ENDPROC(aesni_set_key)
13340
13341@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13342 popl KLEN
13343 popl KEYP
13344 #endif
13345+ pax_force_retaddr
13346 ret
13347 ENDPROC(aesni_enc)
13348
13349@@ -1974,6 +1983,7 @@ _aesni_enc1:
13350 AESENC KEY STATE
13351 movaps 0x70(TKEYP), KEY
13352 AESENCLAST KEY STATE
13353+ pax_force_retaddr
13354 ret
13355 ENDPROC(_aesni_enc1)
13356
13357@@ -2083,6 +2093,7 @@ _aesni_enc4:
13358 AESENCLAST KEY STATE2
13359 AESENCLAST KEY STATE3
13360 AESENCLAST KEY STATE4
13361+ pax_force_retaddr
13362 ret
13363 ENDPROC(_aesni_enc4)
13364
13365@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13366 popl KLEN
13367 popl KEYP
13368 #endif
13369+ pax_force_retaddr
13370 ret
13371 ENDPROC(aesni_dec)
13372
13373@@ -2164,6 +2176,7 @@ _aesni_dec1:
13374 AESDEC KEY STATE
13375 movaps 0x70(TKEYP), KEY
13376 AESDECLAST KEY STATE
13377+ pax_force_retaddr
13378 ret
13379 ENDPROC(_aesni_dec1)
13380
13381@@ -2273,6 +2286,7 @@ _aesni_dec4:
13382 AESDECLAST KEY STATE2
13383 AESDECLAST KEY STATE3
13384 AESDECLAST KEY STATE4
13385+ pax_force_retaddr
13386 ret
13387 ENDPROC(_aesni_dec4)
13388
13389@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13390 popl KEYP
13391 popl LEN
13392 #endif
13393+ pax_force_retaddr
13394 ret
13395 ENDPROC(aesni_ecb_enc)
13396
13397@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13398 popl KEYP
13399 popl LEN
13400 #endif
13401+ pax_force_retaddr
13402 ret
13403 ENDPROC(aesni_ecb_dec)
13404
13405@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13406 popl LEN
13407 popl IVP
13408 #endif
13409+ pax_force_retaddr
13410 ret
13411 ENDPROC(aesni_cbc_enc)
13412
13413@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13414 popl LEN
13415 popl IVP
13416 #endif
13417+ pax_force_retaddr
13418 ret
13419 ENDPROC(aesni_cbc_dec)
13420
13421@@ -2550,6 +2568,7 @@ _aesni_inc_init:
13422 mov $1, TCTR_LOW
13423 MOVQ_R64_XMM TCTR_LOW INC
13424 MOVQ_R64_XMM CTR TCTR_LOW
13425+ pax_force_retaddr
13426 ret
13427 ENDPROC(_aesni_inc_init)
13428
13429@@ -2579,6 +2598,7 @@ _aesni_inc:
13430 .Linc_low:
13431 movaps CTR, IV
13432 PSHUFB_XMM BSWAP_MASK IV
13433+ pax_force_retaddr
13434 ret
13435 ENDPROC(_aesni_inc)
13436
13437@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13438 .Lctr_enc_ret:
13439 movups IV, (IVP)
13440 .Lctr_enc_just_ret:
13441+ pax_force_retaddr
13442 ret
13443 ENDPROC(aesni_ctr_enc)
13444
13445@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13446 pxor INC, STATE4
13447 movdqu STATE4, 0x70(OUTP)
13448
13449+ pax_force_retaddr
13450 ret
13451 ENDPROC(aesni_xts_crypt8)
13452
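[editor's note] A pattern worth flagging in the aesni-intel_asm.S hunks above, and in most of the crypto diffs that follow: every use of %r12 is renamed to %r15 (or %r14 elsewhere), with the push/pop pairs updated to match. The likely reason, hedged, is the KERNEXEC "OR" method defined later in this patch (see the alternative-asm.h hunk), whose pax_set_fptr_mask parks the return-address mask in %r12, so hand-written assembly must stop using %r12 as scratch. From C, reserving a register looks like this sketch (hypothetical variable name, GNU C global register extension):

    /* Any leftover code that still clobbered %r12 would silently corrupt
     * the mask -- hence the wholesale register renames in these hunks. */
    register unsigned long pax_retaddr_mask asm("r12");

    unsigned long tag_retaddr(unsigned long ra)
    {
        return ra | pax_retaddr_mask;   /* what `orq %r12,(%rsp)` does */
    }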
13453diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13454index 246c670..466e2d6 100644
13455--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13456+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13457@@ -21,6 +21,7 @@
13458 */
13459
13460 #include <linux/linkage.h>
13461+#include <asm/alternative-asm.h>
13462
13463 .file "blowfish-x86_64-asm.S"
13464 .text
13465@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13466 jnz .L__enc_xor;
13467
13468 write_block();
13469+ pax_force_retaddr
13470 ret;
13471 .L__enc_xor:
13472 xor_block();
13473+ pax_force_retaddr
13474 ret;
13475 ENDPROC(__blowfish_enc_blk)
13476
13477@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13478
13479 movq %r11, %rbp;
13480
13481+ pax_force_retaddr
13482 ret;
13483 ENDPROC(blowfish_dec_blk)
13484
13485@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13486
13487 popq %rbx;
13488 popq %rbp;
13489+ pax_force_retaddr
13490 ret;
13491
13492 .L__enc_xor4:
13493@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13494
13495 popq %rbx;
13496 popq %rbp;
13497+ pax_force_retaddr
13498 ret;
13499 ENDPROC(__blowfish_enc_blk_4way)
13500
13501@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13502 popq %rbx;
13503 popq %rbp;
13504
13505+ pax_force_retaddr
13506 ret;
13507 ENDPROC(blowfish_dec_blk_4way)
13508diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13509index ce71f92..1dce7ec 100644
13510--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13511+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13512@@ -16,6 +16,7 @@
13513 */
13514
13515 #include <linux/linkage.h>
13516+#include <asm/alternative-asm.h>
13517
13518 #define CAMELLIA_TABLE_BYTE_LEN 272
13519
13520@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13521 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13522 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13523 %rcx, (%r9));
13524+ pax_force_retaddr
13525 ret;
13526 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13527
13528@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13529 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13530 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13531 %rax, (%r9));
13532+ pax_force_retaddr
13533 ret;
13534 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13535
13536@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13537 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13538 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13539
13540+ pax_force_retaddr
13541 ret;
13542
13543 .align 8
13544@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13545 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13546 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13547
13548+ pax_force_retaddr
13549 ret;
13550
13551 .align 8
13552@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13553 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13554 %xmm8, %rsi);
13555
13556+ pax_force_retaddr
13557 ret;
13558 ENDPROC(camellia_ecb_enc_16way)
13559
13560@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13561 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13562 %xmm8, %rsi);
13563
13564+ pax_force_retaddr
13565 ret;
13566 ENDPROC(camellia_ecb_dec_16way)
13567
13568@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13569 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13570 %xmm8, %rsi);
13571
13572+ pax_force_retaddr
13573 ret;
13574 ENDPROC(camellia_cbc_dec_16way)
13575
13576@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13577 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13578 %xmm8, %rsi);
13579
13580+ pax_force_retaddr
13581 ret;
13582 ENDPROC(camellia_ctr_16way)
13583
13584@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13585 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13586 %xmm8, %rsi);
13587
13588+ pax_force_retaddr
13589 ret;
13590 ENDPROC(camellia_xts_crypt_16way)
13591
13592diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13593index 0e0b886..5a3123c 100644
13594--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13595+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13596@@ -11,6 +11,7 @@
13597 */
13598
13599 #include <linux/linkage.h>
13600+#include <asm/alternative-asm.h>
13601
13602 #define CAMELLIA_TABLE_BYTE_LEN 272
13603
13604@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13605 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13606 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13607 %rcx, (%r9));
13608+ pax_force_retaddr
13609 ret;
13610 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13611
13612@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13613 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13614 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13615 %rax, (%r9));
13616+ pax_force_retaddr
13617 ret;
13618 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13619
13620@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13621 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13622 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13623
13624+ pax_force_retaddr
13625 ret;
13626
13627 .align 8
13628@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13629 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13630 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13631
13632+ pax_force_retaddr
13633 ret;
13634
13635 .align 8
13636@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13637
13638 vzeroupper;
13639
13640+ pax_force_retaddr
13641 ret;
13642 ENDPROC(camellia_ecb_enc_32way)
13643
13644@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13645
13646 vzeroupper;
13647
13648+ pax_force_retaddr
13649 ret;
13650 ENDPROC(camellia_ecb_dec_32way)
13651
13652@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13653
13654 vzeroupper;
13655
13656+ pax_force_retaddr
13657 ret;
13658 ENDPROC(camellia_cbc_dec_32way)
13659
13660@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13661
13662 vzeroupper;
13663
13664+ pax_force_retaddr
13665 ret;
13666 ENDPROC(camellia_ctr_32way)
13667
13668@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13669
13670 vzeroupper;
13671
13672+ pax_force_retaddr
13673 ret;
13674 ENDPROC(camellia_xts_crypt_32way)
13675
13676diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13677index 310319c..db3d7b5 100644
13678--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13679+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13680@@ -21,6 +21,7 @@
13681 */
13682
13683 #include <linux/linkage.h>
13684+#include <asm/alternative-asm.h>
13685
13686 .file "camellia-x86_64-asm_64.S"
13687 .text
13688@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13689 enc_outunpack(mov, RT1);
13690
13691 movq RRBP, %rbp;
13692+ pax_force_retaddr
13693 ret;
13694
13695 .L__enc_xor:
13696 enc_outunpack(xor, RT1);
13697
13698 movq RRBP, %rbp;
13699+ pax_force_retaddr
13700 ret;
13701 ENDPROC(__camellia_enc_blk)
13702
13703@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13704 dec_outunpack();
13705
13706 movq RRBP, %rbp;
13707+ pax_force_retaddr
13708 ret;
13709 ENDPROC(camellia_dec_blk)
13710
13711@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13712
13713 movq RRBP, %rbp;
13714 popq %rbx;
13715+ pax_force_retaddr
13716 ret;
13717
13718 .L__enc2_xor:
13719@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13720
13721 movq RRBP, %rbp;
13722 popq %rbx;
13723+ pax_force_retaddr
13724 ret;
13725 ENDPROC(__camellia_enc_blk_2way)
13726
13727@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13728
13729 movq RRBP, %rbp;
13730 movq RXOR, %rbx;
13731+ pax_force_retaddr
13732 ret;
13733 ENDPROC(camellia_dec_blk_2way)
13734diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13735index c35fd5d..2d8c7db 100644
13736--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13737+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13738@@ -24,6 +24,7 @@
13739 */
13740
13741 #include <linux/linkage.h>
13742+#include <asm/alternative-asm.h>
13743
13744 .file "cast5-avx-x86_64-asm_64.S"
13745
13746@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13747 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13748 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13749
13750+ pax_force_retaddr
13751 ret;
13752 ENDPROC(__cast5_enc_blk16)
13753
13754@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13755 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13756 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13757
13758+ pax_force_retaddr
13759 ret;
13760
13761 .L__skip_dec:
13762@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13763 vmovdqu RR4, (6*4*4)(%r11);
13764 vmovdqu RL4, (7*4*4)(%r11);
13765
13766+ pax_force_retaddr
13767 ret;
13768 ENDPROC(cast5_ecb_enc_16way)
13769
13770@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13771 vmovdqu RR4, (6*4*4)(%r11);
13772 vmovdqu RL4, (7*4*4)(%r11);
13773
13774+ pax_force_retaddr
13775 ret;
13776 ENDPROC(cast5_ecb_dec_16way)
13777
13778@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13779 * %rdx: src
13780 */
13781
13782- pushq %r12;
13783+ pushq %r14;
13784
13785 movq %rsi, %r11;
13786- movq %rdx, %r12;
13787+ movq %rdx, %r14;
13788
13789 vmovdqu (0*16)(%rdx), RL1;
13790 vmovdqu (1*16)(%rdx), RR1;
13791@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13792 call __cast5_dec_blk16;
13793
13794 /* xor with src */
13795- vmovq (%r12), RX;
13796+ vmovq (%r14), RX;
13797 vpshufd $0x4f, RX, RX;
13798 vpxor RX, RR1, RR1;
13799- vpxor 0*16+8(%r12), RL1, RL1;
13800- vpxor 1*16+8(%r12), RR2, RR2;
13801- vpxor 2*16+8(%r12), RL2, RL2;
13802- vpxor 3*16+8(%r12), RR3, RR3;
13803- vpxor 4*16+8(%r12), RL3, RL3;
13804- vpxor 5*16+8(%r12), RR4, RR4;
13805- vpxor 6*16+8(%r12), RL4, RL4;
13806+ vpxor 0*16+8(%r14), RL1, RL1;
13807+ vpxor 1*16+8(%r14), RR2, RR2;
13808+ vpxor 2*16+8(%r14), RL2, RL2;
13809+ vpxor 3*16+8(%r14), RR3, RR3;
13810+ vpxor 4*16+8(%r14), RL3, RL3;
13811+ vpxor 5*16+8(%r14), RR4, RR4;
13812+ vpxor 6*16+8(%r14), RL4, RL4;
13813
13814 vmovdqu RR1, (0*16)(%r11);
13815 vmovdqu RL1, (1*16)(%r11);
13816@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13817 vmovdqu RR4, (6*16)(%r11);
13818 vmovdqu RL4, (7*16)(%r11);
13819
13820- popq %r12;
13821+ popq %r14;
13822
13823+ pax_force_retaddr
13824 ret;
13825 ENDPROC(cast5_cbc_dec_16way)
13826
13827@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13828 * %rcx: iv (big endian, 64bit)
13829 */
13830
13831- pushq %r12;
13832+ pushq %r14;
13833
13834 movq %rsi, %r11;
13835- movq %rdx, %r12;
13836+ movq %rdx, %r14;
13837
13838 vpcmpeqd RTMP, RTMP, RTMP;
13839 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13840@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13841 call __cast5_enc_blk16;
13842
13843 /* dst = src ^ iv */
13844- vpxor (0*16)(%r12), RR1, RR1;
13845- vpxor (1*16)(%r12), RL1, RL1;
13846- vpxor (2*16)(%r12), RR2, RR2;
13847- vpxor (3*16)(%r12), RL2, RL2;
13848- vpxor (4*16)(%r12), RR3, RR3;
13849- vpxor (5*16)(%r12), RL3, RL3;
13850- vpxor (6*16)(%r12), RR4, RR4;
13851- vpxor (7*16)(%r12), RL4, RL4;
13852+ vpxor (0*16)(%r14), RR1, RR1;
13853+ vpxor (1*16)(%r14), RL1, RL1;
13854+ vpxor (2*16)(%r14), RR2, RR2;
13855+ vpxor (3*16)(%r14), RL2, RL2;
13856+ vpxor (4*16)(%r14), RR3, RR3;
13857+ vpxor (5*16)(%r14), RL3, RL3;
13858+ vpxor (6*16)(%r14), RR4, RR4;
13859+ vpxor (7*16)(%r14), RL4, RL4;
13860 vmovdqu RR1, (0*16)(%r11);
13861 vmovdqu RL1, (1*16)(%r11);
13862 vmovdqu RR2, (2*16)(%r11);
13863@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13864 vmovdqu RR4, (6*16)(%r11);
13865 vmovdqu RL4, (7*16)(%r11);
13866
13867- popq %r12;
13868+ popq %r14;
13869
13870+ pax_force_retaddr
13871 ret;
13872 ENDPROC(cast5_ctr_16way)
13873diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13874index e3531f8..e123f35 100644
13875--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13876+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13877@@ -24,6 +24,7 @@
13878 */
13879
13880 #include <linux/linkage.h>
13881+#include <asm/alternative-asm.h>
13882 #include "glue_helper-asm-avx.S"
13883
13884 .file "cast6-avx-x86_64-asm_64.S"
13885@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13886 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13887 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13888
13889+ pax_force_retaddr
13890 ret;
13891 ENDPROC(__cast6_enc_blk8)
13892
13893@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13894 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13895 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13896
13897+ pax_force_retaddr
13898 ret;
13899 ENDPROC(__cast6_dec_blk8)
13900
13901@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13902
13903 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13904
13905+ pax_force_retaddr
13906 ret;
13907 ENDPROC(cast6_ecb_enc_8way)
13908
13909@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13910
13911 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13912
13913+ pax_force_retaddr
13914 ret;
13915 ENDPROC(cast6_ecb_dec_8way)
13916
13917@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13918 * %rdx: src
13919 */
13920
13921- pushq %r12;
13922+ pushq %r14;
13923
13924 movq %rsi, %r11;
13925- movq %rdx, %r12;
13926+ movq %rdx, %r14;
13927
13928 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13929
13930 call __cast6_dec_blk8;
13931
13932- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13933+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13934
13935- popq %r12;
13936+ popq %r14;
13937
13938+ pax_force_retaddr
13939 ret;
13940 ENDPROC(cast6_cbc_dec_8way)
13941
13942@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13943 * %rcx: iv (little endian, 128bit)
13944 */
13945
13946- pushq %r12;
13947+ pushq %r14;
13948
13949 movq %rsi, %r11;
13950- movq %rdx, %r12;
13951+ movq %rdx, %r14;
13952
13953 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13954 RD2, RX, RKR, RKM);
13955
13956 call __cast6_enc_blk8;
13957
13958- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13959+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13960
13961- popq %r12;
13962+ popq %r14;
13963
13964+ pax_force_retaddr
13965 ret;
13966 ENDPROC(cast6_ctr_8way)
13967
13968@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13969 /* dst <= regs xor IVs(in dst) */
13970 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13971
13972+ pax_force_retaddr
13973 ret;
13974 ENDPROC(cast6_xts_enc_8way)
13975
13976@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13977 /* dst <= regs xor IVs(in dst) */
13978 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13979
13980+ pax_force_retaddr
13981 ret;
13982 ENDPROC(cast6_xts_dec_8way)
13983diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13984index 26d49eb..c0a8c84 100644
13985--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13986+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13987@@ -45,6 +45,7 @@
13988
13989 #include <asm/inst.h>
13990 #include <linux/linkage.h>
13991+#include <asm/alternative-asm.h>
13992
13993 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13994
13995@@ -309,6 +310,7 @@ do_return:
13996 popq %rsi
13997 popq %rdi
13998 popq %rbx
13999+ pax_force_retaddr
14000 ret
14001
14002 ################################################################
14003diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14004index 5d1e007..098cb4f 100644
14005--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14006+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14007@@ -18,6 +18,7 @@
14008
14009 #include <linux/linkage.h>
14010 #include <asm/inst.h>
14011+#include <asm/alternative-asm.h>
14012
14013 .data
14014
14015@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14016 psrlq $1, T2
14017 pxor T2, T1
14018 pxor T1, DATA
14019+ pax_force_retaddr
14020 ret
14021 ENDPROC(__clmul_gf128mul_ble)
14022
14023@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14024 call __clmul_gf128mul_ble
14025 PSHUFB_XMM BSWAP DATA
14026 movups DATA, (%rdi)
14027+ pax_force_retaddr
14028 ret
14029 ENDPROC(clmul_ghash_mul)
14030
14031@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14032 PSHUFB_XMM BSWAP DATA
14033 movups DATA, (%rdi)
14034 .Lupdate_just_ret:
14035+ pax_force_retaddr
14036 ret
14037 ENDPROC(clmul_ghash_update)
14038diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14039index 9279e0b..c4b3d2c 100644
14040--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14041+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14042@@ -1,4 +1,5 @@
14043 #include <linux/linkage.h>
14044+#include <asm/alternative-asm.h>
14045
14046 # enter salsa20_encrypt_bytes
14047 ENTRY(salsa20_encrypt_bytes)
14048@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14049 add %r11,%rsp
14050 mov %rdi,%rax
14051 mov %rsi,%rdx
14052+ pax_force_retaddr
14053 ret
14054 # bytesatleast65:
14055 ._bytesatleast65:
14056@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14057 add %r11,%rsp
14058 mov %rdi,%rax
14059 mov %rsi,%rdx
14060+ pax_force_retaddr
14061 ret
14062 ENDPROC(salsa20_keysetup)
14063
14064@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14065 add %r11,%rsp
14066 mov %rdi,%rax
14067 mov %rsi,%rdx
14068+ pax_force_retaddr
14069 ret
14070 ENDPROC(salsa20_ivsetup)
14071diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14072index 2f202f4..d9164d6 100644
14073--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14074+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14075@@ -24,6 +24,7 @@
14076 */
14077
14078 #include <linux/linkage.h>
14079+#include <asm/alternative-asm.h>
14080 #include "glue_helper-asm-avx.S"
14081
14082 .file "serpent-avx-x86_64-asm_64.S"
14083@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14084 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14085 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14086
14087+ pax_force_retaddr
14088 ret;
14089 ENDPROC(__serpent_enc_blk8_avx)
14090
14091@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14092 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14093 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14094
14095+ pax_force_retaddr
14096 ret;
14097 ENDPROC(__serpent_dec_blk8_avx)
14098
14099@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14100
14101 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14102
14103+ pax_force_retaddr
14104 ret;
14105 ENDPROC(serpent_ecb_enc_8way_avx)
14106
14107@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14108
14109 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14110
14111+ pax_force_retaddr
14112 ret;
14113 ENDPROC(serpent_ecb_dec_8way_avx)
14114
14115@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14116
14117 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14118
14119+ pax_force_retaddr
14120 ret;
14121 ENDPROC(serpent_cbc_dec_8way_avx)
14122
14123@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14124
14125 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14126
14127+ pax_force_retaddr
14128 ret;
14129 ENDPROC(serpent_ctr_8way_avx)
14130
14131@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14132 /* dst <= regs xor IVs(in dst) */
14133 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14134
14135+ pax_force_retaddr
14136 ret;
14137 ENDPROC(serpent_xts_enc_8way_avx)
14138
14139@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14140 /* dst <= regs xor IVs(in dst) */
14141 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14142
14143+ pax_force_retaddr
14144 ret;
14145 ENDPROC(serpent_xts_dec_8way_avx)
14146diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14147index b222085..abd483c 100644
14148--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14149+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14150@@ -15,6 +15,7 @@
14151 */
14152
14153 #include <linux/linkage.h>
14154+#include <asm/alternative-asm.h>
14155 #include "glue_helper-asm-avx2.S"
14156
14157 .file "serpent-avx2-asm_64.S"
14158@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14159 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14160 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14161
14162+ pax_force_retaddr
14163 ret;
14164 ENDPROC(__serpent_enc_blk16)
14165
14166@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14167 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14168 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14169
14170+ pax_force_retaddr
14171 ret;
14172 ENDPROC(__serpent_dec_blk16)
14173
14174@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14175
14176 vzeroupper;
14177
14178+ pax_force_retaddr
14179 ret;
14180 ENDPROC(serpent_ecb_enc_16way)
14181
14182@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14183
14184 vzeroupper;
14185
14186+ pax_force_retaddr
14187 ret;
14188 ENDPROC(serpent_ecb_dec_16way)
14189
14190@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14191
14192 vzeroupper;
14193
14194+ pax_force_retaddr
14195 ret;
14196 ENDPROC(serpent_cbc_dec_16way)
14197
14198@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14199
14200 vzeroupper;
14201
14202+ pax_force_retaddr
14203 ret;
14204 ENDPROC(serpent_ctr_16way)
14205
14206@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14207
14208 vzeroupper;
14209
14210+ pax_force_retaddr
14211 ret;
14212 ENDPROC(serpent_xts_enc_16way)
14213
14214@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14215
14216 vzeroupper;
14217
14218+ pax_force_retaddr
14219 ret;
14220 ENDPROC(serpent_xts_dec_16way)
14221diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14222index acc066c..1559cc4 100644
14223--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14224+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14225@@ -25,6 +25,7 @@
14226 */
14227
14228 #include <linux/linkage.h>
14229+#include <asm/alternative-asm.h>
14230
14231 .file "serpent-sse2-x86_64-asm_64.S"
14232 .text
14233@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14234 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14235 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14236
14237+ pax_force_retaddr
14238 ret;
14239
14240 .L__enc_xor8:
14241 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14242 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14243
14244+ pax_force_retaddr
14245 ret;
14246 ENDPROC(__serpent_enc_blk_8way)
14247
14248@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14249 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14250 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14251
14252+ pax_force_retaddr
14253 ret;
14254 ENDPROC(serpent_dec_blk_8way)
14255diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14256index a410950..9dfe7ad 100644
14257--- a/arch/x86/crypto/sha1_ssse3_asm.S
14258+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14259@@ -29,6 +29,7 @@
14260 */
14261
14262 #include <linux/linkage.h>
14263+#include <asm/alternative-asm.h>
14264
14265 #define CTX %rdi // arg1
14266 #define BUF %rsi // arg2
14267@@ -75,9 +76,9 @@
14268
14269 push %rbx
14270 push %rbp
14271- push %r12
14272+ push %r14
14273
14274- mov %rsp, %r12
14275+ mov %rsp, %r14
14276 sub $64, %rsp # allocate workspace
14277 and $~15, %rsp # align stack
14278
14279@@ -99,11 +100,12 @@
14280 xor %rax, %rax
14281 rep stosq
14282
14283- mov %r12, %rsp # deallocate workspace
14284+ mov %r14, %rsp # deallocate workspace
14285
14286- pop %r12
14287+ pop %r14
14288 pop %rbp
14289 pop %rbx
14290+ pax_force_retaddr
14291 ret
14292
14293 ENDPROC(\name)
14294diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14295index 642f156..51a513c 100644
14296--- a/arch/x86/crypto/sha256-avx-asm.S
14297+++ b/arch/x86/crypto/sha256-avx-asm.S
14298@@ -49,6 +49,7 @@
14299
14300 #ifdef CONFIG_AS_AVX
14301 #include <linux/linkage.h>
14302+#include <asm/alternative-asm.h>
14303
14304 ## assume buffers not aligned
14305 #define VMOVDQ vmovdqu
14306@@ -460,6 +461,7 @@ done_hash:
14307 popq %r13
14308 popq %rbp
14309 popq %rbx
14310+ pax_force_retaddr
14311 ret
14312 ENDPROC(sha256_transform_avx)
14313
14314diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14315index 9e86944..3795e6a 100644
14316--- a/arch/x86/crypto/sha256-avx2-asm.S
14317+++ b/arch/x86/crypto/sha256-avx2-asm.S
14318@@ -50,6 +50,7 @@
14319
14320 #ifdef CONFIG_AS_AVX2
14321 #include <linux/linkage.h>
14322+#include <asm/alternative-asm.h>
14323
14324 ## assume buffers not aligned
14325 #define VMOVDQ vmovdqu
14326@@ -720,6 +721,7 @@ done_hash:
14327 popq %r12
14328 popq %rbp
14329 popq %rbx
14330+ pax_force_retaddr
14331 ret
14332 ENDPROC(sha256_transform_rorx)
14333
14334diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14335index f833b74..8c62a9e 100644
14336--- a/arch/x86/crypto/sha256-ssse3-asm.S
14337+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14338@@ -47,6 +47,7 @@
14339 ########################################################################
14340
14341 #include <linux/linkage.h>
14342+#include <asm/alternative-asm.h>
14343
14344 ## assume buffers not aligned
14345 #define MOVDQ movdqu
14346@@ -471,6 +472,7 @@ done_hash:
14347 popq %rbp
14348 popq %rbx
14349
14350+ pax_force_retaddr
14351 ret
14352 ENDPROC(sha256_transform_ssse3)
14353
14354diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14355index 974dde9..a823ff9 100644
14356--- a/arch/x86/crypto/sha512-avx-asm.S
14357+++ b/arch/x86/crypto/sha512-avx-asm.S
14358@@ -49,6 +49,7 @@
14359
14360 #ifdef CONFIG_AS_AVX
14361 #include <linux/linkage.h>
14362+#include <asm/alternative-asm.h>
14363
14364 .text
14365
14366@@ -364,6 +365,7 @@ updateblock:
14367 mov frame_RSPSAVE(%rsp), %rsp
14368
14369 nowork:
14370+ pax_force_retaddr
14371 ret
14372 ENDPROC(sha512_transform_avx)
14373
14374diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14375index 568b961..ed20c37 100644
14376--- a/arch/x86/crypto/sha512-avx2-asm.S
14377+++ b/arch/x86/crypto/sha512-avx2-asm.S
14378@@ -51,6 +51,7 @@
14379
14380 #ifdef CONFIG_AS_AVX2
14381 #include <linux/linkage.h>
14382+#include <asm/alternative-asm.h>
14383
14384 .text
14385
14386@@ -678,6 +679,7 @@ done_hash:
14387
14388 # Restore Stack Pointer
14389 mov frame_RSPSAVE(%rsp), %rsp
14390+ pax_force_retaddr
14391 ret
14392 ENDPROC(sha512_transform_rorx)
14393
14394diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14395index fb56855..6edd768 100644
14396--- a/arch/x86/crypto/sha512-ssse3-asm.S
14397+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14398@@ -48,6 +48,7 @@
14399 ########################################################################
14400
14401 #include <linux/linkage.h>
14402+#include <asm/alternative-asm.h>
14403
14404 .text
14405
14406@@ -363,6 +364,7 @@ updateblock:
14407 mov frame_RSPSAVE(%rsp), %rsp
14408
14409 nowork:
14410+ pax_force_retaddr
14411 ret
14412 ENDPROC(sha512_transform_ssse3)
14413
14414diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14415index 0505813..b067311 100644
14416--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14417+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14418@@ -24,6 +24,7 @@
14419 */
14420
14421 #include <linux/linkage.h>
14422+#include <asm/alternative-asm.h>
14423 #include "glue_helper-asm-avx.S"
14424
14425 .file "twofish-avx-x86_64-asm_64.S"
14426@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14427 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14428 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14429
14430+ pax_force_retaddr
14431 ret;
14432 ENDPROC(__twofish_enc_blk8)
14433
14434@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14435 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14436 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14437
14438+ pax_force_retaddr
14439 ret;
14440 ENDPROC(__twofish_dec_blk8)
14441
14442@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14443
14444 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14445
14446+ pax_force_retaddr
14447 ret;
14448 ENDPROC(twofish_ecb_enc_8way)
14449
14450@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14451
14452 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14453
14454+ pax_force_retaddr
14455 ret;
14456 ENDPROC(twofish_ecb_dec_8way)
14457
14458@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14459 * %rdx: src
14460 */
14461
14462- pushq %r12;
14463+ pushq %r14;
14464
14465 movq %rsi, %r11;
14466- movq %rdx, %r12;
14467+ movq %rdx, %r14;
14468
14469 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14470
14471 call __twofish_dec_blk8;
14472
14473- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14474+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14475
14476- popq %r12;
14477+ popq %r14;
14478
14479+ pax_force_retaddr
14480 ret;
14481 ENDPROC(twofish_cbc_dec_8way)
14482
14483@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14484 * %rcx: iv (little endian, 128bit)
14485 */
14486
14487- pushq %r12;
14488+ pushq %r14;
14489
14490 movq %rsi, %r11;
14491- movq %rdx, %r12;
14492+ movq %rdx, %r14;
14493
14494 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14495 RD2, RX0, RX1, RY0);
14496
14497 call __twofish_enc_blk8;
14498
14499- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14500+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14501
14502- popq %r12;
14503+ popq %r14;
14504
14505+ pax_force_retaddr
14506 ret;
14507 ENDPROC(twofish_ctr_8way)
14508
14509@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14510 /* dst <= regs xor IVs(in dst) */
14511 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14512
14513+ pax_force_retaddr
14514 ret;
14515 ENDPROC(twofish_xts_enc_8way)
14516
14517@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14518 /* dst <= regs xor IVs(in dst) */
14519 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14520
14521+ pax_force_retaddr
14522 ret;
14523 ENDPROC(twofish_xts_dec_8way)
14524diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14525index 1c3b7ce..02f578d 100644
14526--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14527+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14528@@ -21,6 +21,7 @@
14529 */
14530
14531 #include <linux/linkage.h>
14532+#include <asm/alternative-asm.h>
14533
14534 .file "twofish-x86_64-asm-3way.S"
14535 .text
14536@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14537 popq %r13;
14538 popq %r14;
14539 popq %r15;
14540+ pax_force_retaddr
14541 ret;
14542
14543 .L__enc_xor3:
14544@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14545 popq %r13;
14546 popq %r14;
14547 popq %r15;
14548+ pax_force_retaddr
14549 ret;
14550 ENDPROC(__twofish_enc_blk_3way)
14551
14552@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14553 popq %r13;
14554 popq %r14;
14555 popq %r15;
14556+ pax_force_retaddr
14557 ret;
14558 ENDPROC(twofish_dec_blk_3way)
14559diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14560index a039d21..524b8b2 100644
14561--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14562+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14563@@ -22,6 +22,7 @@
14564
14565 #include <linux/linkage.h>
14566 #include <asm/asm-offsets.h>
14567+#include <asm/alternative-asm.h>
14568
14569 #define a_offset 0
14570 #define b_offset 4
14571@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14572
14573 popq R1
14574 movq $1,%rax
14575+ pax_force_retaddr
14576 ret
14577 ENDPROC(twofish_enc_blk)
14578
14579@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14580
14581 popq R1
14582 movq $1,%rax
14583+ pax_force_retaddr
14584 ret
14585 ENDPROC(twofish_dec_blk)
14586diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14587index ae6aad1..719d6d9 100644
14588--- a/arch/x86/ia32/ia32_aout.c
14589+++ b/arch/x86/ia32/ia32_aout.c
14590@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14591 unsigned long dump_start, dump_size;
14592 struct user32 dump;
14593
14594+ memset(&dump, 0, sizeof(dump));
14595+
14596 fs = get_fs();
14597 set_fs(KERNEL_DS);
14598 has_dumped = 1;
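[editor's note] The single added line above is an information-leak fix: aout_core_dump() fills a struct user32 on the kernel stack and writes it to the core file, but not every byte of the struct (fields it skips, plus compiler padding) gets assigned, so without the memset the dump would carry whatever stale kernel stack data happened to sit there. A minimal sketch of the failure mode, with a stand-in struct:

    #include <string.h>

    struct user32_demo {            /* stand-in for struct user32 */
        int u_tsize;
        char pad[12];               /* padding / never-written fields */
    };

    void fill_dump(struct user32_demo *dump)
    {
        /* Without this, pad[] reaches the core file holding whatever
         * was on the kernel stack -- an infoleak to unprivileged users. */
        memset(dump, 0, sizeof(*dump));
        dump->u_tsize = 1;
    }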
14599diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14600index f9e181a..300544c 100644
14601--- a/arch/x86/ia32/ia32_signal.c
14602+++ b/arch/x86/ia32/ia32_signal.c
14603@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14604 if (__get_user(set.sig[0], &frame->sc.oldmask)
14605 || (_COMPAT_NSIG_WORDS > 1
14606 && __copy_from_user((((char *) &set.sig) + 4),
14607- &frame->extramask,
14608+ frame->extramask,
14609 sizeof(frame->extramask))))
14610 goto badframe;
14611
14612@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14613 sp -= frame_size;
14614 /* Align the stack pointer according to the i386 ABI,
14615 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14616- sp = ((sp + 4) & -16ul) - 4;
14617+ sp = ((sp - 12) & -16ul) - 4;
14618 return (void __user *) sp;
14619 }
14620
14621@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14622 } else {
14623 /* Return stub is in 32bit vsyscall page */
14624 if (current->mm->context.vdso)
14625- restorer = current->mm->context.vdso +
14626- selected_vdso32->sym___kernel_sigreturn;
14627+ restorer = (void __force_user *)(current->mm->context.vdso +
14628+ selected_vdso32->sym___kernel_sigreturn);
14629 else
14630- restorer = &frame->retcode;
14631+ restorer = frame->retcode;
14632 }
14633
14634 put_user_try {
14635@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14636 * These are actually not used anymore, but left because some
14637 * gdb versions depend on them as a marker.
14638 */
14639- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14640+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14641 } put_user_catch(err);
14642
14643 if (err)
14644@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14645 0xb8,
14646 __NR_ia32_rt_sigreturn,
14647 0x80cd,
14648- 0,
14649+ 0
14650 };
14651
14652 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14653@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14654
14655 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14656 restorer = ksig->ka.sa.sa_restorer;
14657+ else if (current->mm->context.vdso)
14658+ /* Return stub is in 32bit vsyscall page */
14659+ restorer = (void __force_user *)(current->mm->context.vdso +
14660+ selected_vdso32->sym___kernel_rt_sigreturn);
14661 else
14662- restorer = current->mm->context.vdso +
14663- selected_vdso32->sym___kernel_rt_sigreturn;
14664+ restorer = frame->retcode;
14665 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14666
14667 /*
14668 * Not actually used anymore, but left because some gdb
14669 * versions need it.
14670 */
14671- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14672+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14673 } put_user_catch(err);
14674
14675 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
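[editor's note] Two of the ia32_signal.c changes above deserve a note. get_sigframe() must return a stack pointer such that, on entry to the handler, ((sp + 4) & 15) == 0 per the i386 ABI comment in the hunk; both the old ((sp + 4) & -16ul) - 4 and the new ((sp - 12) & -16ul) - 4 satisfy that, but the new form additionally guarantees the frame lands strictly below the incoming sp (the old one returned sp unchanged when sp was already aligned). The restorer changes recast the vdso arithmetic into the patch's __force_user-annotated form. A standalone check of the alignment claim:

    #include <assert.h>

    int main(void)
    {
        for (unsigned long sp = 0x1000; sp < 0x1040; sp++) {
            unsigned long nsp = ((sp - 12) & -16ul) - 4;
            assert(((nsp + 4) & 15) == 0);  /* i386 ABI entry alignment */
            assert(nsp < sp);               /* frame strictly below sp */
        }
        return 0;
    }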
14676diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14677index 82e8a1d..4e998d5 100644
14678--- a/arch/x86/ia32/ia32entry.S
14679+++ b/arch/x86/ia32/ia32entry.S
14680@@ -15,8 +15,10 @@
14681 #include <asm/irqflags.h>
14682 #include <asm/asm.h>
14683 #include <asm/smap.h>
14684+#include <asm/pgtable.h>
14685 #include <linux/linkage.h>
14686 #include <linux/err.h>
14687+#include <asm/alternative-asm.h>
14688
14689 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14690 #include <linux/elf-em.h>
14691@@ -62,12 +64,12 @@
14692 */
14693 .macro LOAD_ARGS32 offset, _r9=0
14694 .if \_r9
14695- movl \offset+16(%rsp),%r9d
14696+ movl \offset+R9(%rsp),%r9d
14697 .endif
14698- movl \offset+40(%rsp),%ecx
14699- movl \offset+48(%rsp),%edx
14700- movl \offset+56(%rsp),%esi
14701- movl \offset+64(%rsp),%edi
14702+ movl \offset+RCX(%rsp),%ecx
14703+ movl \offset+RDX(%rsp),%edx
14704+ movl \offset+RSI(%rsp),%esi
14705+ movl \offset+RDI(%rsp),%edi
14706 movl %eax,%eax /* zero extension */
14707 .endm
14708
14709@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14710 ENDPROC(native_irq_enable_sysexit)
14711 #endif
14712
14713+ .macro pax_enter_kernel_user
14714+ pax_set_fptr_mask
14715+#ifdef CONFIG_PAX_MEMORY_UDEREF
14716+ call pax_enter_kernel_user
14717+#endif
14718+ .endm
14719+
14720+ .macro pax_exit_kernel_user
14721+#ifdef CONFIG_PAX_MEMORY_UDEREF
14722+ call pax_exit_kernel_user
14723+#endif
14724+#ifdef CONFIG_PAX_RANDKSTACK
14725+ pushq %rax
14726+ pushq %r11
14727+ call pax_randomize_kstack
14728+ popq %r11
14729+ popq %rax
14730+#endif
14731+ .endm
14732+
14733+ .macro pax_erase_kstack
14734+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14735+ call pax_erase_kstack
14736+#endif
14737+ .endm
14738+
14739 /*
14740 * 32bit SYSENTER instruction entry.
14741 *
14742@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14743 CFI_REGISTER rsp,rbp
14744 SWAPGS_UNSAFE_STACK
14745 movq PER_CPU_VAR(kernel_stack), %rsp
14746- addq $(KERNEL_STACK_OFFSET),%rsp
14747- /*
14748- * No need to follow this irqs on/off section: the syscall
14749- * disabled irqs, here we enable it straight after entry:
14750- */
14751- ENABLE_INTERRUPTS(CLBR_NONE)
14752 movl %ebp,%ebp /* zero extension */
14753 pushq_cfi $__USER32_DS
14754 /*CFI_REL_OFFSET ss,0*/
14755@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
14756 CFI_REL_OFFSET rsp,0
14757 pushfq_cfi
14758 /*CFI_REL_OFFSET rflags,0*/
14759- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14760- CFI_REGISTER rip,r10
14761+ orl $X86_EFLAGS_IF,(%rsp)
14762+ GET_THREAD_INFO(%r11)
14763+ movl TI_sysenter_return(%r11), %r11d
14764+ CFI_REGISTER rip,r11
14765 pushq_cfi $__USER32_CS
14766 /*CFI_REL_OFFSET cs,0*/
14767 movl %eax, %eax
14768- pushq_cfi %r10
14769+ pushq_cfi %r11
14770 CFI_REL_OFFSET rip,0
14771 pushq_cfi %rax
14772 cld
14773 SAVE_ARGS 0,1,0
14774+ pax_enter_kernel_user
14775+
14776+#ifdef CONFIG_PAX_RANDKSTACK
14777+ pax_erase_kstack
14778+#endif
14779+
14780+ /*
14781+ * No need to follow this irqs on/off section: the syscall
14782+ * disabled irqs, here we enable it straight after entry:
14783+ */
14784+ ENABLE_INTERRUPTS(CLBR_NONE)
14785 /* no need to do an access_ok check here because rbp has been
14786 32bit zero extended */
14787+
14788+#ifdef CONFIG_PAX_MEMORY_UDEREF
14789+ addq pax_user_shadow_base,%rbp
14790+ ASM_PAX_OPEN_USERLAND
14791+#endif
14792+
14793 ASM_STAC
14794 1: movl (%rbp),%ebp
14795 _ASM_EXTABLE(1b,ia32_badarg)
14796 ASM_CLAC
14797
14798+#ifdef CONFIG_PAX_MEMORY_UDEREF
14799+ ASM_PAX_CLOSE_USERLAND
14800+#endif
14801+
14802 /*
14803 * Sysenter doesn't filter flags, so we need to clear NT
14804 * ourselves. To save a few cycles, we can check whether
14805@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
14806 jnz sysenter_fix_flags
14807 sysenter_flags_fixed:
14808
14809- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14810- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14811+ GET_THREAD_INFO(%r11)
14812+ orl $TS_COMPAT,TI_status(%r11)
14813+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14814 CFI_REMEMBER_STATE
14815 jnz sysenter_tracesys
14816 cmpq $(IA32_NR_syscalls-1),%rax
14817@@ -172,15 +218,18 @@ sysenter_do_call:
14818 sysenter_dispatch:
14819 call *ia32_sys_call_table(,%rax,8)
14820 movq %rax,RAX-ARGOFFSET(%rsp)
14821+ GET_THREAD_INFO(%r11)
14822 DISABLE_INTERRUPTS(CLBR_NONE)
14823 TRACE_IRQS_OFF
14824- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14825+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14826 jnz sysexit_audit
14827 sysexit_from_sys_call:
14828- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14829+ pax_exit_kernel_user
14830+ pax_erase_kstack
14831+ andl $~TS_COMPAT,TI_status(%r11)
14832 /* clear IF, that popfq doesn't enable interrupts early */
14833- andl $~0x200,EFLAGS-R11(%rsp)
14834- movl RIP-R11(%rsp),%edx /* User %eip */
14835+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
14836+ movl RIP(%rsp),%edx /* User %eip */
14837 CFI_REGISTER rip,rdx
14838 RESTORE_ARGS 0,24,0,0,0,0
14839 xorq %r8,%r8
14840@@ -205,6 +254,9 @@ sysexit_from_sys_call:
14841 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14842 movl %eax,%edi /* 1st arg: syscall number */
14843 call __audit_syscall_entry
14844+
14845+ pax_erase_kstack
14846+
14847 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14848 cmpq $(IA32_NR_syscalls-1),%rax
14849 ja ia32_badsys
14850@@ -216,7 +268,7 @@ sysexit_from_sys_call:
14851 .endm
14852
14853 .macro auditsys_exit exit
14854- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14855+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14856 jnz ia32_ret_from_sys_call
14857 TRACE_IRQS_ON
14858 ENABLE_INTERRUPTS(CLBR_NONE)
14859@@ -227,11 +279,12 @@ sysexit_from_sys_call:
14860 1: setbe %al /* 1 if error, 0 if not */
14861 movzbl %al,%edi /* zero-extend that into %edi */
14862 call __audit_syscall_exit
14863+ GET_THREAD_INFO(%r11)
14864 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14865 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14866 DISABLE_INTERRUPTS(CLBR_NONE)
14867 TRACE_IRQS_OFF
14868- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14869+ testl %edi,TI_flags(%r11)
14870 jz \exit
14871 CLEAR_RREGS -ARGOFFSET
14872 jmp int_with_check
14873@@ -253,7 +306,7 @@ sysenter_fix_flags:
14874
14875 sysenter_tracesys:
14876 #ifdef CONFIG_AUDITSYSCALL
14877- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14878+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14879 jz sysenter_auditsys
14880 #endif
14881 SAVE_REST
14882@@ -265,6 +318,9 @@ sysenter_tracesys:
14883 RESTORE_REST
14884 cmpq $(IA32_NR_syscalls-1),%rax
14885 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14886+
14887+ pax_erase_kstack
14888+
14889 jmp sysenter_do_call
14890 CFI_ENDPROC
14891 ENDPROC(ia32_sysenter_target)
14892@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14893 ENTRY(ia32_cstar_target)
14894 CFI_STARTPROC32 simple
14895 CFI_SIGNAL_FRAME
14896- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14897+ CFI_DEF_CFA rsp,0
14898 CFI_REGISTER rip,rcx
14899 /*CFI_REGISTER rflags,r11*/
14900 SWAPGS_UNSAFE_STACK
14901 movl %esp,%r8d
14902 CFI_REGISTER rsp,r8
14903 movq PER_CPU_VAR(kernel_stack),%rsp
14904+ SAVE_ARGS 8*6,0,0
14905+ pax_enter_kernel_user
14906+
14907+#ifdef CONFIG_PAX_RANDKSTACK
14908+ pax_erase_kstack
14909+#endif
14910+
14911 /*
14912 * No need to follow this irqs on/off section: the syscall
14913 * disabled irqs and here we enable it straight after entry:
14914 */
14915 ENABLE_INTERRUPTS(CLBR_NONE)
14916- SAVE_ARGS 8,0,0
14917 movl %eax,%eax /* zero extension */
14918 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14919 movq %rcx,RIP-ARGOFFSET(%rsp)
14920@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14921 /* no need to do an access_ok check here because r8 has been
14922 32bit zero extended */
14923 /* hardware stack frame is complete now */
14924+
14925+#ifdef CONFIG_PAX_MEMORY_UDEREF
14926+ ASM_PAX_OPEN_USERLAND
14927+ movq pax_user_shadow_base,%r8
14928+ addq RSP-ARGOFFSET(%rsp),%r8
14929+#endif
14930+
14931 ASM_STAC
14932 1: movl (%r8),%r9d
14933 _ASM_EXTABLE(1b,ia32_badarg)
14934 ASM_CLAC
14935- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14936- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14937+
14938+#ifdef CONFIG_PAX_MEMORY_UDEREF
14939+ ASM_PAX_CLOSE_USERLAND
14940+#endif
14941+
14942+ GET_THREAD_INFO(%r11)
14943+ orl $TS_COMPAT,TI_status(%r11)
14944+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14945 CFI_REMEMBER_STATE
14946 jnz cstar_tracesys
14947 cmpq $IA32_NR_syscalls-1,%rax
14948@@ -335,13 +410,16 @@ cstar_do_call:
14949 cstar_dispatch:
14950 call *ia32_sys_call_table(,%rax,8)
14951 movq %rax,RAX-ARGOFFSET(%rsp)
14952+ GET_THREAD_INFO(%r11)
14953 DISABLE_INTERRUPTS(CLBR_NONE)
14954 TRACE_IRQS_OFF
14955- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14956+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14957 jnz sysretl_audit
14958 sysretl_from_sys_call:
14959- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14960- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14961+ pax_exit_kernel_user
14962+ pax_erase_kstack
14963+ andl $~TS_COMPAT,TI_status(%r11)
14964+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14965 movl RIP-ARGOFFSET(%rsp),%ecx
14966 CFI_REGISTER rip,rcx
14967 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14968@@ -368,7 +446,7 @@ sysretl_audit:
14969
14970 cstar_tracesys:
14971 #ifdef CONFIG_AUDITSYSCALL
14972- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14973+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14974 jz cstar_auditsys
14975 #endif
14976 xchgl %r9d,%ebp
14977@@ -382,11 +460,19 @@ cstar_tracesys:
14978 xchgl %ebp,%r9d
14979 cmpq $(IA32_NR_syscalls-1),%rax
14980 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14981+
14982+ pax_erase_kstack
14983+
14984 jmp cstar_do_call
14985 END(ia32_cstar_target)
14986
14987 ia32_badarg:
14988 ASM_CLAC
14989+
14990+#ifdef CONFIG_PAX_MEMORY_UDEREF
14991+ ASM_PAX_CLOSE_USERLAND
14992+#endif
14993+
14994 movq $-EFAULT,%rax
14995 jmp ia32_sysret
14996 CFI_ENDPROC
14997@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
14998 CFI_REL_OFFSET rip,RIP-RIP
14999 PARAVIRT_ADJUST_EXCEPTION_FRAME
15000 SWAPGS
15001- /*
15002- * No need to follow this irqs on/off section: the syscall
15003- * disabled irqs and here we enable it straight after entry:
15004- */
15005- ENABLE_INTERRUPTS(CLBR_NONE)
15006 movl %eax,%eax
15007 pushq_cfi %rax
15008 cld
15009 /* note the registers are not zero extended to the sf.
15010 this could be a problem. */
15011 SAVE_ARGS 0,1,0
15012- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15013- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15014+ pax_enter_kernel_user
15015+
15016+#ifdef CONFIG_PAX_RANDKSTACK
15017+ pax_erase_kstack
15018+#endif
15019+
15020+ /*
15021+ * No need to follow this irqs on/off section: the syscall
15022+ * disabled irqs and here we enable it straight after entry:
15023+ */
15024+ ENABLE_INTERRUPTS(CLBR_NONE)
15025+ GET_THREAD_INFO(%r11)
15026+ orl $TS_COMPAT,TI_status(%r11)
15027+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15028 jnz ia32_tracesys
15029 cmpq $(IA32_NR_syscalls-1),%rax
15030 ja ia32_badsys
15031@@ -458,6 +551,9 @@ ia32_tracesys:
15032 RESTORE_REST
15033 cmpq $(IA32_NR_syscalls-1),%rax
15034 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15035+
15036+ pax_erase_kstack
15037+
15038 jmp ia32_do_call
15039 END(ia32_syscall)
15040
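An aside on the UDEREF sequence this hunk adds around the "movl (%r8),%r9d" user read: with userland unmapped, the kernel first rebases the user pointer into a shadow area before dereferencing it. A minimal C rendering of that three-instruction pattern, with the symbol name taken from the asm above and everything else illustrative:

    /* Sketch only: mirrors "movq pax_user_shadow_base,%r8;
     * addq RSP-ARGOFFSET(%rsp),%r8; movl (%r8),%r9d". */
    extern unsigned long pax_user_shadow_base;  /* set up by the PaX mm code (assumed) */

    static inline unsigned int read_user_word_via_shadow(unsigned long uaddr)
    {
            return *(unsigned int *)(pax_user_shadow_base + uaddr);
    }

The ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND bracket is what makes the shadow window accessible at all, which is also why the ia32_badarg fault path above has to close it again.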
15041diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15042index 8e0ceec..af13504 100644
15043--- a/arch/x86/ia32/sys_ia32.c
15044+++ b/arch/x86/ia32/sys_ia32.c
15045@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15046 */
15047 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15048 {
15049- typeof(ubuf->st_uid) uid = 0;
15050- typeof(ubuf->st_gid) gid = 0;
15051+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15052+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15053 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15054 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15055 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
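The cp_stat64 change swaps a typeof through the __user pointer ubuf for the null-pointer-constant form, presumably so the type query never goes through a userland-qualified expression that the PaX/size_overflow tooling would trip over. The idiom itself is standard GCC C and dereferences nothing, exactly like offsetof(); a standalone sketch with a hypothetical struct:

    #include <stdio.h>

    struct stat64_demo { unsigned int st_uid; unsigned int st_gid; };

    int main(void)
    {
            /* Compile-time type query: no object at address 0 is touched. */
            typeof(((struct stat64_demo *)0)->st_uid) uid = 0;
            printf("%zu\n", sizeof(uid));   /* prints 4 here */
            return 0;
    }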
15056diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15057index 372231c..51b537d 100644
15058--- a/arch/x86/include/asm/alternative-asm.h
15059+++ b/arch/x86/include/asm/alternative-asm.h
15060@@ -18,6 +18,45 @@
15061 .endm
15062 #endif
15063
15064+#ifdef KERNEXEC_PLUGIN
15065+ .macro pax_force_retaddr_bts rip=0
15066+ btsq $63,\rip(%rsp)
15067+ .endm
15068+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15069+ .macro pax_force_retaddr rip=0, reload=0
15070+ btsq $63,\rip(%rsp)
15071+ .endm
15072+ .macro pax_force_fptr ptr
15073+ btsq $63,\ptr
15074+ .endm
15075+ .macro pax_set_fptr_mask
15076+ .endm
15077+#endif
15078+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15079+ .macro pax_force_retaddr rip=0, reload=0
15080+ .if \reload
15081+ pax_set_fptr_mask
15082+ .endif
15083+ orq %r12,\rip(%rsp)
15084+ .endm
15085+ .macro pax_force_fptr ptr
15086+ orq %r12,\ptr
15087+ .endm
15088+ .macro pax_set_fptr_mask
15089+ movabs $0x8000000000000000,%r12
15090+ .endm
15091+#endif
15092+#else
15093+ .macro pax_force_retaddr rip=0, reload=0
15094+ .endm
15095+ .macro pax_force_fptr ptr
15096+ .endm
15097+ .macro pax_force_retaddr_bts rip=0
15098+ .endm
15099+ .macro pax_set_fptr_mask
15100+ .endm
15101+#endif
15102+
15103 .macro altinstruction_entry orig alt feature orig_len alt_len
15104 .long \orig - .
15105 .long \alt - .
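These macros implement the KERNEXEC return-address protection: bit 63 of a saved return address is forced on, either with btsq or by OR-ing a mask kept in %r12. Legitimate kernel text already lives in the canonical upper half (bit 63 set), so the operation is a no-op there; a return address overwritten to point at userland (bit 63 clear) becomes non-canonical and faults on ret instead of executing. A one-line C rendering of the idea, names illustrative:

    static inline unsigned long kernexec_tag_retaddr_sketch(unsigned long retaddr)
    {
            return retaddr | (1UL << 63);   /* what btsq $63,\rip(%rsp) does in place */
    }

The _OR method preloads the mask once via movabs into %r12 (pax_set_fptr_mask) and reuses it, which is why the calling.h hunk later in this patch starts saving and restoring %r12 eagerly.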
15106diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15107index 473bdbe..b1e3377 100644
15108--- a/arch/x86/include/asm/alternative.h
15109+++ b/arch/x86/include/asm/alternative.h
15110@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15111 ".pushsection .discard,\"aw\",@progbits\n" \
15112 DISCARD_ENTRY(1) \
15113 ".popsection\n" \
15114- ".pushsection .altinstr_replacement, \"ax\"\n" \
15115+ ".pushsection .altinstr_replacement, \"a\"\n" \
15116 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15117 ".popsection"
15118
15119@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15120 DISCARD_ENTRY(1) \
15121 DISCARD_ENTRY(2) \
15122 ".popsection\n" \
15123- ".pushsection .altinstr_replacement, \"ax\"\n" \
15124+ ".pushsection .altinstr_replacement, \"a\"\n" \
15125 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15126 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15127 ".popsection"
15128diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15129index 465b309..ab7e51f 100644
15130--- a/arch/x86/include/asm/apic.h
15131+++ b/arch/x86/include/asm/apic.h
15132@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15133
15134 #ifdef CONFIG_X86_LOCAL_APIC
15135
15136-extern unsigned int apic_verbosity;
15137+extern int apic_verbosity;
15138 extern int local_apic_timer_c2_ok;
15139
15140 extern int disable_apic;
15141diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15142index 20370c6..a2eb9b0 100644
15143--- a/arch/x86/include/asm/apm.h
15144+++ b/arch/x86/include/asm/apm.h
15145@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15146 __asm__ __volatile__(APM_DO_ZERO_SEGS
15147 "pushl %%edi\n\t"
15148 "pushl %%ebp\n\t"
15149- "lcall *%%cs:apm_bios_entry\n\t"
15150+ "lcall *%%ss:apm_bios_entry\n\t"
15151 "setc %%al\n\t"
15152 "popl %%ebp\n\t"
15153 "popl %%edi\n\t"
15154@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15155 __asm__ __volatile__(APM_DO_ZERO_SEGS
15156 "pushl %%edi\n\t"
15157 "pushl %%ebp\n\t"
15158- "lcall *%%cs:apm_bios_entry\n\t"
15159+ "lcall *%%ss:apm_bios_entry\n\t"
15160 "setc %%bl\n\t"
15161 "popl %%ebp\n\t"
15162 "popl %%edi\n\t"
15163diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15164index 5e5cd12..51cdc93 100644
15165--- a/arch/x86/include/asm/atomic.h
15166+++ b/arch/x86/include/asm/atomic.h
15167@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15168 }
15169
15170 /**
15171+ * atomic_read_unchecked - read atomic variable
15172+ * @v: pointer of type atomic_unchecked_t
15173+ *
15174+ * Atomically reads the value of @v.
15175+ */
15176+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15177+{
15178+ return ACCESS_ONCE((v)->counter);
15179+}
15180+
15181+/**
15182 * atomic_set - set atomic variable
15183 * @v: pointer of type atomic_t
15184 * @i: required value
15185@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15186 }
15187
15188 /**
15189+ * atomic_set_unchecked - set atomic variable
15190+ * @v: pointer of type atomic_unchecked_t
15191+ * @i: required value
15192+ *
15193+ * Atomically sets the value of @v to @i.
15194+ */
15195+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15196+{
15197+ v->counter = i;
15198+}
15199+
15200+/**
15201 * atomic_add - add integer to atomic variable
15202 * @i: integer value to add
15203 * @v: pointer of type atomic_t
15204@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15205 */
15206 static inline void atomic_add(int i, atomic_t *v)
15207 {
15208- asm volatile(LOCK_PREFIX "addl %1,%0"
15209+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15210+
15211+#ifdef CONFIG_PAX_REFCOUNT
15212+ "jno 0f\n"
15213+ LOCK_PREFIX "subl %1,%0\n"
15214+ "int $4\n0:\n"
15215+ _ASM_EXTABLE(0b, 0b)
15216+#endif
15217+
15218+ : "+m" (v->counter)
15219+ : "ir" (i));
15220+}
15221+
15222+/**
15223+ * atomic_add_unchecked - add integer to atomic variable
15224+ * @i: integer value to add
15225+ * @v: pointer of type atomic_unchecked_t
15226+ *
15227+ * Atomically adds @i to @v.
15228+ */
15229+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15230+{
15231+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15232 : "+m" (v->counter)
15233 : "ir" (i));
15234 }
15235@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15236 */
15237 static inline void atomic_sub(int i, atomic_t *v)
15238 {
15239- asm volatile(LOCK_PREFIX "subl %1,%0"
15240+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15241+
15242+#ifdef CONFIG_PAX_REFCOUNT
15243+ "jno 0f\n"
15244+ LOCK_PREFIX "addl %1,%0\n"
15245+ "int $4\n0:\n"
15246+ _ASM_EXTABLE(0b, 0b)
15247+#endif
15248+
15249+ : "+m" (v->counter)
15250+ : "ir" (i));
15251+}
15252+
15253+/**
15254+ * atomic_sub_unchecked - subtract integer from atomic variable
15255+ * @i: integer value to subtract
15256+ * @v: pointer of type atomic_unchecked_t
15257+ *
15258+ * Atomically subtracts @i from @v.
15259+ */
15260+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15261+{
15262+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15263 : "+m" (v->counter)
15264 : "ir" (i));
15265 }
15266@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15267 */
15268 static inline int atomic_sub_and_test(int i, atomic_t *v)
15269 {
15270- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15271+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15272 }
15273
15274 /**
15275@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15276 */
15277 static inline void atomic_inc(atomic_t *v)
15278 {
15279- asm volatile(LOCK_PREFIX "incl %0"
15280+ asm volatile(LOCK_PREFIX "incl %0\n"
15281+
15282+#ifdef CONFIG_PAX_REFCOUNT
15283+ "jno 0f\n"
15284+ LOCK_PREFIX "decl %0\n"
15285+ "int $4\n0:\n"
15286+ _ASM_EXTABLE(0b, 0b)
15287+#endif
15288+
15289+ : "+m" (v->counter));
15290+}
15291+
15292+/**
15293+ * atomic_inc_unchecked - increment atomic variable
15294+ * @v: pointer of type atomic_unchecked_t
15295+ *
15296+ * Atomically increments @v by 1.
15297+ */
15298+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15299+{
15300+ asm volatile(LOCK_PREFIX "incl %0\n"
15301 : "+m" (v->counter));
15302 }
15303
15304@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15305 */
15306 static inline void atomic_dec(atomic_t *v)
15307 {
15308- asm volatile(LOCK_PREFIX "decl %0"
15309+ asm volatile(LOCK_PREFIX "decl %0\n"
15310+
15311+#ifdef CONFIG_PAX_REFCOUNT
15312+ "jno 0f\n"
15313+ LOCK_PREFIX "incl %0\n"
15314+ "int $4\n0:\n"
15315+ _ASM_EXTABLE(0b, 0b)
15316+#endif
15317+
15318+ : "+m" (v->counter));
15319+}
15320+
15321+/**
15322+ * atomic_dec_unchecked - decrement atomic variable
15323+ * @v: pointer of type atomic_unchecked_t
15324+ *
15325+ * Atomically decrements @v by 1.
15326+ */
15327+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15328+{
15329+ asm volatile(LOCK_PREFIX "decl %0\n"
15330 : "+m" (v->counter));
15331 }
15332
15333@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15334 */
15335 static inline int atomic_dec_and_test(atomic_t *v)
15336 {
15337- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15338+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15339 }
15340
15341 /**
15342@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15343 */
15344 static inline int atomic_inc_and_test(atomic_t *v)
15345 {
15346- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15347+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15348+}
15349+
15350+/**
15351+ * atomic_inc_and_test_unchecked - increment and test
15352+ * @v: pointer of type atomic_unchecked_t
15353+ *
15354+ * Atomically increments @v by 1
15355+ * and returns true if the result is zero, or false for all
15356+ * other cases.
15357+ */
15358+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15359+{
15360+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15361 }
15362
15363 /**
15364@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15365 */
15366 static inline int atomic_add_negative(int i, atomic_t *v)
15367 {
15368- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15369+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15370 }
15371
15372 /**
15373@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15374 *
15375 * Atomically adds @i to @v and returns @i + @v
15376 */
15377-static inline int atomic_add_return(int i, atomic_t *v)
15378+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15379+{
15380+ return i + xadd_check_overflow(&v->counter, i);
15381+}
15382+
15383+/**
15384+ * atomic_add_return_unchecked - add integer and return
15385+ * @i: integer value to add
15386+ * @v: pointer of type atomic_unchecked_t
15387+ *
15388+ * Atomically adds @i to @v and returns @i + @v
15389+ */
15390+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15391 {
15392 return i + xadd(&v->counter, i);
15393 }
15394@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15395 *
15396 * Atomically subtracts @i from @v and returns @v - @i
15397 */
15398-static inline int atomic_sub_return(int i, atomic_t *v)
15399+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15400 {
15401 return atomic_add_return(-i, v);
15402 }
15403
15404 #define atomic_inc_return(v) (atomic_add_return(1, v))
15405+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15406+{
15407+ return atomic_add_return_unchecked(1, v);
15408+}
15409 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15410
15411-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15412+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15413+{
15414+ return cmpxchg(&v->counter, old, new);
15415+}
15416+
15417+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15418 {
15419 return cmpxchg(&v->counter, old, new);
15420 }
15421@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15422 return xchg(&v->counter, new);
15423 }
15424
15425+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15426+{
15427+ return xchg(&v->counter, new);
15428+}
15429+
15430 /**
15431 * __atomic_add_unless - add unless the number is already a given value
15432 * @v: pointer of type atomic_t
15433@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15434 */
15435 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15436 {
15437- int c, old;
15438+ int c, old, new;
15439 c = atomic_read(v);
15440 for (;;) {
15441- if (unlikely(c == (u)))
15442+ if (unlikely(c == u))
15443 break;
15444- old = atomic_cmpxchg((v), c, c + (a));
15445+
15446+ asm volatile("addl %2,%0\n"
15447+
15448+#ifdef CONFIG_PAX_REFCOUNT
15449+ "jno 0f\n"
15450+ "subl %2,%0\n"
15451+ "int $4\n0:\n"
15452+ _ASM_EXTABLE(0b, 0b)
15453+#endif
15454+
15455+ : "=r" (new)
15456+ : "0" (c), "ir" (a));
15457+
15458+ old = atomic_cmpxchg(v, c, new);
15459 if (likely(old == c))
15460 break;
15461 c = old;
15462@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15463 }
15464
15465 /**
15466+ * atomic_inc_not_zero_hint - increment if not null
15467+ * @v: pointer of type atomic_t
15468+ * @hint: probable value of the atomic before the increment
15469+ *
15470+ * This version of atomic_inc_not_zero() gives a hint of probable
15471+ * value of the atomic. This helps processor to not read the memory
15472+ * before doing the atomic read/modify/write cycle, lowering
15473+ * number of bus transactions on some arches.
15474+ *
15475+ * Returns: 0 if increment was not done, 1 otherwise.
15476+ */
15477+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15478+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15479+{
15480+ int val, c = hint, new;
15481+
15482+ /* sanity test, should be removed by compiler if hint is a constant */
15483+ if (!hint)
15484+ return __atomic_add_unless(v, 1, 0);
15485+
15486+ do {
15487+ asm volatile("incl %0\n"
15488+
15489+#ifdef CONFIG_PAX_REFCOUNT
15490+ "jno 0f\n"
15491+ "decl %0\n"
15492+ "int $4\n0:\n"
15493+ _ASM_EXTABLE(0b, 0b)
15494+#endif
15495+
15496+ : "=r" (new)
15497+ : "0" (c));
15498+
15499+ val = atomic_cmpxchg(v, c, new);
15500+ if (val == c)
15501+ return 1;
15502+ c = val;
15503+ } while (c);
15504+
15505+ return 0;
15506+}
15507+
15508+/**
15509 * atomic_inc_short - increment of a short integer
15510 * @v: pointer to type int
15511 *
15512@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15513 }
15514
15515 /* These are x86-specific, used by some header files */
15516-#define atomic_clear_mask(mask, addr) \
15517- asm volatile(LOCK_PREFIX "andl %0,%1" \
15518- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15519+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15520+{
15521+ asm volatile(LOCK_PREFIX "andl %1,%0"
15522+ : "+m" (v->counter)
15523+ : "r" (~(mask))
15524+ : "memory");
15525+}
15526
15527-#define atomic_set_mask(mask, addr) \
15528- asm volatile(LOCK_PREFIX "orl %0,%1" \
15529- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15530- : "memory")
15531+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15532+{
15533+ asm volatile(LOCK_PREFIX "andl %1,%0"
15534+ : "+m" (v->counter)
15535+ : "r" (~(mask))
15536+ : "memory");
15537+}
15538+
15539+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15540+{
15541+ asm volatile(LOCK_PREFIX "orl %1,%0"
15542+ : "+m" (v->counter)
15543+ : "r" (mask)
15544+ : "memory");
15545+}
15546+
15547+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15548+{
15549+ asm volatile(LOCK_PREFIX "orl %1,%0"
15550+ : "+m" (v->counter)
15551+ : "r" (mask)
15552+ : "memory");
15553+}
15554
15555 #ifdef CONFIG_X86_32
15556 # include <asm/atomic64_32.h>
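The pattern repeated throughout this header is the PAX_REFCOUNT overflow trap: perform the locked operation, jno past the fixup if the overflow flag is clear, otherwise undo the operation and raise int $4 (#OF), whose handler treats the event as a refcount attack. A userspace analogue using compiler builtins, a sketch rather than the kernel code (the kernel tests the flags of the locked instruction itself, so it has no recompute window):

    #include <stdlib.h>

    static void atomic_add_checked_sketch(int *counter, int i)
    {
            int old = __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST);
            int sum;

            if (__builtin_add_overflow(old, i, &sum)) {
                    __atomic_fetch_sub(counter, i, __ATOMIC_SEQ_CST);  /* the "subl" undo */
                    abort();                                           /* stands in for int $4 */
            }
    }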
15557diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15558index b154de7..bf18a5a 100644
15559--- a/arch/x86/include/asm/atomic64_32.h
15560+++ b/arch/x86/include/asm/atomic64_32.h
15561@@ -12,6 +12,14 @@ typedef struct {
15562 u64 __aligned(8) counter;
15563 } atomic64_t;
15564
15565+#ifdef CONFIG_PAX_REFCOUNT
15566+typedef struct {
15567+ u64 __aligned(8) counter;
15568+} atomic64_unchecked_t;
15569+#else
15570+typedef atomic64_t atomic64_unchecked_t;
15571+#endif
15572+
15573 #define ATOMIC64_INIT(val) { (val) }
15574
15575 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15576@@ -37,21 +45,31 @@ typedef struct {
15577 ATOMIC64_DECL_ONE(sym##_386)
15578
15579 ATOMIC64_DECL_ONE(add_386);
15580+ATOMIC64_DECL_ONE(add_unchecked_386);
15581 ATOMIC64_DECL_ONE(sub_386);
15582+ATOMIC64_DECL_ONE(sub_unchecked_386);
15583 ATOMIC64_DECL_ONE(inc_386);
15584+ATOMIC64_DECL_ONE(inc_unchecked_386);
15585 ATOMIC64_DECL_ONE(dec_386);
15586+ATOMIC64_DECL_ONE(dec_unchecked_386);
15587 #endif
15588
15589 #define alternative_atomic64(f, out, in...) \
15590 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15591
15592 ATOMIC64_DECL(read);
15593+ATOMIC64_DECL(read_unchecked);
15594 ATOMIC64_DECL(set);
15595+ATOMIC64_DECL(set_unchecked);
15596 ATOMIC64_DECL(xchg);
15597 ATOMIC64_DECL(add_return);
15598+ATOMIC64_DECL(add_return_unchecked);
15599 ATOMIC64_DECL(sub_return);
15600+ATOMIC64_DECL(sub_return_unchecked);
15601 ATOMIC64_DECL(inc_return);
15602+ATOMIC64_DECL(inc_return_unchecked);
15603 ATOMIC64_DECL(dec_return);
15604+ATOMIC64_DECL(dec_return_unchecked);
15605 ATOMIC64_DECL(dec_if_positive);
15606 ATOMIC64_DECL(inc_not_zero);
15607 ATOMIC64_DECL(add_unless);
15608@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15609 }
15610
15611 /**
15612+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15613+ * @v: pointer to type atomic64_unchecked_t
15614+ * @o: expected value
15615+ * @n: new value
15616+ *
15617+ * Atomically sets @v to @n if it was equal to @o and returns
15618+ * the old value.
15619+ */
15620+
15621+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15622+{
15623+ return cmpxchg64(&v->counter, o, n);
15624+}
15625+
15626+/**
15627 * atomic64_xchg - xchg atomic64 variable
15628 * @v: pointer to type atomic64_t
15629 * @n: value to assign
15630@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15631 }
15632
15633 /**
15634+ * atomic64_set_unchecked - set atomic64 variable
15635+ * @v: pointer to type atomic64_unchecked_t
15636+ * @n: value to assign
15637+ *
15638+ * Atomically sets the value of @v to @n.
15639+ */
15640+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15641+{
15642+ unsigned high = (unsigned)(i >> 32);
15643+ unsigned low = (unsigned)i;
15644+ alternative_atomic64(set, /* no output */,
15645+ "S" (v), "b" (low), "c" (high)
15646+ : "eax", "edx", "memory");
15647+}
15648+
15649+/**
15650 * atomic64_read - read atomic64 variable
15651 * @v: pointer to type atomic64_t
15652 *
15653@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15654 }
15655
15656 /**
15657+ * atomic64_read_unchecked - read atomic64 variable
15658+ * @v: pointer to type atomic64_unchecked_t
15659+ *
15660+ * Atomically reads the value of @v and returns it.
15661+ */
15662+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15663+{
15664+ long long r;
15665+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15666+ return r;
15667+}
15668+
15669+/**
15670 * atomic64_add_return - add and return
15671 * @i: integer value to add
15672 * @v: pointer to type atomic64_t
15673@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15674 return i;
15675 }
15676
15677+/**
15678+ * atomic64_add_return_unchecked - add and return
15679+ * @i: integer value to add
15680+ * @v: pointer to type atomic64_unchecked_t
15681+ *
15682+ * Atomically adds @i to @v and returns @i + *@v
15683+ */
15684+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15685+{
15686+ alternative_atomic64(add_return_unchecked,
15687+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15688+ ASM_NO_INPUT_CLOBBER("memory"));
15689+ return i;
15690+}
15691+
15692 /*
15693 * Other variants with different arithmetic operators:
15694 */
15695@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15696 return a;
15697 }
15698
15699+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15700+{
15701+ long long a;
15702+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15703+ "S" (v) : "memory", "ecx");
15704+ return a;
15705+}
15706+
15707 static inline long long atomic64_dec_return(atomic64_t *v)
15708 {
15709 long long a;
15710@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15711 }
15712
15713 /**
15714+ * atomic64_add_unchecked - add integer to atomic64 variable
15715+ * @i: integer value to add
15716+ * @v: pointer to type atomic64_unchecked_t
15717+ *
15718+ * Atomically adds @i to @v.
15719+ */
15720+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15721+{
15722+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15723+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15724+ ASM_NO_INPUT_CLOBBER("memory"));
15725+ return i;
15726+}
15727+
15728+/**
15729 * atomic64_sub - subtract the atomic64 variable
15730 * @i: integer value to subtract
15731 * @v: pointer to type atomic64_t
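The *_unchecked type and function family declared above exists so code can opt out of the overflow trap where wraparound is intended: reference counts stay on the checked type and trap, while statistics and sequence counters that legitimately wrap move to the unchecked one. A hypothetical struct showing the split as a caller would use it:

    struct demo_dev_counters {
            atomic_t refcnt;                 /* overflow here means use-after-free; trap it */
            atomic_unchecked_t rx_packets;   /* wrapping is expected and harmless */
    };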
15732diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15733index f8d273e..02f39f3 100644
15734--- a/arch/x86/include/asm/atomic64_64.h
15735+++ b/arch/x86/include/asm/atomic64_64.h
15736@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15737 }
15738
15739 /**
15740+ * atomic64_read_unchecked - read atomic64 variable
15741+ * @v: pointer of type atomic64_unchecked_t
15742+ *
15743+ * Atomically reads the value of @v.
15744+ * Doesn't imply a read memory barrier.
15745+ */
15746+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15747+{
15748+ return ACCESS_ONCE((v)->counter);
15749+}
15750+
15751+/**
15752 * atomic64_set - set atomic64 variable
15753 * @v: pointer to type atomic64_t
15754 * @i: required value
15755@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15756 }
15757
15758 /**
15759+ * atomic64_set_unchecked - set atomic64 variable
15760+ * @v: pointer to type atomic64_unchecked_t
15761+ * @i: required value
15762+ *
15763+ * Atomically sets the value of @v to @i.
15764+ */
15765+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15766+{
15767+ v->counter = i;
15768+}
15769+
15770+/**
15771 * atomic64_add - add integer to atomic64 variable
15772 * @i: integer value to add
15773 * @v: pointer to type atomic64_t
15774@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15775 */
15776 static inline void atomic64_add(long i, atomic64_t *v)
15777 {
15778+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15779+
15780+#ifdef CONFIG_PAX_REFCOUNT
15781+ "jno 0f\n"
15782+ LOCK_PREFIX "subq %1,%0\n"
15783+ "int $4\n0:\n"
15784+ _ASM_EXTABLE(0b, 0b)
15785+#endif
15786+
15787+ : "=m" (v->counter)
15788+ : "er" (i), "m" (v->counter));
15789+}
15790+
15791+/**
15792+ * atomic64_add_unchecked - add integer to atomic64 variable
15793+ * @i: integer value to add
15794+ * @v: pointer to type atomic64_unchecked_t
15795+ *
15796+ * Atomically adds @i to @v.
15797+ */
15798+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15799+{
15800 asm volatile(LOCK_PREFIX "addq %1,%0"
15801 : "=m" (v->counter)
15802 : "er" (i), "m" (v->counter));
15803@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15804 */
15805 static inline void atomic64_sub(long i, atomic64_t *v)
15806 {
15807- asm volatile(LOCK_PREFIX "subq %1,%0"
15808+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15809+
15810+#ifdef CONFIG_PAX_REFCOUNT
15811+ "jno 0f\n"
15812+ LOCK_PREFIX "addq %1,%0\n"
15813+ "int $4\n0:\n"
15814+ _ASM_EXTABLE(0b, 0b)
15815+#endif
15816+
15817+ : "=m" (v->counter)
15818+ : "er" (i), "m" (v->counter));
15819+}
15820+
15821+/**
15822+ * atomic64_sub_unchecked - subtract the atomic64 variable
15823+ * @i: integer value to subtract
15824+ * @v: pointer to type atomic64_unchecked_t
15825+ *
15826+ * Atomically subtracts @i from @v.
15827+ */
15828+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15829+{
15830+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15831 : "=m" (v->counter)
15832 : "er" (i), "m" (v->counter));
15833 }
15834@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15835 */
15836 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15837 {
15838- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15839+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15840 }
15841
15842 /**
15843@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15844 */
15845 static inline void atomic64_inc(atomic64_t *v)
15846 {
15847+ asm volatile(LOCK_PREFIX "incq %0\n"
15848+
15849+#ifdef CONFIG_PAX_REFCOUNT
15850+ "jno 0f\n"
15851+ LOCK_PREFIX "decq %0\n"
15852+ "int $4\n0:\n"
15853+ _ASM_EXTABLE(0b, 0b)
15854+#endif
15855+
15856+ : "=m" (v->counter)
15857+ : "m" (v->counter));
15858+}
15859+
15860+/**
15861+ * atomic64_inc_unchecked - increment atomic64 variable
15862+ * @v: pointer to type atomic64_unchecked_t
15863+ *
15864+ * Atomically increments @v by 1.
15865+ */
15866+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15867+{
15868 asm volatile(LOCK_PREFIX "incq %0"
15869 : "=m" (v->counter)
15870 : "m" (v->counter));
15871@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15872 */
15873 static inline void atomic64_dec(atomic64_t *v)
15874 {
15875- asm volatile(LOCK_PREFIX "decq %0"
15876+ asm volatile(LOCK_PREFIX "decq %0\n"
15877+
15878+#ifdef CONFIG_PAX_REFCOUNT
15879+ "jno 0f\n"
15880+ LOCK_PREFIX "incq %0\n"
15881+ "int $4\n0:\n"
15882+ _ASM_EXTABLE(0b, 0b)
15883+#endif
15884+
15885+ : "=m" (v->counter)
15886+ : "m" (v->counter));
15887+}
15888+
15889+/**
15890+ * atomic64_dec_unchecked - decrement atomic64 variable
15891+ * @v: pointer to type atomic64_unchecked_t
15892+ *
15893+ * Atomically decrements @v by 1.
15894+ */
15895+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15896+{
15897+ asm volatile(LOCK_PREFIX "decq %0\n"
15898 : "=m" (v->counter)
15899 : "m" (v->counter));
15900 }
15901@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15902 */
15903 static inline int atomic64_dec_and_test(atomic64_t *v)
15904 {
15905- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15906+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15907 }
15908
15909 /**
15910@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15911 */
15912 static inline int atomic64_inc_and_test(atomic64_t *v)
15913 {
15914- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15915+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15916 }
15917
15918 /**
15919@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15920 */
15921 static inline int atomic64_add_negative(long i, atomic64_t *v)
15922 {
15923- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15924+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15925 }
15926
15927 /**
15928@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15929 */
15930 static inline long atomic64_add_return(long i, atomic64_t *v)
15931 {
15932+ return i + xadd_check_overflow(&v->counter, i);
15933+}
15934+
15935+/**
15936+ * atomic64_add_return_unchecked - add and return
15937+ * @i: integer value to add
15938+ * @v: pointer to type atomic64_unchecked_t
15939+ *
15940+ * Atomically adds @i to @v and returns @i + @v
15941+ */
15942+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15943+{
15944 return i + xadd(&v->counter, i);
15945 }
15946
15947@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15948 }
15949
15950 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15951+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15952+{
15953+ return atomic64_add_return_unchecked(1, v);
15954+}
15955 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15956
15957 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15958@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15959 return cmpxchg(&v->counter, old, new);
15960 }
15961
15962+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15963+{
15964+ return cmpxchg(&v->counter, old, new);
15965+}
15966+
15967 static inline long atomic64_xchg(atomic64_t *v, long new)
15968 {
15969 return xchg(&v->counter, new);
15970@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15971 */
15972 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15973 {
15974- long c, old;
15975+ long c, old, new;
15976 c = atomic64_read(v);
15977 for (;;) {
15978- if (unlikely(c == (u)))
15979+ if (unlikely(c == u))
15980 break;
15981- old = atomic64_cmpxchg((v), c, c + (a));
15982+
15983+ asm volatile("add %2,%0\n"
15984+
15985+#ifdef CONFIG_PAX_REFCOUNT
15986+ "jno 0f\n"
15987+ "sub %2,%0\n"
15988+ "int $4\n0:\n"
15989+ _ASM_EXTABLE(0b, 0b)
15990+#endif
15991+
15992+ : "=r" (new)
15993+ : "0" (c), "ir" (a));
15994+
15995+ old = atomic64_cmpxchg(v, c, new);
15996 if (likely(old == c))
15997 break;
15998 c = old;
15999 }
16000- return c != (u);
16001+ return c != u;
16002 }
16003
16004 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
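The reworked atomic64_add_unless() keeps the classic cmpxchg retry loop but computes the candidate value through the checked add, so an overflowing c + a traps before it can ever be published. A portable C sketch of the same control flow, builtins standing in for the asm:

    #include <stdlib.h>

    static int add_unless_sketch(long *v, long a, long u)
    {
            long c = __atomic_load_n(v, __ATOMIC_RELAXED);

            while (c != u) {
                    long sum;

                    if (__builtin_add_overflow(c, a, &sum))
                            abort();        /* the "int $4" path */
                    if (__atomic_compare_exchange_n(v, &c, sum, 0,
                                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                            return 1;       /* counter was still c; add published */
                    /* CAS failed: c now holds the fresh value, retry */
            }
            return 0;                       /* counter hit the forbidden value u */
    }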
16005diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16006index 2ab1eb3..1e8cc5d 100644
16007--- a/arch/x86/include/asm/barrier.h
16008+++ b/arch/x86/include/asm/barrier.h
16009@@ -57,7 +57,7 @@
16010 do { \
16011 compiletime_assert_atomic_type(*p); \
16012 smp_mb(); \
16013- ACCESS_ONCE(*p) = (v); \
16014+ ACCESS_ONCE_RW(*p) = (v); \
16015 } while (0)
16016
16017 #define smp_load_acquire(p) \
16018@@ -74,7 +74,7 @@ do { \
16019 do { \
16020 compiletime_assert_atomic_type(*p); \
16021 barrier(); \
16022- ACCESS_ONCE(*p) = (v); \
16023+ ACCESS_ONCE_RW(*p) = (v); \
16024 } while (0)
16025
16026 #define smp_load_acquire(p) \
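ACCESS_ONCE_RW is the write-side half of a split grsecurity makes in compiler.h: the plain ACCESS_ONCE() becomes a const-qualified volatile view, so an accidental store through it no longer compiles and writes must be spelled explicitly. Paraphrased from memory of that change, not quoted:

    #define ACCESS_ONCE_SKETCH(x)    (*(volatile const typeof(x) *)&(x))  /* reads only */
    #define ACCESS_ONCE_RW_SKETCH(x) (*(volatile typeof(x) *)&(x))        /* explicit writes */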
16027diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16028index cfe3b95..d01b118 100644
16029--- a/arch/x86/include/asm/bitops.h
16030+++ b/arch/x86/include/asm/bitops.h
16031@@ -50,7 +50,7 @@
16032 * a mask operation on a byte.
16033 */
16034 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16035-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16036+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16037 #define CONST_MASK(nr) (1 << ((nr) & 7))
16038
16039 /**
16040@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16041 */
16042 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16043 {
16044- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16045+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16046 }
16047
16048 /**
16049@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16050 */
16051 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16052 {
16053- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16054+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16055 }
16056
16057 /**
16058@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16059 */
16060 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16061 {
16062- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16063+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16064 }
16065
16066 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16067@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16068 *
16069 * Undefined if no bit exists, so code should check against 0 first.
16070 */
16071-static inline unsigned long __ffs(unsigned long word)
16072+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16073 {
16074 asm("rep; bsf %1,%0"
16075 : "=r" (word)
16076@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16077 *
16078 * Undefined if no zero exists, so code should check against ~0UL first.
16079 */
16080-static inline unsigned long ffz(unsigned long word)
16081+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16082 {
16083 asm("rep; bsf %1,%0"
16084 : "=r" (word)
16085@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16086 *
16087 * Undefined if no set bit exists, so code should check against 0 first.
16088 */
16089-static inline unsigned long __fls(unsigned long word)
16090+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16091 {
16092 asm("bsr %1,%0"
16093 : "=r" (word)
16094@@ -434,7 +434,7 @@ static inline int ffs(int x)
16095 * set bit if value is nonzero. The last (most significant) bit is
16096 * at position 32.
16097 */
16098-static inline int fls(int x)
16099+static inline int __intentional_overflow(-1) fls(int x)
16100 {
16101 int r;
16102
16103@@ -476,7 +476,7 @@ static inline int fls(int x)
16104 * at position 64.
16105 */
16106 #ifdef CONFIG_X86_64
16107-static __always_inline int fls64(__u64 x)
16108+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16109 {
16110 int bitpos = -1;
16111 /*
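__intentional_overflow(-1), sprinkled over the bit-scan helpers above, is a marker consumed by the size_overflow gcc plugin: it tells the plugin not to instrument the annotated function, whose results feed arithmetic the plugin would otherwise flag. A hedged sketch of how such a marker is typically wired up (guard name hypothetical):

    #ifdef SIZE_OVERFLOW_PLUGIN     /* hypothetical guard */
    #define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...) /* compiles away outside plugin builds */
    #endif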
16112diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16113index 4fa687a..60f2d39 100644
16114--- a/arch/x86/include/asm/boot.h
16115+++ b/arch/x86/include/asm/boot.h
16116@@ -6,10 +6,15 @@
16117 #include <uapi/asm/boot.h>
16118
16119 /* Physical address where kernel should be loaded. */
16120-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16121+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16122 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16123 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16124
16125+#ifndef __ASSEMBLY__
16126+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16127+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16128+#endif
16129+
16130 /* Minimum kernel alignment, as a power of two */
16131 #ifdef CONFIG_X86_64
16132 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
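Turning LOAD_PHYSICAL_ADDR into the address of an extern array moves the value from compile time to link/boot time, which is what lets the PaX side rebase it. The trick in isolation, with the definition assumed to come from the linker script:

    extern unsigned char __LOAD_PHYSICAL_ADDR[];  /* no storage; the symbol is the value */

    static unsigned long load_phys_addr(void)
    {
            return (unsigned long)__LOAD_PHYSICAL_ADDR;
    }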
16133diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16134index 48f99f1..d78ebf9 100644
16135--- a/arch/x86/include/asm/cache.h
16136+++ b/arch/x86/include/asm/cache.h
16137@@ -5,12 +5,13 @@
16138
16139 /* L1 cache line size */
16140 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16141-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16142+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16143
16144 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16145+#define __read_only __attribute__((__section__(".data..read_only")))
16146
16147 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16148-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16149+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16150
16151 #ifdef CONFIG_X86_VSMP
16152 #ifdef CONFIG_SMP
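__read_only places once-written data in a section that ends up mapped without write permission. A hypothetical use, with the caveat that any later legitimate writer must bracket the store with the pax_open_kernel()/pax_close_kernel() pair the desc.h hunks below add around GDT/IDT updates:

    static int hardening_enabled __read_only = 1;  /* set at init, immutable afterwards */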
16153diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16154index 76659b6..72b8439 100644
16155--- a/arch/x86/include/asm/calling.h
16156+++ b/arch/x86/include/asm/calling.h
16157@@ -82,107 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16158 #define RSP 152
16159 #define SS 160
16160
16161-#define ARGOFFSET R11
16162-#define SWFRAME ORIG_RAX
16163+#define ARGOFFSET R15
16164
16165 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16166- subq $9*8+\addskip, %rsp
16167- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16168- movq_cfi rdi, 8*8
16169- movq_cfi rsi, 7*8
16170- movq_cfi rdx, 6*8
16171+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16172+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16173+ movq_cfi rdi, RDI
16174+ movq_cfi rsi, RSI
16175+ movq_cfi rdx, RDX
16176
16177 .if \save_rcx
16178- movq_cfi rcx, 5*8
16179+ movq_cfi rcx, RCX
16180 .endif
16181
16182 .if \rax_enosys
16183- movq $-ENOSYS, 4*8(%rsp)
16184+ movq $-ENOSYS, RAX(%rsp)
16185 .else
16186- movq_cfi rax, 4*8
16187+ movq_cfi rax, RAX
16188 .endif
16189
16190 .if \save_r891011
16191- movq_cfi r8, 3*8
16192- movq_cfi r9, 2*8
16193- movq_cfi r10, 1*8
16194- movq_cfi r11, 0*8
16195+ movq_cfi r8, R8
16196+ movq_cfi r9, R9
16197+ movq_cfi r10, R10
16198+ movq_cfi r11, R11
16199 .endif
16200
16201+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16202+ movq_cfi r12, R12
16203+#endif
16204+
16205 .endm
16206
16207-#define ARG_SKIP (9*8)
16208+#define ARG_SKIP ORIG_RAX
16209
16210 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16211 rstor_r8910=1, rstor_rdx=1
16212+
16213+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16214+ movq_cfi_restore R12, r12
16215+#endif
16216+
16217 .if \rstor_r11
16218- movq_cfi_restore 0*8, r11
16219+ movq_cfi_restore R11, r11
16220 .endif
16221
16222 .if \rstor_r8910
16223- movq_cfi_restore 1*8, r10
16224- movq_cfi_restore 2*8, r9
16225- movq_cfi_restore 3*8, r8
16226+ movq_cfi_restore R10, r10
16227+ movq_cfi_restore R9, r9
16228+ movq_cfi_restore R8, r8
16229 .endif
16230
16231 .if \rstor_rax
16232- movq_cfi_restore 4*8, rax
16233+ movq_cfi_restore RAX, rax
16234 .endif
16235
16236 .if \rstor_rcx
16237- movq_cfi_restore 5*8, rcx
16238+ movq_cfi_restore RCX, rcx
16239 .endif
16240
16241 .if \rstor_rdx
16242- movq_cfi_restore 6*8, rdx
16243+ movq_cfi_restore RDX, rdx
16244 .endif
16245
16246- movq_cfi_restore 7*8, rsi
16247- movq_cfi_restore 8*8, rdi
16248+ movq_cfi_restore RSI, rsi
16249+ movq_cfi_restore RDI, rdi
16250
16251- .if ARG_SKIP+\addskip > 0
16252- addq $ARG_SKIP+\addskip, %rsp
16253- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16254+ .if ORIG_RAX+\addskip > 0
16255+ addq $ORIG_RAX+\addskip, %rsp
16256+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16257 .endif
16258 .endm
16259
16260- .macro LOAD_ARGS offset, skiprax=0
16261- movq \offset(%rsp), %r11
16262- movq \offset+8(%rsp), %r10
16263- movq \offset+16(%rsp), %r9
16264- movq \offset+24(%rsp), %r8
16265- movq \offset+40(%rsp), %rcx
16266- movq \offset+48(%rsp), %rdx
16267- movq \offset+56(%rsp), %rsi
16268- movq \offset+64(%rsp), %rdi
16269+ .macro LOAD_ARGS skiprax=0
16270+ movq R11(%rsp), %r11
16271+ movq R10(%rsp), %r10
16272+ movq R9(%rsp), %r9
16273+ movq R8(%rsp), %r8
16274+ movq RCX(%rsp), %rcx
16275+ movq RDX(%rsp), %rdx
16276+ movq RSI(%rsp), %rsi
16277+ movq RDI(%rsp), %rdi
16278 .if \skiprax
16279 .else
16280- movq \offset+72(%rsp), %rax
16281+ movq ORIG_RAX(%rsp), %rax
16282 .endif
16283 .endm
16284
16285-#define REST_SKIP (6*8)
16286-
16287 .macro SAVE_REST
16288- subq $REST_SKIP, %rsp
16289- CFI_ADJUST_CFA_OFFSET REST_SKIP
16290- movq_cfi rbx, 5*8
16291- movq_cfi rbp, 4*8
16292- movq_cfi r12, 3*8
16293- movq_cfi r13, 2*8
16294- movq_cfi r14, 1*8
16295- movq_cfi r15, 0*8
16296+ movq_cfi rbx, RBX
16297+ movq_cfi rbp, RBP
16298+
16299+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16300+ movq_cfi r12, R12
16301+#endif
16302+
16303+ movq_cfi r13, R13
16304+ movq_cfi r14, R14
16305+ movq_cfi r15, R15
16306 .endm
16307
16308 .macro RESTORE_REST
16309- movq_cfi_restore 0*8, r15
16310- movq_cfi_restore 1*8, r14
16311- movq_cfi_restore 2*8, r13
16312- movq_cfi_restore 3*8, r12
16313- movq_cfi_restore 4*8, rbp
16314- movq_cfi_restore 5*8, rbx
16315- addq $REST_SKIP, %rsp
16316- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16317+ movq_cfi_restore R15, r15
16318+ movq_cfi_restore R14, r14
16319+ movq_cfi_restore R13, r13
16320+
16321+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16322+ movq_cfi_restore R12, r12
16323+#endif
16324+
16325+ movq_cfi_restore RBP, rbp
16326+ movq_cfi_restore RBX, rbx
16327 .endm
16328
16329 .macro SAVE_ALL
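Beyond the PaX-specific %r12 handling, this hunk is the classic "name your offsets" cleanup: every saved register now uses the R8/R9/... constants defined at the top of the file instead of repeated n*8 literals, so the frame layout has a single source of truth. The same idea in C, layout illustrative only:

    #include <stddef.h>

    struct saved_regs_sketch {
            unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi, orig_rax;
    };
    #define SR_OFF(reg) offsetof(struct saved_regs_sketch, reg)
    /* i.e. "movq_cfi r8, R8" rather than "movq_cfi r8, 3*8" */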
16330diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16331index f50de69..2b0a458 100644
16332--- a/arch/x86/include/asm/checksum_32.h
16333+++ b/arch/x86/include/asm/checksum_32.h
16334@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16335 int len, __wsum sum,
16336 int *src_err_ptr, int *dst_err_ptr);
16337
16338+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16339+ int len, __wsum sum,
16340+ int *src_err_ptr, int *dst_err_ptr);
16341+
16342+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16343+ int len, __wsum sum,
16344+ int *src_err_ptr, int *dst_err_ptr);
16345+
16346 /*
16347 * Note: when you get a NULL pointer exception here this means someone
16348 * passed in an incorrect kernel address to one of these functions.
16349@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16350
16351 might_sleep();
16352 stac();
16353- ret = csum_partial_copy_generic((__force void *)src, dst,
16354+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16355 len, sum, err_ptr, NULL);
16356 clac();
16357
16358@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16359 might_sleep();
16360 if (access_ok(VERIFY_WRITE, dst, len)) {
16361 stac();
16362- ret = csum_partial_copy_generic(src, (__force void *)dst,
16363+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16364 len, sum, NULL, err_ptr);
16365 clac();
16366 return ret;
16367diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16368index 99c105d7..2f667ac 100644
16369--- a/arch/x86/include/asm/cmpxchg.h
16370+++ b/arch/x86/include/asm/cmpxchg.h
16371@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16372 __compiletime_error("Bad argument size for cmpxchg");
16373 extern void __xadd_wrong_size(void)
16374 __compiletime_error("Bad argument size for xadd");
16375+extern void __xadd_check_overflow_wrong_size(void)
16376+ __compiletime_error("Bad argument size for xadd_check_overflow");
16377 extern void __add_wrong_size(void)
16378 __compiletime_error("Bad argument size for add");
16379+extern void __add_check_overflow_wrong_size(void)
16380+ __compiletime_error("Bad argument size for add_check_overflow");
16381
16382 /*
16383 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16384@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16385 __ret; \
16386 })
16387
16388+#ifdef CONFIG_PAX_REFCOUNT
16389+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16390+ ({ \
16391+ __typeof__ (*(ptr)) __ret = (arg); \
16392+ switch (sizeof(*(ptr))) { \
16393+ case __X86_CASE_L: \
16394+ asm volatile (lock #op "l %0, %1\n" \
16395+ "jno 0f\n" \
16396+ "mov %0,%1\n" \
16397+ "int $4\n0:\n" \
16398+ _ASM_EXTABLE(0b, 0b) \
16399+ : "+r" (__ret), "+m" (*(ptr)) \
16400+ : : "memory", "cc"); \
16401+ break; \
16402+ case __X86_CASE_Q: \
16403+ asm volatile (lock #op "q %q0, %1\n" \
16404+ "jno 0f\n" \
16405+ "mov %0,%1\n" \
16406+ "int $4\n0:\n" \
16407+ _ASM_EXTABLE(0b, 0b) \
16408+ : "+r" (__ret), "+m" (*(ptr)) \
16409+ : : "memory", "cc"); \
16410+ break; \
16411+ default: \
16412+ __ ## op ## _check_overflow_wrong_size(); \
16413+ } \
16414+ __ret; \
16415+ })
16416+#else
16417+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16418+#endif
16419+
16420 /*
16421 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16422 * Since this is generally used to protect other memory information, we
16423@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16424 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16425 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16426
16427+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16428+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16429+
16430 #define __add(ptr, inc, lock) \
16431 ({ \
16432 __typeof__ (*(ptr)) __ret = (inc); \
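The __xadd_check_overflow_wrong_size() declaration follows the kernel's compile-time dispatch idiom: an error-attributed extern that only bites if the default: arm of the size switch survives optimization, turning an unsupported operand size into a build failure. The idiom in miniature:

    extern void __demo_wrong_size(void)
            __attribute__((error("bad operand size for demo op")));

    #define DEMO_OP_CHECKED(ptr) do {                       \
            switch (sizeof(*(ptr))) {                       \
            case 4: /* 32-bit asm variant */ break;         \
            case 8: /* 64-bit asm variant */ break;         \
            default: __demo_wrong_size();                   \
            }                                               \
    } while (0)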
16433diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16434index 59c6c40..5e0b22c 100644
16435--- a/arch/x86/include/asm/compat.h
16436+++ b/arch/x86/include/asm/compat.h
16437@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16438 typedef u32 compat_uint_t;
16439 typedef u32 compat_ulong_t;
16440 typedef u64 __attribute__((aligned(4))) compat_u64;
16441-typedef u32 compat_uptr_t;
16442+typedef u32 __user compat_uptr_t;
16443
16444 struct compat_timespec {
16445 compat_time_t tv_sec;
16446diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16447index aede2c3..40d7a8f 100644
16448--- a/arch/x86/include/asm/cpufeature.h
16449+++ b/arch/x86/include/asm/cpufeature.h
16450@@ -212,7 +212,7 @@
16451 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16452 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16453 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16454-
16455+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16456
16457 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16458 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16459@@ -220,7 +220,7 @@
16460 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16461 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16462 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16463-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16464+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16465 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16466 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16467 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16468@@ -388,6 +388,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16469 #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
16470 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16471 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16472+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16473
16474 #if __GNUC__ >= 4
16475 extern void warn_pre_alternatives(void);
16476@@ -439,7 +440,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16477
16478 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16479 t_warn:
16480- warn_pre_alternatives();
16481+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16482+ warn_pre_alternatives();
16483 return false;
16484 #endif
16485
16486@@ -459,7 +461,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16487 ".section .discard,\"aw\",@progbits\n"
16488 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16489 ".previous\n"
16490- ".section .altinstr_replacement,\"ax\"\n"
16491+ ".section .altinstr_replacement,\"a\"\n"
16492 "3: movb $1,%0\n"
16493 "4:\n"
16494 ".previous\n"
16495@@ -496,7 +498,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16496 " .byte 2b - 1b\n" /* src len */
16497 " .byte 4f - 3f\n" /* repl len */
16498 ".previous\n"
16499- ".section .altinstr_replacement,\"ax\"\n"
16500+ ".section .altinstr_replacement,\"a\"\n"
16501 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16502 "4:\n"
16503 ".previous\n"
16504@@ -529,7 +531,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16505 ".section .discard,\"aw\",@progbits\n"
16506 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16507 ".previous\n"
16508- ".section .altinstr_replacement,\"ax\"\n"
16509+ ".section .altinstr_replacement,\"a\"\n"
16510 "3: movb $0,%0\n"
16511 "4:\n"
16512 ".previous\n"
16513@@ -543,7 +545,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16514 ".section .discard,\"aw\",@progbits\n"
16515 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16516 ".previous\n"
16517- ".section .altinstr_replacement,\"ax\"\n"
16518+ ".section .altinstr_replacement,\"a\"\n"
16519 "5: movb $1,%0\n"
16520 "6:\n"
16521 ".previous\n"
16522diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16523index a94b82e..59ecefa 100644
16524--- a/arch/x86/include/asm/desc.h
16525+++ b/arch/x86/include/asm/desc.h
16526@@ -4,6 +4,7 @@
16527 #include <asm/desc_defs.h>
16528 #include <asm/ldt.h>
16529 #include <asm/mmu.h>
16530+#include <asm/pgtable.h>
16531
16532 #include <linux/smp.h>
16533 #include <linux/percpu.h>
16534@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16535
16536 desc->type = (info->read_exec_only ^ 1) << 1;
16537 desc->type |= info->contents << 2;
16538+ desc->type |= info->seg_not_present ^ 1;
16539
16540 desc->s = 1;
16541 desc->dpl = 0x3;
16542@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16543 }
16544
16545 extern struct desc_ptr idt_descr;
16546-extern gate_desc idt_table[];
16547-extern struct desc_ptr debug_idt_descr;
16548-extern gate_desc debug_idt_table[];
16549-
16550-struct gdt_page {
16551- struct desc_struct gdt[GDT_ENTRIES];
16552-} __attribute__((aligned(PAGE_SIZE)));
16553-
16554-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16555+extern gate_desc idt_table[IDT_ENTRIES];
16556+extern const struct desc_ptr debug_idt_descr;
16557+extern gate_desc debug_idt_table[IDT_ENTRIES];
16558
16559+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16560 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16561 {
16562- return per_cpu(gdt_page, cpu).gdt;
16563+ return cpu_gdt_table[cpu];
16564 }
16565
16566 #ifdef CONFIG_X86_64
16567@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16568 unsigned long base, unsigned dpl, unsigned flags,
16569 unsigned short seg)
16570 {
16571- gate->a = (seg << 16) | (base & 0xffff);
16572- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16573+ gate->gate.offset_low = base;
16574+ gate->gate.seg = seg;
16575+ gate->gate.reserved = 0;
16576+ gate->gate.type = type;
16577+ gate->gate.s = 0;
16578+ gate->gate.dpl = dpl;
16579+ gate->gate.p = 1;
16580+ gate->gate.offset_high = base >> 16;
16581 }
16582
16583 #endif
16584@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16585
16586 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16587 {
16588+ pax_open_kernel();
16589 memcpy(&idt[entry], gate, sizeof(*gate));
16590+ pax_close_kernel();
16591 }
16592
16593 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16594 {
16595+ pax_open_kernel();
16596 memcpy(&ldt[entry], desc, 8);
16597+ pax_close_kernel();
16598 }
16599
16600 static inline void
16601@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16602 default: size = sizeof(*gdt); break;
16603 }
16604
16605+ pax_open_kernel();
16606 memcpy(&gdt[entry], desc, size);
16607+ pax_close_kernel();
16608 }
16609
16610 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16611@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16612
16613 static inline void native_load_tr_desc(void)
16614 {
16615+ pax_open_kernel();
16616 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16617+ pax_close_kernel();
16618 }
16619
16620 static inline void native_load_gdt(const struct desc_ptr *dtr)
16621@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16622 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16623 unsigned int i;
16624
16625+ pax_open_kernel();
16626 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16627 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16628+ pax_close_kernel();
16629 }
16630
16631 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16632@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16633 preempt_enable();
16634 }
16635
16636-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16637+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16638 {
16639 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16640 }
16641@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16642 }
16643
16644 #ifdef CONFIG_X86_64
16645-static inline void set_nmi_gate(int gate, void *addr)
16646+static inline void set_nmi_gate(int gate, const void *addr)
16647 {
16648 gate_desc s;
16649
16650@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16651 #endif
16652
16653 #ifdef CONFIG_TRACING
16654-extern struct desc_ptr trace_idt_descr;
16655-extern gate_desc trace_idt_table[];
16656+extern const struct desc_ptr trace_idt_descr;
16657+extern gate_desc trace_idt_table[IDT_ENTRIES];
16658 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16659 {
16660 write_idt_entry(trace_idt_table, entry, gate);
16661 }
16662
16663-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16664+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16665 unsigned dpl, unsigned ist, unsigned seg)
16666 {
16667 gate_desc s;
16668@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16669 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16670 #endif
16671
16672-static inline void _set_gate(int gate, unsigned type, void *addr,
16673+static inline void _set_gate(int gate, unsigned type, const void *addr,
16674 unsigned dpl, unsigned ist, unsigned seg)
16675 {
16676 gate_desc s;
16677@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16678 #define set_intr_gate(n, addr) \
16679 do { \
16680 BUG_ON((unsigned)n > 0xFF); \
16681- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16682+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16683 __KERNEL_CS); \
16684- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16685+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16686 0, 0, __KERNEL_CS); \
16687 } while (0)
16688
16689@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16690 /*
16691 * This routine sets up an interrupt gate at directory privilege level 3.
16692 */
16693-static inline void set_system_intr_gate(unsigned int n, void *addr)
16694+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16695 {
16696 BUG_ON((unsigned)n > 0xFF);
16697 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16698 }
16699
16700-static inline void set_system_trap_gate(unsigned int n, void *addr)
16701+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16702 {
16703 BUG_ON((unsigned)n > 0xFF);
16704 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16705 }
16706
16707-static inline void set_trap_gate(unsigned int n, void *addr)
16708+static inline void set_trap_gate(unsigned int n, const void *addr)
16709 {
16710 BUG_ON((unsigned)n > 0xFF);
16711 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16712@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16713 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16714 {
16715 BUG_ON((unsigned)n > 0xFF);
16716- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16717+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16718 }
16719
16720-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16721+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16722 {
16723 BUG_ON((unsigned)n > 0xFF);
16724 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16725 }
16726
16727-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16728+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16729 {
16730 BUG_ON((unsigned)n > 0xFF);
16731 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16732@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16733 else
16734 load_idt((const struct desc_ptr *)&idt_descr);
16735 }
16736+
16737+#ifdef CONFIG_X86_32
16738+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16739+{
16740+ struct desc_struct d;
16741+
16742+ if (likely(limit))
16743+ limit = (limit - 1UL) >> PAGE_SHIFT;
16744+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16745+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16746+}
16747+#endif
16748+
16749 #endif /* _ASM_X86_DESC_H */
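
A note on the set_user_cs() helper added above: it re-packs the default user code segment so PAGEEXEC/SEGMEXEC can move its base and shrink its limit per task. A sketch of what the two magic constants encode, following the standard x86 descriptor layout (the names are illustrative, not kernel API):

    enum {
            SEG_TYPE_CODE_READ_ACCESSED = 0x0B, /* 1011b: code, readable, accessed */
            SEG_S_CODE_DATA             = 1 << 4,
            SEG_DPL3                    = 3 << 5,
            SEG_PRESENT                 = 1 << 7,
    };
    #define USER_CS_ACCESS (SEG_PRESENT | SEG_DPL3 | SEG_S_CODE_DATA | \
                            SEG_TYPE_CODE_READ_ACCESSED)        /* == 0xFB */
    #define USER_CS_FLAGS  0xC  /* G=1: limit counted in 4 KiB pages, D=1: 32-bit */

With G=1 the limit is page-granular, which is why the helper converts a byte limit with (limit - 1UL) >> PAGE_SHIFT before packing.
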
16750diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16751index 278441f..b95a174 100644
16752--- a/arch/x86/include/asm/desc_defs.h
16753+++ b/arch/x86/include/asm/desc_defs.h
16754@@ -31,6 +31,12 @@ struct desc_struct {
16755 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16756 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16757 };
16758+ struct {
16759+ u16 offset_low;
16760+ u16 seg;
16761+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16762+ unsigned offset_high: 16;
16763+ } gate;
16764 };
16765 } __attribute__((packed));
16766
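
The anonymous gate view added to the union above lets 32-bit gate descriptors be inspected through named fields instead of manual shifting. A hypothetical accessor built on it (illustrative, not part of the patch):

    static inline unsigned long gate32_offset(const struct desc_struct *g)
    {
            /* reassemble the handler address from its two 16-bit halves */
            return g->gate.offset_low | ((unsigned long)g->gate.offset_high << 16);
    }
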
16767diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16768index ced283a..ffe04cc 100644
16769--- a/arch/x86/include/asm/div64.h
16770+++ b/arch/x86/include/asm/div64.h
16771@@ -39,7 +39,7 @@
16772 __mod; \
16773 })
16774
16775-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16776+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16777 {
16778 union {
16779 u64 v64;
16780diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16781index ca3347a..1a5082a 100644
16782--- a/arch/x86/include/asm/elf.h
16783+++ b/arch/x86/include/asm/elf.h
16784@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16785
16786 #include <asm/vdso.h>
16787
16788-#ifdef CONFIG_X86_64
16789-extern unsigned int vdso64_enabled;
16790-#endif
16791 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16792 extern unsigned int vdso32_enabled;
16793 #endif
16794@@ -249,7 +246,25 @@ extern int force_personality32;
16795 the loader. We need to make sure that it is out of the way of the program
16796 that it will "exec", and that there is sufficient room for the brk. */
16797
16798+#ifdef CONFIG_PAX_SEGMEXEC
16799+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16800+#else
16801 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16802+#endif
16803+
16804+#ifdef CONFIG_PAX_ASLR
16805+#ifdef CONFIG_X86_32
16806+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16807+
16808+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16809+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16810+#else
16811+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16812+
16813+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16814+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16815+#endif
16816+#endif
16817
16818 /* This yields a mask that user programs can use to figure out what
16819 instruction set this CPU supports. This could be done in user space,
16820@@ -298,17 +313,13 @@ do { \
16821
16822 #define ARCH_DLINFO \
16823 do { \
16824- if (vdso64_enabled) \
16825- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16826- (unsigned long __force)current->mm->context.vdso); \
16827+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16828 } while (0)
16829
16830 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16831 #define ARCH_DLINFO_X32 \
16832 do { \
16833- if (vdso64_enabled) \
16834- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16835- (unsigned long __force)current->mm->context.vdso); \
16836+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16837 } while (0)
16838
16839 #define AT_SYSINFO 32
16840@@ -323,10 +334,10 @@ else \
16841
16842 #endif /* !CONFIG_X86_32 */
16843
16844-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16845+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16846
16847 #define VDSO_ENTRY \
16848- ((unsigned long)current->mm->context.vdso + \
16849+ (current->mm->context.vdso + \
16850 selected_vdso32->sym___kernel_vsyscall)
16851
16852 struct linux_binprm;
16853@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16854 int uses_interp);
16855 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16856
16857-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16858-#define arch_randomize_brk arch_randomize_brk
16859-
16860 /*
16861 * True on X86_32 or when emulating IA32 on X86_64
16862 */
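
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above give the number of random bits PaX ASLR applies at page granularity. Worked out, assuming the usual defaults (PAGE_SHIFT == 12, TASK_SIZE_MAX_SHIFT == 47):

    /* i386, SEGMEXEC:  15 bits -> 2^15 pages = 128 MiB of base jitter
     * i386, PAGEEXEC:  16 bits -> 2^16 pages = 256 MiB
     * x86-64 (64-bit task): 47 - 12 - 3 = 32 bits -> 2^32 pages = 16 TiB */
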
16863diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16864index 77a99ac..39ff7f5 100644
16865--- a/arch/x86/include/asm/emergency-restart.h
16866+++ b/arch/x86/include/asm/emergency-restart.h
16867@@ -1,6 +1,6 @@
16868 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16869 #define _ASM_X86_EMERGENCY_RESTART_H
16870
16871-extern void machine_emergency_restart(void);
16872+extern void machine_emergency_restart(void) __noreturn;
16873
16874 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16875diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16876index 1c7eefe..d0e4702 100644
16877--- a/arch/x86/include/asm/floppy.h
16878+++ b/arch/x86/include/asm/floppy.h
16879@@ -229,18 +229,18 @@ static struct fd_routine_l {
16880 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16881 } fd_routine[] = {
16882 {
16883- request_dma,
16884- free_dma,
16885- get_dma_residue,
16886- dma_mem_alloc,
16887- hard_dma_setup
16888+ ._request_dma = request_dma,
16889+ ._free_dma = free_dma,
16890+ ._get_dma_residue = get_dma_residue,
16891+ ._dma_mem_alloc = dma_mem_alloc,
16892+ ._dma_setup = hard_dma_setup
16893 },
16894 {
16895- vdma_request_dma,
16896- vdma_nop,
16897- vdma_get_dma_residue,
16898- vdma_mem_alloc,
16899- vdma_dma_setup
16900+ ._request_dma = vdma_request_dma,
16901+ ._free_dma = vdma_nop,
16902+ ._get_dma_residue = vdma_get_dma_residue,
16903+ ._dma_mem_alloc = vdma_mem_alloc,
16904+ ._dma_setup = vdma_dma_setup
16905 }
16906 };
16907
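
The floppy hunk above is a mechanical conversion from positional to C99 designated initializers; binding by field name keeps the table correct even if the struct layout changes, which matters under grsecurity's structure layout randomization. Minimal illustration of the idiom:

    static int  my_open(void)  { return 0; }
    static void my_close(void) { }
    struct ops { int (*open)(void); void (*close)(void); };
    /* positional "{ my_open, my_close }" breaks silently if fields move;
       designated initializers bind by name: */
    static struct ops o = { .open = my_open, .close = my_close };
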
16908diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16909index e97622f..d0ba77a 100644
16910--- a/arch/x86/include/asm/fpu-internal.h
16911+++ b/arch/x86/include/asm/fpu-internal.h
16912@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16913 #define user_insn(insn, output, input...) \
16914 ({ \
16915 int err; \
16916+ pax_open_userland(); \
16917 asm volatile(ASM_STAC "\n" \
16918- "1:" #insn "\n\t" \
16919+ "1:" \
16920+ __copyuser_seg \
16921+ #insn "\n\t" \
16922 "2: " ASM_CLAC "\n" \
16923 ".section .fixup,\"ax\"\n" \
16924 "3: movl $-1,%[err]\n" \
16925@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16926 _ASM_EXTABLE(1b, 3b) \
16927 : [err] "=r" (err), output \
16928 : "0"(0), input); \
16929+ pax_close_userland(); \
16930 err; \
16931 })
16932
16933@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16934 "fnclex\n\t"
16935 "emms\n\t"
16936 "fildl %P[addr]" /* set F?P to defined value */
16937- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16938+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16939 }
16940
16941 return fpu_restore_checking(&tsk->thread.fpu);
16942diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16943index b4c1f54..e290c08 100644
16944--- a/arch/x86/include/asm/futex.h
16945+++ b/arch/x86/include/asm/futex.h
16946@@ -12,6 +12,7 @@
16947 #include <asm/smap.h>
16948
16949 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16950+ typecheck(u32 __user *, uaddr); \
16951 asm volatile("\t" ASM_STAC "\n" \
16952 "1:\t" insn "\n" \
16953 "2:\t" ASM_CLAC "\n" \
16954@@ -20,15 +21,16 @@
16955 "\tjmp\t2b\n" \
16956 "\t.previous\n" \
16957 _ASM_EXTABLE(1b, 3b) \
16958- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16959+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16960 : "i" (-EFAULT), "0" (oparg), "1" (0))
16961
16962 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16963+ typecheck(u32 __user *, uaddr); \
16964 asm volatile("\t" ASM_STAC "\n" \
16965 "1:\tmovl %2, %0\n" \
16966 "\tmovl\t%0, %3\n" \
16967 "\t" insn "\n" \
16968- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16969+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16970 "\tjnz\t1b\n" \
16971 "3:\t" ASM_CLAC "\n" \
16972 "\t.section .fixup,\"ax\"\n" \
16973@@ -38,7 +40,7 @@
16974 _ASM_EXTABLE(1b, 4b) \
16975 _ASM_EXTABLE(2b, 4b) \
16976 : "=&a" (oldval), "=&r" (ret), \
16977- "+m" (*uaddr), "=&r" (tem) \
16978+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16979 : "r" (oparg), "i" (-EFAULT), "1" (0))
16980
16981 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16982@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16983
16984 pagefault_disable();
16985
16986+ pax_open_userland();
16987 switch (op) {
16988 case FUTEX_OP_SET:
16989- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16990+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16991 break;
16992 case FUTEX_OP_ADD:
16993- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16994+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16995 uaddr, oparg);
16996 break;
16997 case FUTEX_OP_OR:
16998@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16999 default:
17000 ret = -ENOSYS;
17001 }
17002+ pax_close_userland();
17003
17004 pagefault_enable();
17005
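
The added typecheck(u32 __user *, uaddr) lines make the futex macros fail the build when handed the wrong pointer type, which matters now that they cast uaddr through ____m(). The kernel's typecheck() (include/linux/typecheck.h) boils down to:

    #define typecheck(type, x)              \
    ({      type __dummy;                   \
            typeof(x) __dummy2;             \
            (void)(&__dummy == &__dummy2);  \
            1;                              \
    })
    /* comparing the two pointers emits "comparison of distinct pointer
       types" on a mismatch, an error under -Werror */
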
17006diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17007index 9662290..49ca5e5 100644
17008--- a/arch/x86/include/asm/hw_irq.h
17009+++ b/arch/x86/include/asm/hw_irq.h
17010@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
17011 #endif /* CONFIG_X86_LOCAL_APIC */
17012
17013 /* Statistics */
17014-extern atomic_t irq_err_count;
17015-extern atomic_t irq_mis_count;
17016+extern atomic_unchecked_t irq_err_count;
17017+extern atomic_unchecked_t irq_mis_count;
17018
17019 /* EISA */
17020 extern void eisa_set_level_irq(unsigned int irq);
17021diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17022index ccffa53..3c90c87 100644
17023--- a/arch/x86/include/asm/i8259.h
17024+++ b/arch/x86/include/asm/i8259.h
17025@@ -62,7 +62,7 @@ struct legacy_pic {
17026 void (*init)(int auto_eoi);
17027 int (*irq_pending)(unsigned int irq);
17028 void (*make_irq)(unsigned int irq);
17029-};
17030+} __do_const;
17031
17032 extern struct legacy_pic *legacy_pic;
17033 extern struct legacy_pic null_legacy_pic;
17034diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17035index 34a5b93..27e40a6 100644
17036--- a/arch/x86/include/asm/io.h
17037+++ b/arch/x86/include/asm/io.h
17038@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17039 "m" (*(volatile type __force *)addr) barrier); }
17040
17041 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17042-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17043-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17044+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17045+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17046
17047 build_mmio_read(__readb, "b", unsigned char, "=q", )
17048-build_mmio_read(__readw, "w", unsigned short, "=r", )
17049-build_mmio_read(__readl, "l", unsigned int, "=r", )
17050+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17051+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17052
17053 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17054 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17055@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17056 * this function
17057 */
17058
17059-static inline phys_addr_t virt_to_phys(volatile void *address)
17060+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17061 {
17062 return __pa(address);
17063 }
17064@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17065 return ioremap_nocache(offset, size);
17066 }
17067
17068-extern void iounmap(volatile void __iomem *addr);
17069+extern void iounmap(const volatile void __iomem *addr);
17070
17071 extern void set_iounmap_nonlazy(void);
17072
17073@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17074
17075 #include <linux/vmalloc.h>
17076
17077+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17078+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17079+{
17080+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17081+}
17082+
17083+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17084+{
17085+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17086+}
17087+
17088 /*
17089 * Convert a virtual cached pointer to an uncached pointer
17090 */
17091diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17092index 0a8b519..80e7d5b 100644
17093--- a/arch/x86/include/asm/irqflags.h
17094+++ b/arch/x86/include/asm/irqflags.h
17095@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17096 sti; \
17097 sysexit
17098
17099+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17100+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17101+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17102+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17103+
17104 #else
17105 #define INTERRUPT_RETURN iret
17106 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17107diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17108index 4421b5d..8543006 100644
17109--- a/arch/x86/include/asm/kprobes.h
17110+++ b/arch/x86/include/asm/kprobes.h
17111@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17112 #define RELATIVEJUMP_SIZE 5
17113 #define RELATIVECALL_OPCODE 0xe8
17114 #define RELATIVE_ADDR_SIZE 4
17115-#define MAX_STACK_SIZE 64
17116-#define MIN_STACK_SIZE(ADDR) \
17117- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17118- THREAD_SIZE - (unsigned long)(ADDR))) \
17119- ? (MAX_STACK_SIZE) \
17120- : (((unsigned long)current_thread_info()) + \
17121- THREAD_SIZE - (unsigned long)(ADDR)))
17122+#define MAX_STACK_SIZE 64UL
17123+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17124
17125 #define flush_insn_slot(p) do { } while (0)
17126
17127diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
17128index d89c6b8..e711c69 100644
17129--- a/arch/x86/include/asm/kvm_host.h
17130+++ b/arch/x86/include/asm/kvm_host.h
17131@@ -51,7 +51,7 @@
17132 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
17133
17134 #define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
17135-#define CR3_PCID_INVD (1UL << 63)
17136+#define CR3_PCID_INVD (1ULL << 63)
17137 #define CR4_RESERVED_BITS \
17138 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
17139 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
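
The CR3_PCID_INVD change above is a plain correctness fix: 1UL << 63 is undefined behaviour wherever unsigned long is 32 bits wide (i386 KVM hosts), while 1ULL forces a 64-bit shift. In isolation:

    unsigned long long bad  = 1UL  << 63; /* UB on ILP32: shift >= type width */
    unsigned long long good = 1ULL << 63; /* always 0x8000000000000000 */
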
17140diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17141index 4ad6560..75c7bdd 100644
17142--- a/arch/x86/include/asm/local.h
17143+++ b/arch/x86/include/asm/local.h
17144@@ -10,33 +10,97 @@ typedef struct {
17145 atomic_long_t a;
17146 } local_t;
17147
17148+typedef struct {
17149+ atomic_long_unchecked_t a;
17150+} local_unchecked_t;
17151+
17152 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17153
17154 #define local_read(l) atomic_long_read(&(l)->a)
17155+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17156 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17157+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17158
17159 static inline void local_inc(local_t *l)
17160 {
17161- asm volatile(_ASM_INC "%0"
17162+ asm volatile(_ASM_INC "%0\n"
17163+
17164+#ifdef CONFIG_PAX_REFCOUNT
17165+ "jno 0f\n"
17166+ _ASM_DEC "%0\n"
17167+ "int $4\n0:\n"
17168+ _ASM_EXTABLE(0b, 0b)
17169+#endif
17170+
17171+ : "+m" (l->a.counter));
17172+}
17173+
17174+static inline void local_inc_unchecked(local_unchecked_t *l)
17175+{
17176+ asm volatile(_ASM_INC "%0\n"
17177 : "+m" (l->a.counter));
17178 }
17179
17180 static inline void local_dec(local_t *l)
17181 {
17182- asm volatile(_ASM_DEC "%0"
17183+ asm volatile(_ASM_DEC "%0\n"
17184+
17185+#ifdef CONFIG_PAX_REFCOUNT
17186+ "jno 0f\n"
17187+ _ASM_INC "%0\n"
17188+ "int $4\n0:\n"
17189+ _ASM_EXTABLE(0b, 0b)
17190+#endif
17191+
17192+ : "+m" (l->a.counter));
17193+}
17194+
17195+static inline void local_dec_unchecked(local_unchecked_t *l)
17196+{
17197+ asm volatile(_ASM_DEC "%0\n"
17198 : "+m" (l->a.counter));
17199 }
17200
17201 static inline void local_add(long i, local_t *l)
17202 {
17203- asm volatile(_ASM_ADD "%1,%0"
17204+ asm volatile(_ASM_ADD "%1,%0\n"
17205+
17206+#ifdef CONFIG_PAX_REFCOUNT
17207+ "jno 0f\n"
17208+ _ASM_SUB "%1,%0\n"
17209+ "int $4\n0:\n"
17210+ _ASM_EXTABLE(0b, 0b)
17211+#endif
17212+
17213+ : "+m" (l->a.counter)
17214+ : "ir" (i));
17215+}
17216+
17217+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17218+{
17219+ asm volatile(_ASM_ADD "%1,%0\n"
17220 : "+m" (l->a.counter)
17221 : "ir" (i));
17222 }
17223
17224 static inline void local_sub(long i, local_t *l)
17225 {
17226- asm volatile(_ASM_SUB "%1,%0"
17227+ asm volatile(_ASM_SUB "%1,%0\n"
17228+
17229+#ifdef CONFIG_PAX_REFCOUNT
17230+ "jno 0f\n"
17231+ _ASM_ADD "%1,%0\n"
17232+ "int $4\n0:\n"
17233+ _ASM_EXTABLE(0b, 0b)
17234+#endif
17235+
17236+ : "+m" (l->a.counter)
17237+ : "ir" (i));
17238+}
17239+
17240+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17241+{
17242+ asm volatile(_ASM_SUB "%1,%0\n"
17243 : "+m" (l->a.counter)
17244 : "ir" (i));
17245 }
17246@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17247 */
17248 static inline int local_sub_and_test(long i, local_t *l)
17249 {
17250- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17251+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17252 }
17253
17254 /**
17255@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17256 */
17257 static inline int local_dec_and_test(local_t *l)
17258 {
17259- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17260+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17261 }
17262
17263 /**
17264@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17265 */
17266 static inline int local_inc_and_test(local_t *l)
17267 {
17268- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17269+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17270 }
17271
17272 /**
17273@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17274 */
17275 static inline int local_add_negative(long i, local_t *l)
17276 {
17277- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17278+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17279 }
17280
17281 /**
17282@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17283 static inline long local_add_return(long i, local_t *l)
17284 {
17285 long __i = i;
17286+ asm volatile(_ASM_XADD "%0, %1\n"
17287+
17288+#ifdef CONFIG_PAX_REFCOUNT
17289+ "jno 0f\n"
17290+ _ASM_MOV "%0,%1\n"
17291+ "int $4\n0:\n"
17292+ _ASM_EXTABLE(0b, 0b)
17293+#endif
17294+
17295+ : "+r" (i), "+m" (l->a.counter)
17296+ : : "memory");
17297+ return i + __i;
17298+}
17299+
17300+/**
17301+ * local_add_return_unchecked - add and return
17302+ * @i: integer value to add
17303+ * @l: pointer to type local_unchecked_t
17304+ *
17305+ * Atomically adds @i to @l and returns @i + @l
17306+ */
17307+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17308+{
17309+ long __i = i;
17310 asm volatile(_ASM_XADD "%0, %1;"
17311 : "+r" (i), "+m" (l->a.counter)
17312 : : "memory");
17313@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17314
17315 #define local_cmpxchg(l, o, n) \
17316 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17317+#define local_cmpxchg_unchecked(l, o, n) \
17318+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17319 /* Always has a lock prefix */
17320 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17321
17322diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17323new file mode 100644
17324index 0000000..2bfd3ba
17325--- /dev/null
17326+++ b/arch/x86/include/asm/mman.h
17327@@ -0,0 +1,15 @@
17328+#ifndef _X86_MMAN_H
17329+#define _X86_MMAN_H
17330+
17331+#include <uapi/asm/mman.h>
17332+
17333+#ifdef __KERNEL__
17334+#ifndef __ASSEMBLY__
17335+#ifdef CONFIG_X86_32
17336+#define arch_mmap_check i386_mmap_check
17337+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17338+#endif
17339+#endif
17340+#endif
17341+
17342+#endif /* X86_MMAN_H */
17343diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17344index 876e74e..e20bfb1 100644
17345--- a/arch/x86/include/asm/mmu.h
17346+++ b/arch/x86/include/asm/mmu.h
17347@@ -9,7 +9,7 @@
17348 * we put the segment information here.
17349 */
17350 typedef struct {
17351- void *ldt;
17352+ struct desc_struct *ldt;
17353 int size;
17354
17355 #ifdef CONFIG_X86_64
17356@@ -18,7 +18,19 @@ typedef struct {
17357 #endif
17358
17359 struct mutex lock;
17360- void __user *vdso;
17361+ unsigned long vdso;
17362+
17363+#ifdef CONFIG_X86_32
17364+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17365+ unsigned long user_cs_base;
17366+ unsigned long user_cs_limit;
17367+
17368+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17369+ cpumask_t cpu_user_cs_mask;
17370+#endif
17371+
17372+#endif
17373+#endif
17374 } mm_context_t;
17375
17376 #ifdef CONFIG_SMP
17377diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17378index 4b75d59..8ffacb6 100644
17379--- a/arch/x86/include/asm/mmu_context.h
17380+++ b/arch/x86/include/asm/mmu_context.h
17381@@ -27,6 +27,20 @@ void destroy_context(struct mm_struct *mm);
17382
17383 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17384 {
17385+
17386+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17387+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17388+ unsigned int i;
17389+ pgd_t *pgd;
17390+
17391+ pax_open_kernel();
17392+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17393+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17394+ set_pgd_batched(pgd+i, native_make_pgd(0));
17395+ pax_close_kernel();
17396+ }
17397+#endif
17398+
17399 #ifdef CONFIG_SMP
17400 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17401 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17402@@ -37,16 +51,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17403 struct task_struct *tsk)
17404 {
17405 unsigned cpu = smp_processor_id();
17406+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17407+ int tlbstate = TLBSTATE_OK;
17408+#endif
17409
17410 if (likely(prev != next)) {
17411 #ifdef CONFIG_SMP
17412+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17413+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17414+#endif
17415 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17416 this_cpu_write(cpu_tlbstate.active_mm, next);
17417 #endif
17418 cpumask_set_cpu(cpu, mm_cpumask(next));
17419
17420 /* Re-load page tables */
17421+#ifdef CONFIG_PAX_PER_CPU_PGD
17422+ pax_open_kernel();
17423+
17424+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17425+ if (static_cpu_has(X86_FEATURE_PCID))
17426+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17427+ else
17428+#endif
17429+
17430+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17431+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17432+ pax_close_kernel();
17433+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17434+
17435+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17436+ if (static_cpu_has(X86_FEATURE_PCID)) {
17437+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17438+ u64 descriptor[2];
17439+ descriptor[0] = PCID_USER;
17440+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17441+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17442+ descriptor[0] = PCID_KERNEL;
17443+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17444+ }
17445+ } else {
17446+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17447+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17448+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17449+ else
17450+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17451+ }
17452+ } else
17453+#endif
17454+
17455+ load_cr3(get_cpu_pgd(cpu, kernel));
17456+#else
17457 load_cr3(next->pgd);
17458+#endif
17459 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17460
17461 /* Stop flush ipis for the previous mm */
17462@@ -64,9 +121,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17463 */
17464 if (unlikely(prev->context.ldt != next->context.ldt))
17465 load_LDT_nolock(&next->context);
17466+
17467+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17468+ if (!(__supported_pte_mask & _PAGE_NX)) {
17469+ smp_mb__before_atomic();
17470+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17471+ smp_mb__after_atomic();
17472+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17473+ }
17474+#endif
17475+
17476+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17477+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17478+ prev->context.user_cs_limit != next->context.user_cs_limit))
17479+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17480+#ifdef CONFIG_SMP
17481+ else if (unlikely(tlbstate != TLBSTATE_OK))
17482+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17483+#endif
17484+#endif
17485+
17486 }
17487+ else {
17488+
17489+#ifdef CONFIG_PAX_PER_CPU_PGD
17490+ pax_open_kernel();
17491+
17492+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17493+ if (static_cpu_has(X86_FEATURE_PCID))
17494+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17495+ else
17496+#endif
17497+
17498+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17499+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17500+ pax_close_kernel();
17501+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17502+
17503+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17504+ if (static_cpu_has(X86_FEATURE_PCID)) {
17505+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17506+ u64 descriptor[2];
17507+ descriptor[0] = PCID_USER;
17508+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17509+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17510+ descriptor[0] = PCID_KERNEL;
17511+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17512+ }
17513+ } else {
17514+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17515+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17516+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17517+ else
17518+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17519+ }
17520+ } else
17521+#endif
17522+
17523+ load_cr3(get_cpu_pgd(cpu, kernel));
17524+#endif
17525+
17526 #ifdef CONFIG_SMP
17527- else {
17528 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17529 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17530
17531@@ -83,12 +198,29 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17532 * tlb flush IPI delivery. We must reload CR3
17533 * to make sure to use no freed page tables.
17534 */
17535+
17536+#ifndef CONFIG_PAX_PER_CPU_PGD
17537 load_cr3(next->pgd);
17538 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17539+#endif
17540+
17541 load_LDT_nolock(&next->context);
17542+
17543+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17544+ if (!(__supported_pte_mask & _PAGE_NX))
17545+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17546+#endif
17547+
17548+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17549+#ifdef CONFIG_PAX_PAGEEXEC
17550+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17551+#endif
17552+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17553+#endif
17554+
17555 }
17556+#endif
17557 }
17558-#endif
17559 }
17560
17561 #define activate_mm(prev, next) \
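
Under UDEREF with PCID, each CPU runs on two per-CPU PGDs tagged PCID_KERNEL and PCID_USER, and the switch_mm() changes above must flush the user PCID's cached translations. The descriptor[2] array follows the SDM's INVPCID format: first quadword the PCID, second the linear address (unused for type 1), with the invalidation type in a register. A sketch of the same invocation, assuming an assembler that knows the invpcid mnemonic:

    static inline void invpcid_single_context(unsigned long pcid)
    {
            struct { unsigned long long pcid, addr; } desc = { pcid, 0 };
            /* type 1, "single-context": drop all non-global mappings
               tagged with this PCID */
            asm volatile("invpcid %0, %1"
                         : : "m" (desc), "r" (1UL) : "memory");
    }

The non-INVPCID fallback above gets the same effect by rewriting CR3, setting PCID_NOFLUSH (bit 63) when the kernel translations can be kept.
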
17562diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17563index e3b7819..b257c64 100644
17564--- a/arch/x86/include/asm/module.h
17565+++ b/arch/x86/include/asm/module.h
17566@@ -5,6 +5,7 @@
17567
17568 #ifdef CONFIG_X86_64
17569 /* X86_64 does not define MODULE_PROC_FAMILY */
17570+#define MODULE_PROC_FAMILY ""
17571 #elif defined CONFIG_M486
17572 #define MODULE_PROC_FAMILY "486 "
17573 #elif defined CONFIG_M586
17574@@ -57,8 +58,20 @@
17575 #error unknown processor family
17576 #endif
17577
17578-#ifdef CONFIG_X86_32
17579-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17580+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17581+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17582+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17583+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17584+#else
17585+#define MODULE_PAX_KERNEXEC ""
17586 #endif
17587
17588+#ifdef CONFIG_PAX_MEMORY_UDEREF
17589+#define MODULE_PAX_UDEREF "UDEREF "
17590+#else
17591+#define MODULE_PAX_UDEREF ""
17592+#endif
17593+
17594+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17595+
17596 #endif /* _ASM_X86_MODULE_H */
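
Folding MODULE_PAX_KERNEXEC and MODULE_PAX_UDEREF into MODULE_ARCH_VERMAGIC means a module built without the same PaX options no longer matches the kernel's vermagic and is refused at load time. Illustrative only (the exact string depends on release and config), the vermagic of an x86-64 build with KERNEXEC(BTS) and UDEREF could read:

    "3.19.1-grsec SMP mod_unload KERNEXEC_BTS UDEREF "
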
17597diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17598index 5f2fc44..106caa6 100644
17599--- a/arch/x86/include/asm/nmi.h
17600+++ b/arch/x86/include/asm/nmi.h
17601@@ -36,26 +36,35 @@ enum {
17602
17603 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17604
17605+struct nmiaction;
17606+
17607+struct nmiwork {
17608+ const struct nmiaction *action;
17609+ u64 max_duration;
17610+ struct irq_work irq_work;
17611+};
17612+
17613 struct nmiaction {
17614 struct list_head list;
17615 nmi_handler_t handler;
17616- u64 max_duration;
17617- struct irq_work irq_work;
17618 unsigned long flags;
17619 const char *name;
17620-};
17621+ struct nmiwork *work;
17622+} __do_const;
17623
17624 #define register_nmi_handler(t, fn, fg, n, init...) \
17625 ({ \
17626- static struct nmiaction init fn##_na = { \
17627+ static struct nmiwork fn##_nw; \
17628+ static const struct nmiaction init fn##_na = { \
17629 .handler = (fn), \
17630 .name = (n), \
17631 .flags = (fg), \
17632+ .work = &fn##_nw, \
17633 }; \
17634 __register_nmi_handler((t), &fn##_na); \
17635 })
17636
17637-int __register_nmi_handler(unsigned int, struct nmiaction *);
17638+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17639
17640 void unregister_nmi_handler(unsigned int, const char *);
17641
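
The nmiaction rework above shows this patch's recurring const-ification pattern: the mutable bookkeeping (max_duration, irq_work) moves into a companion struct reached through a pointer, so the action descriptor itself can be const (and, via __do_const, forced read-only). The shape of the pattern, with illustrative names:

    static int demo_handler(void) { return 0; }

    struct widget_stats { unsigned long long max_ns; };   /* writable half */
    struct widget {
            int (*handler)(void);
            struct widget_stats *stats;  /* indirection to the mutable part */
    };
    static struct widget_stats demo_stats;
    static const struct widget demo = { /* whole descriptor can live in .rodata */
            .handler = demo_handler,
            .stats   = &demo_stats,
    };
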
17642diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17643index 802dde3..9183e68 100644
17644--- a/arch/x86/include/asm/page.h
17645+++ b/arch/x86/include/asm/page.h
17646@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17647 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17648
17649 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17650+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17651
17652 #define __boot_va(x) __va(x)
17653 #define __boot_pa(x) __pa(x)
17654@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17655 * virt_to_page(kaddr) returns a valid pointer if and only if
17656 * virt_addr_valid(kaddr) returns true.
17657 */
17658-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17659 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17660 extern bool __virt_addr_valid(unsigned long kaddr);
17661 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17662
17663+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17664+#define virt_to_page(kaddr) \
17665+ ({ \
17666+ const void *__kaddr = (const void *)(kaddr); \
17667+ BUG_ON(!virt_addr_valid(__kaddr)); \
17668+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17669+ })
17670+#else
17671+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17672+#endif
17673+
17674 #endif /* __ASSEMBLY__ */
17675
17676 #include <asm-generic/memory_model.h>
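
With GRKERNSEC_KSTACKOVERFLOW, virt_to_page() above becomes a GCC statement expression so kaddr is evaluated once, validated, and the block still yields a value; a bogus address then hits BUG_ON() instead of silently producing the wrong struct page. The general shape of the idiom (hypothetical helper, not from the patch):

    #define checked_deref(p)                          \
    ({                                                \
            typeof(p) __p = (p);  /* evaluate once */ \
            BUG_ON(__p == NULL);  /* validate      */ \
            *__p;                 /* block's value */ \
    })
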
17677diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17678index b3bebf9..e1f5d95 100644
17679--- a/arch/x86/include/asm/page_64.h
17680+++ b/arch/x86/include/asm/page_64.h
17681@@ -7,9 +7,9 @@
17682
17683 /* duplicated to the one in bootmem.h */
17684 extern unsigned long max_pfn;
17685-extern unsigned long phys_base;
17686+extern const unsigned long phys_base;
17687
17688-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17689+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17690 {
17691 unsigned long y = x - __START_KERNEL_map;
17692
17693diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17694index 32444ae..1a1624b 100644
17695--- a/arch/x86/include/asm/paravirt.h
17696+++ b/arch/x86/include/asm/paravirt.h
17697@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17698 return (pmd_t) { ret };
17699 }
17700
17701-static inline pmdval_t pmd_val(pmd_t pmd)
17702+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17703 {
17704 pmdval_t ret;
17705
17706@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17707 val);
17708 }
17709
17710+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17711+{
17712+ pgdval_t val = native_pgd_val(pgd);
17713+
17714+ if (sizeof(pgdval_t) > sizeof(long))
17715+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17716+ val, (u64)val >> 32);
17717+ else
17718+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17719+ val);
17720+}
17721+
17722 static inline void pgd_clear(pgd_t *pgdp)
17723 {
17724 set_pgd(pgdp, __pgd(0));
17725@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17726 pv_mmu_ops.set_fixmap(idx, phys, flags);
17727 }
17728
17729+#ifdef CONFIG_PAX_KERNEXEC
17730+static inline unsigned long pax_open_kernel(void)
17731+{
17732+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17733+}
17734+
17735+static inline unsigned long pax_close_kernel(void)
17736+{
17737+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17738+}
17739+#else
17740+static inline unsigned long pax_open_kernel(void) { return 0; }
17741+static inline unsigned long pax_close_kernel(void) { return 0; }
17742+#endif
17743+
17744 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17745
17746 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17747@@ -906,7 +933,7 @@ extern void default_banner(void);
17748
17749 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17750 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17751-#define PARA_INDIRECT(addr) *%cs:addr
17752+#define PARA_INDIRECT(addr) *%ss:addr
17753 #endif
17754
17755 #define INTERRUPT_RETURN \
17756@@ -981,6 +1008,21 @@ extern void default_banner(void);
17757 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17758 CLBR_NONE, \
17759 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17760+
17761+#define GET_CR0_INTO_RDI \
17762+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17763+ mov %rax,%rdi
17764+
17765+#define SET_RDI_INTO_CR0 \
17766+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17767+
17768+#define GET_CR3_INTO_RDI \
17769+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17770+ mov %rax,%rdi
17771+
17772+#define SET_RDI_INTO_CR3 \
17773+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17774+
17775 #endif /* CONFIG_X86_32 */
17776
17777 #endif /* __ASSEMBLY__ */
17778diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17779index 7549b8b..f0edfda 100644
17780--- a/arch/x86/include/asm/paravirt_types.h
17781+++ b/arch/x86/include/asm/paravirt_types.h
17782@@ -84,7 +84,7 @@ struct pv_init_ops {
17783 */
17784 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17785 unsigned long addr, unsigned len);
17786-};
17787+} __no_const __no_randomize_layout;
17788
17789
17790 struct pv_lazy_ops {
17791@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17792 void (*enter)(void);
17793 void (*leave)(void);
17794 void (*flush)(void);
17795-};
17796+} __no_randomize_layout;
17797
17798 struct pv_time_ops {
17799 unsigned long long (*sched_clock)(void);
17800 unsigned long long (*steal_clock)(int cpu);
17801 unsigned long (*get_tsc_khz)(void);
17802-};
17803+} __no_const __no_randomize_layout;
17804
17805 struct pv_cpu_ops {
17806 /* hooks for various privileged instructions */
17807@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17808
17809 void (*start_context_switch)(struct task_struct *prev);
17810 void (*end_context_switch)(struct task_struct *next);
17811-};
17812+} __no_const __no_randomize_layout;
17813
17814 struct pv_irq_ops {
17815 /*
17816@@ -215,7 +215,7 @@ struct pv_irq_ops {
17817 #ifdef CONFIG_X86_64
17818 void (*adjust_exception_frame)(void);
17819 #endif
17820-};
17821+} __no_randomize_layout;
17822
17823 struct pv_apic_ops {
17824 #ifdef CONFIG_X86_LOCAL_APIC
17825@@ -223,7 +223,7 @@ struct pv_apic_ops {
17826 unsigned long start_eip,
17827 unsigned long start_esp);
17828 #endif
17829-};
17830+} __no_const __no_randomize_layout;
17831
17832 struct pv_mmu_ops {
17833 unsigned long (*read_cr2)(void);
17834@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17835 struct paravirt_callee_save make_pud;
17836
17837 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17838+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17839 #endif /* PAGETABLE_LEVELS == 4 */
17840 #endif /* PAGETABLE_LEVELS >= 3 */
17841
17842@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17843 an mfn. We can tell which is which from the index. */
17844 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17845 phys_addr_t phys, pgprot_t flags);
17846-};
17847+
17848+#ifdef CONFIG_PAX_KERNEXEC
17849+ unsigned long (*pax_open_kernel)(void);
17850+ unsigned long (*pax_close_kernel)(void);
17851+#endif
17852+
17853+} __no_randomize_layout;
17854
17855 struct arch_spinlock;
17856 #ifdef CONFIG_SMP
17857@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17858 struct pv_lock_ops {
17859 struct paravirt_callee_save lock_spinning;
17860 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17861-};
17862+} __no_randomize_layout;
17863
17864 /* This contains all the paravirt structures: we get a convenient
17865 * number for each function using the offset which we use to indicate
17866- * what to patch. */
17867+ * what to patch.
17868+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17869+ */
17870+
17871 struct paravirt_patch_template {
17872 struct pv_init_ops pv_init_ops;
17873 struct pv_time_ops pv_time_ops;
17874@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17875 struct pv_apic_ops pv_apic_ops;
17876 struct pv_mmu_ops pv_mmu_ops;
17877 struct pv_lock_ops pv_lock_ops;
17878-};
17879+} __no_randomize_layout;
17880
17881 extern struct pv_info pv_info;
17882 extern struct pv_init_ops pv_init_ops;
17883diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17884index c4412e9..90e88c5 100644
17885--- a/arch/x86/include/asm/pgalloc.h
17886+++ b/arch/x86/include/asm/pgalloc.h
17887@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17888 pmd_t *pmd, pte_t *pte)
17889 {
17890 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17891+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17892+}
17893+
17894+static inline void pmd_populate_user(struct mm_struct *mm,
17895+ pmd_t *pmd, pte_t *pte)
17896+{
17897+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17898 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17899 }
17900
17901@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17902
17903 #ifdef CONFIG_X86_PAE
17904 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17905+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17906+{
17907+ pud_populate(mm, pudp, pmd);
17908+}
17909 #else /* !CONFIG_X86_PAE */
17910 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17911 {
17912 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17913 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17914 }
17915+
17916+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17917+{
17918+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17919+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17920+}
17921 #endif /* CONFIG_X86_PAE */
17922
17923 #if PAGETABLE_LEVELS > 3
17924@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17925 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17926 }
17927
17928+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17929+{
17930+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17931+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17932+}
17933+
17934 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17935 {
17936 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
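
The *_populate_kernel() variants added above differ from the plain ones only in the flag set: _KERNPG_TABLE drops _PAGE_USER, so intermediate kernel page tables can never be traversed by a CPL 3 walk. The upstream definitions reduce to:

    /* pgtable_types.h, paraphrased: the two differ only in _PAGE_USER */
    _KERNPG_TABLE == _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY
    _PAGE_TABLE   == _KERNPG_TABLE | _PAGE_USER
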
17937diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17938index 206a87f..1623b06 100644
17939--- a/arch/x86/include/asm/pgtable-2level.h
17940+++ b/arch/x86/include/asm/pgtable-2level.h
17941@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17942
17943 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17944 {
17945+ pax_open_kernel();
17946 *pmdp = pmd;
17947+ pax_close_kernel();
17948 }
17949
17950 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17951diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17952index 81bb91b..9392125 100644
17953--- a/arch/x86/include/asm/pgtable-3level.h
17954+++ b/arch/x86/include/asm/pgtable-3level.h
17955@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17956
17957 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17958 {
17959+ pax_open_kernel();
17960 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17961+ pax_close_kernel();
17962 }
17963
17964 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17965 {
17966+ pax_open_kernel();
17967 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17968+ pax_close_kernel();
17969 }
17970
17971 /*
17972diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17973index e8a5454..1539359 100644
17974--- a/arch/x86/include/asm/pgtable.h
17975+++ b/arch/x86/include/asm/pgtable.h
17976@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17977
17978 #ifndef __PAGETABLE_PUD_FOLDED
17979 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17980+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17981 #define pgd_clear(pgd) native_pgd_clear(pgd)
17982 #endif
17983
17984@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17985
17986 #define arch_end_context_switch(prev) do {} while(0)
17987
17988+#define pax_open_kernel() native_pax_open_kernel()
17989+#define pax_close_kernel() native_pax_close_kernel()
17990 #endif /* CONFIG_PARAVIRT */
17991
17992+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17993+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17994+
17995+#ifdef CONFIG_PAX_KERNEXEC
17996+static inline unsigned long native_pax_open_kernel(void)
17997+{
17998+ unsigned long cr0;
17999+
18000+ preempt_disable();
18001+ barrier();
18002+ cr0 = read_cr0() ^ X86_CR0_WP;
18003+ BUG_ON(cr0 & X86_CR0_WP);
18004+ write_cr0(cr0);
18005+ barrier();
18006+ return cr0 ^ X86_CR0_WP;
18007+}
18008+
18009+static inline unsigned long native_pax_close_kernel(void)
18010+{
18011+ unsigned long cr0;
18012+
18013+ barrier();
18014+ cr0 = read_cr0() ^ X86_CR0_WP;
18015+ BUG_ON(!(cr0 & X86_CR0_WP));
18016+ write_cr0(cr0);
18017+ barrier();
18018+ preempt_enable_no_resched();
18019+ return cr0 ^ X86_CR0_WP;
18020+}
18021+#else
18022+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18023+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18024+#endif
18025+
18026 /*
18027 * The following only work if pte_present() is true.
18028 * Undefined behaviour if not..
18029 */
18030+static inline int pte_user(pte_t pte)
18031+{
18032+ return pte_val(pte) & _PAGE_USER;
18033+}
18034+
18035 static inline int pte_dirty(pte_t pte)
18036 {
18037 return pte_flags(pte) & _PAGE_DIRTY;
18038@@ -161,6 +203,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18039 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18040 }
18041
18042+static inline unsigned long pgd_pfn(pgd_t pgd)
18043+{
18044+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18045+}
18046+
18047 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18048
18049 static inline int pmd_large(pmd_t pte)
18050@@ -214,9 +261,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18051 return pte_clear_flags(pte, _PAGE_RW);
18052 }
18053
18054+static inline pte_t pte_mkread(pte_t pte)
18055+{
18056+ return __pte(pte_val(pte) | _PAGE_USER);
18057+}
18058+
18059 static inline pte_t pte_mkexec(pte_t pte)
18060 {
18061- return pte_clear_flags(pte, _PAGE_NX);
18062+#ifdef CONFIG_X86_PAE
18063+ if (__supported_pte_mask & _PAGE_NX)
18064+ return pte_clear_flags(pte, _PAGE_NX);
18065+ else
18066+#endif
18067+ return pte_set_flags(pte, _PAGE_USER);
18068+}
18069+
18070+static inline pte_t pte_exprotect(pte_t pte)
18071+{
18072+#ifdef CONFIG_X86_PAE
18073+ if (__supported_pte_mask & _PAGE_NX)
18074+ return pte_set_flags(pte, _PAGE_NX);
18075+ else
18076+#endif
18077+ return pte_clear_flags(pte, _PAGE_USER);
18078 }
18079
18080 static inline pte_t pte_mkdirty(pte_t pte)
18081@@ -446,6 +513,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18082 #endif
18083
18084 #ifndef __ASSEMBLY__
18085+
18086+#ifdef CONFIG_PAX_PER_CPU_PGD
18087+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18088+enum cpu_pgd_type {kernel = 0, user = 1};
18089+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18090+{
18091+ return cpu_pgd[cpu][type];
18092+}
18093+#endif
18094+
18095 #include <linux/mm_types.h>
18096 #include <linux/mmdebug.h>
18097 #include <linux/log2.h>
18098@@ -592,7 +669,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18099 * Currently stuck as a macro due to indirect forward reference to
18100 * linux/mmzone.h's __section_mem_map_addr() definition:
18101 */
18102-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18103+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18104
18105 /* Find an entry in the second-level page table.. */
18106 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18107@@ -632,7 +709,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18108 * Currently stuck as a macro due to indirect forward reference to
18109 * linux/mmzone.h's __section_mem_map_addr() definition:
18110 */
18111-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18112+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18113
18114 /* to find an entry in a page-table-directory. */
18115 static inline unsigned long pud_index(unsigned long address)
18116@@ -647,7 +724,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18117
18118 static inline int pgd_bad(pgd_t pgd)
18119 {
18120- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18121+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18122 }
18123
18124 static inline int pgd_none(pgd_t pgd)
18125@@ -670,7 +747,12 @@ static inline int pgd_none(pgd_t pgd)
18126 * pgd_offset() returns a (pgd_t *)
18127 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18128 */
18129-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18130+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18131+
18132+#ifdef CONFIG_PAX_PER_CPU_PGD
18133+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18134+#endif
18135+
18136 /*
18137 * a shortcut which implies the use of the kernel's pgd, instead
18138 * of a process's
18139@@ -681,6 +763,23 @@ static inline int pgd_none(pgd_t pgd)
18140 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18141 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18142
18143+#ifdef CONFIG_X86_32
18144+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18145+#else
18146+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18147+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18148+
18149+#ifdef CONFIG_PAX_MEMORY_UDEREF
18150+#ifdef __ASSEMBLY__
18151+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18152+#else
18153+extern unsigned long pax_user_shadow_base;
18154+extern pgdval_t clone_pgd_mask;
18155+#endif
18156+#endif
18157+
18158+#endif
18159+
18160 #ifndef __ASSEMBLY__
18161
18162 extern int direct_gbpages;
18163@@ -847,11 +946,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18164 * dst and src can be on the same page, but the range must not overlap,
18165 * and must not cross a page boundary.
18166 */
18167-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18168+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18169 {
18170- memcpy(dst, src, count * sizeof(pgd_t));
18171+ pax_open_kernel();
18172+ while (count--)
18173+ *dst++ = *src++;
18174+ pax_close_kernel();
18175 }
18176
18177+#ifdef CONFIG_PAX_PER_CPU_PGD
18178+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18179+#endif
18180+
18181+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18182+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18183+#else
18184+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18185+#endif
18186+
18187 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18188 static inline int page_level_shift(enum pg_level level)
18189 {
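
native_pax_open_kernel()/native_pax_close_kernel() above are the core of KERNEXEC's write protection: ring 0 only honours page-level read-only while CR0.WP is set, so clearing WP opens a preemption-disabled window in which the kernel may write otherwise read-only data, and the BUG_ON()s catch unbalanced nesting. Usage mirrors the pax_open_kernel()/pax_close_kernel() pairs scattered through this patch:

    pax_open_kernel();   /* CR0.WP cleared: RO kernel pages become writable */
    *pmdp = pmd;         /* e.g. the native_set_pmd() hunks above           */
    pax_close_kernel();  /* CR0.WP set again, preemption re-enabled         */
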
18190diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18191index b6c0b40..3535d47 100644
18192--- a/arch/x86/include/asm/pgtable_32.h
18193+++ b/arch/x86/include/asm/pgtable_32.h
18194@@ -25,9 +25,6 @@
18195 struct mm_struct;
18196 struct vm_area_struct;
18197
18198-extern pgd_t swapper_pg_dir[1024];
18199-extern pgd_t initial_page_table[1024];
18200-
18201 static inline void pgtable_cache_init(void) { }
18202 static inline void check_pgt_cache(void) { }
18203 void paging_init(void);
18204@@ -45,6 +42,12 @@ void paging_init(void);
18205 # include <asm/pgtable-2level.h>
18206 #endif
18207
18208+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18209+extern pgd_t initial_page_table[PTRS_PER_PGD];
18210+#ifdef CONFIG_X86_PAE
18211+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18212+#endif
18213+
18214 #if defined(CONFIG_HIGHPTE)
18215 #define pte_offset_map(dir, address) \
18216 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18217@@ -59,12 +62,17 @@ void paging_init(void);
18218 /* Clear a kernel PTE and flush it from the TLB */
18219 #define kpte_clear_flush(ptep, vaddr) \
18220 do { \
18221+ pax_open_kernel(); \
18222 pte_clear(&init_mm, (vaddr), (ptep)); \
18223+ pax_close_kernel(); \
18224 __flush_tlb_one((vaddr)); \
18225 } while (0)
18226
18227 #endif /* !__ASSEMBLY__ */
18228
18229+#define HAVE_ARCH_UNMAPPED_AREA
18230+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18231+
18232 /*
18233 * kern_addr_valid() is (1) for FLATMEM and (0) for
18234 * SPARSEMEM and DISCONTIGMEM
18235diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18236index 9fb2f2b..b04b4bf 100644
18237--- a/arch/x86/include/asm/pgtable_32_types.h
18238+++ b/arch/x86/include/asm/pgtable_32_types.h
18239@@ -8,7 +8,7 @@
18240 */
18241 #ifdef CONFIG_X86_PAE
18242 # include <asm/pgtable-3level_types.h>
18243-# define PMD_SIZE (1UL << PMD_SHIFT)
18244+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18245 # define PMD_MASK (~(PMD_SIZE - 1))
18246 #else
18247 # include <asm/pgtable-2level_types.h>
18248@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18249 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18250 #endif
18251
18252+#ifdef CONFIG_PAX_KERNEXEC
18253+#ifndef __ASSEMBLY__
18254+extern unsigned char MODULES_EXEC_VADDR[];
18255+extern unsigned char MODULES_EXEC_END[];
18256+#endif
18257+#include <asm/boot.h>
18258+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18259+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18260+#else
18261+#define ktla_ktva(addr) (addr)
18262+#define ktva_ktla(addr) (addr)
18263+#endif
18264+
18265 #define MODULES_VADDR VMALLOC_START
18266 #define MODULES_END VMALLOC_END
18267 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
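
ktla_ktva()/ktva_ktla() above convert between the kernel text's logical and virtual addresses, which only diverge under KERNEXEC on i386, where the kernel code segment is rebased. Assuming the common defaults (both values are config-dependent):

    /* PAGE_OFFSET = 0xC0000000, LOAD_PHYSICAL_ADDR = 0x1000000 (16 MiB):
     * ktla_ktva(addr) == addr + 0xC1000000
     * ktva_ktla(addr) == addr - 0xC1000000
     * On x86-64 (pgtable_64_types.h below) both are the identity. */
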
18268diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18269index 4572b2f..4430113 100644
18270--- a/arch/x86/include/asm/pgtable_64.h
18271+++ b/arch/x86/include/asm/pgtable_64.h
18272@@ -16,11 +16,16 @@
18273
18274 extern pud_t level3_kernel_pgt[512];
18275 extern pud_t level3_ident_pgt[512];
18276+extern pud_t level3_vmalloc_start_pgt[512];
18277+extern pud_t level3_vmalloc_end_pgt[512];
18278+extern pud_t level3_vmemmap_pgt[512];
18279+extern pud_t level2_vmemmap_pgt[512];
18280 extern pmd_t level2_kernel_pgt[512];
18281 extern pmd_t level2_fixmap_pgt[512];
18282-extern pmd_t level2_ident_pgt[512];
18283+extern pmd_t level2_ident_pgt[512*2];
18284 extern pte_t level1_fixmap_pgt[512];
18285-extern pgd_t init_level4_pgt[];
18286+extern pte_t level1_vsyscall_pgt[512];
18287+extern pgd_t init_level4_pgt[512];
18288
18289 #define swapper_pg_dir init_level4_pgt
18290
18291@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18292
18293 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18294 {
18295+ pax_open_kernel();
18296 *pmdp = pmd;
18297+ pax_close_kernel();
18298 }
18299
18300 static inline void native_pmd_clear(pmd_t *pmd)
18301@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18302
18303 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18304 {
18305+ pax_open_kernel();
18306 *pudp = pud;
18307+ pax_close_kernel();
18308 }
18309
18310 static inline void native_pud_clear(pud_t *pud)
18311@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18312
18313 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18314 {
18315+ pax_open_kernel();
18316+ *pgdp = pgd;
18317+ pax_close_kernel();
18318+}
18319+
18320+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18321+{
18322 *pgdp = pgd;
18323 }
18324
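
The pax_open_kernel()/pax_close_kernel() bracketing added to native_set_pmd/pud/pgd here (and to kpte_clear_flush earlier) is the recurring KERNEXEC idiom: page tables and other sensitive kernel data are kept read-only, and each write goes through a short per-CPU write window. On x86 that window is, as far as I can tell, implemented by toggling CR0.WP or switching to a writable alias; the sketch below only models the shape of the pattern in userspace, with illustrative names (mprotect stands in for whatever the real primitives do):

    #include <stdio.h>
    #include <sys/mman.h>

    static unsigned long *table;   /* stands in for a read-only page table */

    static void open_kernel(void)  { mprotect(table, 4096, PROT_READ | PROT_WRITE); }
    static void close_kernel(void) { mprotect(table, 4096, PROT_READ); }

    static void set_entry(int i, unsigned long v)
    {
        open_kernel();             /* pax_open_kernel()           */
        table[i] = v;              /* the one store being guarded */
        close_kernel();            /* pax_close_kernel()          */
    }

    int main(void)
    {
        table = mmap(NULL, 4096, PROT_READ,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        set_entry(0, 0x1234);
        printf("entry 0 = %#lx\n", table[0]);
        return 0;
    }

native_set_pgd_batched() stays unguarded, presumably because batched callers open the window once around the whole loop instead of per store.
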
18325diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18326index 602b602..acb53ed 100644
18327--- a/arch/x86/include/asm/pgtable_64_types.h
18328+++ b/arch/x86/include/asm/pgtable_64_types.h
18329@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18330 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18331 #define MODULES_END _AC(0xffffffffff000000, UL)
18332 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18333+#define MODULES_EXEC_VADDR MODULES_VADDR
18334+#define MODULES_EXEC_END MODULES_END
18335 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18336 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18337 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18338 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18339
18340+#define ktla_ktva(addr) (addr)
18341+#define ktva_ktla(addr) (addr)
18342+
18343 #define EARLY_DYNAMIC_PAGE_TABLES 64
18344
18345 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18346diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18347index 25bcd4a..bf3f815 100644
18348--- a/arch/x86/include/asm/pgtable_types.h
18349+++ b/arch/x86/include/asm/pgtable_types.h
18350@@ -110,8 +110,10 @@
18351
18352 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18353 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18354-#else
18355+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18356 #define _PAGE_NX (_AT(pteval_t, 0))
18357+#else
18358+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18359 #endif
18360
18361 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
18362@@ -167,6 +169,9 @@ enum page_cache_mode {
18363 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18364 _PAGE_ACCESSED)
18365
18366+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18367+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18368+
18369 #define __PAGE_KERNEL_EXEC \
18370 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18371 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18372@@ -174,7 +179,7 @@ enum page_cache_mode {
18373 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18374 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18375 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18376-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18377+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18378 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18379 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18380 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18381@@ -220,7 +225,7 @@ enum page_cache_mode {
18382 #ifdef CONFIG_X86_64
18383 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18384 #else
18385-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18386+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18387 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18388 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18389 #endif
18390@@ -259,7 +264,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18391 {
18392 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18393 }
18394+#endif
18395
18396+#if PAGETABLE_LEVELS == 3
18397+#include <asm-generic/pgtable-nopud.h>
18398+#endif
18399+
18400+#if PAGETABLE_LEVELS == 2
18401+#include <asm-generic/pgtable-nopmd.h>
18402+#endif
18403+
18404+#ifndef __ASSEMBLY__
18405 #if PAGETABLE_LEVELS > 3
18406 typedef struct { pudval_t pud; } pud_t;
18407
18408@@ -273,8 +288,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18409 return pud.pud;
18410 }
18411 #else
18412-#include <asm-generic/pgtable-nopud.h>
18413-
18414 static inline pudval_t native_pud_val(pud_t pud)
18415 {
18416 return native_pgd_val(pud.pgd);
18417@@ -294,8 +307,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18418 return pmd.pmd;
18419 }
18420 #else
18421-#include <asm-generic/pgtable-nopmd.h>
18422-
18423 static inline pmdval_t native_pmd_val(pmd_t pmd)
18424 {
18425 return native_pgd_val(pmd.pud.pgd);
18426@@ -402,7 +413,6 @@ typedef struct page *pgtable_t;
18427
18428 extern pteval_t __supported_pte_mask;
18429 extern void set_nx(void);
18430-extern int nx_enabled;
18431
18432 #define pgprot_writecombine pgprot_writecombine
18433 extern pgprot_t pgprot_writecombine(pgprot_t prot);
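
Worth a note on the _PAGE_NX hunk above: with neither 64-bit nor PAE there is no hardware NX bit, and mainline simply defines _PAGE_NX to 0. The patch instead parks it on a software PTE bit (_PAGE_BIT_HIDDEN) so non-executability is still tracked per page and can be enforced by PaX in software. A toy model of "NX as a software bit" (the bit number 11 is an assumption for illustration, one of the PTE bits the hardware ignores):

    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_BIT_HIDDEN 11   /* assumed: an ignored/software PTE bit */
    #define _PAGE_NX (UINT64_C(1) << _PAGE_BIT_HIDDEN)

    int main(void)
    {
        uint64_t pte = 0x1000 | 0x63;  /* frame | PRESENT+RW+DIRTY+ACCESSED */
        pte |= _PAGE_NX;               /* software-only no-exec marking     */
        printf("pte = %#llx, software NX = %d\n",
               (unsigned long long)pte, !!(pte & _PAGE_NX));
        return 0;
    }
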
18434diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18435index 8f327184..368fb29 100644
18436--- a/arch/x86/include/asm/preempt.h
18437+++ b/arch/x86/include/asm/preempt.h
18438@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18439 */
18440 static __always_inline bool __preempt_count_dec_and_test(void)
18441 {
18442- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18443+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18444 }
18445
18446 /*
18447diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18448index a092a0c..8e9640b 100644
18449--- a/arch/x86/include/asm/processor.h
18450+++ b/arch/x86/include/asm/processor.h
18451@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18452 /* Index into per_cpu list: */
18453 u16 cpu_index;
18454 u32 microcode;
18455-};
18456+} __randomize_layout;
18457
18458 #define X86_VENDOR_INTEL 0
18459 #define X86_VENDOR_CYRIX 1
18460@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18461 : "memory");
18462 }
18463
18464+/* invpcid (%rdx),%rax */
18465+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18466+
18467+#define INVPCID_SINGLE_ADDRESS 0UL
18468+#define INVPCID_SINGLE_CONTEXT 1UL
18469+#define INVPCID_ALL_GLOBAL 2UL
18470+#define INVPCID_ALL_NONGLOBAL 3UL
18471+
18472+#define PCID_KERNEL 0UL
18473+#define PCID_USER 1UL
18474+#define PCID_NOFLUSH (1UL << 63)
18475+
18476 static inline void load_cr3(pgd_t *pgdir)
18477 {
18478- write_cr3(__pa(pgdir));
18479+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18480 }
18481
18482 #ifdef CONFIG_X86_32
18483@@ -282,7 +294,7 @@ struct tss_struct {
18484
18485 } ____cacheline_aligned;
18486
18487-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18488+extern struct tss_struct init_tss[NR_CPUS];
18489
18490 /*
18491 * Save the original ist values for checking stack pointers during debugging
18492@@ -479,6 +491,7 @@ struct thread_struct {
18493 unsigned short ds;
18494 unsigned short fsindex;
18495 unsigned short gsindex;
18496+ unsigned short ss;
18497 #endif
18498 #ifdef CONFIG_X86_32
18499 unsigned long ip;
18500@@ -588,29 +601,8 @@ static inline void load_sp0(struct tss_struct *tss,
18501 extern unsigned long mmu_cr4_features;
18502 extern u32 *trampoline_cr4_features;
18503
18504-static inline void set_in_cr4(unsigned long mask)
18505-{
18506- unsigned long cr4;
18507-
18508- mmu_cr4_features |= mask;
18509- if (trampoline_cr4_features)
18510- *trampoline_cr4_features = mmu_cr4_features;
18511- cr4 = read_cr4();
18512- cr4 |= mask;
18513- write_cr4(cr4);
18514-}
18515-
18516-static inline void clear_in_cr4(unsigned long mask)
18517-{
18518- unsigned long cr4;
18519-
18520- mmu_cr4_features &= ~mask;
18521- if (trampoline_cr4_features)
18522- *trampoline_cr4_features = mmu_cr4_features;
18523- cr4 = read_cr4();
18524- cr4 &= ~mask;
18525- write_cr4(cr4);
18526-}
18527+extern void set_in_cr4(unsigned long mask);
18528+extern void clear_in_cr4(unsigned long mask);
18529
18530 typedef struct {
18531 unsigned long seg;
18532@@ -838,11 +830,18 @@ static inline void spin_lock_prefetch(const void *x)
18533 */
18534 #define TASK_SIZE PAGE_OFFSET
18535 #define TASK_SIZE_MAX TASK_SIZE
18536+
18537+#ifdef CONFIG_PAX_SEGMEXEC
18538+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18539+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18540+#else
18541 #define STACK_TOP TASK_SIZE
18542-#define STACK_TOP_MAX STACK_TOP
18543+#endif
18544+
18545+#define STACK_TOP_MAX TASK_SIZE
18546
18547 #define INIT_THREAD { \
18548- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18549+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18550 .vm86_info = NULL, \
18551 .sysenter_cs = __KERNEL_CS, \
18552 .io_bitmap_ptr = NULL, \
18553@@ -856,7 +855,7 @@ static inline void spin_lock_prefetch(const void *x)
18554 */
18555 #define INIT_TSS { \
18556 .x86_tss = { \
18557- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18558+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18559 .ss0 = __KERNEL_DS, \
18560 .ss1 = __KERNEL_CS, \
18561 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18562@@ -867,11 +866,7 @@ static inline void spin_lock_prefetch(const void *x)
18563 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18564
18565 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18566-#define KSTK_TOP(info) \
18567-({ \
18568- unsigned long *__ptr = (unsigned long *)(info); \
18569- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18570-})
18571+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18572
18573 /*
18574 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18575@@ -886,7 +881,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18576 #define task_pt_regs(task) \
18577 ({ \
18578 struct pt_regs *__regs__; \
18579- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18580+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18581 __regs__ - 1; \
18582 })
18583
18584@@ -902,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18585 * particular problem by preventing anything from being mapped
18586 * at the maximum canonical address.
18587 */
18588-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18589+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18590
18591 /* This decides where the kernel will search for a free chunk of vm
18592 * space during mmap's.
18593 */
18594 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18595- 0xc0000000 : 0xFFFFe000)
18596+ 0xc0000000 : 0xFFFFf000)
18597
18598 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18599 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18600@@ -919,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18601 #define STACK_TOP_MAX TASK_SIZE_MAX
18602
18603 #define INIT_THREAD { \
18604- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18605+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18606 }
18607
18608 #define INIT_TSS { \
18609- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18610+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18611 }
18612
18613 /*
18614@@ -951,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18615 */
18616 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18617
18618+#ifdef CONFIG_PAX_SEGMEXEC
18619+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18620+#endif
18621+
18622 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18623
18624 /* Get/set a process' ability to use the timestamp counter instruction */
18625@@ -995,7 +994,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18626 return 0;
18627 }
18628
18629-extern unsigned long arch_align_stack(unsigned long sp);
18630+#define arch_align_stack(x) ((x) & ~0xfUL)
18631 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18632
18633 void default_idle(void);
18634@@ -1005,6 +1004,6 @@ bool xen_set_default_idle(void);
18635 #define xen_set_default_idle 0
18636 #endif
18637
18638-void stop_this_cpu(void *dummy);
18639+void stop_this_cpu(void *dummy) __noreturn;
18640 void df_debug(struct pt_regs *regs, long error_code);
18641 #endif /* _ASM_X86_PROCESSOR_H */
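
The PCID_* constants and the new load_cr3() above encode how UDEREF on amd64 drives the TLB: the kernel page tables run under PCID 0, the userland view under PCID 1, and bit 63 of a CR3 write (PCID_NOFLUSH) is the architectural "do not flush this PCID's entries" flag. A compile-and-run sketch of the CR3 value being composed (the physical address is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define PCID_KERNEL  0ULL
    #define PCID_USER    1ULL
    #define PCID_NOFLUSH (1ULL << 63)  /* CR3 bit 63: skip implicit flush */

    static uint64_t make_cr3(uint64_t pgd_pa, uint64_t pcid, int noflush)
    {
        return pgd_pa | pcid | (noflush ? PCID_NOFLUSH : 0);
    }

    int main(void)
    {
        uint64_t pgd_pa = 0x1000000;   /* made-up page-table base */
        printf("kernel cr3 = %#llx\n",
               (unsigned long long)make_cr3(pgd_pa, PCID_KERNEL, 0));
        printf("user   cr3 = %#llx (no flush)\n",
               (unsigned long long)make_cr3(pgd_pa, PCID_USER, 1));
        return 0;
    }

The hardcoded __ASM_INVPCID byte sequence exists, presumably, so the patch assembles on binutils that predate the invpcid mnemonic; tlbflush.h below shows it in use.
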
18642diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18643index 86fc2bb..bd5049a 100644
18644--- a/arch/x86/include/asm/ptrace.h
18645+++ b/arch/x86/include/asm/ptrace.h
18646@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18647 }
18648
18649 /*
18650- * user_mode_vm(regs) determines whether a register set came from user mode.
18651+ * user_mode(regs) determines whether a register set came from user mode.
18652 * This is true if V8086 mode was enabled OR if the register set was from
18653 * protected mode with RPL-3 CS value. This tricky test checks that with
18654 * one comparison. Many places in the kernel can bypass this full check
18655- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18656+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18657+ * be used.
18658 */
18659-static inline int user_mode(struct pt_regs *regs)
18660+static inline int user_mode_novm(struct pt_regs *regs)
18661 {
18662 #ifdef CONFIG_X86_32
18663 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18664 #else
18665- return !!(regs->cs & 3);
18666+ return !!(regs->cs & SEGMENT_RPL_MASK);
18667 #endif
18668 }
18669
18670-static inline int user_mode_vm(struct pt_regs *regs)
18671+static inline int user_mode(struct pt_regs *regs)
18672 {
18673 #ifdef CONFIG_X86_32
18674 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18675 USER_RPL;
18676 #else
18677- return user_mode(regs);
18678+ return user_mode_novm(regs);
18679 #endif
18680 }
18681
18682@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18683 #ifdef CONFIG_X86_64
18684 static inline bool user_64bit_mode(struct pt_regs *regs)
18685 {
18686+ unsigned long cs = regs->cs & 0xffff;
18687 #ifndef CONFIG_PARAVIRT
18688 /*
18689 * On non-paravirt systems, this is the only long mode CPL 3
18690 * selector. We do not allow long mode selectors in the LDT.
18691 */
18692- return regs->cs == __USER_CS;
18693+ return cs == __USER_CS;
18694 #else
18695 /* Headers are too twisted for this to go in paravirt.h. */
18696- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18697+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18698 #endif
18699 }
18700
18701@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18702 * Traps from the kernel do not save sp and ss.
18703 * Use the helper function to retrieve sp.
18704 */
18705- if (offset == offsetof(struct pt_regs, sp) &&
18706- regs->cs == __KERNEL_CS)
18707- return kernel_stack_pointer(regs);
18708+ if (offset == offsetof(struct pt_regs, sp)) {
18709+ unsigned long cs = regs->cs & 0xffff;
18710+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18711+ return kernel_stack_pointer(regs);
18712+ }
18713 #endif
18714 return *(unsigned long *)((unsigned long)regs + offset);
18715 }
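
The ptrace.h hunk swaps the names: what mainline calls user_mode() (the fast RPL-only check) becomes user_mode_novm(), and the full V8086-aware test takes the plain name, so every unconverted caller gets the safe variant by default. The check itself is just selector arithmetic; a small demo (selector values are illustrative):

    #include <stdio.h>

    #define SEGMENT_RPL_MASK 0x3   /* low 2 bits of a selector: RPL */
    #define USER_RPL         0x3

    /* the renamed fast path: ring 3 iff the saved CS has RPL 3 */
    static int user_mode_novm(unsigned long cs)
    {
        return (cs & SEGMENT_RPL_MASK) == USER_RPL;
    }

    int main(void)
    {
        printf("cs=0x33 -> user=%d\n", user_mode_novm(0x33)); /* user-ish CS   */
        printf("cs=0x10 -> user=%d\n", user_mode_novm(0x10)); /* kernel-ish CS */
        return 0;
    }

The "& 0xffff" masking added to user_64bit_mode() and regs_get_register() apparently guards against garbage in the upper bytes of the saved CS slot, which comparing the full word could misclassify.
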
18716diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18717index ae0e241..e80b10b 100644
18718--- a/arch/x86/include/asm/qrwlock.h
18719+++ b/arch/x86/include/asm/qrwlock.h
18720@@ -7,8 +7,8 @@
18721 #define queue_write_unlock queue_write_unlock
18722 static inline void queue_write_unlock(struct qrwlock *lock)
18723 {
18724- barrier();
18725- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18726+ barrier();
18727+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18728 }
18729 #endif
18730
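
ACCESS_ONCE_RW() above is grsecurity's write-capable twin of ACCESS_ONCE(): elsewhere in this patch ACCESS_ONCE() gains a const qualifier so that stray writes through it stop compiling, and sites that really do write, like this unlock, are converted. A userspace rendering of that split (definitions here are my reconstruction, not copied from this hunk):

    #include <stdio.h>

    #define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    int main(void)
    {
        int lock = 1;
        ACCESS_ONCE_RW(lock) = 0;          /* intentional write: compiles */
        printf("lock = %d\n", ACCESS_ONCE(lock));
        /* ACCESS_ONCE(lock) = 1;  would be rejected: const lvalue */
        return 0;
    }
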
18731diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18732index 9c6b890..5305f53 100644
18733--- a/arch/x86/include/asm/realmode.h
18734+++ b/arch/x86/include/asm/realmode.h
18735@@ -22,16 +22,14 @@ struct real_mode_header {
18736 #endif
18737 /* APM/BIOS reboot */
18738 u32 machine_real_restart_asm;
18739-#ifdef CONFIG_X86_64
18740 u32 machine_real_restart_seg;
18741-#endif
18742 };
18743
18744 /* This must match data at trampoline_32/64.S */
18745 struct trampoline_header {
18746 #ifdef CONFIG_X86_32
18747 u32 start;
18748- u16 gdt_pad;
18749+ u16 boot_cs;
18750 u16 gdt_limit;
18751 u32 gdt_base;
18752 #else
18753diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18754index a82c4f1..ac45053 100644
18755--- a/arch/x86/include/asm/reboot.h
18756+++ b/arch/x86/include/asm/reboot.h
18757@@ -6,13 +6,13 @@
18758 struct pt_regs;
18759
18760 struct machine_ops {
18761- void (*restart)(char *cmd);
18762- void (*halt)(void);
18763- void (*power_off)(void);
18764+ void (* __noreturn restart)(char *cmd);
18765+ void (* __noreturn halt)(void);
18766+ void (* __noreturn power_off)(void);
18767 void (*shutdown)(void);
18768 void (*crash_shutdown)(struct pt_regs *);
18769- void (*emergency_restart)(void);
18770-};
18771+ void (* __noreturn emergency_restart)(void);
18772+} __no_const;
18773
18774 extern struct machine_ops machine_ops;
18775
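
Two attributes do the work in the reboot.h hunk: __noreturn on the function-pointer members documents (and lets the compiler enforce) that restart/halt/power_off never return, and __no_const opts machine_ops out of grsecurity's constify plugin, which would otherwise make a struct consisting only of function pointers read-only after init; machine_ops is assigned at runtime (Xen, KVM, and friends), so it must stay writable. Without the plugin, the effect can be approximated like this:

    /* approximation: constify would in effect add 'const' to ops structs */
    struct ops_constified { void (*restart)(char *); };
    const struct ops_constified fixed_ops = { 0 };   /* read-only after init */

    struct ops_no_const { void (*restart)(char *); };
    struct ops_no_const machine_ops;                 /* __no_const: writable */

    static void my_restart(char *cmd) { (void)cmd; }

    int main(void)
    {
        machine_ops.restart = my_restart;  /* fine */
        /* fixed_ops.restart = my_restart;  would not compile */
        return 0;
    }
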
18776diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18777index 8f7866a..e442f20 100644
18778--- a/arch/x86/include/asm/rmwcc.h
18779+++ b/arch/x86/include/asm/rmwcc.h
18780@@ -3,7 +3,34 @@
18781
18782 #ifdef CC_HAVE_ASM_GOTO
18783
18784-#define __GEN_RMWcc(fullop, var, cc, ...) \
18785+#ifdef CONFIG_PAX_REFCOUNT
18786+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18787+do { \
18788+ asm_volatile_goto (fullop \
18789+ ";jno 0f\n" \
18790+ fullantiop \
18791+ ";int $4\n0:\n" \
18792+ _ASM_EXTABLE(0b, 0b) \
18793+ ";j" cc " %l[cc_label]" \
18794+ : : "m" (var), ## __VA_ARGS__ \
18795+ : "memory" : cc_label); \
18796+ return 0; \
18797+cc_label: \
18798+ return 1; \
18799+} while (0)
18800+#else
18801+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18802+do { \
18803+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18804+ : : "m" (var), ## __VA_ARGS__ \
18805+ : "memory" : cc_label); \
18806+ return 0; \
18807+cc_label: \
18808+ return 1; \
18809+} while (0)
18810+#endif
18811+
18812+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18813 do { \
18814 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18815 : : "m" (var), ## __VA_ARGS__ \
18816@@ -13,15 +40,46 @@ cc_label: \
18817 return 1; \
18818 } while (0)
18819
18820-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18821- __GEN_RMWcc(op " " arg0, var, cc)
18822+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18823+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18824
18825-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18826- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18827+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18828+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18829+
18830+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18831+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18832+
18833+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18834+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18835
18836 #else /* !CC_HAVE_ASM_GOTO */
18837
18838-#define __GEN_RMWcc(fullop, var, cc, ...) \
18839+#ifdef CONFIG_PAX_REFCOUNT
18840+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18841+do { \
18842+ char c; \
18843+ asm volatile (fullop \
18844+ ";jno 0f\n" \
18845+ fullantiop \
18846+ ";int $4\n0:\n" \
18847+ _ASM_EXTABLE(0b, 0b) \
18848+ "; set" cc " %1" \
18849+ : "+m" (var), "=qm" (c) \
18850+ : __VA_ARGS__ : "memory"); \
18851+ return c != 0; \
18852+} while (0)
18853+#else
18854+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18855+do { \
18856+ char c; \
18857+ asm volatile (fullop "; set" cc " %1" \
18858+ : "+m" (var), "=qm" (c) \
18859+ : __VA_ARGS__ : "memory"); \
18860+ return c != 0; \
18861+} while (0)
18862+#endif
18863+
18864+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18865 do { \
18866 char c; \
18867 asm volatile (fullop "; set" cc " %1" \
18868@@ -30,11 +88,17 @@ do { \
18869 return c != 0; \
18870 } while (0)
18871
18872-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18873- __GEN_RMWcc(op " " arg0, var, cc)
18874+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18875+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18876+
18877+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18878+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18879+
18880+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18881+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18882
18883-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18884- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18885+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18886+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18887
18888 #endif /* CC_HAVE_ASM_GOTO */
18889
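
This rmwcc.h rework is the heart of PAX_REFCOUNT and the template for the rwsem.h hunks that follow: every checked read-modify-write gets an "anti-op" twin. After the operation, jno falls through when the overflow flag is clear; otherwise the anti-op undoes the increment or decrement and int $4 raises the overflow trap, where the kernel can log the event and kill the offending task instead of letting a reference count wrap. The _unchecked variants keep the old behaviour for counters that may legitimately overflow. A rough userspace model of the control flow (plain C, no asm; the names are mine):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* models GEN_UNARY_RMWcc("incl", "decl", ...): op, overflow test, anti-op */
    static int checked_inc(int *v)
    {
        if (*v == INT_MAX) {            /* the case where "jno" is not taken  */
            /* real code: "decl" undoes the inc, then "int $4" traps */
            fprintf(stderr, "refcount overflow detected\n");
            abort();
        }
        (*v)++;                         /* "incl", known not to overflow here */
        return *v == 0;                 /* the "e" condition code             */
    }

    int main(void)
    {
        int refs = INT_MAX;
        checked_inc(&refs);             /* trips the detector */
        return 0;
    }
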
18890diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18891index cad82c9..2e5c5c1 100644
18892--- a/arch/x86/include/asm/rwsem.h
18893+++ b/arch/x86/include/asm/rwsem.h
18894@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18895 {
18896 asm volatile("# beginning down_read\n\t"
18897 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18898+
18899+#ifdef CONFIG_PAX_REFCOUNT
18900+ "jno 0f\n"
18901+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18902+ "int $4\n0:\n"
18903+ _ASM_EXTABLE(0b, 0b)
18904+#endif
18905+
18906 /* adds 0x00000001 */
18907 " jns 1f\n"
18908 " call call_rwsem_down_read_failed\n"
18909@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18910 "1:\n\t"
18911 " mov %1,%2\n\t"
18912 " add %3,%2\n\t"
18913+
18914+#ifdef CONFIG_PAX_REFCOUNT
18915+ "jno 0f\n"
18916+ "sub %3,%2\n"
18917+ "int $4\n0:\n"
18918+ _ASM_EXTABLE(0b, 0b)
18919+#endif
18920+
18921 " jle 2f\n\t"
18922 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18923 " jnz 1b\n\t"
18924@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18925 long tmp;
18926 asm volatile("# beginning down_write\n\t"
18927 LOCK_PREFIX " xadd %1,(%2)\n\t"
18928+
18929+#ifdef CONFIG_PAX_REFCOUNT
18930+ "jno 0f\n"
18931+ "mov %1,(%2)\n"
18932+ "int $4\n0:\n"
18933+ _ASM_EXTABLE(0b, 0b)
18934+#endif
18935+
18936 /* adds 0xffff0001, returns the old value */
18937 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18938 /* was the active mask 0 before? */
18939@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18940 long tmp;
18941 asm volatile("# beginning __up_read\n\t"
18942 LOCK_PREFIX " xadd %1,(%2)\n\t"
18943+
18944+#ifdef CONFIG_PAX_REFCOUNT
18945+ "jno 0f\n"
18946+ "mov %1,(%2)\n"
18947+ "int $4\n0:\n"
18948+ _ASM_EXTABLE(0b, 0b)
18949+#endif
18950+
18951 /* subtracts 1, returns the old value */
18952 " jns 1f\n\t"
18953 " call call_rwsem_wake\n" /* expects old value in %edx */
18954@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18955 long tmp;
18956 asm volatile("# beginning __up_write\n\t"
18957 LOCK_PREFIX " xadd %1,(%2)\n\t"
18958+
18959+#ifdef CONFIG_PAX_REFCOUNT
18960+ "jno 0f\n"
18961+ "mov %1,(%2)\n"
18962+ "int $4\n0:\n"
18963+ _ASM_EXTABLE(0b, 0b)
18964+#endif
18965+
18966 /* subtracts 0xffff0001, returns the old value */
18967 " jns 1f\n\t"
18968 " call call_rwsem_wake\n" /* expects old value in %edx */
18969@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18970 {
18971 asm volatile("# beginning __downgrade_write\n\t"
18972 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18973+
18974+#ifdef CONFIG_PAX_REFCOUNT
18975+ "jno 0f\n"
18976+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18977+ "int $4\n0:\n"
18978+ _ASM_EXTABLE(0b, 0b)
18979+#endif
18980+
18981 /*
18982 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18983 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18984@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18985 */
18986 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18987 {
18988- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18989+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18990+
18991+#ifdef CONFIG_PAX_REFCOUNT
18992+ "jno 0f\n"
18993+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18994+ "int $4\n0:\n"
18995+ _ASM_EXTABLE(0b, 0b)
18996+#endif
18997+
18998 : "+m" (sem->count)
18999 : "er" (delta));
19000 }
19001@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19002 */
19003 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19004 {
19005- return delta + xadd(&sem->count, delta);
19006+ return delta + xadd_check_overflow(&sem->count, delta);
19007 }
19008
19009 #endif /* __KERNEL__ */
19010diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19011index db257a5..b91bc77 100644
19012--- a/arch/x86/include/asm/segment.h
19013+++ b/arch/x86/include/asm/segment.h
19014@@ -73,10 +73,15 @@
19015 * 26 - ESPFIX small SS
19016 * 27 - per-cpu [ offset to per-cpu data area ]
19017 * 28 - stack_canary-20 [ for stack protector ]
19018- * 29 - unused
19019- * 30 - unused
19020+ * 29 - PCI BIOS CS
19021+ * 30 - PCI BIOS DS
19022 * 31 - TSS for double fault handler
19023 */
19024+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19025+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19026+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19027+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19028+
19029 #define GDT_ENTRY_TLS_MIN 6
19030 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19031
19032@@ -88,6 +93,8 @@
19033
19034 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19035
19036+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19037+
19038 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19039
19040 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19041@@ -113,6 +120,12 @@
19042 #define __KERNEL_STACK_CANARY 0
19043 #endif
19044
19045+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19046+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19047+
19048+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19049+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19050+
19051 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19052
19053 /*
19054@@ -140,7 +153,7 @@
19055 */
19056
19057 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19058-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19059+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19060
19061
19062 #else
19063@@ -164,6 +177,8 @@
19064 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19065 #define __USER32_DS __USER_DS
19066
19067+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19068+
19069 #define GDT_ENTRY_TSS 8 /* needs two entries */
19070 #define GDT_ENTRY_LDT 10 /* needs two entries */
19071 #define GDT_ENTRY_TLS_MIN 12
19072@@ -172,6 +187,8 @@
19073 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19074 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19075
19076+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19077+
19078 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19079 #define FS_TLS 0
19080 #define GS_TLS 1
19081@@ -179,12 +196,14 @@
19082 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19083 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19084
19085-#define GDT_ENTRIES 16
19086+#define GDT_ENTRIES 17
19087
19088 #endif
19089
19090 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19091+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19092 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19093+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19094 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19095 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19096 #ifndef CONFIG_PARAVIRT
19097@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
19098 {
19099 unsigned long __limit;
19100 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19101- return __limit + 1;
19102+ return __limit;
19103 }
19104
19105 #endif /* !__ASSEMBLY__ */
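
The segment.h churn is easier to follow with the selector encoding in mind: a selector is the GDT index shifted left by 3, with bit 2 choosing GDT/LDT and bits 0-1 the RPL, which is why every __FOO definition above is "entry * 8", plus 3 for user segments. Demo (GDT_ENTRY_KERNEL_BASE = 12 is assumed from mainline i386; it is not shown in this hunk):

    #include <stdio.h>

    static unsigned int selector(unsigned int index, unsigned int ldt,
                                 unsigned int rpl)
    {
        return (index << 3) | (ldt << 2) | rpl;
    }

    int main(void)
    {
        unsigned int gdt_entry_kernel_base = 12;        /* assumed */
        unsigned int pcibios_cs = gdt_entry_kernel_base + 17;
        printf("__PCIBIOS_CS = %#x\n", selector(pcibios_cs, 0, 0));
        printf("a user CS with RPL 3: %#x\n", selector(14, 0, 3));
        return 0;
    }
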
19106diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19107index 8d3120f..352b440 100644
19108--- a/arch/x86/include/asm/smap.h
19109+++ b/arch/x86/include/asm/smap.h
19110@@ -25,11 +25,40 @@
19111
19112 #include <asm/alternative-asm.h>
19113
19114+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19115+#define ASM_PAX_OPEN_USERLAND \
19116+ 661: jmp 663f; \
19117+ .pushsection .altinstr_replacement, "a" ; \
19118+ 662: pushq %rax; nop; \
19119+ .popsection ; \
19120+ .pushsection .altinstructions, "a" ; \
19121+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19122+ .popsection ; \
19123+ call __pax_open_userland; \
19124+ popq %rax; \
19125+ 663:
19126+
19127+#define ASM_PAX_CLOSE_USERLAND \
19128+ 661: jmp 663f; \
19129+ .pushsection .altinstr_replacement, "a" ; \
19130+ 662: pushq %rax; nop; \
19131+ .popsection; \
19132+ .pushsection .altinstructions, "a" ; \
19133+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19134+ .popsection; \
19135+ call __pax_close_userland; \
19136+ popq %rax; \
19137+ 663:
19138+#else
19139+#define ASM_PAX_OPEN_USERLAND
19140+#define ASM_PAX_CLOSE_USERLAND
19141+#endif
19142+
19143 #ifdef CONFIG_X86_SMAP
19144
19145 #define ASM_CLAC \
19146 661: ASM_NOP3 ; \
19147- .pushsection .altinstr_replacement, "ax" ; \
19148+ .pushsection .altinstr_replacement, "a" ; \
19149 662: __ASM_CLAC ; \
19150 .popsection ; \
19151 .pushsection .altinstructions, "a" ; \
19152@@ -38,7 +67,7 @@
19153
19154 #define ASM_STAC \
19155 661: ASM_NOP3 ; \
19156- .pushsection .altinstr_replacement, "ax" ; \
19157+ .pushsection .altinstr_replacement, "a" ; \
19158 662: __ASM_STAC ; \
19159 .popsection ; \
19160 .pushsection .altinstructions, "a" ; \
19161@@ -56,6 +85,37 @@
19162
19163 #include <asm/alternative.h>
19164
19165+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19166+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19167+
19168+extern void __pax_open_userland(void);
19169+static __always_inline unsigned long pax_open_userland(void)
19170+{
19171+
19172+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19173+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19174+ :
19175+ : [open] "i" (__pax_open_userland)
19176+ : "memory", "rax");
19177+#endif
19178+
19179+ return 0;
19180+}
19181+
19182+extern void __pax_close_userland(void);
19183+static __always_inline unsigned long pax_close_userland(void)
19184+{
19185+
19186+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19187+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19188+ :
19189+ : [close] "i" (__pax_close_userland)
19190+ : "memory", "rax");
19191+#endif
19192+
19193+ return 0;
19194+}
19195+
19196 #ifdef CONFIG_X86_SMAP
19197
19198 static __always_inline void clac(void)
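
Both the asm-side ASM_PAX_OPEN/CLOSE_USERLAND macros and the C-side pax_open/close_userland() above lean on the alternatives machinery: the instruction stream carries a NOP (or a jump over the call), and at boot, if X86_FEATURE_STRONGUDEREF is set, the patcher rewrites it into a real call to __pax_open_userland/__pax_close_userland. The hooks therefore cost nearly nothing when the feature is off. Modeled with a function pointer instead of runtime code patching:

    #include <stdio.h>

    static void open_userland_impl(void) { puts("switch to userland CR3/PCID"); }
    static void nop(void) { }

    /* stands in for the patchable call site */
    static void (*pax_open_userland)(void) = nop;

    static void apply_alternatives(int has_stronguderef)
    {
        if (has_stronguderef)              /* boot-time CPU feature check */
            pax_open_userland = open_userland_impl;
    }

    int main(void)
    {
        apply_alternatives(1);
        pax_open_userland();   /* real kernel: a patched 5-byte call or a NOP */
        return 0;
    }
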
19199diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19200index 8cd1cc3..827e09e 100644
19201--- a/arch/x86/include/asm/smp.h
19202+++ b/arch/x86/include/asm/smp.h
19203@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19204 /* cpus sharing the last level cache: */
19205 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19206 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19207-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19208+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19209
19210 static inline struct cpumask *cpu_sibling_mask(int cpu)
19211 {
19212@@ -78,7 +78,7 @@ struct smp_ops {
19213
19214 void (*send_call_func_ipi)(const struct cpumask *mask);
19215 void (*send_call_func_single_ipi)(int cpu);
19216-};
19217+} __no_const;
19218
19219 /* Globals due to paravirt */
19220 extern void set_cpu_sibling_map(int cpu);
19221@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19222 extern int safe_smp_processor_id(void);
19223
19224 #elif defined(CONFIG_X86_64_SMP)
19225-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19226-
19227-#define stack_smp_processor_id() \
19228-({ \
19229- struct thread_info *ti; \
19230- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19231- ti->cpu; \
19232-})
19233+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19234+#define stack_smp_processor_id() raw_smp_processor_id()
19235 #define safe_smp_processor_id() smp_processor_id()
19236
19237 #endif
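
The deleted stack_smp_processor_id() relied on thread_info living at the base of the kernel stack: mask the stack pointer down to a THREAD_SIZE boundary and you have it. This patch moves thread_info into task_struct (see the thread_info.h hunks below), so the trick no longer works and both macros collapse to the per-cpu read. The old trick, for reference, in userspace form:

    #include <stdio.h>
    #include <stdlib.h>

    #define THREAD_SIZE 16384UL            /* power of two, as in the kernel */

    struct thread_info { int cpu; };       /* lived at the stack's base      */

    int main(void)
    {
        void *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
        ((struct thread_info *)stack)->cpu = 3;

        /* pretend rsp points somewhere inside the stack */
        unsigned long rsp = (unsigned long)stack + 9000;
        struct thread_info *ti =
            (struct thread_info *)(rsp & ~(THREAD_SIZE - 1));
        printf("recovered cpu = %d\n", ti->cpu);
        free(stack);
        return 0;
    }
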
19238diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19239index 6a99859..03cb807 100644
19240--- a/arch/x86/include/asm/stackprotector.h
19241+++ b/arch/x86/include/asm/stackprotector.h
19242@@ -47,7 +47,7 @@
19243 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19244 */
19245 #define GDT_STACK_CANARY_INIT \
19246- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19247+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19248
19249 /*
19250 * Initialize the stackprotector canary value.
19251@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19252
19253 static inline void load_stack_canary_segment(void)
19254 {
19255-#ifdef CONFIG_X86_32
19256+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19257 asm volatile ("mov %0, %%gs" : : "r" (0));
19258 #endif
19259 }
19260diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19261index 70bbe39..4ae2bd4 100644
19262--- a/arch/x86/include/asm/stacktrace.h
19263+++ b/arch/x86/include/asm/stacktrace.h
19264@@ -11,28 +11,20 @@
19265
19266 extern int kstack_depth_to_print;
19267
19268-struct thread_info;
19269+struct task_struct;
19270 struct stacktrace_ops;
19271
19272-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19273- unsigned long *stack,
19274- unsigned long bp,
19275- const struct stacktrace_ops *ops,
19276- void *data,
19277- unsigned long *end,
19278- int *graph);
19279+typedef unsigned long walk_stack_t(struct task_struct *task,
19280+ void *stack_start,
19281+ unsigned long *stack,
19282+ unsigned long bp,
19283+ const struct stacktrace_ops *ops,
19284+ void *data,
19285+ unsigned long *end,
19286+ int *graph);
19287
19288-extern unsigned long
19289-print_context_stack(struct thread_info *tinfo,
19290- unsigned long *stack, unsigned long bp,
19291- const struct stacktrace_ops *ops, void *data,
19292- unsigned long *end, int *graph);
19293-
19294-extern unsigned long
19295-print_context_stack_bp(struct thread_info *tinfo,
19296- unsigned long *stack, unsigned long bp,
19297- const struct stacktrace_ops *ops, void *data,
19298- unsigned long *end, int *graph);
19299+extern walk_stack_t print_context_stack;
19300+extern walk_stack_t print_context_stack_bp;
19301
19302 /* Generic stack tracer with callbacks */
19303
19304@@ -40,7 +32,7 @@ struct stacktrace_ops {
19305 void (*address)(void *data, unsigned long address, int reliable);
19306 /* On negative return stop dumping */
19307 int (*stack)(void *data, char *name);
19308- walk_stack_t walk_stack;
19309+ walk_stack_t *walk_stack;
19310 };
19311
19312 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
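
A small C idiom worth flagging in the stacktrace.h hunk: walk_stack_t becomes a function typedef rather than a pointer typedef, so "extern walk_stack_t print_context_stack;" declares the function itself with a guaranteed-matching signature, and the struct member spells its pointer-ness explicitly as "walk_stack_t *walk_stack". Minimal demonstration:

    #include <stdio.h>

    typedef int handler_t(int);      /* a function type, like walk_stack_t */

    static handler_t double_it;      /* declares the function via the typedef */
    static int double_it(int x) { return 2 * x; }

    struct ops {
        handler_t *fn;               /* pointer member needs the '*' */
    };

    int main(void)
    {
        struct ops o = { double_it };
        printf("%d\n", o.fn(21));
        return 0;
    }
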
19313diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19314index 751bf4b..a1278b5 100644
19315--- a/arch/x86/include/asm/switch_to.h
19316+++ b/arch/x86/include/asm/switch_to.h
19317@@ -112,7 +112,7 @@ do { \
19318 "call __switch_to\n\t" \
19319 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19320 __switch_canary \
19321- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19322+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19323 "movq %%rax,%%rdi\n\t" \
19324 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19325 "jnz ret_from_fork\n\t" \
19326@@ -123,7 +123,7 @@ do { \
19327 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19328 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19329 [_tif_fork] "i" (_TIF_FORK), \
19330- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19331+ [thread_info] "m" (current_tinfo), \
19332 [current_task] "m" (current_task) \
19333 __switch_canary_iparam \
19334 : "memory", "cc" __EXTRA_CLOBBER)
19335diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19336index 547e344..6be1175 100644
19337--- a/arch/x86/include/asm/thread_info.h
19338+++ b/arch/x86/include/asm/thread_info.h
19339@@ -24,7 +24,6 @@ struct exec_domain;
19340 #include <linux/atomic.h>
19341
19342 struct thread_info {
19343- struct task_struct *task; /* main task structure */
19344 struct exec_domain *exec_domain; /* execution domain */
19345 __u32 flags; /* low level flags */
19346 __u32 status; /* thread synchronous flags */
19347@@ -33,13 +32,13 @@ struct thread_info {
19348 mm_segment_t addr_limit;
19349 struct restart_block restart_block;
19350 void __user *sysenter_return;
19351+ unsigned long lowest_stack;
19352 unsigned int sig_on_uaccess_error:1;
19353 unsigned int uaccess_err:1; /* uaccess failed */
19354 };
19355
19356-#define INIT_THREAD_INFO(tsk) \
19357+#define INIT_THREAD_INFO \
19358 { \
19359- .task = &tsk, \
19360 .exec_domain = &default_exec_domain, \
19361 .flags = 0, \
19362 .cpu = 0, \
19363@@ -50,7 +49,7 @@ struct thread_info {
19364 }, \
19365 }
19366
19367-#define init_thread_info (init_thread_union.thread_info)
19368+#define init_thread_info (init_thread_union.stack)
19369 #define init_stack (init_thread_union.stack)
19370
19371 #else /* !__ASSEMBLY__ */
19372@@ -91,6 +90,7 @@ struct thread_info {
19373 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19374 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19375 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19376+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19377
19378 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19379 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19380@@ -115,17 +115,18 @@ struct thread_info {
19381 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19382 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19383 #define _TIF_X32 (1 << TIF_X32)
19384+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19385
19386 /* work to do in syscall_trace_enter() */
19387 #define _TIF_WORK_SYSCALL_ENTRY \
19388 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19389 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19390- _TIF_NOHZ)
19391+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19392
19393 /* work to do in syscall_trace_leave() */
19394 #define _TIF_WORK_SYSCALL_EXIT \
19395 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19396- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19397+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19398
19399 /* work to do on interrupt/exception return */
19400 #define _TIF_WORK_MASK \
19401@@ -136,7 +137,7 @@ struct thread_info {
19402 /* work to do on any return to user space */
19403 #define _TIF_ALLWORK_MASK \
19404 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19405- _TIF_NOHZ)
19406+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19407
19408 /* Only used for 64 bit */
19409 #define _TIF_DO_NOTIFY_MASK \
19410@@ -151,7 +152,6 @@ struct thread_info {
19411 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19412
19413 #define STACK_WARN (THREAD_SIZE/8)
19414-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19415
19416 /*
19417 * macros/functions for gaining access to the thread information structure
19418@@ -162,26 +162,18 @@ struct thread_info {
19419
19420 DECLARE_PER_CPU(unsigned long, kernel_stack);
19421
19422+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19423+
19424 static inline struct thread_info *current_thread_info(void)
19425 {
19426- struct thread_info *ti;
19427- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19428- KERNEL_STACK_OFFSET - THREAD_SIZE);
19429- return ti;
19430+ return this_cpu_read_stable(current_tinfo);
19431 }
19432
19433 #else /* !__ASSEMBLY__ */
19434
19435 /* how to get the thread information struct from ASM */
19436 #define GET_THREAD_INFO(reg) \
19437- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19438- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19439-
19440-/*
19441- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19442- * a certain register (to be used in assembler memory operands).
19443- */
19444-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19445+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19446
19447 #endif
19448
19449@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
19450 extern void arch_task_cache_init(void);
19451 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19452 extern void arch_release_task_struct(struct task_struct *tsk);
19453+
19454+#define __HAVE_THREAD_FUNCTIONS
19455+#define task_thread_info(task) (&(task)->tinfo)
19456+#define task_stack_page(task) ((task)->stack)
19457+#define setup_thread_stack(p, org) do {} while (0)
19458+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19459+
19460 #endif
19461 #endif /* _ASM_X86_THREAD_INFO_H */
19462diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19463index 04905bf..1178cdf 100644
19464--- a/arch/x86/include/asm/tlbflush.h
19465+++ b/arch/x86/include/asm/tlbflush.h
19466@@ -17,18 +17,44 @@
19467
19468 static inline void __native_flush_tlb(void)
19469 {
19470+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19471+ u64 descriptor[2];
19472+
19473+ descriptor[0] = PCID_KERNEL;
19474+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19475+ return;
19476+ }
19477+
19478+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19479+ if (static_cpu_has(X86_FEATURE_PCID)) {
19480+ unsigned int cpu = raw_get_cpu();
19481+
19482+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19483+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19484+ raw_put_cpu_no_resched();
19485+ return;
19486+ }
19487+#endif
19488+
19489 native_write_cr3(native_read_cr3());
19490 }
19491
19492 static inline void __native_flush_tlb_global_irq_disabled(void)
19493 {
19494- unsigned long cr4;
19495+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19496+ u64 descriptor[2];
19497
19498- cr4 = native_read_cr4();
19499- /* clear PGE */
19500- native_write_cr4(cr4 & ~X86_CR4_PGE);
19501- /* write old PGE again and flush TLBs */
19502- native_write_cr4(cr4);
19503+ descriptor[0] = PCID_KERNEL;
19504+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19505+ } else {
19506+ unsigned long cr4;
19507+
19508+ cr4 = native_read_cr4();
19509+ /* clear PGE */
19510+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19511+ /* write old PGE again and flush TLBs */
19512+ native_write_cr4(cr4);
19513+ }
19514 }
19515
19516 static inline void __native_flush_tlb_global(void)
19517@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19518
19519 static inline void __native_flush_tlb_single(unsigned long addr)
19520 {
19521+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19522+ u64 descriptor[2];
19523+
19524+ descriptor[0] = PCID_KERNEL;
19525+ descriptor[1] = addr;
19526+
19527+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19528+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19529+ if (addr < TASK_SIZE_MAX)
19530+ descriptor[1] += pax_user_shadow_base;
19531+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19532+ }
19533+
19534+ descriptor[0] = PCID_USER;
19535+ descriptor[1] = addr;
19536+#endif
19537+
19538+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19539+ return;
19540+ }
19541+
19542+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19543+ if (static_cpu_has(X86_FEATURE_PCID)) {
19544+ unsigned int cpu = raw_get_cpu();
19545+
19546+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19547+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19548+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19549+ raw_put_cpu_no_resched();
19550+
19551+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19552+ addr += pax_user_shadow_base;
19553+ }
19554+#endif
19555+
19556 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19557 }
19558
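
For the invpcid paths above: the instruction takes a type in a register and a 16-byte memory descriptor, which is exactly the u64 descriptor[2] the code builds, PCID in bits 0-11 of the first quadword, linear address in the second. The four types match the INVPCID_* constants defined back in processor.h. The layout, as a compilable sketch:

    #include <stdint.h>
    #include <stdio.h>

    struct invpcid_desc {
        uint64_t pcid;   /* only bits 0-11 used; the rest must be zero */
        uint64_t addr;   /* linear address, consulted for type 0 only  */
    };

    enum {
        INVPCID_SINGLE_ADDRESS = 0,  /* one address in one PCID         */
        INVPCID_SINGLE_CONTEXT = 1,  /* everything tagged with one PCID */
        INVPCID_ALL_GLOBAL     = 2,  /* all entries, including globals  */
        INVPCID_ALL_NONGLOBAL  = 3,  /* all entries except globals      */
    };

    int main(void)
    {
        struct invpcid_desc d = { .pcid = 0, .addr = 0xffff880000000000ULL };
        /* real code: asm volatile(__ASM_INVPCID :: "d"(&d), "a"(type)) */
        printf("descriptor = {%#llx, %#llx}, %zu bytes\n",
               (unsigned long long)d.pcid, (unsigned long long)d.addr,
               sizeof d);
        return 0;
    }

The UDEREF branch invalidates both the kernel view and the shadowed user copy of an address (hence the pax_user_shadow_base adjustment) so neither view keeps a stale translation.
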
19559diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19560index 0d592e0..7430aad 100644
19561--- a/arch/x86/include/asm/uaccess.h
19562+++ b/arch/x86/include/asm/uaccess.h
19563@@ -7,6 +7,7 @@
19564 #include <linux/compiler.h>
19565 #include <linux/thread_info.h>
19566 #include <linux/string.h>
19567+#include <linux/spinlock.h>
19568 #include <asm/asm.h>
19569 #include <asm/page.h>
19570 #include <asm/smap.h>
19571@@ -29,7 +30,12 @@
19572
19573 #define get_ds() (KERNEL_DS)
19574 #define get_fs() (current_thread_info()->addr_limit)
19575+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19576+void __set_fs(mm_segment_t x);
19577+void set_fs(mm_segment_t x);
19578+#else
19579 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19580+#endif
19581
19582 #define segment_eq(a, b) ((a).seg == (b).seg)
19583
19584@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19585 * checks that the pointer is in the user space range - after calling
19586 * this function, memory access functions may still return -EFAULT.
19587 */
19588-#define access_ok(type, addr, size) \
19589- likely(!__range_not_ok(addr, size, user_addr_max()))
19590+extern int _cond_resched(void);
19591+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19592+#define access_ok(type, addr, size) \
19593+({ \
19594+ unsigned long __size = size; \
19595+ unsigned long __addr = (unsigned long)addr; \
19596+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19597+ if (__ret_ao && __size) { \
19598+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19599+ unsigned long __end_ao = __addr + __size - 1; \
19600+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19601+ while (__addr_ao <= __end_ao) { \
19602+ char __c_ao; \
19603+ __addr_ao += PAGE_SIZE; \
19604+ if (__size > PAGE_SIZE) \
19605+ _cond_resched(); \
19606+ if (__get_user(__c_ao, (char __user *)__addr)) \
19607+ break; \
19608+ if (type != VERIFY_WRITE) { \
19609+ __addr = __addr_ao; \
19610+ continue; \
19611+ } \
19612+ if (__put_user(__c_ao, (char __user *)__addr)) \
19613+ break; \
19614+ __addr = __addr_ao; \
19615+ } \
19616+ } \
19617+ } \
19618+ __ret_ao; \
19619+})
19620
19621 /*
19622 * The exception table consists of pairs of addresses relative to the
19623@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19624 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19625 __chk_user_ptr(ptr); \
19626 might_fault(); \
19627+ pax_open_userland(); \
19628 asm volatile("call __get_user_%P3" \
19629 : "=a" (__ret_gu), "=r" (__val_gu) \
19630 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19631 (x) = (__typeof__(*(ptr))) __val_gu; \
19632+ pax_close_userland(); \
19633 __ret_gu; \
19634 })
19635
19636@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19637 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19638 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19639
19640-
19641+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19642+#define __copyuser_seg "gs;"
19643+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19644+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19645+#else
19646+#define __copyuser_seg
19647+#define __COPYUSER_SET_ES
19648+#define __COPYUSER_RESTORE_ES
19649+#endif
19650
19651 #ifdef CONFIG_X86_32
19652 #define __put_user_asm_u64(x, addr, err, errret) \
19653 asm volatile(ASM_STAC "\n" \
19654- "1: movl %%eax,0(%2)\n" \
19655- "2: movl %%edx,4(%2)\n" \
19656+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19657+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19658 "3: " ASM_CLAC "\n" \
19659 ".section .fixup,\"ax\"\n" \
19660 "4: movl %3,%0\n" \
19661@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19662
19663 #define __put_user_asm_ex_u64(x, addr) \
19664 asm volatile(ASM_STAC "\n" \
19665- "1: movl %%eax,0(%1)\n" \
19666- "2: movl %%edx,4(%1)\n" \
19667+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19668+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19669 "3: " ASM_CLAC "\n" \
19670 _ASM_EXTABLE_EX(1b, 2b) \
19671 _ASM_EXTABLE_EX(2b, 3b) \
19672@@ -257,7 +301,8 @@ extern void __put_user_8(void);
19673 __typeof__(*(ptr)) __pu_val; \
19674 __chk_user_ptr(ptr); \
19675 might_fault(); \
19676- __pu_val = x; \
19677+ __pu_val = (x); \
19678+ pax_open_userland(); \
19679 switch (sizeof(*(ptr))) { \
19680 case 1: \
19681 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19682@@ -275,6 +320,7 @@ extern void __put_user_8(void);
19683 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19684 break; \
19685 } \
19686+ pax_close_userland(); \
19687 __ret_pu; \
19688 })
19689
19690@@ -355,8 +401,10 @@ do { \
19691 } while (0)
19692
19693 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19694+do { \
19695+ pax_open_userland(); \
19696 asm volatile(ASM_STAC "\n" \
19697- "1: mov"itype" %2,%"rtype"1\n" \
19698+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19699 "2: " ASM_CLAC "\n" \
19700 ".section .fixup,\"ax\"\n" \
19701 "3: mov %3,%0\n" \
19702@@ -364,8 +412,10 @@ do { \
19703 " jmp 2b\n" \
19704 ".previous\n" \
19705 _ASM_EXTABLE(1b, 3b) \
19706- : "=r" (err), ltype(x) \
19707- : "m" (__m(addr)), "i" (errret), "0" (err))
19708+ : "=r" (err), ltype (x) \
19709+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19710+ pax_close_userland(); \
19711+} while (0)
19712
19713 #define __get_user_size_ex(x, ptr, size) \
19714 do { \
19715@@ -389,7 +439,7 @@ do { \
19716 } while (0)
19717
19718 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19719- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19720+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19721 "2:\n" \
19722 _ASM_EXTABLE_EX(1b, 2b) \
19723 : ltype(x) : "m" (__m(addr)))
19724@@ -406,13 +456,24 @@ do { \
19725 int __gu_err; \
19726 unsigned long __gu_val; \
19727 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19728- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19729+ (x) = (__typeof__(*(ptr)))__gu_val; \
19730 __gu_err; \
19731 })
19732
19733 /* FIXME: this hack is definitely wrong -AK */
19734 struct __large_struct { unsigned long buf[100]; };
19735-#define __m(x) (*(struct __large_struct __user *)(x))
19736+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19737+#define ____m(x) \
19738+({ \
19739+ unsigned long ____x = (unsigned long)(x); \
19740+ if (____x < pax_user_shadow_base) \
19741+ ____x += pax_user_shadow_base; \
19742+ (typeof(x))____x; \
19743+})
19744+#else
19745+#define ____m(x) (x)
19746+#endif
19747+#define __m(x) (*(struct __large_struct __user *)____m(x))
19748
19749 /*
19750 * Tell gcc we read from memory instead of writing: this is because
19751@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
19752 * aliasing issues.
19753 */
19754 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19755+do { \
19756+ pax_open_userland(); \
19757 asm volatile(ASM_STAC "\n" \
19758- "1: mov"itype" %"rtype"1,%2\n" \
19759+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19760 "2: " ASM_CLAC "\n" \
19761 ".section .fixup,\"ax\"\n" \
19762 "3: mov %3,%0\n" \
19763@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
19764 ".previous\n" \
19765 _ASM_EXTABLE(1b, 3b) \
19766 : "=r"(err) \
19767- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19768+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19769+ pax_close_userland(); \
19770+} while (0)
19771
19772 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19773- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19774+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19775 "2:\n" \
19776 _ASM_EXTABLE_EX(1b, 2b) \
19777 : : ltype(x), "m" (__m(addr)))
19778@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
19779 */
19780 #define uaccess_try do { \
19781 current_thread_info()->uaccess_err = 0; \
19782+ pax_open_userland(); \
19783 stac(); \
19784 barrier();
19785
19786 #define uaccess_catch(err) \
19787 clac(); \
19788+ pax_close_userland(); \
19789 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19790 } while (0)
19791
19792@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
19793 * On error, the variable @x is set to zero.
19794 */
19795
19796+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19797+#define __get_user(x, ptr) get_user((x), (ptr))
19798+#else
19799 #define __get_user(x, ptr) \
19800 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19801+#endif
19802
19803 /**
19804 * __put_user: - Write a simple value into user space, with less checking.
19805@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
19806 * Returns zero on success, or -EFAULT on error.
19807 */
19808
19809+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19810+#define __put_user(x, ptr) put_user((x), (ptr))
19811+#else
19812 #define __put_user(x, ptr) \
19813 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19814+#endif
19815
19816 #define __get_user_unaligned __get_user
19817 #define __put_user_unaligned __put_user
19818@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
19819 #define get_user_ex(x, ptr) do { \
19820 unsigned long __gue_val; \
19821 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19822- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19823+ (x) = (__typeof__(*(ptr)))__gue_val; \
19824 } while (0)
19825
19826 #define put_user_try uaccess_try
19827@@ -531,7 +606,7 @@ extern __must_check long strlen_user(const char __user *str);
19828 extern __must_check long strnlen_user(const char __user *str, long n);
19829
19830 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19831-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19832+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19833
19834 extern void __cmpxchg_wrong_size(void)
19835 __compiletime_error("Bad argument size for cmpxchg");
19836@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
19837 __typeof__(ptr) __uval = (uval); \
19838 __typeof__(*(ptr)) __old = (old); \
19839 __typeof__(*(ptr)) __new = (new); \
19840+ pax_open_userland(); \
19841 switch (size) { \
19842 case 1: \
19843 { \
19844 asm volatile("\t" ASM_STAC "\n" \
19845- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19846+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19847 "2:\t" ASM_CLAC "\n" \
19848 "\t.section .fixup, \"ax\"\n" \
19849 "3:\tmov %3, %0\n" \
19850 "\tjmp 2b\n" \
19851 "\t.previous\n" \
19852 _ASM_EXTABLE(1b, 3b) \
19853- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19854+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19855 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19856 : "memory" \
19857 ); \
19858@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
19859 case 2: \
19860 { \
19861 asm volatile("\t" ASM_STAC "\n" \
19862- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19863+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19864 "2:\t" ASM_CLAC "\n" \
19865 "\t.section .fixup, \"ax\"\n" \
19866 "3:\tmov %3, %0\n" \
19867 "\tjmp 2b\n" \
19868 "\t.previous\n" \
19869 _ASM_EXTABLE(1b, 3b) \
19870- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19871+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19872 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19873 : "memory" \
19874 ); \
19875@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
19876 case 4: \
19877 { \
19878 asm volatile("\t" ASM_STAC "\n" \
19879- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19880+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19881 "2:\t" ASM_CLAC "\n" \
19882 "\t.section .fixup, \"ax\"\n" \
19883 "3:\tmov %3, %0\n" \
19884 "\tjmp 2b\n" \
19885 "\t.previous\n" \
19886 _ASM_EXTABLE(1b, 3b) \
19887- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19888+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19889 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19890 : "memory" \
19891 ); \
19892@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
19893 __cmpxchg_wrong_size(); \
19894 \
19895 asm volatile("\t" ASM_STAC "\n" \
19896- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19897+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19898 "2:\t" ASM_CLAC "\n" \
19899 "\t.section .fixup, \"ax\"\n" \
19900 "3:\tmov %3, %0\n" \
19901 "\tjmp 2b\n" \
19902 "\t.previous\n" \
19903 _ASM_EXTABLE(1b, 3b) \
19904- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19905+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19906 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19907 : "memory" \
19908 ); \
19909@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
19910 default: \
19911 __cmpxchg_wrong_size(); \
19912 } \
19913+ pax_close_userland(); \
19914 *__uval = __old; \
19915 __ret; \
19916 })
19917@@ -636,17 +713,6 @@ extern struct movsl_mask {
19918
19919 #define ARCH_HAS_NOCACHE_UACCESS 1
19920
19921-#ifdef CONFIG_X86_32
19922-# include <asm/uaccess_32.h>
19923-#else
19924-# include <asm/uaccess_64.h>
19925-#endif
19926-
19927-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19928- unsigned n);
19929-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19930- unsigned n);
19931-
19932 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19933 # define copy_user_diag __compiletime_error
19934 #else
19935@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19936 extern void copy_user_diag("copy_from_user() buffer size is too small")
19937 copy_from_user_overflow(void);
19938 extern void copy_user_diag("copy_to_user() buffer size is too small")
19939-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19940+copy_to_user_overflow(void);
19941
19942 #undef copy_user_diag
19943
19944@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19945
19946 extern void
19947 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19948-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19949+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19950 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19951
19952 #else
19953@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19954
19955 #endif
19956
19957+#ifdef CONFIG_X86_32
19958+# include <asm/uaccess_32.h>
19959+#else
19960+# include <asm/uaccess_64.h>
19961+#endif
19962+
19963 static inline unsigned long __must_check
19964 copy_from_user(void *to, const void __user *from, unsigned long n)
19965 {
19966- int sz = __compiletime_object_size(to);
19967+ size_t sz = __compiletime_object_size(to);
19968
19969 might_fault();
19970
19971@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19972 * case, and do only runtime checking for non-constant sizes.
19973 */
19974
19975- if (likely(sz < 0 || sz >= n))
19976- n = _copy_from_user(to, from, n);
19977- else if(__builtin_constant_p(n))
19978- copy_from_user_overflow();
19979- else
19980- __copy_from_user_overflow(sz, n);
19981+ if (likely(sz != (size_t)-1 && sz < n)) {
19982+ if(__builtin_constant_p(n))
19983+ copy_from_user_overflow();
19984+ else
19985+ __copy_from_user_overflow(sz, n);
19986+ } else if (access_ok(VERIFY_READ, from, n))
19987+ n = __copy_from_user(to, from, n);
19988+ else if ((long)n > 0)
19989+ memset(to, 0, n);
19990
19991 return n;
19992 }
19993@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19994 static inline unsigned long __must_check
19995 copy_to_user(void __user *to, const void *from, unsigned long n)
19996 {
19997- int sz = __compiletime_object_size(from);
19998+ size_t sz = __compiletime_object_size(from);
19999
20000 might_fault();
20001
20002 /* See the comment in copy_from_user() above. */
20003- if (likely(sz < 0 || sz >= n))
20004- n = _copy_to_user(to, from, n);
20005- else if(__builtin_constant_p(n))
20006- copy_to_user_overflow();
20007- else
20008- __copy_to_user_overflow(sz, n);
20009+ if (likely(sz != (size_t)-1 && sz < n)) {
20010+ if(__builtin_constant_p(n))
20011+ copy_to_user_overflow();
20012+ else
20013+ __copy_to_user_overflow(sz, n);
20014+ } else if (access_ok(VERIFY_WRITE, to, n))
20015+ n = __copy_to_user(to, from, n);
20016
20017 return n;
20018 }
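
[annotation] The two hunks above reshape copy_from_user()/copy_to_user(): the compile-time object-size check now fires first (reporting the overflow and copying nothing), an explicit access_ok() gate replaces the out-of-line _copy_*_user() helpers, and a failed read zeroes the destination so no stale kernel bytes leak back to the caller. A minimal user-space sketch of the same decision order; the name checked_copy and the explicit to_size parameter are illustrative only (the kernel derives the size automatically via __compiletime_object_size()):

    #include <stdio.h>
    #include <string.h>

    /* Decision order mirrors the patched copy_from_user(): reject a
     * provable overflow first, then gate on the access check, and zero
     * the destination when the "read" fails.  Returns bytes NOT copied,
     * matching the kernel convention. */
    static unsigned long
    checked_copy(void *to, size_t to_size, const void *from,
                 unsigned long n, int access_ok)
    {
        if (to_size < n)
            return n;            /* would overflow 'to': copy nothing */
        if (access_ok) {
            memcpy(to, from, n); /* stands in for __copy_from_user() */
            return 0;
        }
        memset(to, 0, n);        /* failed read: don't leak stale bytes */
        return n;
    }

    int main(void)
    {
        char dst[16];
        char src[32] = "example";

        printf("fits:      %lu left\n",
               checked_copy(dst, sizeof(dst), src, sizeof(dst), 1));
        printf("overflow:  %lu left\n",
               checked_copy(dst, sizeof(dst), src, sizeof(src), 1));
        printf("no access: %lu left\n",
               checked_copy(dst, sizeof(dst), src, sizeof(dst), 0));
        return 0;
    }
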
20019diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20020index 3c03a5d..edb68ae 100644
20021--- a/arch/x86/include/asm/uaccess_32.h
20022+++ b/arch/x86/include/asm/uaccess_32.h
20023@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20024 * anything, so this is accurate.
20025 */
20026
20027-static __always_inline unsigned long __must_check
20028+static __always_inline __size_overflow(3) unsigned long __must_check
20029 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20030 {
20031+ if ((long)n < 0)
20032+ return n;
20033+
20034+ check_object_size(from, n, true);
20035+
20036 if (__builtin_constant_p(n)) {
20037 unsigned long ret;
20038
20039@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20040 __copy_to_user(void __user *to, const void *from, unsigned long n)
20041 {
20042 might_fault();
20043+
20044 return __copy_to_user_inatomic(to, from, n);
20045 }
20046
20047-static __always_inline unsigned long
20048+static __always_inline __size_overflow(3) unsigned long
20049 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20050 {
20051+ if ((long)n < 0)
20052+ return n;
20053+
20054 /* Avoid zeroing the tail if the copy fails..
20055 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20056 * but as the zeroing behaviour is only significant when n is not
20057@@ -137,6 +146,12 @@ static __always_inline unsigned long
20058 __copy_from_user(void *to, const void __user *from, unsigned long n)
20059 {
20060 might_fault();
20061+
20062+ if ((long)n < 0)
20063+ return n;
20064+
20065+ check_object_size(to, n, false);
20066+
20067 if (__builtin_constant_p(n)) {
20068 unsigned long ret;
20069
20070@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20071 const void __user *from, unsigned long n)
20072 {
20073 might_fault();
20074+
20075+ if ((long)n < 0)
20076+ return n;
20077+
20078 if (__builtin_constant_p(n)) {
20079 unsigned long ret;
20080
20081@@ -181,7 +200,10 @@ static __always_inline unsigned long
20082 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20083 unsigned long n)
20084 {
20085- return __copy_from_user_ll_nocache_nozero(to, from, n);
20086+ if ((long)n < 0)
20087+ return n;
20088+
20089+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20090 }
20091
20092 #endif /* _ASM_X86_UACCESS_32_H */
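
[annotation] Every 32-bit copy helper above gains the same "(long)n < 0" guard: a length with the sign bit set, the classic result of an underflowed size calculation, is bounced back untouched instead of being handed to the low-level copy loops as a near-4GB request. The shape of the guard in a standalone sketch (guarded_copy is an illustrative name):

    #include <stdio.h>

    /* A size whose sign bit is set is reported as fully uncopied and the
     * copy never runs; compare the "(long)n < 0" checks added above. */
    static unsigned long guarded_copy(unsigned long n)
    {
        if ((long)n < 0)
            return n;   /* refuse: caller sees all n bytes "not copied" */
        /* ... the real __copy_*_user_ll() work would happen here ... */
        return 0;
    }

    int main(void)
    {
        unsigned long good = 8;
        unsigned long bad = 4 - 12;   /* underflow: a huge unsigned value */

        printf("good: %lu left\n", guarded_copy(good));
        printf("bad:  %lu left\n", guarded_copy(bad));
        return 0;
    }
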
20093diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20094index 12a26b9..c36fff5 100644
20095--- a/arch/x86/include/asm/uaccess_64.h
20096+++ b/arch/x86/include/asm/uaccess_64.h
20097@@ -10,6 +10,9 @@
20098 #include <asm/alternative.h>
20099 #include <asm/cpufeature.h>
20100 #include <asm/page.h>
20101+#include <asm/pgtable.h>
20102+
20103+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20104
20105 /*
20106 * Copy To/From Userspace
20107@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20108 __must_check unsigned long
20109 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20110
20111-static __always_inline __must_check unsigned long
20112-copy_user_generic(void *to, const void *from, unsigned len)
20113+static __always_inline __must_check __size_overflow(3) unsigned long
20114+copy_user_generic(void *to, const void *from, unsigned long len)
20115 {
20116 unsigned ret;
20117
20118@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20119 }
20120
20121 __must_check unsigned long
20122-copy_in_user(void __user *to, const void __user *from, unsigned len);
20123+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20124
20125 static __always_inline __must_check
20126-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20127+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20128 {
20129- int ret = 0;
20130+ size_t sz = __compiletime_object_size(dst);
20131+ unsigned ret = 0;
20132+
20133+ if (size > INT_MAX)
20134+ return size;
20135+
20136+ check_object_size(dst, size, false);
20137+
20138+#ifdef CONFIG_PAX_MEMORY_UDEREF
20139+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20140+ return size;
20141+#endif
20142+
20143+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20144+ if(__builtin_constant_p(size))
20145+ copy_from_user_overflow();
20146+ else
20147+ __copy_from_user_overflow(sz, size);
20148+ return size;
20149+ }
20150
20151 if (!__builtin_constant_p(size))
20152- return copy_user_generic(dst, (__force void *)src, size);
20153+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20154 switch (size) {
20155- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20156+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20157 ret, "b", "b", "=q", 1);
20158 return ret;
20159- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20160+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20161 ret, "w", "w", "=r", 2);
20162 return ret;
20163- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20164+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20165 ret, "l", "k", "=r", 4);
20166 return ret;
20167- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20168+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20169 ret, "q", "", "=r", 8);
20170 return ret;
20171 case 10:
20172- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20173+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20174 ret, "q", "", "=r", 10);
20175 if (unlikely(ret))
20176 return ret;
20177 __get_user_asm(*(u16 *)(8 + (char *)dst),
20178- (u16 __user *)(8 + (char __user *)src),
20179+ (const u16 __user *)(8 + (const char __user *)src),
20180 ret, "w", "w", "=r", 2);
20181 return ret;
20182 case 16:
20183- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20184+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20185 ret, "q", "", "=r", 16);
20186 if (unlikely(ret))
20187 return ret;
20188 __get_user_asm(*(u64 *)(8 + (char *)dst),
20189- (u64 __user *)(8 + (char __user *)src),
20190+ (const u64 __user *)(8 + (const char __user *)src),
20191 ret, "q", "", "=r", 8);
20192 return ret;
20193 default:
20194- return copy_user_generic(dst, (__force void *)src, size);
20195+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20196 }
20197 }
20198
20199 static __always_inline __must_check
20200-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20201+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20202 {
20203 might_fault();
20204 return __copy_from_user_nocheck(dst, src, size);
20205 }
20206
20207 static __always_inline __must_check
20208-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20209+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20210 {
20211- int ret = 0;
20212+ size_t sz = __compiletime_object_size(src);
20213+ unsigned ret = 0;
20214+
20215+ if (size > INT_MAX)
20216+ return size;
20217+
20218+ check_object_size(src, size, true);
20219+
20220+#ifdef CONFIG_PAX_MEMORY_UDEREF
20221+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20222+ return size;
20223+#endif
20224+
20225+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20226+ if(__builtin_constant_p(size))
20227+ copy_to_user_overflow();
20228+ else
20229+ __copy_to_user_overflow(sz, size);
20230+ return size;
20231+ }
20232
20233 if (!__builtin_constant_p(size))
20234- return copy_user_generic((__force void *)dst, src, size);
20235+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20236 switch (size) {
20237- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20238+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20239 ret, "b", "b", "iq", 1);
20240 return ret;
20241- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20242+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20243 ret, "w", "w", "ir", 2);
20244 return ret;
20245- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20246+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20247 ret, "l", "k", "ir", 4);
20248 return ret;
20249- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20250+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20251 ret, "q", "", "er", 8);
20252 return ret;
20253 case 10:
20254- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20255+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20256 ret, "q", "", "er", 10);
20257 if (unlikely(ret))
20258 return ret;
20259 asm("":::"memory");
20260- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20261+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20262 ret, "w", "w", "ir", 2);
20263 return ret;
20264 case 16:
20265- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20266+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20267 ret, "q", "", "er", 16);
20268 if (unlikely(ret))
20269 return ret;
20270 asm("":::"memory");
20271- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20272+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20273 ret, "q", "", "er", 8);
20274 return ret;
20275 default:
20276- return copy_user_generic((__force void *)dst, src, size);
20277+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20278 }
20279 }
20280
20281 static __always_inline __must_check
20282-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20283+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20284 {
20285 might_fault();
20286 return __copy_to_user_nocheck(dst, src, size);
20287 }
20288
20289 static __always_inline __must_check
20290-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20291+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20292 {
20293- int ret = 0;
20294+ unsigned ret = 0;
20295
20296 might_fault();
20297+
20298+ if (size > INT_MAX)
20299+ return size;
20300+
20301+#ifdef CONFIG_PAX_MEMORY_UDEREF
20302+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20303+ return size;
20304+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20305+ return size;
20306+#endif
20307+
20308 if (!__builtin_constant_p(size))
20309- return copy_user_generic((__force void *)dst,
20310- (__force void *)src, size);
20311+ return copy_user_generic((__force_kernel void *)____m(dst),
20312+ (__force_kernel const void *)____m(src), size);
20313 switch (size) {
20314 case 1: {
20315 u8 tmp;
20316- __get_user_asm(tmp, (u8 __user *)src,
20317+ __get_user_asm(tmp, (const u8 __user *)src,
20318 ret, "b", "b", "=q", 1);
20319 if (likely(!ret))
20320 __put_user_asm(tmp, (u8 __user *)dst,
20321@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20322 }
20323 case 2: {
20324 u16 tmp;
20325- __get_user_asm(tmp, (u16 __user *)src,
20326+ __get_user_asm(tmp, (const u16 __user *)src,
20327 ret, "w", "w", "=r", 2);
20328 if (likely(!ret))
20329 __put_user_asm(tmp, (u16 __user *)dst,
20330@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20331
20332 case 4: {
20333 u32 tmp;
20334- __get_user_asm(tmp, (u32 __user *)src,
20335+ __get_user_asm(tmp, (const u32 __user *)src,
20336 ret, "l", "k", "=r", 4);
20337 if (likely(!ret))
20338 __put_user_asm(tmp, (u32 __user *)dst,
20339@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20340 }
20341 case 8: {
20342 u64 tmp;
20343- __get_user_asm(tmp, (u64 __user *)src,
20344+ __get_user_asm(tmp, (const u64 __user *)src,
20345 ret, "q", "", "=r", 8);
20346 if (likely(!ret))
20347 __put_user_asm(tmp, (u64 __user *)dst,
20348@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20349 return ret;
20350 }
20351 default:
20352- return copy_user_generic((__force void *)dst,
20353- (__force void *)src, size);
20354+ return copy_user_generic((__force_kernel void *)____m(dst),
20355+ (__force_kernel const void *)____m(src), size);
20356 }
20357 }
20358
20359-static __must_check __always_inline int
20360-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20361+static __must_check __always_inline unsigned long
20362+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20363 {
20364 return __copy_from_user_nocheck(dst, src, size);
20365 }
20366
20367-static __must_check __always_inline int
20368-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20369+static __must_check __always_inline unsigned long
20370+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20371 {
20372 return __copy_to_user_nocheck(dst, src, size);
20373 }
20374
20375-extern long __copy_user_nocache(void *dst, const void __user *src,
20376- unsigned size, int zerorest);
20377+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20378+ unsigned long size, int zerorest);
20379
20380-static inline int
20381-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20382+static inline unsigned long
20383+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20384 {
20385 might_fault();
20386+
20387+ if (size > INT_MAX)
20388+ return size;
20389+
20390+#ifdef CONFIG_PAX_MEMORY_UDEREF
20391+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20392+ return size;
20393+#endif
20394+
20395 return __copy_user_nocache(dst, src, size, 1);
20396 }
20397
20398-static inline int
20399+static inline unsigned long
20400 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20401- unsigned size)
20402+ unsigned long size)
20403 {
20404+ if (size > INT_MAX)
20405+ return size;
20406+
20407+#ifdef CONFIG_PAX_MEMORY_UDEREF
20408+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20409+ return size;
20410+#endif
20411+
20412 return __copy_user_nocache(dst, src, size, 0);
20413 }
20414
20415 unsigned long
20416-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20417+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20418
20419 #endif /* _ASM_X86_UACCESS_64_H */
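
[annotation] The 64-bit helpers above keep the existing __builtin_constant_p() fast path: a size the compiler can prove constant (1, 2, 4, 8, 10 or 16 bytes) is dispatched to fixed-width __get_user_asm()/__put_user_asm() moves, while everything else falls through to copy_user_generic(). A rough user-space sketch of that dispatch, assuming memcpy() with a constant length stands in for the fixed-width asm (copy_sized and generic_copy are illustrative names):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static unsigned long generic_copy(void *dst, const void *src,
                                      unsigned long n)
    {
        memcpy(dst, src, n);        /* stands in for copy_user_generic() */
        return 0;
    }

    /* Compile-time-constant sizes of 4 or 8 take a fixed-width branch the
     * compiler reduces to a single move; others use the generic copier. */
    #define copy_sized(dst, src, n)                                    \
        (__builtin_constant_p(n) && (n) == 4                           \
            ? (memcpy((dst), (src), 4), 0UL)                           \
         : __builtin_constant_p(n) && (n) == 8                         \
            ? (memcpy((dst), (src), 8), 0UL)                           \
            : generic_copy((dst), (src), (n)))

    int main(void)
    {
        uint64_t a = 0x1122334455667788ULL, b = 0;

        copy_sized(&b, &a, sizeof(b));  /* constant 8: fixed-width branch */
        printf("%llx\n", (unsigned long long)b);
        return 0;
    }
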
20420diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20421index 5b238981..77fdd78 100644
20422--- a/arch/x86/include/asm/word-at-a-time.h
20423+++ b/arch/x86/include/asm/word-at-a-time.h
20424@@ -11,7 +11,7 @@
20425 * and shift, for example.
20426 */
20427 struct word_at_a_time {
20428- const unsigned long one_bits, high_bits;
20429+ unsigned long one_bits, high_bits;
20430 };
20431
20432 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20433diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20434index f58a9c7..dc378042a 100644
20435--- a/arch/x86/include/asm/x86_init.h
20436+++ b/arch/x86/include/asm/x86_init.h
20437@@ -129,7 +129,7 @@ struct x86_init_ops {
20438 struct x86_init_timers timers;
20439 struct x86_init_iommu iommu;
20440 struct x86_init_pci pci;
20441-};
20442+} __no_const;
20443
20444 /**
20445 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20446@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20447 void (*setup_percpu_clockev)(void);
20448 void (*early_percpu_clock_init)(void);
20449 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20450-};
20451+} __no_const;
20452
20453 struct timespec;
20454
20455@@ -168,7 +168,7 @@ struct x86_platform_ops {
20456 void (*save_sched_clock_state)(void);
20457 void (*restore_sched_clock_state)(void);
20458 void (*apic_post_init)(void);
20459-};
20460+} __no_const;
20461
20462 struct pci_dev;
20463 struct msi_msg;
20464@@ -182,7 +182,7 @@ struct x86_msi_ops {
20465 void (*teardown_msi_irqs)(struct pci_dev *dev);
20466 void (*restore_msi_irqs)(struct pci_dev *dev);
20467 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20468-};
20469+} __no_const;
20470
20471 struct IO_APIC_route_entry;
20472 struct io_apic_irq_attr;
20473@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20474 unsigned int destination, int vector,
20475 struct io_apic_irq_attr *attr);
20476 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20477-};
20478+} __no_const;
20479
20480 extern struct x86_init_ops x86_init;
20481 extern struct x86_cpuinit_ops x86_cpuinit;
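
[annotation] The __no_const markers above are the escape hatch for grsecurity's constify GCC plugin, which otherwise turns structures made up of function pointers into read-only data; these particular ops tables are written after boot, so they must stay writable. The hardening idea itself is plain C: a const-qualified ops table lands in .rodata, so its pointers cannot be redirected at runtime. A minimal sketch (struct ops and my_probe are illustrative):

    #include <stdio.h>

    struct ops {
        const char *name;
        int (*probe)(void);
    };

    static int my_probe(void) { return 1; }

    /* const places the table in .rodata: the function pointer cannot be
     * overwritten after load, by accident or by an attacker. */
    static const struct ops my_ops = {
        .name  = "demo",
        .probe = my_probe,
    };

    int main(void)
    {
        printf("%s -> %d\n", my_ops.name, my_ops.probe());
        /* my_ops.probe = NULL;  <- would not compile: object is const */
        return 0;
    }
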
20482diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20483index 5eea099..ff7ef8d 100644
20484--- a/arch/x86/include/asm/xen/page.h
20485+++ b/arch/x86/include/asm/xen/page.h
20486@@ -83,7 +83,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20487 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20488 * cases needing an extended handling.
20489 */
20490-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20491+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20492 {
20493 unsigned long mfn;
20494
20495diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20496index 5fa9770..2b49d6c 100644
20497--- a/arch/x86/include/asm/xsave.h
20498+++ b/arch/x86/include/asm/xsave.h
20499@@ -229,12 +229,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20500 if (unlikely(err))
20501 return -EFAULT;
20502
20503+ pax_open_userland();
20504 __asm__ __volatile__(ASM_STAC "\n"
20505- "1:"XSAVE"\n"
20506+ "1:"
20507+ __copyuser_seg
20508+ XSAVE"\n"
20509 "2: " ASM_CLAC "\n"
20510 xstate_fault
20511 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20512 : "memory");
20513+ pax_close_userland();
20514 return err;
20515 }
20516
20517@@ -244,16 +248,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20518 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20519 {
20520 int err = 0;
20521- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20522+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20523 u32 lmask = mask;
20524 u32 hmask = mask >> 32;
20525
20526+ pax_open_userland();
20527 __asm__ __volatile__(ASM_STAC "\n"
20528- "1:"XRSTOR"\n"
20529+ "1:"
20530+ __copyuser_seg
20531+ XRSTOR"\n"
20532 "2: " ASM_CLAC "\n"
20533 xstate_fault
20534 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20535 : "memory"); /* memory required? */
20536+ pax_close_userland();
20537 return err;
20538 }
20539
20540diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20541index d993e33..8db1b18 100644
20542--- a/arch/x86/include/uapi/asm/e820.h
20543+++ b/arch/x86/include/uapi/asm/e820.h
20544@@ -58,7 +58,7 @@ struct e820map {
20545 #define ISA_START_ADDRESS 0xa0000
20546 #define ISA_END_ADDRESS 0x100000
20547
20548-#define BIOS_BEGIN 0x000a0000
20549+#define BIOS_BEGIN 0x000c0000
20550 #define BIOS_END 0x00100000
20551
20552 #define BIOS_ROM_BASE 0xffe00000
20553diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20554index 7b0a55a..ad115bf 100644
20555--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20556+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20557@@ -49,7 +49,6 @@
20558 #define EFLAGS 144
20559 #define RSP 152
20560 #define SS 160
20561-#define ARGOFFSET R11
20562 #endif /* __ASSEMBLY__ */
20563
20564 /* top of stack page */
20565diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20566index 5d4502c..a567e09 100644
20567--- a/arch/x86/kernel/Makefile
20568+++ b/arch/x86/kernel/Makefile
20569@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20570 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20571 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20572 obj-y += probe_roms.o
20573-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20574+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20575 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20576 obj-$(CONFIG_X86_64) += mcount_64.o
20577 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20578diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20579index b5ddc96..490b4e4 100644
20580--- a/arch/x86/kernel/acpi/boot.c
20581+++ b/arch/x86/kernel/acpi/boot.c
20582@@ -1351,7 +1351,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20583 * If your system is blacklisted here, but you find that acpi=force
20584 * works for you, please contact linux-acpi@vger.kernel.org
20585 */
20586-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20587+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20588 /*
20589 * Boxes that need ACPI disabled
20590 */
20591@@ -1426,7 +1426,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20592 };
20593
20594 /* second table for DMI checks that should run after early-quirks */
20595-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20596+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20597 /*
20598 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20599 * which includes some code which overrides all temperature
20600diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20601index 3136820..e2c6577 100644
20602--- a/arch/x86/kernel/acpi/sleep.c
20603+++ b/arch/x86/kernel/acpi/sleep.c
20604@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20605 #else /* CONFIG_64BIT */
20606 #ifdef CONFIG_SMP
20607 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20608+
20609+ pax_open_kernel();
20610 early_gdt_descr.address =
20611 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20612+ pax_close_kernel();
20613+
20614 initial_gs = per_cpu_offset(smp_processor_id());
20615 #endif
20616 initial_code = (unsigned long)wakeup_long64;
20617diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20618index 665c6b7..eae4d56 100644
20619--- a/arch/x86/kernel/acpi/wakeup_32.S
20620+++ b/arch/x86/kernel/acpi/wakeup_32.S
20621@@ -29,13 +29,11 @@ wakeup_pmode_return:
20622 # and restore the stack ... but you need gdt for this to work
20623 movl saved_context_esp, %esp
20624
20625- movl %cs:saved_magic, %eax
20626- cmpl $0x12345678, %eax
20627+ cmpl $0x12345678, saved_magic
20628 jne bogus_magic
20629
20630 # jump to place where we left off
20631- movl saved_eip, %eax
20632- jmp *%eax
20633+ jmp *(saved_eip)
20634
20635 bogus_magic:
20636 jmp bogus_magic
20637diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20638index 703130f..27a155d 100644
20639--- a/arch/x86/kernel/alternative.c
20640+++ b/arch/x86/kernel/alternative.c
20641@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20642 */
20643 for (a = start; a < end; a++) {
20644 instr = (u8 *)&a->instr_offset + a->instr_offset;
20645+
20646+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20647+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20648+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20649+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20650+#endif
20651+
20652 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20653 BUG_ON(a->replacementlen > a->instrlen);
20654 BUG_ON(a->instrlen > sizeof(insnbuf));
20655@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20656 add_nops(insnbuf + a->replacementlen,
20657 a->instrlen - a->replacementlen);
20658
20659+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20660+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20661+ instr = ktva_ktla(instr);
20662+#endif
20663+
20664 text_poke_early(instr, insnbuf, a->instrlen);
20665 }
20666 }
20667@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20668 for (poff = start; poff < end; poff++) {
20669 u8 *ptr = (u8 *)poff + *poff;
20670
20671+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20672+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20673+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20674+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20675+#endif
20676+
20677 if (!*poff || ptr < text || ptr >= text_end)
20678 continue;
20679 /* turn DS segment override prefix into lock prefix */
20680- if (*ptr == 0x3e)
20681+ if (*ktla_ktva(ptr) == 0x3e)
20682 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20683 }
20684 mutex_unlock(&text_mutex);
20685@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20686 for (poff = start; poff < end; poff++) {
20687 u8 *ptr = (u8 *)poff + *poff;
20688
20689+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20690+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20691+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20692+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20693+#endif
20694+
20695 if (!*poff || ptr < text || ptr >= text_end)
20696 continue;
20697 /* turn lock prefix into DS segment override prefix */
20698- if (*ptr == 0xf0)
20699+ if (*ktla_ktva(ptr) == 0xf0)
20700 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20701 }
20702 mutex_unlock(&text_mutex);
20703@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20704
20705 BUG_ON(p->len > MAX_PATCH_LEN);
20706 /* prep the buffer with the original instructions */
20707- memcpy(insnbuf, p->instr, p->len);
20708+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20709 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20710 (unsigned long)p->instr, p->len);
20711
20712@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20713 if (!uniproc_patched || num_possible_cpus() == 1)
20714 free_init_pages("SMP alternatives",
20715 (unsigned long)__smp_locks,
20716- (unsigned long)__smp_locks_end);
20717+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20718 #endif
20719
20720 apply_paravirt(__parainstructions, __parainstructions_end);
20721@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20722 * instructions. And on the local CPU you need to be protected again NMI or MCE
20723 * handlers seeing an inconsistent instruction while you patch.
20724 */
20725-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20726+void *__kprobes text_poke_early(void *addr, const void *opcode,
20727 size_t len)
20728 {
20729 unsigned long flags;
20730 local_irq_save(flags);
20731- memcpy(addr, opcode, len);
20732+
20733+ pax_open_kernel();
20734+ memcpy(ktla_ktva(addr), opcode, len);
20735 sync_core();
20736+ pax_close_kernel();
20737+
20738 local_irq_restore(flags);
20739 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20740 that causes hangs on some VIA CPUs. */
20741@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20742 */
20743 void *text_poke(void *addr, const void *opcode, size_t len)
20744 {
20745- unsigned long flags;
20746- char *vaddr;
20747+ unsigned char *vaddr = ktla_ktva(addr);
20748 struct page *pages[2];
20749- int i;
20750+ size_t i;
20751
20752 if (!core_kernel_text((unsigned long)addr)) {
20753- pages[0] = vmalloc_to_page(addr);
20754- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20755+ pages[0] = vmalloc_to_page(vaddr);
20756+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20757 } else {
20758- pages[0] = virt_to_page(addr);
20759+ pages[0] = virt_to_page(vaddr);
20760 WARN_ON(!PageReserved(pages[0]));
20761- pages[1] = virt_to_page(addr + PAGE_SIZE);
20762+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20763 }
20764 BUG_ON(!pages[0]);
20765- local_irq_save(flags);
20766- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20767- if (pages[1])
20768- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20769- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20770- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20771- clear_fixmap(FIX_TEXT_POKE0);
20772- if (pages[1])
20773- clear_fixmap(FIX_TEXT_POKE1);
20774- local_flush_tlb();
20775- sync_core();
20776- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20777- that causes hangs on some VIA CPUs. */
20778+ text_poke_early(addr, opcode, len);
20779 for (i = 0; i < len; i++)
20780- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20781- local_irq_restore(flags);
20782+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20783 return addr;
20784 }
20785
20786@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20787 if (likely(!bp_patching_in_progress))
20788 return 0;
20789
20790- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20791+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20792 return 0;
20793
20794 /* set up the specified breakpoint handler */
20795@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20796 */
20797 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20798 {
20799- unsigned char int3 = 0xcc;
20800+ const unsigned char int3 = 0xcc;
20801
20802 bp_int3_handler = handler;
20803 bp_int3_addr = (u8 *)addr + sizeof(int3);
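
[annotation] text_poke() above is rewritten to patch through the KERNEXEC alias (ktla_ktva()) inside a pax_open_kernel()/pax_close_kernel() window rather than through the FIX_TEXT_POKE fixmap, then to verify that the bytes landed. The bracketing pattern translates directly to user space with mprotect(): lift write protection, patch, restore. A sketch assuming a 4096-byte page, with error handling trimmed to the essentials:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    /* Page-aligned so mprotect() can cover it exactly; assumes the
     * system page size is 4096 bytes. */
    static unsigned char buf[4096] __attribute__((aligned(4096))) = "old";

    int main(void)
    {
        const char *repl = "new";

        if (mprotect(buf, sizeof(buf), PROT_READ))      /* start read-only */
            return 1;

        if (mprotect(buf, sizeof(buf), PROT_READ | PROT_WRITE))
            return 1;                                   /* "open" window */
        memcpy(buf, repl, strlen(repl) + 1);
        if (mprotect(buf, sizeof(buf), PROT_READ))      /* "close" window */
            return 1;

        printf("%s\n", (char *)buf);                    /* prints: new */
        return 0;
    }
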
20804diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20805index 29b5b18..3bdfc29 100644
20806--- a/arch/x86/kernel/apic/apic.c
20807+++ b/arch/x86/kernel/apic/apic.c
20808@@ -201,7 +201,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20809 /*
20810 * Debug level, exported for io_apic.c
20811 */
20812-unsigned int apic_verbosity;
20813+int apic_verbosity;
20814
20815 int pic_mode;
20816
20817@@ -1991,7 +1991,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20818 apic_write(APIC_ESR, 0);
20819 v = apic_read(APIC_ESR);
20820 ack_APIC_irq();
20821- atomic_inc(&irq_err_count);
20822+ atomic_inc_unchecked(&irq_err_count);
20823
20824 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20825 smp_processor_id(), v);
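
[annotation] atomic_inc_unchecked() above is the PAX_REFCOUNT opt-out: irq_err_count is a pure statistics counter that may legitimately wrap, while every remaining atomic_inc() gets overflow protection, since a wrapped reference count is a use-after-free in the making. The checked flavour behaves roughly like a saturating increment; a sketch using GCC's __atomic builtins (refcount_inc_checked is an illustrative name, and the real implementation traps on overflow rather than merely refusing):

    #include <limits.h>
    #include <stdio.h>

    /* Increment unless the counter is already at INT_MAX; returns 1 on
     * success, 0 if the increment was refused to avoid wrapping. */
    static int refcount_inc_checked(int *v)
    {
        int old = __atomic_load_n(v, __ATOMIC_RELAXED);

        do {
            if (old == INT_MAX)
                return 0;                /* saturate instead of wrapping */
        } while (!__atomic_compare_exchange_n(v, &old, old + 1, 0,
                                              __ATOMIC_RELAXED,
                                              __ATOMIC_RELAXED));
        return 1;
    }

    int main(void)
    {
        int rc = INT_MAX - 1;

        printf("%d\n", refcount_inc_checked(&rc));  /* 1: reaches INT_MAX */
        printf("%d\n", refcount_inc_checked(&rc));  /* 0: refuses to wrap */
        return 0;
    }
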
20826diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20827index de918c4..32eed23 100644
20828--- a/arch/x86/kernel/apic/apic_flat_64.c
20829+++ b/arch/x86/kernel/apic/apic_flat_64.c
20830@@ -154,7 +154,7 @@ static int flat_probe(void)
20831 return 1;
20832 }
20833
20834-static struct apic apic_flat = {
20835+static struct apic apic_flat __read_only = {
20836 .name = "flat",
20837 .probe = flat_probe,
20838 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20839@@ -260,7 +260,7 @@ static int physflat_probe(void)
20840 return 0;
20841 }
20842
20843-static struct apic apic_physflat = {
20844+static struct apic apic_physflat __read_only = {
20845
20846 .name = "physical flat",
20847 .probe = physflat_probe,
20848diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20849index b205cdb..d8503ff 100644
20850--- a/arch/x86/kernel/apic/apic_noop.c
20851+++ b/arch/x86/kernel/apic/apic_noop.c
20852@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20853 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20854 }
20855
20856-struct apic apic_noop = {
20857+struct apic apic_noop __read_only = {
20858 .name = "noop",
20859 .probe = noop_probe,
20860 .acpi_madt_oem_check = NULL,
20861diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20862index c4a8d63..fe893ac 100644
20863--- a/arch/x86/kernel/apic/bigsmp_32.c
20864+++ b/arch/x86/kernel/apic/bigsmp_32.c
20865@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20866 return dmi_bigsmp;
20867 }
20868
20869-static struct apic apic_bigsmp = {
20870+static struct apic apic_bigsmp __read_only = {
20871
20872 .name = "bigsmp",
20873 .probe = probe_bigsmp,
20874diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20875index 3f5f604..309c0e6 100644
20876--- a/arch/x86/kernel/apic/io_apic.c
20877+++ b/arch/x86/kernel/apic/io_apic.c
20878@@ -1859,7 +1859,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20879 return ret;
20880 }
20881
20882-atomic_t irq_mis_count;
20883+atomic_unchecked_t irq_mis_count;
20884
20885 #ifdef CONFIG_GENERIC_PENDING_IRQ
20886 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20887@@ -2000,7 +2000,7 @@ static void ack_ioapic_level(struct irq_data *data)
20888 * at the cpu.
20889 */
20890 if (!(v & (1 << (i & 0x1f)))) {
20891- atomic_inc(&irq_mis_count);
20892+ atomic_inc_unchecked(&irq_mis_count);
20893
20894 eoi_ioapic_irq(irq, cfg);
20895 }
20896diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20897index bda4886..f9c7195 100644
20898--- a/arch/x86/kernel/apic/probe_32.c
20899+++ b/arch/x86/kernel/apic/probe_32.c
20900@@ -72,7 +72,7 @@ static int probe_default(void)
20901 return 1;
20902 }
20903
20904-static struct apic apic_default = {
20905+static struct apic apic_default __read_only = {
20906
20907 .name = "default",
20908 .probe = probe_default,
20909diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
20910index 6cedd79..023ff8e 100644
20911--- a/arch/x86/kernel/apic/vector.c
20912+++ b/arch/x86/kernel/apic/vector.c
20913@@ -21,7 +21,7 @@
20914
20915 static DEFINE_RAW_SPINLOCK(vector_lock);
20916
20917-void lock_vector_lock(void)
20918+void lock_vector_lock(void) __acquires(vector_lock)
20919 {
20920 /* Used to the online set of cpus does not change
20921 * during assign_irq_vector.
20922@@ -29,7 +29,7 @@ void lock_vector_lock(void)
20923 raw_spin_lock(&vector_lock);
20924 }
20925
20926-void unlock_vector_lock(void)
20927+void unlock_vector_lock(void) __releases(vector_lock)
20928 {
20929 raw_spin_unlock(&vector_lock);
20930 }
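
[annotation] __acquires()/__releases() above are sparse context annotations: they tell the static checker that lock_vector_lock() exits holding vector_lock and that unlock_vector_lock() drops it, and they compile away entirely for GCC. Their standard definition is a one-liner pair, shown here standalone under the assumption that __CHECKER__ is defined only when sparse is running:

    /* Context annotations: no-ops for the compiler, lock-balance hints
     * for sparse. */
    #ifdef __CHECKER__
    # define __acquires(x) __attribute__((context(x, 0, 1)))
    # define __releases(x) __attribute__((context(x, 1, 0)))
    #else
    # define __acquires(x)
    # define __releases(x)
    #endif

    static int lock_state;

    static void take(void)    __acquires(lock_state) { lock_state = 1; }
    static void release(void) __releases(lock_state) { lock_state = 0; }

    int main(void)
    {
        take();
        release();
        return lock_state;     /* 0: acquire/release balanced */
    }
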
20931diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20932index e658f21..b695a1a 100644
20933--- a/arch/x86/kernel/apic/x2apic_cluster.c
20934+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20935@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20936 return notifier_from_errno(err);
20937 }
20938
20939-static struct notifier_block __refdata x2apic_cpu_notifier = {
20940+static struct notifier_block x2apic_cpu_notifier = {
20941 .notifier_call = update_clusterinfo,
20942 };
20943
20944@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20945 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20946 }
20947
20948-static struct apic apic_x2apic_cluster = {
20949+static struct apic apic_x2apic_cluster __read_only = {
20950
20951 .name = "cluster x2apic",
20952 .probe = x2apic_cluster_probe,
20953diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20954index 6fae733..5ca17af 100644
20955--- a/arch/x86/kernel/apic/x2apic_phys.c
20956+++ b/arch/x86/kernel/apic/x2apic_phys.c
20957@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
20958 return apic == &apic_x2apic_phys;
20959 }
20960
20961-static struct apic apic_x2apic_phys = {
20962+static struct apic apic_x2apic_phys __read_only = {
20963
20964 .name = "physical x2apic",
20965 .probe = x2apic_phys_probe,
20966diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20967index 8e9dcfd..c61b3e4 100644
20968--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20969+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20970@@ -348,7 +348,7 @@ static int uv_probe(void)
20971 return apic == &apic_x2apic_uv_x;
20972 }
20973
20974-static struct apic __refdata apic_x2apic_uv_x = {
20975+static struct apic apic_x2apic_uv_x __read_only = {
20976
20977 .name = "UV large system",
20978 .probe = uv_probe,
20979diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20980index 927ec92..0dc3bd4 100644
20981--- a/arch/x86/kernel/apm_32.c
20982+++ b/arch/x86/kernel/apm_32.c
20983@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
20984 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20985 * even though they are called in protected mode.
20986 */
20987-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20988+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20989 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20990
20991 static const char driver_version[] = "1.16ac"; /* no spaces */
20992@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
20993 BUG_ON(cpu != 0);
20994 gdt = get_cpu_gdt_table(cpu);
20995 save_desc_40 = gdt[0x40 / 8];
20996+
20997+ pax_open_kernel();
20998 gdt[0x40 / 8] = bad_bios_desc;
20999+ pax_close_kernel();
21000
21001 apm_irq_save(flags);
21002 APM_DO_SAVE_SEGS;
21003@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
21004 &call->esi);
21005 APM_DO_RESTORE_SEGS;
21006 apm_irq_restore(flags);
21007+
21008+ pax_open_kernel();
21009 gdt[0x40 / 8] = save_desc_40;
21010+ pax_close_kernel();
21011+
21012 put_cpu();
21013
21014 return call->eax & 0xff;
21015@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
21016 BUG_ON(cpu != 0);
21017 gdt = get_cpu_gdt_table(cpu);
21018 save_desc_40 = gdt[0x40 / 8];
21019+
21020+ pax_open_kernel();
21021 gdt[0x40 / 8] = bad_bios_desc;
21022+ pax_close_kernel();
21023
21024 apm_irq_save(flags);
21025 APM_DO_SAVE_SEGS;
21026@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
21027 &call->eax);
21028 APM_DO_RESTORE_SEGS;
21029 apm_irq_restore(flags);
21030+
21031+ pax_open_kernel();
21032 gdt[0x40 / 8] = save_desc_40;
21033+ pax_close_kernel();
21034+
21035 put_cpu();
21036 return error;
21037 }
21038@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21039 * code to that CPU.
21040 */
21041 gdt = get_cpu_gdt_table(0);
21042+
21043+ pax_open_kernel();
21044 set_desc_base(&gdt[APM_CS >> 3],
21045 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21046 set_desc_base(&gdt[APM_CS_16 >> 3],
21047 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21048 set_desc_base(&gdt[APM_DS >> 3],
21049 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21050+ pax_close_kernel();
21051
21052 proc_create("apm", 0, NULL, &apm_file_ops);
21053
21054diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21055index 9f6b934..cf5ffb3 100644
21056--- a/arch/x86/kernel/asm-offsets.c
21057+++ b/arch/x86/kernel/asm-offsets.c
21058@@ -32,6 +32,8 @@ void common(void) {
21059 OFFSET(TI_flags, thread_info, flags);
21060 OFFSET(TI_status, thread_info, status);
21061 OFFSET(TI_addr_limit, thread_info, addr_limit);
21062+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21063+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21064
21065 BLANK();
21066 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21067@@ -52,8 +54,26 @@ void common(void) {
21068 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21069 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21070 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21071+
21072+#ifdef CONFIG_PAX_KERNEXEC
21073+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21074 #endif
21075
21076+#ifdef CONFIG_PAX_MEMORY_UDEREF
21077+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21078+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21079+#ifdef CONFIG_X86_64
21080+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21081+#endif
21082+#endif
21083+
21084+#endif
21085+
21086+ BLANK();
21087+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21088+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21089+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21090+
21091 #ifdef CONFIG_XEN
21092 BLANK();
21093 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
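
[annotation] The new TI_lowest_stack, TI_task_thread_sp0 and PAGE_SIZE_asm entries above ride the standard asm-offsets machinery: C code emits structure offsets and constants under stable names so hand-written assembly can address fields without hardcoding layouts. In the kernel the values are scraped out of generated assembly by a Makefile rule; this standalone sketch just prints them (struct thread_info_demo is an illustrative stand-in):

    #include <stddef.h>
    #include <stdio.h>

    struct thread_info_demo {
        unsigned long flags;
        unsigned long addr_limit;
        unsigned long lowest_stack;
    };

    /* Emit "#define SYM offset" lines an assembly source could include. */
    #define OFFSET(sym, str, mem) \
        printf("#define %-16s %zu\n", #sym, offsetof(struct str, mem))

    int main(void)
    {
        OFFSET(TI_flags,        thread_info_demo, flags);
        OFFSET(TI_addr_limit,   thread_info_demo, addr_limit);
        OFFSET(TI_lowest_stack, thread_info_demo, lowest_stack);
        return 0;
    }
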
21094diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21095index fdcbb4d..036dd93 100644
21096--- a/arch/x86/kernel/asm-offsets_64.c
21097+++ b/arch/x86/kernel/asm-offsets_64.c
21098@@ -80,6 +80,7 @@ int main(void)
21099 BLANK();
21100 #undef ENTRY
21101
21102+ DEFINE(TSS_size, sizeof(struct tss_struct));
21103 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21104 BLANK();
21105
21106diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21107index 80091ae..0c5184f 100644
21108--- a/arch/x86/kernel/cpu/Makefile
21109+++ b/arch/x86/kernel/cpu/Makefile
21110@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21111 CFLAGS_REMOVE_perf_event.o = -pg
21112 endif
21113
21114-# Make sure load_percpu_segment has no stackprotector
21115-nostackp := $(call cc-option, -fno-stack-protector)
21116-CFLAGS_common.o := $(nostackp)
21117-
21118 obj-y := intel_cacheinfo.o scattered.o topology.o
21119 obj-y += common.o
21120 obj-y += rdrand.o
21121diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21122index 15c5df9..d9a604a 100644
21123--- a/arch/x86/kernel/cpu/amd.c
21124+++ b/arch/x86/kernel/cpu/amd.c
21125@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21126 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21127 {
21128 /* AMD errata T13 (order #21922) */
21129- if ((c->x86 == 6)) {
21130+ if (c->x86 == 6) {
21131 /* Duron Rev A0 */
21132 if (c->x86_model == 3 && c->x86_mask == 0)
21133 size = 64;
21134diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21135index c604965..0b0e28a 100644
21136--- a/arch/x86/kernel/cpu/common.c
21137+++ b/arch/x86/kernel/cpu/common.c
21138@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21139
21140 static const struct cpu_dev *this_cpu = &default_cpu;
21141
21142-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21143-#ifdef CONFIG_X86_64
21144- /*
21145- * We need valid kernel segments for data and code in long mode too
21146- * IRET will check the segment types kkeil 2000/10/28
21147- * Also sysret mandates a special GDT layout
21148- *
21149- * TLS descriptors are currently at a different place compared to i386.
21150- * Hopefully nobody expects them at a fixed place (Wine?)
21151- */
21152- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21153- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21154- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21155- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21156- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21157- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21158-#else
21159- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21160- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21161- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21162- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21163- /*
21164- * Segments used for calling PnP BIOS have byte granularity.
21165- * They code segments and data segments have fixed 64k limits,
21166- * the transfer segment sizes are set at run time.
21167- */
21168- /* 32-bit code */
21169- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21170- /* 16-bit code */
21171- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21172- /* 16-bit data */
21173- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21174- /* 16-bit data */
21175- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21176- /* 16-bit data */
21177- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21178- /*
21179- * The APM segments have byte granularity and their bases
21180- * are set at run time. All have 64k limits.
21181- */
21182- /* 32-bit code */
21183- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21184- /* 16-bit code */
21185- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21186- /* data */
21187- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21188-
21189- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21190- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21191- GDT_STACK_CANARY_INIT
21192-#endif
21193-} };
21194-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21195-
21196 static int __init x86_xsave_setup(char *s)
21197 {
21198 if (strlen(s))
21199@@ -305,6 +251,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21200 }
21201 }
21202
21203+#ifdef CONFIG_X86_64
21204+static __init int setup_disable_pcid(char *arg)
21205+{
21206+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21207+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21208+
21209+#ifdef CONFIG_PAX_MEMORY_UDEREF
21210+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21211+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21212+#endif
21213+
21214+ return 1;
21215+}
21216+__setup("nopcid", setup_disable_pcid);
21217+
21218+static void setup_pcid(struct cpuinfo_x86 *c)
21219+{
21220+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21221+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21222+
21223+#ifdef CONFIG_PAX_MEMORY_UDEREF
21224+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21225+ pax_open_kernel();
21226+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21227+ pax_close_kernel();
21228+ printk("PAX: slow and weak UDEREF enabled\n");
21229+ } else
21230+ printk("PAX: UDEREF disabled\n");
21231+#endif
21232+
21233+ return;
21234+ }
21235+
21236+ printk("PAX: PCID detected\n");
21237+ set_in_cr4(X86_CR4_PCIDE);
21238+
21239+#ifdef CONFIG_PAX_MEMORY_UDEREF
21240+ pax_open_kernel();
21241+ clone_pgd_mask = ~(pgdval_t)0UL;
21242+ pax_close_kernel();
21243+ if (pax_user_shadow_base)
21244+ printk("PAX: weak UDEREF enabled\n");
21245+ else {
21246+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21247+ printk("PAX: strong UDEREF enabled\n");
21248+ }
21249+#endif
21250+
21251+ if (cpu_has(c, X86_FEATURE_INVPCID))
21252+ printk("PAX: INVPCID detected\n");
21253+}
21254+#endif
21255+
21256 /*
21257 * Some CPU features depend on higher CPUID levels, which may not always
21258 * be available due to CPUID level capping or broken virtualization
21259@@ -405,7 +404,7 @@ void switch_to_new_gdt(int cpu)
21260 {
21261 struct desc_ptr gdt_descr;
21262
21263- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21264+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21265 gdt_descr.size = GDT_SIZE - 1;
21266 load_gdt(&gdt_descr);
21267 /* Reload the per-cpu base */
21268@@ -895,6 +894,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21269 setup_smep(c);
21270 setup_smap(c);
21271
21272+#ifdef CONFIG_X86_64
21273+ setup_pcid(c);
21274+#endif
21275+
21276 /*
21277 * The vendor-specific functions might have changed features.
21278 * Now we do "generic changes."
21279@@ -903,6 +906,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21280 /* Filter out anything that depends on CPUID levels we don't have */
21281 filter_cpuid_features(c, true);
21282
21283+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
21284+ setup_clear_cpu_cap(X86_FEATURE_SEP);
21285+#endif
21286+
21287 /* If the model name is still unset, do table lookup. */
21288 if (!c->x86_model_id[0]) {
21289 const char *p;
21290@@ -977,7 +984,7 @@ static void syscall32_cpu_init(void)
21291 void enable_sep_cpu(void)
21292 {
21293 int cpu = get_cpu();
21294- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21295+ struct tss_struct *tss = init_tss + cpu;
21296
21297 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21298 put_cpu();
21299@@ -1115,14 +1122,16 @@ static __init int setup_disablecpuid(char *arg)
21300 }
21301 __setup("clearcpuid=", setup_disablecpuid);
21302
21303+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21304+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21305+
21306 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21307- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21308+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21309 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21310
21311 #ifdef CONFIG_X86_64
21312-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21313-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21314- (unsigned long) debug_idt_table };
21315+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21316+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21317
21318 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21319 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21320@@ -1299,7 +1308,7 @@ void cpu_init(void)
21321 */
21322 load_ucode_ap();
21323
21324- t = &per_cpu(init_tss, cpu);
21325+ t = init_tss + cpu;
21326 oist = &per_cpu(orig_ist, cpu);
21327
21328 #ifdef CONFIG_NUMA
21329@@ -1331,7 +1340,6 @@ void cpu_init(void)
21330 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21331 barrier();
21332
21333- x86_configure_nx();
21334 enable_x2apic();
21335
21336 /*
21337@@ -1383,7 +1391,7 @@ void cpu_init(void)
21338 {
21339 int cpu = smp_processor_id();
21340 struct task_struct *curr = current;
21341- struct tss_struct *t = &per_cpu(init_tss, cpu);
21342+ struct tss_struct *t = init_tss + cpu;
21343 struct thread_struct *thread = &curr->thread;
21344
21345 wait_for_master_cpu(cpu);
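
[annotation] setup_pcid() above keys its UDEREF strategy off two CPUID feature bits: PCID (CPUID.01H:ECX bit 17) and INVPCID (CPUID.(EAX=07H,ECX=0):EBX bit 10), bit positions per the Intel SDM. The same bits can be probed from user space with GCC's cpuid.h; a sketch that assumes an x86 machine whose CPUID supports leaf 7:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;
        printf("PCID:    %s\n", (ecx >> 17) & 1 ? "yes" : "no");

        __cpuid_count(7, 0, eax, ebx, ecx, edx);  /* assumes leaf 7 exists */
        printf("INVPCID: %s\n", (ebx >> 10) & 1 ? "yes" : "no");
        return 0;
    }
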
21346diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21347index c703507..28535e3 100644
21348--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21349+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21350@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
21351 };
21352
21353 #ifdef CONFIG_AMD_NB
21354+static struct attribute *default_attrs_amd_nb[] = {
21355+ &type.attr,
21356+ &level.attr,
21357+ &coherency_line_size.attr,
21358+ &physical_line_partition.attr,
21359+ &ways_of_associativity.attr,
21360+ &number_of_sets.attr,
21361+ &size.attr,
21362+ &shared_cpu_map.attr,
21363+ &shared_cpu_list.attr,
21364+ NULL,
21365+ NULL,
21366+ NULL,
21367+ NULL
21368+};
21369+
21370 static struct attribute **amd_l3_attrs(void)
21371 {
21372 static struct attribute **attrs;
21373@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
21374
21375 n = ARRAY_SIZE(default_attrs);
21376
21377- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21378- n += 2;
21379-
21380- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21381- n += 1;
21382-
21383- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21384- if (attrs == NULL)
21385- return attrs = default_attrs;
21386-
21387- for (n = 0; default_attrs[n]; n++)
21388- attrs[n] = default_attrs[n];
21389+ attrs = default_attrs_amd_nb;
21390
21391 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21392 attrs[n++] = &cache_disable_0.attr;
21393@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
21394 .default_attrs = default_attrs,
21395 };
21396
21397+#ifdef CONFIG_AMD_NB
21398+static struct kobj_type ktype_cache_amd_nb = {
21399+ .sysfs_ops = &sysfs_ops,
21400+ .default_attrs = default_attrs_amd_nb,
21401+};
21402+#endif
21403+
21404 static struct kobj_type ktype_percpu_entry = {
21405 .sysfs_ops = &sysfs_ops,
21406 };
21407@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
21408 return retval;
21409 }
21410
21411+#ifdef CONFIG_AMD_NB
21412+ amd_l3_attrs();
21413+#endif
21414+
21415 for (i = 0; i < num_cache_leaves; i++) {
21416+ struct kobj_type *ktype;
21417+
21418 this_object = INDEX_KOBJECT_PTR(cpu, i);
21419 this_object->cpu = cpu;
21420 this_object->index = i;
21421
21422 this_leaf = CPUID4_INFO_IDX(cpu, i);
21423
21424- ktype_cache.default_attrs = default_attrs;
21425+ ktype = &ktype_cache;
21426 #ifdef CONFIG_AMD_NB
21427 if (this_leaf->base.nb)
21428- ktype_cache.default_attrs = amd_l3_attrs();
21429+ ktype = &ktype_cache_amd_nb;
21430 #endif
21431 retval = kobject_init_and_add(&(this_object->kobj),
21432- &ktype_cache,
21433+ ktype,
21434 per_cpu(ici_cache_kobject, cpu),
21435 "index%1lu", i);
21436 if (unlikely(retval)) {
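
[annotation] The intel_cacheinfo.c changes above swap a kzalloc()'d, runtime-assembled attribute array for a static one carrying spare NULL slots, so the optional AMD north-bridge attributes can be appended without allocation and without mutating the shared ktype_cache default. The same shape in miniature (the attribute names are illustrative):

    #include <stdio.h>

    /* Three fixed entries; the zero-initialized tail leaves room for
     * optional entries plus the NULL terminator. */
    static const char *attrs[8] = { "type", "level", "size" };

    int main(void)
    {
        int n;

        for (n = 0; attrs[n]; n++)      /* find the current end */
            ;
        attrs[n++] = "cache_disable_0"; /* optional feature present */

        for (n = 0; attrs[n]; n++)
            printf("%s\n", attrs[n]);
        return 0;
    }
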
21437diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21438index d2c6116..62fd7aa 100644
21439--- a/arch/x86/kernel/cpu/mcheck/mce.c
21440+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21441@@ -45,6 +45,7 @@
21442 #include <asm/processor.h>
21443 #include <asm/mce.h>
21444 #include <asm/msr.h>
21445+#include <asm/local.h>
21446
21447 #include "mce-internal.h"
21448
21449@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
21450 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21451 m->cs, m->ip);
21452
21453- if (m->cs == __KERNEL_CS)
21454+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21455 print_symbol("{%s}", m->ip);
21456 pr_cont("\n");
21457 }
21458@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
21459
21460 #define PANIC_TIMEOUT 5 /* 5 seconds */
21461
21462-static atomic_t mce_panicked;
21463+static atomic_unchecked_t mce_panicked;
21464
21465 static int fake_panic;
21466-static atomic_t mce_fake_panicked;
21467+static atomic_unchecked_t mce_fake_panicked;
21468
21469 /* Panic in progress. Enable interrupts and wait for final IPI */
21470 static void wait_for_panic(void)
21471@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21472 /*
21473 * Make sure only one CPU runs in machine check panic
21474 */
21475- if (atomic_inc_return(&mce_panicked) > 1)
21476+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21477 wait_for_panic();
21478 barrier();
21479
21480@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21481 console_verbose();
21482 } else {
21483 /* Don't log too much for fake panic */
21484- if (atomic_inc_return(&mce_fake_panicked) > 1)
21485+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21486 return;
21487 }
21488 /* First print corrected ones that are still unlogged */
21489@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21490 if (!fake_panic) {
21491 if (panic_timeout == 0)
21492 panic_timeout = mca_cfg.panic_timeout;
21493- panic(msg);
21494+ panic("%s", msg);
21495 } else
21496 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21497 }
21498@@ -744,7 +745,7 @@ static int mce_timed_out(u64 *t)
21499 * might have been modified by someone else.
21500 */
21501 rmb();
21502- if (atomic_read(&mce_panicked))
21503+ if (atomic_read_unchecked(&mce_panicked))
21504 wait_for_panic();
21505 if (!mca_cfg.monarch_timeout)
21506 goto out;
21507@@ -1722,7 +1723,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21508 }
21509
21510 /* Call the installed machine check handler for this CPU setup. */
21511-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21512+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21513 unexpected_machine_check;
21514
21515 /*
21516@@ -1745,7 +1746,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21517 return;
21518 }
21519
21520+ pax_open_kernel();
21521 machine_check_vector = do_machine_check;
21522+ pax_close_kernel();
21523
21524 __mcheck_cpu_init_generic();
21525 __mcheck_cpu_init_vendor(c);
21526@@ -1759,7 +1762,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21527 */
21528
21529 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21530-static int mce_chrdev_open_count; /* #times opened */
21531+static local_t mce_chrdev_open_count; /* #times opened */
21532 static int mce_chrdev_open_exclu; /* already open exclusive? */
21533
21534 static int mce_chrdev_open(struct inode *inode, struct file *file)
21535@@ -1767,7 +1770,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21536 spin_lock(&mce_chrdev_state_lock);
21537
21538 if (mce_chrdev_open_exclu ||
21539- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21540+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21541 spin_unlock(&mce_chrdev_state_lock);
21542
21543 return -EBUSY;
21544@@ -1775,7 +1778,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21545
21546 if (file->f_flags & O_EXCL)
21547 mce_chrdev_open_exclu = 1;
21548- mce_chrdev_open_count++;
21549+ local_inc(&mce_chrdev_open_count);
21550
21551 spin_unlock(&mce_chrdev_state_lock);
21552
21553@@ -1786,7 +1789,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21554 {
21555 spin_lock(&mce_chrdev_state_lock);
21556
21557- mce_chrdev_open_count--;
21558+ local_dec(&mce_chrdev_open_count);
21559 mce_chrdev_open_exclu = 0;
21560
21561 spin_unlock(&mce_chrdev_state_lock);
21562@@ -2461,7 +2464,7 @@ static __init void mce_init_banks(void)
21563
21564 for (i = 0; i < mca_cfg.banks; i++) {
21565 struct mce_bank *b = &mce_banks[i];
21566- struct device_attribute *a = &b->attr;
21567+ device_attribute_no_const *a = &b->attr;
21568
21569 sysfs_attr_init(&a->attr);
21570 a->attr.name = b->attrname;
21571@@ -2568,7 +2571,7 @@ struct dentry *mce_get_debugfs_dir(void)
21572 static void mce_reset(void)
21573 {
21574 cpu_missing = 0;
21575- atomic_set(&mce_fake_panicked, 0);
21576+ atomic_set_unchecked(&mce_fake_panicked, 0);
21577 atomic_set(&mce_executing, 0);
21578 atomic_set(&mce_callin, 0);
21579 atomic_set(&global_nwo, 0);
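
The atomic_t -> atomic_unchecked_t conversions above follow the PaX REFCOUNT split: checked counters trap on signed overflow, while counters that are mere event tallies (like the panic counters here) are allowed to wrap. A user-space sketch of the two behaviors, assuming the semantics described by the hunk and not the kernel's actual atomic implementation:

	#include <limits.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* checked counter: refuses to wrap, like a REFCOUNT-protected atomic_t */
	static int checked_inc(int *v)
	{
		if (__builtin_add_overflow(*v, 1, v)) {
			fprintf(stderr, "refcount overflow detected\n");
			abort();	/* the real code raises a trap */
		}
		return *v;
	}

	/* unchecked counter: wrapping is harmless for pure event tallies */
	static unsigned int unchecked_inc(unsigned int *v)
	{
		return ++*v;
	}

	int main(void)
	{
		unsigned int panicked = UINT_MAX;	/* event tally, may wrap */
		printf("panicked -> %u\n", unchecked_inc(&panicked));

		int refs = INT_MAX;			/* lifetime-critical count */
		checked_inc(&refs);			/* aborts rather than wraps */
		return 0;
	}

The mce_chrdev_open_count change to local_t is the same triage in the other direction: it is a usage count, not an object refcount, so it moves to a type outside the overflow-checked class.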
21580diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21581index a304298..49b6d06 100644
21582--- a/arch/x86/kernel/cpu/mcheck/p5.c
21583+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21584@@ -10,6 +10,7 @@
21585 #include <asm/processor.h>
21586 #include <asm/mce.h>
21587 #include <asm/msr.h>
21588+#include <asm/pgtable.h>
21589
21590 /* By default disabled */
21591 int mce_p5_enabled __read_mostly;
21592@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21593 if (!cpu_has(c, X86_FEATURE_MCE))
21594 return;
21595
21596+ pax_open_kernel();
21597 machine_check_vector = pentium_machine_check;
21598+ pax_close_kernel();
21599 /* Make sure the vector pointer is visible before we enable MCEs: */
21600 wmb();
21601
21602diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21603index 7dc5564..1273569 100644
21604--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21605+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21606@@ -9,6 +9,7 @@
21607 #include <asm/processor.h>
21608 #include <asm/mce.h>
21609 #include <asm/msr.h>
21610+#include <asm/pgtable.h>
21611
21612 /* Machine check handler for WinChip C6: */
21613 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21614@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21615 {
21616 u32 lo, hi;
21617
21618+ pax_open_kernel();
21619 machine_check_vector = winchip_machine_check;
21620+ pax_close_kernel();
21621 /* Make sure the vector pointer is visible before we enable MCEs: */
21622 wmb();
21623
21624diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21625index 36a8361..e7058c2 100644
21626--- a/arch/x86/kernel/cpu/microcode/core.c
21627+++ b/arch/x86/kernel/cpu/microcode/core.c
21628@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21629 return NOTIFY_OK;
21630 }
21631
21632-static struct notifier_block __refdata mc_cpu_notifier = {
21633+static struct notifier_block mc_cpu_notifier = {
21634 .notifier_call = mc_cpu_callback,
21635 };
21636
21637diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21638index c6826d1..8dc677e 100644
21639--- a/arch/x86/kernel/cpu/microcode/intel.c
21640+++ b/arch/x86/kernel/cpu/microcode/intel.c
21641@@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
21642 struct microcode_header_intel mc_header;
21643 unsigned int mc_size;
21644
21645+ if (leftover < sizeof(mc_header)) {
21646+ pr_err("error! Truncated header in microcode data file\n");
21647+ break;
21648+ }
21649+
21650 if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
21651 break;
21652
21653@@ -293,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21654
21655 static int get_ucode_user(void *to, const void *from, size_t n)
21656 {
21657- return copy_from_user(to, from, n);
21658+ return copy_from_user(to, (const void __force_user *)from, n);
21659 }
21660
21661 static enum ucode_state
21662 request_microcode_user(int cpu, const void __user *buf, size_t size)
21663 {
21664- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21665+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21666 }
21667
21668 static void microcode_fini_cpu(int cpu)
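
The __force_user / __force_kernel casts above exist because UDEREF treats user and kernel pointers as distinct address spaces, so a helper shared by both paths must cast explicitly instead of mixing spaces silently. A sketch using sparse-style annotations; get_data() and load_from_kernel_buf() are illustrative stand-ins, not kernel APIs:

	#ifdef __CHECKER__
	#define __user	__attribute__((noderef, address_space(1)))
	#define __force	__attribute__((force))
	#else
	#define __user
	#define __force
	#endif

	#include <string.h>

	/* stand-in for copy_from_user(): the interface takes a user pointer */
	static int get_data(void *to, const void __user *from, size_t n)
	{
		memcpy(to, (__force const void *)from, n);
		return 0;
	}

	/* a kernel-resident buffer fed through the user-pointer interface
	 * needs an explicit __force cast, as in request_microcode_user() */
	static int load_from_kernel_buf(void *dst, const void *kbuf, size_t n)
	{
		return get_data(dst, (__force const void __user *)kbuf, n);
	}

	int main(void)
	{
		char src[8] = "abc", dst[8];
		return load_from_kernel_buf(dst, src, sizeof(dst));
	}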
21669diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
21670index ec9df6f..420eb93 100644
21671--- a/arch/x86/kernel/cpu/microcode/intel_early.c
21672+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
21673@@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start,
21674 unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
21675 int i;
21676
21677- while (leftover) {
21678+ while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
21679+
21680+ if (leftover < sizeof(mc_header))
21681+ break;
21682+
21683 mc_header = (struct microcode_header_intel *)ucode_ptr;
21684
21685 mc_size = get_totalsize(mc_header);
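
Both microcode hunks add the same defensive pattern when walking a concatenated firmware blob: never read a record header from fewer bytes than the header needs, and never advance by a declared size that would run past the buffer. A runnable user-space sketch; rec_header is illustrative, not the real microcode_header_intel layout:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct rec_header {
		uint32_t total_size;	/* size of this record including header */
	};

	static void parse_records(const uint8_t *buf, size_t leftover)
	{
		while (leftover) {
			struct rec_header hdr;

			if (leftover < sizeof(hdr)) {
				fprintf(stderr, "truncated header\n");
				break;
			}
			memcpy(&hdr, buf, sizeof(hdr));

			if (hdr.total_size < sizeof(hdr) ||
			    hdr.total_size > leftover) {
				fprintf(stderr, "bogus record size\n");
				break;
			}
			printf("record of %u bytes\n", hdr.total_size);
			buf += hdr.total_size;
			leftover -= hdr.total_size;
		}
	}

	int main(void)
	{
		uint8_t blob[12];
		uint32_t sz = 12;

		memcpy(blob, &sz, sizeof(sz));
		parse_records(blob, sizeof(blob));
		return 0;
	}

The intel_early.c version additionally caps the iteration at ARRAY_SIZE(mc_saved_tmp), bounding how many records can be saved regardless of what the blob claims.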
21686diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21687index ea5f363..cb0e905 100644
21688--- a/arch/x86/kernel/cpu/mtrr/main.c
21689+++ b/arch/x86/kernel/cpu/mtrr/main.c
21690@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21691 u64 size_or_mask, size_and_mask;
21692 static bool mtrr_aps_delayed_init;
21693
21694-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21695+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21696
21697 const struct mtrr_ops *mtrr_if;
21698
21699diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21700index df5e41f..816c719 100644
21701--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21702+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21703@@ -25,7 +25,7 @@ struct mtrr_ops {
21704 int (*validate_add_page)(unsigned long base, unsigned long size,
21705 unsigned int type);
21706 int (*have_wrcomb)(void);
21707-};
21708+} __do_const;
21709
21710 extern int generic_get_free_region(unsigned long base, unsigned long size,
21711 int replace_reg);
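
The __do_const on struct mtrr_ops (and __read_only on the mtrr_ops[] table) is the recurring constification theme of this patch: an ops table whose function pointers are fixed at build time can live in read-only memory, so a kernel write primitive cannot re-aim the hooks. A plain C sketch with illustrative names:

	#include <stdio.h>

	struct mtrr_like_ops {
		int (*have_wrcomb)(void);
	};

	static int generic_have_wrcomb(void)
	{
		return 1;
	}

	/* const => the pointer table lands in .rodata and cannot be re-aimed */
	static const struct mtrr_like_ops generic_ops = {
		.have_wrcomb = generic_have_wrcomb,
	};

	int main(void)
	{
		printf("%d\n", generic_ops.have_wrcomb());
		return 0;
	}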
21712diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21713index 143e5f5..5825081 100644
21714--- a/arch/x86/kernel/cpu/perf_event.c
21715+++ b/arch/x86/kernel/cpu/perf_event.c
21716@@ -1374,7 +1374,7 @@ static void __init pmu_check_apic(void)
21717
21718 }
21719
21720-static struct attribute_group x86_pmu_format_group = {
21721+static attribute_group_no_const x86_pmu_format_group = {
21722 .name = "format",
21723 .attrs = NULL,
21724 };
21725@@ -1473,7 +1473,7 @@ static struct attribute *events_attr[] = {
21726 NULL,
21727 };
21728
21729-static struct attribute_group x86_pmu_events_group = {
21730+static attribute_group_no_const x86_pmu_events_group = {
21731 .name = "events",
21732 .attrs = events_attr,
21733 };
21734@@ -1997,7 +1997,7 @@ static unsigned long get_segment_base(unsigned int segment)
21735 if (idx > GDT_ENTRIES)
21736 return 0;
21737
21738- desc = raw_cpu_ptr(gdt_page.gdt);
21739+ desc = get_cpu_gdt_table(smp_processor_id());
21740 }
21741
21742 return get_desc_base(desc + idx);
21743@@ -2087,7 +2087,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21744 break;
21745
21746 perf_callchain_store(entry, frame.return_address);
21747- fp = frame.next_frame;
21748+ fp = (const void __force_user *)frame.next_frame;
21749 }
21750 }
21751
21752diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21753index 97242a9..cf9c30e 100644
21754--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21755+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21756@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21757 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21758 {
21759 struct attribute **attrs;
21760- struct attribute_group *attr_group;
21761+ attribute_group_no_const *attr_group;
21762 int i = 0, j;
21763
21764 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21765diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21766index 498b6d9..4126515 100644
21767--- a/arch/x86/kernel/cpu/perf_event_intel.c
21768+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21769@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21770 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21771
21772 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21773- u64 capabilities;
21774+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21775
21776- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21777- x86_pmu.intel_cap.capabilities = capabilities;
21778+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21779+ x86_pmu.intel_cap.capabilities = capabilities;
21780 }
21781
21782 intel_ds_init();
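
The intel_pmu_init() hunk swaps an unconditional rdmsrl() for rdmsrl_safe() and seeds the local from the current capabilities word, so a faulting MSR read leaves the previous value intact instead of consuming an uninitialized local. A sketch of that fallback shape; msr_read() is a stand-in for rdmsrl_safe(), not a real API:

	#include <stdint.h>
	#include <stdio.h>

	static int msr_read(uint32_t msr, uint64_t *val)
	{
		(void)msr;
		(void)val;
		return -1;	/* simulate a CPU that faults on this MSR */
	}

	int main(void)
	{
		uint64_t capabilities = 0;	/* current (default) value */
		uint64_t saved = capabilities;	/* preserve it up front */

		if (msr_read(0x345, &capabilities))	/* PERF_CAPABILITIES */
			capabilities = saved;		/* restore on failure */

		printf("caps=%llu\n", (unsigned long long)capabilities);
		return 0;
	}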
21783diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21784index c4bb8b8..9f7384d 100644
21785--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21786+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21787@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21788 NULL,
21789 };
21790
21791-static struct attribute_group rapl_pmu_events_group = {
21792+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21793 .name = "events",
21794 .attrs = NULL, /* patched at runtime */
21795 };
21796diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21797index c635b8b..b78835e 100644
21798--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21799+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21800@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21801 static int __init uncore_type_init(struct intel_uncore_type *type)
21802 {
21803 struct intel_uncore_pmu *pmus;
21804- struct attribute_group *attr_group;
21805+ attribute_group_no_const *attr_group;
21806 struct attribute **attrs;
21807 int i, j;
21808
21809diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21810index 6c8c1e7..515b98a 100644
21811--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21812+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21813@@ -114,7 +114,7 @@ struct intel_uncore_box {
21814 struct uncore_event_desc {
21815 struct kobj_attribute attr;
21816 const char *config;
21817-};
21818+} __do_const;
21819
21820 ssize_t uncore_event_show(struct kobject *kobj,
21821 struct kobj_attribute *attr, char *buf);
21822diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21823index 83741a7..bd3507d 100644
21824--- a/arch/x86/kernel/cpuid.c
21825+++ b/arch/x86/kernel/cpuid.c
21826@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21827 return notifier_from_errno(err);
21828 }
21829
21830-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21831+static struct notifier_block cpuid_class_cpu_notifier =
21832 {
21833 .notifier_call = cpuid_class_cpu_callback,
21834 };
21835diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21836index aceb2f9..c76d3e3 100644
21837--- a/arch/x86/kernel/crash.c
21838+++ b/arch/x86/kernel/crash.c
21839@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21840 #ifdef CONFIG_X86_32
21841 struct pt_regs fixed_regs;
21842
21843- if (!user_mode_vm(regs)) {
21844+ if (!user_mode(regs)) {
21845 crash_fixup_ss_esp(&fixed_regs, regs);
21846 regs = &fixed_regs;
21847 }
21848diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21849index afa64ad..dce67dd 100644
21850--- a/arch/x86/kernel/crash_dump_64.c
21851+++ b/arch/x86/kernel/crash_dump_64.c
21852@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21853 return -ENOMEM;
21854
21855 if (userbuf) {
21856- if (copy_to_user(buf, vaddr + offset, csize)) {
21857+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21858 iounmap(vaddr);
21859 return -EFAULT;
21860 }
21861diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21862index f6dfd93..892ade4 100644
21863--- a/arch/x86/kernel/doublefault.c
21864+++ b/arch/x86/kernel/doublefault.c
21865@@ -12,7 +12,7 @@
21866
21867 #define DOUBLEFAULT_STACKSIZE (1024)
21868 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21869-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21870+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21871
21872 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21873
21874@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21875 unsigned long gdt, tss;
21876
21877 native_store_gdt(&gdt_desc);
21878- gdt = gdt_desc.address;
21879+ gdt = (unsigned long)gdt_desc.address;
21880
21881 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21882
21883@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21884 /* 0x2 bit is always set */
21885 .flags = X86_EFLAGS_SF | 0x2,
21886 .sp = STACK_START,
21887- .es = __USER_DS,
21888+ .es = __KERNEL_DS,
21889 .cs = __KERNEL_CS,
21890 .ss = __KERNEL_DS,
21891- .ds = __USER_DS,
21892+ .ds = __KERNEL_DS,
21893 .fs = __KERNEL_PERCPU,
21894
21895 .__cr3 = __pa_nodebug(swapper_pg_dir),
21896diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21897index b74ebc7..2c95874 100644
21898--- a/arch/x86/kernel/dumpstack.c
21899+++ b/arch/x86/kernel/dumpstack.c
21900@@ -2,6 +2,9 @@
21901 * Copyright (C) 1991, 1992 Linus Torvalds
21902 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21903 */
21904+#ifdef CONFIG_GRKERNSEC_HIDESYM
21905+#define __INCLUDED_BY_HIDESYM 1
21906+#endif
21907 #include <linux/kallsyms.h>
21908 #include <linux/kprobes.h>
21909 #include <linux/uaccess.h>
21910@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
21911
21912 void printk_address(unsigned long address)
21913 {
21914- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
21915+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
21916 }
21917
21918 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
21919 static void
21920 print_ftrace_graph_addr(unsigned long addr, void *data,
21921 const struct stacktrace_ops *ops,
21922- struct thread_info *tinfo, int *graph)
21923+ struct task_struct *task, int *graph)
21924 {
21925- struct task_struct *task;
21926 unsigned long ret_addr;
21927 int index;
21928
21929 if (addr != (unsigned long)return_to_handler)
21930 return;
21931
21932- task = tinfo->task;
21933 index = task->curr_ret_stack;
21934
21935 if (!task->ret_stack || index < *graph)
21936@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21937 static inline void
21938 print_ftrace_graph_addr(unsigned long addr, void *data,
21939 const struct stacktrace_ops *ops,
21940- struct thread_info *tinfo, int *graph)
21941+ struct task_struct *task, int *graph)
21942 { }
21943 #endif
21944
21945@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21946 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21947 */
21948
21949-static inline int valid_stack_ptr(struct thread_info *tinfo,
21950- void *p, unsigned int size, void *end)
21951+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21952 {
21953- void *t = tinfo;
21954 if (end) {
21955 if (p < end && p >= (end-THREAD_SIZE))
21956 return 1;
21957@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21958 }
21959
21960 unsigned long
21961-print_context_stack(struct thread_info *tinfo,
21962+print_context_stack(struct task_struct *task, void *stack_start,
21963 unsigned long *stack, unsigned long bp,
21964 const struct stacktrace_ops *ops, void *data,
21965 unsigned long *end, int *graph)
21966 {
21967 struct stack_frame *frame = (struct stack_frame *)bp;
21968
21969- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21970+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21971 unsigned long addr;
21972
21973 addr = *stack;
21974@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21975 } else {
21976 ops->address(data, addr, 0);
21977 }
21978- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21979+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21980 }
21981 stack++;
21982 }
21983@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21984 EXPORT_SYMBOL_GPL(print_context_stack);
21985
21986 unsigned long
21987-print_context_stack_bp(struct thread_info *tinfo,
21988+print_context_stack_bp(struct task_struct *task, void *stack_start,
21989 unsigned long *stack, unsigned long bp,
21990 const struct stacktrace_ops *ops, void *data,
21991 unsigned long *end, int *graph)
21992@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21993 struct stack_frame *frame = (struct stack_frame *)bp;
21994 unsigned long *ret_addr = &frame->return_address;
21995
21996- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21997+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21998 unsigned long addr = *ret_addr;
21999
22000 if (!__kernel_text_address(addr))
22001@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22002 ops->address(data, addr, 1);
22003 frame = frame->next_frame;
22004 ret_addr = &frame->return_address;
22005- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22006+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22007 }
22008
22009 return (unsigned long)frame;
22010@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22011 static void print_trace_address(void *data, unsigned long addr, int reliable)
22012 {
22013 touch_nmi_watchdog();
22014- printk(data);
22015+ printk("%s", (char *)data);
22016 printk_stack_address(addr, reliable);
22017 }
22018
22019@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22020 EXPORT_SYMBOL_GPL(oops_begin);
22021 NOKPROBE_SYMBOL(oops_begin);
22022
22023+extern void gr_handle_kernel_exploit(void);
22024+
22025 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22026 {
22027 if (regs && kexec_should_crash(current))
22028@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22029 panic("Fatal exception in interrupt");
22030 if (panic_on_oops)
22031 panic("Fatal exception");
22032- do_exit(signr);
22033+
22034+ gr_handle_kernel_exploit();
22035+
22036+ do_group_exit(signr);
22037 }
22038 NOKPROBE_SYMBOL(oops_end);
22039
22040@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22041 print_modules();
22042 show_regs(regs);
22043 #ifdef CONFIG_X86_32
22044- if (user_mode_vm(regs)) {
22045+ if (user_mode(regs)) {
22046 sp = regs->sp;
22047 ss = regs->ss & 0xffff;
22048 } else {
22049@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22050 unsigned long flags = oops_begin();
22051 int sig = SIGSEGV;
22052
22053- if (!user_mode_vm(regs))
22054+ if (!user_mode(regs))
22055 report_bug(regs->ip, regs);
22056
22057 if (__die(str, regs, err))
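
The dumpstack rework above threads a task pointer plus an explicit stack_start through the walkers instead of a thread_info, relying on the fact that kernel stacks are THREAD_SIZE-aligned: any in-stack pointer can be masked down to the stack's base. A sketch of just that arithmetic, assuming power-of-two alignment (user-space stacks are not aligned this way, so the demo only illustrates the mask):

	#include <stdint.h>
	#include <stdio.h>

	#define THREAD_SIZE 8192UL	/* must be a power of two */

	static void *stack_start_of(const void *sp)
	{
		return (void *)((uintptr_t)sp & ~(THREAD_SIZE - 1));
	}

	int main(void)
	{
		int local;	/* any object on the current stack */

		printf("stack base near %p (sp %p)\n",
		       stack_start_of(&local), (void *)&local);
		return 0;
	}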
22058diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22059index 5abd4cd..c65733b 100644
22060--- a/arch/x86/kernel/dumpstack_32.c
22061+++ b/arch/x86/kernel/dumpstack_32.c
22062@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22063 bp = stack_frame(task, regs);
22064
22065 for (;;) {
22066- struct thread_info *context;
22067+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22068 void *end_stack;
22069
22070 end_stack = is_hardirq_stack(stack, cpu);
22071 if (!end_stack)
22072 end_stack = is_softirq_stack(stack, cpu);
22073
22074- context = task_thread_info(task);
22075- bp = ops->walk_stack(context, stack, bp, ops, data,
22076+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22077 end_stack, &graph);
22078
22079 /* Stop if not on irq stack */
22080@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22081 int i;
22082
22083 show_regs_print_info(KERN_EMERG);
22084- __show_regs(regs, !user_mode_vm(regs));
22085+ __show_regs(regs, !user_mode(regs));
22086
22087 /*
22088 * When in-kernel, we also print out the stack and code at the
22089 * time of the fault..
22090 */
22091- if (!user_mode_vm(regs)) {
22092+ if (!user_mode(regs)) {
22093 unsigned int code_prologue = code_bytes * 43 / 64;
22094 unsigned int code_len = code_bytes;
22095 unsigned char c;
22096 u8 *ip;
22097+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22098
22099 pr_emerg("Stack:\n");
22100 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22101
22102 pr_emerg("Code:");
22103
22104- ip = (u8 *)regs->ip - code_prologue;
22105+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22106 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22107 /* try starting at IP */
22108- ip = (u8 *)regs->ip;
22109+ ip = (u8 *)regs->ip + cs_base;
22110 code_len = code_len - code_prologue + 1;
22111 }
22112 for (i = 0; i < code_len; i++, ip++) {
22113@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22114 pr_cont(" Bad EIP value.");
22115 break;
22116 }
22117- if (ip == (u8 *)regs->ip)
22118+ if (ip == (u8 *)regs->ip + cs_base)
22119 pr_cont(" <%02x>", c);
22120 else
22121 pr_cont(" %02x", c);
22122@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22123 {
22124 unsigned short ud2;
22125
22126+ ip = ktla_ktva(ip);
22127 if (ip < PAGE_OFFSET)
22128 return 0;
22129 if (probe_kernel_address((unsigned short *)ip, ud2))
22130@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22131
22132 return ud2 == 0x0b0f;
22133 }
22134+
22135+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22136+void pax_check_alloca(unsigned long size)
22137+{
22138+ unsigned long sp = (unsigned long)&sp, stack_left;
22139+
22140+ /* all kernel stacks are of the same size */
22141+ stack_left = sp & (THREAD_SIZE - 1);
22142+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22143+}
22144+EXPORT_SYMBOL(pax_check_alloca);
22145+#endif
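
The pax_check_alloca() added above uses the same alignment trick: the stack pointer's offset within its THREAD_SIZE-aligned stack is exactly the space left, and an alloca is refused unless a 256-byte guard band survives. A runnable sketch of that check with a made-up stack pointer value:

	#include <stdint.h>
	#include <stdio.h>

	#define THREAD_SIZE 8192UL

	static int alloca_would_overflow(uintptr_t sp, unsigned long size)
	{
		unsigned long stack_left = sp & (THREAD_SIZE - 1);

		return stack_left < 256 || size >= stack_left - 256;
	}

	int main(void)
	{
		/* pretend sp sits 1 KiB above the stack base */
		uintptr_t sp = 0x100000 + 1024;

		printf("alloca(512): %s\n",
		       alloca_would_overflow(sp, 512) ? "refused" : "ok");
		printf("alloca(900): %s\n",
		       alloca_would_overflow(sp, 900) ? "refused" : "ok");
		return 0;
	}

The 64-bit version that follows has to try the process, IRQ, and exception stacks in turn, since sp may legitimately live on any of the three.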
22146diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22147index ff86f19..73eabf4 100644
22148--- a/arch/x86/kernel/dumpstack_64.c
22149+++ b/arch/x86/kernel/dumpstack_64.c
22150@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22151 const struct stacktrace_ops *ops, void *data)
22152 {
22153 const unsigned cpu = get_cpu();
22154- struct thread_info *tinfo;
22155 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22156 unsigned long dummy;
22157 unsigned used = 0;
22158 int graph = 0;
22159 int done = 0;
22160+ void *stack_start;
22161
22162 if (!task)
22163 task = current;
22164@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22165 * current stack address. If the stacks consist of nested
22166 * exceptions
22167 */
22168- tinfo = task_thread_info(task);
22169 while (!done) {
22170 unsigned long *stack_end;
22171 enum stack_type stype;
22172@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22173 if (ops->stack(data, id) < 0)
22174 break;
22175
22176- bp = ops->walk_stack(tinfo, stack, bp, ops,
22177+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22178 data, stack_end, &graph);
22179 ops->stack(data, "<EOE>");
22180 /*
22181@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22182 * second-to-last pointer (index -2 to end) in the
22183 * exception stack:
22184 */
22185+ if ((u16)stack_end[-1] != __KERNEL_DS)
22186+ goto out;
22187 stack = (unsigned long *) stack_end[-2];
22188 done = 0;
22189 break;
22190@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22191
22192 if (ops->stack(data, "IRQ") < 0)
22193 break;
22194- bp = ops->walk_stack(tinfo, stack, bp,
22195+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22196 ops, data, stack_end, &graph);
22197 /*
22198 * We link to the next stack (which would be
22199@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22200 /*
22201 * This handles the process stack:
22202 */
22203- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22204+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22205+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22206+out:
22207 put_cpu();
22208 }
22209 EXPORT_SYMBOL(dump_trace);
22210@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22211 {
22212 unsigned short ud2;
22213
22214- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22215+ if (probe_kernel_address((unsigned short *)ip, ud2))
22216 return 0;
22217
22218 return ud2 == 0x0b0f;
22219 }
22220+
22221+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22222+void pax_check_alloca(unsigned long size)
22223+{
22224+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22225+ unsigned cpu, used;
22226+ char *id;
22227+
22228+ /* check the process stack first */
22229+ stack_start = (unsigned long)task_stack_page(current);
22230+ stack_end = stack_start + THREAD_SIZE;
22231+ if (likely(stack_start <= sp && sp < stack_end)) {
22232+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22233+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22234+ return;
22235+ }
22236+
22237+ cpu = get_cpu();
22238+
22239+ /* check the irq stacks */
22240+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22241+ stack_start = stack_end - IRQ_STACK_SIZE;
22242+ if (stack_start <= sp && sp < stack_end) {
22243+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22244+ put_cpu();
22245+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22246+ return;
22247+ }
22248+
22249+ /* check the exception stacks */
22250+ used = 0;
22251+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22252+ stack_start = stack_end - EXCEPTION_STKSZ;
22253+ if (stack_end && stack_start <= sp && sp < stack_end) {
22254+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22255+ put_cpu();
22256+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22257+ return;
22258+ }
22259+
22260+ put_cpu();
22261+
22262+ /* unknown stack */
22263+ BUG();
22264+}
22265+EXPORT_SYMBOL(pax_check_alloca);
22266+#endif
22267diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22268index dd2f07a..845dc05 100644
22269--- a/arch/x86/kernel/e820.c
22270+++ b/arch/x86/kernel/e820.c
22271@@ -802,8 +802,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22272
22273 static void early_panic(char *msg)
22274 {
22275- early_printk(msg);
22276- panic(msg);
22277+ early_printk("%s", msg);
22278+ panic("%s", msg);
22279 }
22280
22281 static int userdef __initdata;
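
The panic("%s", msg) / early_printk("%s", msg) changes here (and in mce.c earlier) close a classic format-string hole: a printf-style sink must never receive a non-literal string as its format, or '%' sequences inside the message become directives. A two-line demonstration:

	#include <stdio.h>

	static void report(const char *msg)
	{
		/* BAD:  printf(msg);  -- "%s" inside msg would dereference
		 *                        a garbage variadic argument */
		printf("%s\n", msg);	/* GOOD: message is data, not format */
	}

	int main(void)
	{
		report("disk 100%s full");	/* printed verbatim */
		return 0;
	}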
22282diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22283index 01d1c18..8073693 100644
22284--- a/arch/x86/kernel/early_printk.c
22285+++ b/arch/x86/kernel/early_printk.c
22286@@ -7,6 +7,7 @@
22287 #include <linux/pci_regs.h>
22288 #include <linux/pci_ids.h>
22289 #include <linux/errno.h>
22290+#include <linux/sched.h>
22291 #include <asm/io.h>
22292 #include <asm/processor.h>
22293 #include <asm/fcntl.h>
22294diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22295index 000d419..8f66802 100644
22296--- a/arch/x86/kernel/entry_32.S
22297+++ b/arch/x86/kernel/entry_32.S
22298@@ -177,13 +177,154 @@
22299 /*CFI_REL_OFFSET gs, PT_GS*/
22300 .endm
22301 .macro SET_KERNEL_GS reg
22302+
22303+#ifdef CONFIG_CC_STACKPROTECTOR
22304 movl $(__KERNEL_STACK_CANARY), \reg
22305+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22306+ movl $(__USER_DS), \reg
22307+#else
22308+ xorl \reg, \reg
22309+#endif
22310+
22311 movl \reg, %gs
22312 .endm
22313
22314 #endif /* CONFIG_X86_32_LAZY_GS */
22315
22316-.macro SAVE_ALL
22317+.macro pax_enter_kernel
22318+#ifdef CONFIG_PAX_KERNEXEC
22319+ call pax_enter_kernel
22320+#endif
22321+.endm
22322+
22323+.macro pax_exit_kernel
22324+#ifdef CONFIG_PAX_KERNEXEC
22325+ call pax_exit_kernel
22326+#endif
22327+.endm
22328+
22329+#ifdef CONFIG_PAX_KERNEXEC
22330+ENTRY(pax_enter_kernel)
22331+#ifdef CONFIG_PARAVIRT
22332+ pushl %eax
22333+ pushl %ecx
22334+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22335+ mov %eax, %esi
22336+#else
22337+ mov %cr0, %esi
22338+#endif
22339+ bts $16, %esi
22340+ jnc 1f
22341+ mov %cs, %esi
22342+ cmp $__KERNEL_CS, %esi
22343+ jz 3f
22344+ ljmp $__KERNEL_CS, $3f
22345+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22346+2:
22347+#ifdef CONFIG_PARAVIRT
22348+ mov %esi, %eax
22349+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22350+#else
22351+ mov %esi, %cr0
22352+#endif
22353+3:
22354+#ifdef CONFIG_PARAVIRT
22355+ popl %ecx
22356+ popl %eax
22357+#endif
22358+ ret
22359+ENDPROC(pax_enter_kernel)
22360+
22361+ENTRY(pax_exit_kernel)
22362+#ifdef CONFIG_PARAVIRT
22363+ pushl %eax
22364+ pushl %ecx
22365+#endif
22366+ mov %cs, %esi
22367+ cmp $__KERNEXEC_KERNEL_CS, %esi
22368+ jnz 2f
22369+#ifdef CONFIG_PARAVIRT
22370+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22371+ mov %eax, %esi
22372+#else
22373+ mov %cr0, %esi
22374+#endif
22375+ btr $16, %esi
22376+ ljmp $__KERNEL_CS, $1f
22377+1:
22378+#ifdef CONFIG_PARAVIRT
22379+ mov %esi, %eax
22380+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22381+#else
22382+ mov %esi, %cr0
22383+#endif
22384+2:
22385+#ifdef CONFIG_PARAVIRT
22386+ popl %ecx
22387+ popl %eax
22388+#endif
22389+ ret
22390+ENDPROC(pax_exit_kernel)
22391+#endif
22392+
22393+ .macro pax_erase_kstack
22394+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22395+ call pax_erase_kstack
22396+#endif
22397+ .endm
22398+
22399+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22400+/*
22401+ * ebp: thread_info
22402+ */
22403+ENTRY(pax_erase_kstack)
22404+ pushl %edi
22405+ pushl %ecx
22406+ pushl %eax
22407+
22408+ mov TI_lowest_stack(%ebp), %edi
22409+ mov $-0xBEEF, %eax
22410+ std
22411+
22412+1: mov %edi, %ecx
22413+ and $THREAD_SIZE_asm - 1, %ecx
22414+ shr $2, %ecx
22415+ repne scasl
22416+ jecxz 2f
22417+
22418+ cmp $2*16, %ecx
22419+ jc 2f
22420+
22421+ mov $2*16, %ecx
22422+ repe scasl
22423+ jecxz 2f
22424+ jne 1b
22425+
22426+2: cld
22427+ or $2*4, %edi
22428+ mov %esp, %ecx
22429+ sub %edi, %ecx
22430+
22431+ cmp $THREAD_SIZE_asm, %ecx
22432+ jb 3f
22433+ ud2
22434+3:
22435+
22436+ shr $2, %ecx
22437+ rep stosl
22438+
22439+ mov TI_task_thread_sp0(%ebp), %edi
22440+ sub $128, %edi
22441+ mov %edi, TI_lowest_stack(%ebp)
22442+
22443+ popl %eax
22444+ popl %ecx
22445+ popl %edi
22446+ ret
22447+ENDPROC(pax_erase_kstack)
22448+#endif
22449+
22450+.macro __SAVE_ALL _DS
22451 cld
22452 PUSH_GS
22453 pushl_cfi %fs
22454@@ -206,7 +347,7 @@
22455 CFI_REL_OFFSET ecx, 0
22456 pushl_cfi %ebx
22457 CFI_REL_OFFSET ebx, 0
22458- movl $(__USER_DS), %edx
22459+ movl $\_DS, %edx
22460 movl %edx, %ds
22461 movl %edx, %es
22462 movl $(__KERNEL_PERCPU), %edx
22463@@ -214,6 +355,15 @@
22464 SET_KERNEL_GS %edx
22465 .endm
22466
22467+.macro SAVE_ALL
22468+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22469+ __SAVE_ALL __KERNEL_DS
22470+ pax_enter_kernel
22471+#else
22472+ __SAVE_ALL __USER_DS
22473+#endif
22474+.endm
22475+
22476 .macro RESTORE_INT_REGS
22477 popl_cfi %ebx
22478 CFI_RESTORE ebx
22479@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22480 popfl_cfi
22481 jmp syscall_exit
22482 CFI_ENDPROC
22483-END(ret_from_fork)
22484+ENDPROC(ret_from_fork)
22485
22486 ENTRY(ret_from_kernel_thread)
22487 CFI_STARTPROC
22488@@ -340,7 +490,15 @@ ret_from_intr:
22489 andl $SEGMENT_RPL_MASK, %eax
22490 #endif
22491 cmpl $USER_RPL, %eax
22492+
22493+#ifdef CONFIG_PAX_KERNEXEC
22494+ jae resume_userspace
22495+
22496+ pax_exit_kernel
22497+ jmp resume_kernel
22498+#else
22499 jb resume_kernel # not returning to v8086 or userspace
22500+#endif
22501
22502 ENTRY(resume_userspace)
22503 LOCKDEP_SYS_EXIT
22504@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22505 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22506 # int/exception return?
22507 jne work_pending
22508- jmp restore_all
22509-END(ret_from_exception)
22510+ jmp restore_all_pax
22511+ENDPROC(ret_from_exception)
22512
22513 #ifdef CONFIG_PREEMPT
22514 ENTRY(resume_kernel)
22515@@ -365,7 +523,7 @@ need_resched:
22516 jz restore_all
22517 call preempt_schedule_irq
22518 jmp need_resched
22519-END(resume_kernel)
22520+ENDPROC(resume_kernel)
22521 #endif
22522 CFI_ENDPROC
22523
22524@@ -395,30 +553,45 @@ sysenter_past_esp:
22525 /*CFI_REL_OFFSET cs, 0*/
22526 /*
22527 * Push current_thread_info()->sysenter_return to the stack.
22528- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22529- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22530 */
22531- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22532+ pushl_cfi $0
22533 CFI_REL_OFFSET eip, 0
22534
22535 pushl_cfi %eax
22536 SAVE_ALL
22537+ GET_THREAD_INFO(%ebp)
22538+ movl TI_sysenter_return(%ebp),%ebp
22539+ movl %ebp,PT_EIP(%esp)
22540 ENABLE_INTERRUPTS(CLBR_NONE)
22541
22542 /*
22543 * Load the potential sixth argument from user stack.
22544 * Careful about security.
22545 */
22546+ movl PT_OLDESP(%esp),%ebp
22547+
22548+#ifdef CONFIG_PAX_MEMORY_UDEREF
22549+ mov PT_OLDSS(%esp),%ds
22550+1: movl %ds:(%ebp),%ebp
22551+ push %ss
22552+ pop %ds
22553+#else
22554 cmpl $__PAGE_OFFSET-3,%ebp
22555 jae syscall_fault
22556 ASM_STAC
22557 1: movl (%ebp),%ebp
22558 ASM_CLAC
22559+#endif
22560+
22561 movl %ebp,PT_EBP(%esp)
22562 _ASM_EXTABLE(1b,syscall_fault)
22563
22564 GET_THREAD_INFO(%ebp)
22565
22566+#ifdef CONFIG_PAX_RANDKSTACK
22567+ pax_erase_kstack
22568+#endif
22569+
22570 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22571 jnz sysenter_audit
22572 sysenter_do_call:
22573@@ -434,12 +607,24 @@ sysenter_after_call:
22574 testl $_TIF_ALLWORK_MASK, %ecx
22575 jne sysexit_audit
22576 sysenter_exit:
22577+
22578+#ifdef CONFIG_PAX_RANDKSTACK
22579+ pushl_cfi %eax
22580+ movl %esp, %eax
22581+ call pax_randomize_kstack
22582+ popl_cfi %eax
22583+#endif
22584+
22585+ pax_erase_kstack
22586+
22587 /* if something modifies registers it must also disable sysexit */
22588 movl PT_EIP(%esp), %edx
22589 movl PT_OLDESP(%esp), %ecx
22590 xorl %ebp,%ebp
22591 TRACE_IRQS_ON
22592 1: mov PT_FS(%esp), %fs
22593+2: mov PT_DS(%esp), %ds
22594+3: mov PT_ES(%esp), %es
22595 PTGS_TO_GS
22596 ENABLE_INTERRUPTS_SYSEXIT
22597
22598@@ -453,6 +638,9 @@ sysenter_audit:
22599 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22600 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22601 call __audit_syscall_entry
22602+
22603+ pax_erase_kstack
22604+
22605 popl_cfi %ecx /* get that remapped edx off the stack */
22606 popl_cfi %ecx /* get that remapped esi off the stack */
22607 movl PT_EAX(%esp),%eax /* reload syscall number */
22608@@ -479,10 +667,16 @@ sysexit_audit:
22609
22610 CFI_ENDPROC
22611 .pushsection .fixup,"ax"
22612-2: movl $0,PT_FS(%esp)
22613+4: movl $0,PT_FS(%esp)
22614+ jmp 1b
22615+5: movl $0,PT_DS(%esp)
22616+ jmp 1b
22617+6: movl $0,PT_ES(%esp)
22618 jmp 1b
22619 .popsection
22620- _ASM_EXTABLE(1b,2b)
22621+ _ASM_EXTABLE(1b,4b)
22622+ _ASM_EXTABLE(2b,5b)
22623+ _ASM_EXTABLE(3b,6b)
22624 PTGS_TO_GS_EX
22625 ENDPROC(ia32_sysenter_target)
22626
22627@@ -493,6 +687,11 @@ ENTRY(system_call)
22628 pushl_cfi %eax # save orig_eax
22629 SAVE_ALL
22630 GET_THREAD_INFO(%ebp)
22631+
22632+#ifdef CONFIG_PAX_RANDKSTACK
22633+ pax_erase_kstack
22634+#endif
22635+
22636 # system call tracing in operation / emulation
22637 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22638 jnz syscall_trace_entry
22639@@ -512,6 +711,15 @@ syscall_exit:
22640 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22641 jne syscall_exit_work
22642
22643+restore_all_pax:
22644+
22645+#ifdef CONFIG_PAX_RANDKSTACK
22646+ movl %esp, %eax
22647+ call pax_randomize_kstack
22648+#endif
22649+
22650+ pax_erase_kstack
22651+
22652 restore_all:
22653 TRACE_IRQS_IRET
22654 restore_all_notrace:
22655@@ -566,14 +774,34 @@ ldt_ss:
22656 * compensating for the offset by changing to the ESPFIX segment with
22657 * a base address that matches for the difference.
22658 */
22659-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22660+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22661 mov %esp, %edx /* load kernel esp */
22662 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22663 mov %dx, %ax /* eax: new kernel esp */
22664 sub %eax, %edx /* offset (low word is 0) */
22665+#ifdef CONFIG_SMP
22666+ movl PER_CPU_VAR(cpu_number), %ebx
22667+ shll $PAGE_SHIFT_asm, %ebx
22668+ addl $cpu_gdt_table, %ebx
22669+#else
22670+ movl $cpu_gdt_table, %ebx
22671+#endif
22672 shr $16, %edx
22673- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22674- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22675+
22676+#ifdef CONFIG_PAX_KERNEXEC
22677+ mov %cr0, %esi
22678+ btr $16, %esi
22679+ mov %esi, %cr0
22680+#endif
22681+
22682+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22683+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22684+
22685+#ifdef CONFIG_PAX_KERNEXEC
22686+ bts $16, %esi
22687+ mov %esi, %cr0
22688+#endif
22689+
22690 pushl_cfi $__ESPFIX_SS
22691 pushl_cfi %eax /* new kernel esp */
22692 /* Disable interrupts, but do not irqtrace this section: we
22693@@ -603,20 +831,18 @@ work_resched:
22694 movl TI_flags(%ebp), %ecx
22695 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22696 # than syscall tracing?
22697- jz restore_all
22698+ jz restore_all_pax
22699 testb $_TIF_NEED_RESCHED, %cl
22700 jnz work_resched
22701
22702 work_notifysig: # deal with pending signals and
22703 # notify-resume requests
22704+ movl %esp, %eax
22705 #ifdef CONFIG_VM86
22706 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22707- movl %esp, %eax
22708 jne work_notifysig_v86 # returning to kernel-space or
22709 # vm86-space
22710 1:
22711-#else
22712- movl %esp, %eax
22713 #endif
22714 TRACE_IRQS_ON
22715 ENABLE_INTERRUPTS(CLBR_NONE)
22716@@ -637,7 +863,7 @@ work_notifysig_v86:
22717 movl %eax, %esp
22718 jmp 1b
22719 #endif
22720-END(work_pending)
22721+ENDPROC(work_pending)
22722
22723 # perform syscall exit tracing
22724 ALIGN
22725@@ -645,11 +871,14 @@ syscall_trace_entry:
22726 movl $-ENOSYS,PT_EAX(%esp)
22727 movl %esp, %eax
22728 call syscall_trace_enter
22729+
22730+ pax_erase_kstack
22731+
22732 /* What it returned is what we'll actually use. */
22733 cmpl $(NR_syscalls), %eax
22734 jnae syscall_call
22735 jmp syscall_exit
22736-END(syscall_trace_entry)
22737+ENDPROC(syscall_trace_entry)
22738
22739 # perform syscall exit tracing
22740 ALIGN
22741@@ -662,26 +891,30 @@ syscall_exit_work:
22742 movl %esp, %eax
22743 call syscall_trace_leave
22744 jmp resume_userspace
22745-END(syscall_exit_work)
22746+ENDPROC(syscall_exit_work)
22747 CFI_ENDPROC
22748
22749 RING0_INT_FRAME # can't unwind into user space anyway
22750 syscall_fault:
22751+#ifdef CONFIG_PAX_MEMORY_UDEREF
22752+ push %ss
22753+ pop %ds
22754+#endif
22755 ASM_CLAC
22756 GET_THREAD_INFO(%ebp)
22757 movl $-EFAULT,PT_EAX(%esp)
22758 jmp resume_userspace
22759-END(syscall_fault)
22760+ENDPROC(syscall_fault)
22761
22762 syscall_badsys:
22763 movl $-ENOSYS,%eax
22764 jmp syscall_after_call
22765-END(syscall_badsys)
22766+ENDPROC(syscall_badsys)
22767
22768 sysenter_badsys:
22769 movl $-ENOSYS,%eax
22770 jmp sysenter_after_call
22771-END(sysenter_badsys)
22772+ENDPROC(sysenter_badsys)
22773 CFI_ENDPROC
22774
22775 .macro FIXUP_ESPFIX_STACK
22776@@ -694,8 +927,15 @@ END(sysenter_badsys)
22777 */
22778 #ifdef CONFIG_X86_ESPFIX32
22779 /* fixup the stack */
22780- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22781- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22782+#ifdef CONFIG_SMP
22783+ movl PER_CPU_VAR(cpu_number), %ebx
22784+ shll $PAGE_SHIFT_asm, %ebx
22785+ addl $cpu_gdt_table, %ebx
22786+#else
22787+ movl $cpu_gdt_table, %ebx
22788+#endif
22789+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22790+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22791 shl $16, %eax
22792 addl %esp, %eax /* the adjusted stack pointer */
22793 pushl_cfi $__KERNEL_DS
22794@@ -751,7 +991,7 @@ vector=vector+1
22795 .endr
22796 2: jmp common_interrupt
22797 .endr
22798-END(irq_entries_start)
22799+ENDPROC(irq_entries_start)
22800
22801 .previous
22802 END(interrupt)
22803@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22804 pushl_cfi $do_coprocessor_error
22805 jmp error_code
22806 CFI_ENDPROC
22807-END(coprocessor_error)
22808+ENDPROC(coprocessor_error)
22809
22810 ENTRY(simd_coprocessor_error)
22811 RING0_INT_FRAME
22812@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22813 .section .altinstructions,"a"
22814 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22815 .previous
22816-.section .altinstr_replacement,"ax"
22817+.section .altinstr_replacement,"a"
22818 663: pushl $do_simd_coprocessor_error
22819 664:
22820 .previous
22821@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22822 #endif
22823 jmp error_code
22824 CFI_ENDPROC
22825-END(simd_coprocessor_error)
22826+ENDPROC(simd_coprocessor_error)
22827
22828 ENTRY(device_not_available)
22829 RING0_INT_FRAME
22830@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22831 pushl_cfi $do_device_not_available
22832 jmp error_code
22833 CFI_ENDPROC
22834-END(device_not_available)
22835+ENDPROC(device_not_available)
22836
22837 #ifdef CONFIG_PARAVIRT
22838 ENTRY(native_iret)
22839 iret
22840 _ASM_EXTABLE(native_iret, iret_exc)
22841-END(native_iret)
22842+ENDPROC(native_iret)
22843
22844 ENTRY(native_irq_enable_sysexit)
22845 sti
22846 sysexit
22847-END(native_irq_enable_sysexit)
22848+ENDPROC(native_irq_enable_sysexit)
22849 #endif
22850
22851 ENTRY(overflow)
22852@@ -860,7 +1100,7 @@ ENTRY(overflow)
22853 pushl_cfi $do_overflow
22854 jmp error_code
22855 CFI_ENDPROC
22856-END(overflow)
22857+ENDPROC(overflow)
22858
22859 ENTRY(bounds)
22860 RING0_INT_FRAME
22861@@ -869,7 +1109,7 @@ ENTRY(bounds)
22862 pushl_cfi $do_bounds
22863 jmp error_code
22864 CFI_ENDPROC
22865-END(bounds)
22866+ENDPROC(bounds)
22867
22868 ENTRY(invalid_op)
22869 RING0_INT_FRAME
22870@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22871 pushl_cfi $do_invalid_op
22872 jmp error_code
22873 CFI_ENDPROC
22874-END(invalid_op)
22875+ENDPROC(invalid_op)
22876
22877 ENTRY(coprocessor_segment_overrun)
22878 RING0_INT_FRAME
22879@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22880 pushl_cfi $do_coprocessor_segment_overrun
22881 jmp error_code
22882 CFI_ENDPROC
22883-END(coprocessor_segment_overrun)
22884+ENDPROC(coprocessor_segment_overrun)
22885
22886 ENTRY(invalid_TSS)
22887 RING0_EC_FRAME
22888@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22889 pushl_cfi $do_invalid_TSS
22890 jmp error_code
22891 CFI_ENDPROC
22892-END(invalid_TSS)
22893+ENDPROC(invalid_TSS)
22894
22895 ENTRY(segment_not_present)
22896 RING0_EC_FRAME
22897@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22898 pushl_cfi $do_segment_not_present
22899 jmp error_code
22900 CFI_ENDPROC
22901-END(segment_not_present)
22902+ENDPROC(segment_not_present)
22903
22904 ENTRY(stack_segment)
22905 RING0_EC_FRAME
22906@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
22907 pushl_cfi $do_stack_segment
22908 jmp error_code
22909 CFI_ENDPROC
22910-END(stack_segment)
22911+ENDPROC(stack_segment)
22912
22913 ENTRY(alignment_check)
22914 RING0_EC_FRAME
22915@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
22916 pushl_cfi $do_alignment_check
22917 jmp error_code
22918 CFI_ENDPROC
22919-END(alignment_check)
22920+ENDPROC(alignment_check)
22921
22922 ENTRY(divide_error)
22923 RING0_INT_FRAME
22924@@ -928,7 +1168,7 @@ ENTRY(divide_error)
22925 pushl_cfi $do_divide_error
22926 jmp error_code
22927 CFI_ENDPROC
22928-END(divide_error)
22929+ENDPROC(divide_error)
22930
22931 #ifdef CONFIG_X86_MCE
22932 ENTRY(machine_check)
22933@@ -938,7 +1178,7 @@ ENTRY(machine_check)
22934 pushl_cfi machine_check_vector
22935 jmp error_code
22936 CFI_ENDPROC
22937-END(machine_check)
22938+ENDPROC(machine_check)
22939 #endif
22940
22941 ENTRY(spurious_interrupt_bug)
22942@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
22943 pushl_cfi $do_spurious_interrupt_bug
22944 jmp error_code
22945 CFI_ENDPROC
22946-END(spurious_interrupt_bug)
22947+ENDPROC(spurious_interrupt_bug)
22948
22949 #ifdef CONFIG_XEN
22950 /* Xen doesn't set %esp to be precisely what the normal sysenter
22951@@ -1054,7 +1294,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22952
22953 ENTRY(mcount)
22954 ret
22955-END(mcount)
22956+ENDPROC(mcount)
22957
22958 ENTRY(ftrace_caller)
22959 pushl %eax
22960@@ -1084,7 +1324,7 @@ ftrace_graph_call:
22961 .globl ftrace_stub
22962 ftrace_stub:
22963 ret
22964-END(ftrace_caller)
22965+ENDPROC(ftrace_caller)
22966
22967 ENTRY(ftrace_regs_caller)
22968 pushf /* push flags before compare (in cs location) */
22969@@ -1182,7 +1422,7 @@ trace:
22970 popl %ecx
22971 popl %eax
22972 jmp ftrace_stub
22973-END(mcount)
22974+ENDPROC(mcount)
22975 #endif /* CONFIG_DYNAMIC_FTRACE */
22976 #endif /* CONFIG_FUNCTION_TRACER */
22977
22978@@ -1200,7 +1440,7 @@ ENTRY(ftrace_graph_caller)
22979 popl %ecx
22980 popl %eax
22981 ret
22982-END(ftrace_graph_caller)
22983+ENDPROC(ftrace_graph_caller)
22984
22985 .globl return_to_handler
22986 return_to_handler:
22987@@ -1261,15 +1501,18 @@ error_code:
22988 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22989 REG_TO_PTGS %ecx
22990 SET_KERNEL_GS %ecx
22991- movl $(__USER_DS), %ecx
22992+ movl $(__KERNEL_DS), %ecx
22993 movl %ecx, %ds
22994 movl %ecx, %es
22995+
22996+ pax_enter_kernel
22997+
22998 TRACE_IRQS_OFF
22999 movl %esp,%eax # pt_regs pointer
23000 call *%edi
23001 jmp ret_from_exception
23002 CFI_ENDPROC
23003-END(page_fault)
23004+ENDPROC(page_fault)
23005
23006 /*
23007 * Debug traps and NMI can happen at the one SYSENTER instruction
23008@@ -1312,7 +1555,7 @@ debug_stack_correct:
23009 call do_debug
23010 jmp ret_from_exception
23011 CFI_ENDPROC
23012-END(debug)
23013+ENDPROC(debug)
23014
23015 /*
23016 * NMI is doubly nasty. It can happen _while_ we're handling
23017@@ -1352,6 +1595,9 @@ nmi_stack_correct:
23018 xorl %edx,%edx # zero error code
23019 movl %esp,%eax # pt_regs pointer
23020 call do_nmi
23021+
23022+ pax_exit_kernel
23023+
23024 jmp restore_all_notrace
23025 CFI_ENDPROC
23026
23027@@ -1389,13 +1635,16 @@ nmi_espfix_stack:
23028 FIXUP_ESPFIX_STACK # %eax == %esp
23029 xorl %edx,%edx # zero error code
23030 call do_nmi
23031+
23032+ pax_exit_kernel
23033+
23034 RESTORE_REGS
23035 lss 12+4(%esp), %esp # back to espfix stack
23036 CFI_ADJUST_CFA_OFFSET -24
23037 jmp irq_return
23038 #endif
23039 CFI_ENDPROC
23040-END(nmi)
23041+ENDPROC(nmi)
23042
23043 ENTRY(int3)
23044 RING0_INT_FRAME
23045@@ -1408,14 +1657,14 @@ ENTRY(int3)
23046 call do_int3
23047 jmp ret_from_exception
23048 CFI_ENDPROC
23049-END(int3)
23050+ENDPROC(int3)
23051
23052 ENTRY(general_protection)
23053 RING0_EC_FRAME
23054 pushl_cfi $do_general_protection
23055 jmp error_code
23056 CFI_ENDPROC
23057-END(general_protection)
23058+ENDPROC(general_protection)
23059
23060 #ifdef CONFIG_KVM_GUEST
23061 ENTRY(async_page_fault)
23062@@ -1424,6 +1673,6 @@ ENTRY(async_page_fault)
23063 pushl_cfi $do_async_page_fault
23064 jmp error_code
23065 CFI_ENDPROC
23066-END(async_page_fault)
23067+ENDPROC(async_page_fault)
23068 #endif
23069
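
The pax_erase_kstack routines in both entry files implement STACKLEAK's sanitization: the unused part of the kernel stack is kept filled with a poison word (-0xBEEF), a downward scan finds where stale data from earlier, deeper calls ends, and that span is re-poisoned on the way back to userland. A heavily simplified user-space sketch of the scheme; the real assembly scans for a run of poison words and tracks a lowest_stack watermark, which this model omits:

	#include <stdint.h>
	#include <stdio.h>

	#define POISON ((uint64_t)-0xBEEF)

	/* stack[] models the kernel stack; indices below sp are the unused
	 * region that may still hold stale data from deeper calls */
	static void erase_kstack(uint64_t *stack, size_t sp)
	{
		size_t i = sp;

		/* walk down through stale words until the poison zone; the
		 * real code demands a run of poison words so a stray value
		 * equal to the poison cannot stop the scan early */
		while (i > 0 && stack[i - 1] != POISON)
			i--;

		/* re-poison everything between the poison zone and sp */
		for (; i < sp; i++)
			stack[i] = POISON;
	}

	int main(void)
	{
		uint64_t stack[16];

		for (size_t i = 0; i < 16; i++)
			stack[i] = POISON;
		for (size_t i = 2; i < 8; i++)
			stack[i] = i;	/* stale data from a deeper call */
		erase_kstack(stack, 8);	/* current depth: index 8 */

		printf("slot2 poisoned: %s\n",
		       stack[2] == POISON ? "yes" : "no");
		return 0;
	}

Bounding the scan to the previously-used region is what keeps the per-syscall cost proportional to actual stack usage rather than THREAD_SIZE.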
23070diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23071index 9ebaf63..c786610 100644
23072--- a/arch/x86/kernel/entry_64.S
23073+++ b/arch/x86/kernel/entry_64.S
23074@@ -59,6 +59,8 @@
23075 #include <asm/smap.h>
23076 #include <asm/pgtable_types.h>
23077 #include <linux/err.h>
23078+#include <asm/pgtable.h>
23079+#include <asm/alternative-asm.h>
23080
23081 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23082 #include <linux/elf-em.h>
23083@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23084 ENDPROC(native_usergs_sysret64)
23085 #endif /* CONFIG_PARAVIRT */
23086
23087+ .macro ljmpq sel, off
23088+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23089+ .byte 0x48; ljmp *1234f(%rip)
23090+ .pushsection .rodata
23091+ .align 16
23092+ 1234: .quad \off; .word \sel
23093+ .popsection
23094+#else
23095+ pushq $\sel
23096+ pushq $\off
23097+ lretq
23098+#endif
23099+ .endm
23100+
23101+ .macro pax_enter_kernel
23102+ pax_set_fptr_mask
23103+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23104+ call pax_enter_kernel
23105+#endif
23106+ .endm
23107+
23108+ .macro pax_exit_kernel
23109+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23110+ call pax_exit_kernel
23111+#endif
23112+
23113+ .endm
23114+
23115+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23116+ENTRY(pax_enter_kernel)
23117+ pushq %rdi
23118+
23119+#ifdef CONFIG_PARAVIRT
23120+ PV_SAVE_REGS(CLBR_RDI)
23121+#endif
23122+
23123+#ifdef CONFIG_PAX_KERNEXEC
23124+ GET_CR0_INTO_RDI
23125+ bts $16,%rdi
23126+ jnc 3f
23127+ mov %cs,%edi
23128+ cmp $__KERNEL_CS,%edi
23129+ jnz 2f
23130+1:
23131+#endif
23132+
23133+#ifdef CONFIG_PAX_MEMORY_UDEREF
23134+ 661: jmp 111f
23135+ .pushsection .altinstr_replacement, "a"
23136+ 662: ASM_NOP2
23137+ .popsection
23138+ .pushsection .altinstructions, "a"
23139+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23140+ .popsection
23141+ GET_CR3_INTO_RDI
23142+ cmp $0,%dil
23143+ jnz 112f
23144+ mov $__KERNEL_DS,%edi
23145+ mov %edi,%ss
23146+ jmp 111f
23147+112: cmp $1,%dil
23148+ jz 113f
23149+ ud2
23150+113: sub $4097,%rdi
23151+ bts $63,%rdi
23152+ SET_RDI_INTO_CR3
23153+ mov $__UDEREF_KERNEL_DS,%edi
23154+ mov %edi,%ss
23155+111:
23156+#endif
23157+
23158+#ifdef CONFIG_PARAVIRT
23159+ PV_RESTORE_REGS(CLBR_RDI)
23160+#endif
23161+
23162+ popq %rdi
23163+ pax_force_retaddr
23164+ retq
23165+
23166+#ifdef CONFIG_PAX_KERNEXEC
23167+2: ljmpq __KERNEL_CS,1b
23168+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23169+4: SET_RDI_INTO_CR0
23170+ jmp 1b
23171+#endif
23172+ENDPROC(pax_enter_kernel)
23173+
23174+ENTRY(pax_exit_kernel)
23175+ pushq %rdi
23176+
23177+#ifdef CONFIG_PARAVIRT
23178+ PV_SAVE_REGS(CLBR_RDI)
23179+#endif
23180+
23181+#ifdef CONFIG_PAX_KERNEXEC
23182+ mov %cs,%rdi
23183+ cmp $__KERNEXEC_KERNEL_CS,%edi
23184+ jz 2f
23185+ GET_CR0_INTO_RDI
23186+ bts $16,%rdi
23187+ jnc 4f
23188+1:
23189+#endif
23190+
23191+#ifdef CONFIG_PAX_MEMORY_UDEREF
23192+ 661: jmp 111f
23193+ .pushsection .altinstr_replacement, "a"
23194+ 662: ASM_NOP2
23195+ .popsection
23196+ .pushsection .altinstructions, "a"
23197+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23198+ .popsection
23199+ mov %ss,%edi
23200+ cmp $__UDEREF_KERNEL_DS,%edi
23201+ jnz 111f
23202+ GET_CR3_INTO_RDI
23203+ cmp $0,%dil
23204+ jz 112f
23205+ ud2
23206+112: add $4097,%rdi
23207+ bts $63,%rdi
23208+ SET_RDI_INTO_CR3
23209+ mov $__KERNEL_DS,%edi
23210+ mov %edi,%ss
23211+111:
23212+#endif
23213+
23214+#ifdef CONFIG_PARAVIRT
23215+ PV_RESTORE_REGS(CLBR_RDI);
23216+#endif
23217+
23218+ popq %rdi
23219+ pax_force_retaddr
23220+ retq
23221+
23222+#ifdef CONFIG_PAX_KERNEXEC
23223+2: GET_CR0_INTO_RDI
23224+ btr $16,%rdi
23225+ jnc 4f
23226+ ljmpq __KERNEL_CS,3f
23227+3: SET_RDI_INTO_CR0
23228+ jmp 1b
23229+4: ud2
23230+ jmp 4b
23231+#endif
23232+ENDPROC(pax_exit_kernel)
23233+#endif
23234+
23235+ .macro pax_enter_kernel_user
23236+ pax_set_fptr_mask
23237+#ifdef CONFIG_PAX_MEMORY_UDEREF
23238+ call pax_enter_kernel_user
23239+#endif
23240+ .endm
23241+
23242+ .macro pax_exit_kernel_user
23243+#ifdef CONFIG_PAX_MEMORY_UDEREF
23244+ call pax_exit_kernel_user
23245+#endif
23246+#ifdef CONFIG_PAX_RANDKSTACK
23247+ pushq %rax
23248+ pushq %r11
23249+ call pax_randomize_kstack
23250+ popq %r11
23251+ popq %rax
23252+#endif
23253+ .endm
23254+
23255+#ifdef CONFIG_PAX_MEMORY_UDEREF
23256+ENTRY(pax_enter_kernel_user)
23257+ pushq %rdi
23258+ pushq %rbx
23259+
23260+#ifdef CONFIG_PARAVIRT
23261+ PV_SAVE_REGS(CLBR_RDI)
23262+#endif
23263+
23264+ 661: jmp 111f
23265+ .pushsection .altinstr_replacement, "a"
23266+ 662: ASM_NOP2
23267+ .popsection
23268+ .pushsection .altinstructions, "a"
23269+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23270+ .popsection
23271+ GET_CR3_INTO_RDI
23272+ cmp $1,%dil
23273+ jnz 4f
23274+ sub $4097,%rdi
23275+ bts $63,%rdi
23276+ SET_RDI_INTO_CR3
23277+ jmp 3f
23278+111:
23279+
23280+ GET_CR3_INTO_RDI
23281+ mov %rdi,%rbx
23282+ add $__START_KERNEL_map,%rbx
23283+ sub phys_base(%rip),%rbx
23284+
23285+#ifdef CONFIG_PARAVIRT
23286+ cmpl $0, pv_info+PARAVIRT_enabled
23287+ jz 1f
23288+ pushq %rdi
23289+ i = 0
23290+ .rept USER_PGD_PTRS
23291+ mov i*8(%rbx),%rsi
23292+ mov $0,%sil
23293+ lea i*8(%rbx),%rdi
23294+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23295+ i = i + 1
23296+ .endr
23297+ popq %rdi
23298+ jmp 2f
23299+1:
23300+#endif
23301+
23302+ i = 0
23303+ .rept USER_PGD_PTRS
23304+ movb $0,i*8(%rbx)
23305+ i = i + 1
23306+ .endr
23307+
23308+2: SET_RDI_INTO_CR3
23309+
23310+#ifdef CONFIG_PAX_KERNEXEC
23311+ GET_CR0_INTO_RDI
23312+ bts $16,%rdi
23313+ SET_RDI_INTO_CR0
23314+#endif
23315+
23316+3:
23317+
23318+#ifdef CONFIG_PARAVIRT
23319+ PV_RESTORE_REGS(CLBR_RDI)
23320+#endif
23321+
23322+ popq %rbx
23323+ popq %rdi
23324+ pax_force_retaddr
23325+ retq
23326+4: ud2
23327+ENDPROC(pax_enter_kernel_user)
23328+
23329+ENTRY(pax_exit_kernel_user)
23330+ pushq %rdi
23331+ pushq %rbx
23332+
23333+#ifdef CONFIG_PARAVIRT
23334+ PV_SAVE_REGS(CLBR_RDI)
23335+#endif
23336+
23337+ GET_CR3_INTO_RDI
23338+ 661: jmp 1f
23339+ .pushsection .altinstr_replacement, "a"
23340+ 662: ASM_NOP2
23341+ .popsection
23342+ .pushsection .altinstructions, "a"
23343+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23344+ .popsection
23345+ cmp $0,%dil
23346+ jnz 3f
23347+ add $4097,%rdi
23348+ bts $63,%rdi
23349+ SET_RDI_INTO_CR3
23350+ jmp 2f
23351+1:
23352+
23353+ mov %rdi,%rbx
23354+
23355+#ifdef CONFIG_PAX_KERNEXEC
23356+ GET_CR0_INTO_RDI
23357+ btr $16,%rdi
23358+ jnc 3f
23359+ SET_RDI_INTO_CR0
23360+#endif
23361+
23362+ add $__START_KERNEL_map,%rbx
23363+ sub phys_base(%rip),%rbx
23364+
23365+#ifdef CONFIG_PARAVIRT
23366+ cmpl $0, pv_info+PARAVIRT_enabled
23367+ jz 1f
23368+ i = 0
23369+ .rept USER_PGD_PTRS
23370+ mov i*8(%rbx),%rsi
23371+ mov $0x67,%sil
23372+ lea i*8(%rbx),%rdi
23373+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23374+ i = i + 1
23375+ .endr
23376+ jmp 2f
23377+1:
23378+#endif
23379+
23380+ i = 0
23381+ .rept USER_PGD_PTRS
23382+ movb $0x67,i*8(%rbx)
23383+ i = i + 1
23384+ .endr
23385+2:
23386+
23387+#ifdef CONFIG_PARAVIRT
23388+ PV_RESTORE_REGS(CLBR_RDI)
23389+#endif
23390+
23391+ popq %rbx
23392+ popq %rdi
23393+ pax_force_retaddr
23394+ retq
23395+3: ud2
23396+ENDPROC(pax_exit_kernel_user)
23397+#endif
23398+
23399+ .macro pax_enter_kernel_nmi
23400+ pax_set_fptr_mask
23401+
23402+#ifdef CONFIG_PAX_KERNEXEC
23403+ GET_CR0_INTO_RDI
23404+ bts $16,%rdi
23405+ jc 110f
23406+ SET_RDI_INTO_CR0
23407+ or $2,%ebx
23408+110:
23409+#endif
23410+
23411+#ifdef CONFIG_PAX_MEMORY_UDEREF
23412+ 661: jmp 111f
23413+ .pushsection .altinstr_replacement, "a"
23414+ 662: ASM_NOP2
23415+ .popsection
23416+ .pushsection .altinstructions, "a"
23417+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23418+ .popsection
23419+ GET_CR3_INTO_RDI
23420+ cmp $0,%dil
23421+ jz 111f
23422+ sub $4097,%rdi
23423+ or $4,%ebx
23424+ bts $63,%rdi
23425+ SET_RDI_INTO_CR3
23426+ mov $__UDEREF_KERNEL_DS,%edi
23427+ mov %edi,%ss
23428+111:
23429+#endif
23430+ .endm
23431+
23432+ .macro pax_exit_kernel_nmi
23433+#ifdef CONFIG_PAX_KERNEXEC
23434+ btr $1,%ebx
23435+ jnc 110f
23436+ GET_CR0_INTO_RDI
23437+ btr $16,%rdi
23438+ SET_RDI_INTO_CR0
23439+110:
23440+#endif
23441+
23442+#ifdef CONFIG_PAX_MEMORY_UDEREF
23443+ btr $2,%ebx
23444+ jnc 111f
23445+ GET_CR3_INTO_RDI
23446+ add $4097,%rdi
23447+ bts $63,%rdi
23448+ SET_RDI_INTO_CR3
23449+ mov $__KERNEL_DS,%edi
23450+ mov %edi,%ss
23451+111:
23452+#endif
23453+ .endm
23454+
23455+ .macro pax_erase_kstack
23456+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23457+ call pax_erase_kstack
23458+#endif
23459+ .endm
23460+
23461+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23462+ENTRY(pax_erase_kstack)
23463+ pushq %rdi
23464+ pushq %rcx
23465+ pushq %rax
23466+ pushq %r11
23467+
23468+ GET_THREAD_INFO(%r11)
23469+ mov TI_lowest_stack(%r11), %rdi
23470+ mov $-0xBEEF, %rax
23471+ std
23472+
23473+1: mov %edi, %ecx
23474+ and $THREAD_SIZE_asm - 1, %ecx
23475+ shr $3, %ecx
23476+ repne scasq
23477+ jecxz 2f
23478+
23479+ cmp $2*8, %ecx
23480+ jc 2f
23481+
23482+ mov $2*8, %ecx
23483+ repe scasq
23484+ jecxz 2f
23485+ jne 1b
23486+
23487+2: cld
23488+ or $2*8, %rdi
23489+ mov %esp, %ecx
23490+ sub %edi, %ecx
23491+
23492+ cmp $THREAD_SIZE_asm, %rcx
23493+ jb 3f
23494+ ud2
23495+3:
23496+
23497+ shr $3, %ecx
23498+ rep stosq
23499+
23500+ mov TI_task_thread_sp0(%r11), %rdi
23501+ sub $256, %rdi
23502+ mov %rdi, TI_lowest_stack(%r11)
23503+
23504+ popq %r11
23505+ popq %rax
23506+ popq %rcx
23507+ popq %rdi
23508+ pax_force_retaddr
23509+ ret
23510+ENDPROC(pax_erase_kstack)
23511+#endif
23512
23513 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23514 #ifdef CONFIG_TRACE_IRQFLAGS
23515@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23516 .endm
23517
23518 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23519- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23520+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23521 jnc 1f
23522 TRACE_IRQS_ON_DEBUG
23523 1:
23524@@ -155,27 +582,6 @@ ENDPROC(native_usergs_sysret64)
23525 movq \tmp,R11+\offset(%rsp)
23526 .endm
23527
23528- .macro FAKE_STACK_FRAME child_rip
23529- /* push in order ss, rsp, eflags, cs, rip */
23530- xorl %eax, %eax
23531- pushq_cfi $__KERNEL_DS /* ss */
23532- /*CFI_REL_OFFSET ss,0*/
23533- pushq_cfi %rax /* rsp */
23534- CFI_REL_OFFSET rsp,0
23535- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23536- /*CFI_REL_OFFSET rflags,0*/
23537- pushq_cfi $__KERNEL_CS /* cs */
23538- /*CFI_REL_OFFSET cs,0*/
23539- pushq_cfi \child_rip /* rip */
23540- CFI_REL_OFFSET rip,0
23541- pushq_cfi %rax /* orig rax */
23542- .endm
23543-
23544- .macro UNFAKE_STACK_FRAME
23545- addq $8*6, %rsp
23546- CFI_ADJUST_CFA_OFFSET -(6*8)
23547- .endm
23548-
23549 /*
23550 * initial frame state for interrupts (and exceptions without error code)
23551 */
23552@@ -241,25 +647,26 @@ ENDPROC(native_usergs_sysret64)
23553 /* save partial stack frame */
23554 .macro SAVE_ARGS_IRQ
23555 cld
23556- /* start from rbp in pt_regs and jump over */
23557- movq_cfi rdi, (RDI-RBP)
23558- movq_cfi rsi, (RSI-RBP)
23559- movq_cfi rdx, (RDX-RBP)
23560- movq_cfi rcx, (RCX-RBP)
23561- movq_cfi rax, (RAX-RBP)
23562- movq_cfi r8, (R8-RBP)
23563- movq_cfi r9, (R9-RBP)
23564- movq_cfi r10, (R10-RBP)
23565- movq_cfi r11, (R11-RBP)
23566+ /* start from r15 in pt_regs and jump over */
23567+ movq_cfi rdi, RDI
23568+ movq_cfi rsi, RSI
23569+ movq_cfi rdx, RDX
23570+ movq_cfi rcx, RCX
23571+ movq_cfi rax, RAX
23572+ movq_cfi r8, R8
23573+ movq_cfi r9, R9
23574+ movq_cfi r10, R10
23575+ movq_cfi r11, R11
23576+ movq_cfi r12, R12
23577
23578 /* Save rbp so that we can unwind from get_irq_regs() */
23579- movq_cfi rbp, 0
23580+ movq_cfi rbp, RBP
23581
23582 /* Save previous stack value */
23583 movq %rsp, %rsi
23584
23585- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23586- testl $3, CS-RBP(%rsi)
23587+ movq %rsp,%rdi /* arg1 for handler */
23588+ testb $3, CS(%rsi)
23589 je 1f
23590 SWAPGS
23591 /*
23592@@ -279,6 +686,18 @@ ENDPROC(native_usergs_sysret64)
23593 0x06 /* DW_OP_deref */, \
23594 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23595 0x22 /* DW_OP_plus */
23596+
23597+#ifdef CONFIG_PAX_MEMORY_UDEREF
23598+ testb $3, CS(%rdi)
23599+ jnz 1f
23600+ pax_enter_kernel
23601+ jmp 2f
23602+1: pax_enter_kernel_user
23603+2:
23604+#else
23605+ pax_enter_kernel
23606+#endif
23607+
23608 /* We entered an interrupt context - irqs are off: */
23609 TRACE_IRQS_OFF
23610 .endm
23611@@ -308,9 +727,52 @@ ENTRY(save_paranoid)
23612 js 1f /* negative -> in kernel */
23613 SWAPGS
23614 xorl %ebx,%ebx
23615-1: ret
23616+1:
23617+#ifdef CONFIG_PAX_MEMORY_UDEREF
23618+ testb $3, CS+8(%rsp)
23619+ jnz 1f
23620+ pax_enter_kernel
23621+ jmp 2f
23622+1: pax_enter_kernel_user
23623+2:
23624+#else
23625+ pax_enter_kernel
23626+#endif
23627+ pax_force_retaddr
23628+ ret
23629 CFI_ENDPROC
23630-END(save_paranoid)
23631+ENDPROC(save_paranoid)
23632+
23633+ENTRY(save_paranoid_nmi)
23634+ XCPT_FRAME 1 RDI+8
23635+ cld
23636+ movq_cfi rdi, RDI+8
23637+ movq_cfi rsi, RSI+8
23638+ movq_cfi rdx, RDX+8
23639+ movq_cfi rcx, RCX+8
23640+ movq_cfi rax, RAX+8
23641+ movq_cfi r8, R8+8
23642+ movq_cfi r9, R9+8
23643+ movq_cfi r10, R10+8
23644+ movq_cfi r11, R11+8
23645+ movq_cfi rbx, RBX+8
23646+ movq_cfi rbp, RBP+8
23647+ movq_cfi r12, R12+8
23648+ movq_cfi r13, R13+8
23649+ movq_cfi r14, R14+8
23650+ movq_cfi r15, R15+8
23651+ movl $1,%ebx
23652+ movl $MSR_GS_BASE,%ecx
23653+ rdmsr
23654+ testl %edx,%edx
23655+ js 1f /* negative -> in kernel */
23656+ SWAPGS
23657+ xorl %ebx,%ebx
23658+1: pax_enter_kernel_nmi
23659+ pax_force_retaddr
23660+ ret
23661+ CFI_ENDPROC
23662+ENDPROC(save_paranoid_nmi)
23663
23664 /*
23665 * A newly forked process directly context switches into this address.
23666@@ -331,25 +793,26 @@ ENTRY(ret_from_fork)
23667
23668 RESTORE_REST
23669
23670- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23671+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23672 jz 1f
23673
23674- testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
23675- jnz int_ret_from_sys_call
23676-
23677- RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
23678- jmp ret_from_sys_call # go to the SYSRET fastpath
23679+ /*
23680+ * By the time we get here, we have no idea whether our pt_regs,
23681+ * ti flags, and ti status came from the 64-bit SYSCALL fast path,
23682+ * the slow path, or one of the ia32entry paths.
23683+ * Use int_ret_from_sys_call to return, since it can safely handle
23684+ * all of the above.
23685+ */
23686+ jmp int_ret_from_sys_call
23687
23688 1:
23689- subq $REST_SKIP, %rsp # leave space for volatiles
23690- CFI_ADJUST_CFA_OFFSET REST_SKIP
23691 movq %rbp, %rdi
23692 call *%rbx
23693 movl $0, RAX(%rsp)
23694 RESTORE_REST
23695 jmp int_ret_from_sys_call
23696 CFI_ENDPROC
23697-END(ret_from_fork)
23698+ENDPROC(ret_from_fork)
23699
23700 /*
23701 * System call entry. Up to 6 arguments in registers are supported.
23702@@ -386,7 +849,7 @@ END(ret_from_fork)
23703 ENTRY(system_call)
23704 CFI_STARTPROC simple
23705 CFI_SIGNAL_FRAME
23706- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23707+ CFI_DEF_CFA rsp,0
23708 CFI_REGISTER rip,rcx
23709 /*CFI_REGISTER rflags,r11*/
23710 SWAPGS_UNSAFE_STACK
23711@@ -399,16 +862,23 @@ GLOBAL(system_call_after_swapgs)
23712
23713 movq %rsp,PER_CPU_VAR(old_rsp)
23714 movq PER_CPU_VAR(kernel_stack),%rsp
23715+ SAVE_ARGS 8*6, 0, rax_enosys=1
23716+ pax_enter_kernel_user
23717+
23718+#ifdef CONFIG_PAX_RANDKSTACK
23719+ pax_erase_kstack
23720+#endif
23721+
23722 /*
23723 * No need to follow this irqs off/on section - it's straight
23724 * and short:
23725 */
23726 ENABLE_INTERRUPTS(CLBR_NONE)
23727- SAVE_ARGS 8, 0, rax_enosys=1
23728 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23729 movq %rcx,RIP-ARGOFFSET(%rsp)
23730 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23731- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23732+ GET_THREAD_INFO(%rcx)
23733+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23734 jnz tracesys
23735 system_call_fastpath:
23736 #if __SYSCALL_MASK == ~0
23737@@ -432,10 +902,13 @@ sysret_check:
23738 LOCKDEP_SYS_EXIT
23739 DISABLE_INTERRUPTS(CLBR_NONE)
23740 TRACE_IRQS_OFF
23741- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23742+ GET_THREAD_INFO(%rcx)
23743+ movl TI_flags(%rcx),%edx
23744 andl %edi,%edx
23745 jnz sysret_careful
23746 CFI_REMEMBER_STATE
23747+ pax_exit_kernel_user
23748+ pax_erase_kstack
23749 /*
23750 * sysretq will re-enable interrupts:
23751 */
23752@@ -494,12 +967,15 @@ sysret_audit:
23753
23754 /* Do syscall tracing */
23755 tracesys:
23756- leaq -REST_SKIP(%rsp), %rdi
23757+ movq %rsp, %rdi
23758 movq $AUDIT_ARCH_X86_64, %rsi
23759 call syscall_trace_enter_phase1
23760 test %rax, %rax
23761 jnz tracesys_phase2 /* if needed, run the slow path */
23762- LOAD_ARGS 0 /* else restore clobbered regs */
23763+
23764+ pax_erase_kstack
23765+
23766+ LOAD_ARGS /* else restore clobbered regs */
23767 jmp system_call_fastpath /* and return to the fast path */
23768
23769 tracesys_phase2:
23770@@ -510,12 +986,14 @@ tracesys_phase2:
23771 movq %rax,%rdx
23772 call syscall_trace_enter_phase2
23773
23774+ pax_erase_kstack
23775+
23776 /*
23777 * Reload arg registers from stack in case ptrace changed them.
23778 * We don't reload %rax because syscall_trace_enter_phase2() returned
23779 * the value it wants us to use in the table lookup.
23780 */
23781- LOAD_ARGS ARGOFFSET, 1
23782+ LOAD_ARGS 1
23783 RESTORE_REST
23784 #if __SYSCALL_MASK == ~0
23785 cmpq $__NR_syscall_max,%rax
23786@@ -545,7 +1023,9 @@ GLOBAL(int_with_check)
23787 andl %edi,%edx
23788 jnz int_careful
23789 andl $~TS_COMPAT,TI_status(%rcx)
23790- jmp retint_swapgs
23791+ pax_exit_kernel_user
23792+ pax_erase_kstack
23793+ jmp retint_swapgs_pax
23794
23795 /* Either reschedule or signal or syscall exit tracking needed. */
23796 /* First do a reschedule test. */
23797@@ -591,7 +1071,7 @@ int_restore_rest:
23798 TRACE_IRQS_OFF
23799 jmp int_with_check
23800 CFI_ENDPROC
23801-END(system_call)
23802+ENDPROC(system_call)
23803
23804 .macro FORK_LIKE func
23805 ENTRY(stub_\func)
23806@@ -604,9 +1084,10 @@ ENTRY(stub_\func)
23807 DEFAULT_FRAME 0 8 /* offset 8: return address */
23808 call sys_\func
23809 RESTORE_TOP_OF_STACK %r11, 8
23810- ret $REST_SKIP /* pop extended registers */
23811+ pax_force_retaddr
23812+ ret
23813 CFI_ENDPROC
23814-END(stub_\func)
23815+ENDPROC(stub_\func)
23816 .endm
23817
23818 .macro FIXED_FRAME label,func
23819@@ -616,9 +1097,10 @@ ENTRY(\label)
23820 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23821 call \func
23822 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23823+ pax_force_retaddr
23824 ret
23825 CFI_ENDPROC
23826-END(\label)
23827+ENDPROC(\label)
23828 .endm
23829
23830 FORK_LIKE clone
23831@@ -626,19 +1108,6 @@ END(\label)
23832 FORK_LIKE vfork
23833 FIXED_FRAME stub_iopl, sys_iopl
23834
23835-ENTRY(ptregscall_common)
23836- DEFAULT_FRAME 1 8 /* offset 8: return address */
23837- RESTORE_TOP_OF_STACK %r11, 8
23838- movq_cfi_restore R15+8, r15
23839- movq_cfi_restore R14+8, r14
23840- movq_cfi_restore R13+8, r13
23841- movq_cfi_restore R12+8, r12
23842- movq_cfi_restore RBP+8, rbp
23843- movq_cfi_restore RBX+8, rbx
23844- ret $REST_SKIP /* pop extended registers */
23845- CFI_ENDPROC
23846-END(ptregscall_common)
23847-
23848 ENTRY(stub_execve)
23849 CFI_STARTPROC
23850 addq $8, %rsp
23851@@ -650,7 +1119,7 @@ ENTRY(stub_execve)
23852 RESTORE_REST
23853 jmp int_ret_from_sys_call
23854 CFI_ENDPROC
23855-END(stub_execve)
23856+ENDPROC(stub_execve)
23857
23858 ENTRY(stub_execveat)
23859 CFI_STARTPROC
23860@@ -664,7 +1133,7 @@ ENTRY(stub_execveat)
23861 RESTORE_REST
23862 jmp int_ret_from_sys_call
23863 CFI_ENDPROC
23864-END(stub_execveat)
23865+ENDPROC(stub_execveat)
23866
23867 /*
23868 * sigreturn is special because it needs to restore all registers on return.
23869@@ -681,7 +1150,7 @@ ENTRY(stub_rt_sigreturn)
23870 RESTORE_REST
23871 jmp int_ret_from_sys_call
23872 CFI_ENDPROC
23873-END(stub_rt_sigreturn)
23874+ENDPROC(stub_rt_sigreturn)
23875
23876 #ifdef CONFIG_X86_X32_ABI
23877 ENTRY(stub_x32_rt_sigreturn)
23878@@ -695,7 +1164,7 @@ ENTRY(stub_x32_rt_sigreturn)
23879 RESTORE_REST
23880 jmp int_ret_from_sys_call
23881 CFI_ENDPROC
23882-END(stub_x32_rt_sigreturn)
23883+ENDPROC(stub_x32_rt_sigreturn)
23884
23885 ENTRY(stub_x32_execve)
23886 CFI_STARTPROC
23887@@ -760,7 +1229,7 @@ vector=vector+1
23888 2: jmp common_interrupt
23889 .endr
23890 CFI_ENDPROC
23891-END(irq_entries_start)
23892+ENDPROC(irq_entries_start)
23893
23894 .previous
23895 END(interrupt)
23896@@ -777,8 +1246,8 @@ END(interrupt)
23897 /* 0(%rsp): ~(interrupt number) */
23898 .macro interrupt func
23899 /* reserve pt_regs for scratch regs and rbp */
23900- subq $ORIG_RAX-RBP, %rsp
23901- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23902+ subq $ORIG_RAX, %rsp
23903+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23904 SAVE_ARGS_IRQ
23905 call \func
23906 .endm
23907@@ -801,14 +1270,14 @@ ret_from_intr:
23908
23909 /* Restore saved previous stack */
23910 popq %rsi
23911- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23912- leaq ARGOFFSET-RBP(%rsi), %rsp
23913+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23914+ movq %rsi, %rsp
23915 CFI_DEF_CFA_REGISTER rsp
23916- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23917+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23918
23919 exit_intr:
23920 GET_THREAD_INFO(%rcx)
23921- testl $3,CS-ARGOFFSET(%rsp)
23922+ testb $3,CS-ARGOFFSET(%rsp)
23923 je retint_kernel
23924
23925 /* Interrupt came from user space */
23926@@ -830,12 +1299,35 @@ retint_swapgs: /* return to user-space */
23927 * The iretq could re-enable interrupts:
23928 */
23929 DISABLE_INTERRUPTS(CLBR_ANY)
23930+ pax_exit_kernel_user
23931+retint_swapgs_pax:
23932 TRACE_IRQS_IRETQ
23933 SWAPGS
23934 jmp restore_args
23935
23936 retint_restore_args: /* return to kernel space */
23937 DISABLE_INTERRUPTS(CLBR_ANY)
23938+ pax_exit_kernel
23939+
23940+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
23941+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
23942+ * namely calling EFI runtime services with a phys mapping. We start
23943+ * off with NOPs and patch in the real instrumentation (BTS/OR)
23944+ * before starting any userland process, even before starting
23945+ * up the APs.
23946+ */
23947+ .pushsection .altinstr_replacement, "a"
23948+ 601: pax_force_retaddr (RIP-ARGOFFSET)
23949+ 602:
23950+ .popsection
23951+ 603: .fill 602b-601b, 1, 0x90
23952+ .pushsection .altinstructions, "a"
23953+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
23954+ .popsection
23955+#else
23956+ pax_force_retaddr (RIP-ARGOFFSET)
23957+#endif
23958+
23959 /*
23960 * The iretq could re-enable interrupts:
23961 */
23962@@ -873,15 +1365,15 @@ native_irq_return_ldt:
23963 SWAPGS
23964 movq PER_CPU_VAR(espfix_waddr),%rdi
23965 movq %rax,(0*8)(%rdi) /* RAX */
23966- movq (2*8)(%rsp),%rax /* RIP */
23967+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
23968 movq %rax,(1*8)(%rdi)
23969- movq (3*8)(%rsp),%rax /* CS */
23970+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
23971 movq %rax,(2*8)(%rdi)
23972- movq (4*8)(%rsp),%rax /* RFLAGS */
23973+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
23974 movq %rax,(3*8)(%rdi)
23975- movq (6*8)(%rsp),%rax /* SS */
23976+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
23977 movq %rax,(5*8)(%rdi)
23978- movq (5*8)(%rsp),%rax /* RSP */
23979+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
23980 movq %rax,(4*8)(%rdi)
23981 andl $0xffff0000,%eax
23982 popq_cfi %rdi
23983@@ -935,7 +1427,7 @@ ENTRY(retint_kernel)
23984 jmp exit_intr
23985 #endif
23986 CFI_ENDPROC
23987-END(common_interrupt)
23988+ENDPROC(common_interrupt)
23989
23990 /*
23991 * APIC interrupts.
23992@@ -949,7 +1441,7 @@ ENTRY(\sym)
23993 interrupt \do_sym
23994 jmp ret_from_intr
23995 CFI_ENDPROC
23996-END(\sym)
23997+ENDPROC(\sym)
23998 .endm
23999
24000 #ifdef CONFIG_TRACING
24001@@ -1022,7 +1514,7 @@ apicinterrupt IRQ_WORK_VECTOR \
24002 /*
24003 * Exception entry points.
24004 */
24005-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
24006+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
24007
24008 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
24009 ENTRY(\sym)
24010@@ -1073,6 +1565,12 @@ ENTRY(\sym)
24011 .endif
24012
24013 .if \shift_ist != -1
24014+#ifdef CONFIG_SMP
24015+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24016+ lea init_tss(%r13), %r13
24017+#else
24018+ lea init_tss(%rip), %r13
24019+#endif
24020 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24021 .endif
24022
24023@@ -1089,7 +1587,7 @@ ENTRY(\sym)
24024 .endif
24025
24026 CFI_ENDPROC
24027-END(\sym)
24028+ENDPROC(\sym)
24029 .endm
24030
24031 #ifdef CONFIG_TRACING
24032@@ -1130,9 +1628,10 @@ gs_change:
24033 2: mfence /* workaround */
24034 SWAPGS
24035 popfq_cfi
24036+ pax_force_retaddr
24037 ret
24038 CFI_ENDPROC
24039-END(native_load_gs_index)
24040+ENDPROC(native_load_gs_index)
24041
24042 _ASM_EXTABLE(gs_change,bad_gs)
24043 .section .fixup,"ax"
24044@@ -1160,9 +1659,10 @@ ENTRY(do_softirq_own_stack)
24045 CFI_DEF_CFA_REGISTER rsp
24046 CFI_ADJUST_CFA_OFFSET -8
24047 decl PER_CPU_VAR(irq_count)
24048+ pax_force_retaddr
24049 ret
24050 CFI_ENDPROC
24051-END(do_softirq_own_stack)
24052+ENDPROC(do_softirq_own_stack)
24053
24054 #ifdef CONFIG_XEN
24055 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24056@@ -1200,7 +1700,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
24057 decl PER_CPU_VAR(irq_count)
24058 jmp error_exit
24059 CFI_ENDPROC
24060-END(xen_do_hypervisor_callback)
24061+ENDPROC(xen_do_hypervisor_callback)
24062
24063 /*
24064 * Hypervisor uses this for application faults while it executes.
24065@@ -1259,7 +1759,7 @@ ENTRY(xen_failsafe_callback)
24066 SAVE_ALL
24067 jmp error_exit
24068 CFI_ENDPROC
24069-END(xen_failsafe_callback)
24070+ENDPROC(xen_failsafe_callback)
24071
24072 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24073 xen_hvm_callback_vector xen_evtchn_do_upcall
24074@@ -1306,18 +1806,33 @@ ENTRY(paranoid_exit)
24075 DEFAULT_FRAME
24076 DISABLE_INTERRUPTS(CLBR_NONE)
24077 TRACE_IRQS_OFF_DEBUG
24078- testl %ebx,%ebx /* swapgs needed? */
24079+ testl $1,%ebx /* swapgs needed? */
24080 jnz paranoid_restore
24081- testl $3,CS(%rsp)
24082+ testb $3,CS(%rsp)
24083 jnz paranoid_userspace
24084+#ifdef CONFIG_PAX_MEMORY_UDEREF
24085+ pax_exit_kernel
24086+ TRACE_IRQS_IRETQ 0
24087+ SWAPGS_UNSAFE_STACK
24088+ RESTORE_ALL 8
24089+ pax_force_retaddr_bts
24090+ jmp irq_return
24091+#endif
24092 paranoid_swapgs:
24093+#ifdef CONFIG_PAX_MEMORY_UDEREF
24094+ pax_exit_kernel_user
24095+#else
24096+ pax_exit_kernel
24097+#endif
24098 TRACE_IRQS_IRETQ 0
24099 SWAPGS_UNSAFE_STACK
24100 RESTORE_ALL 8
24101 jmp irq_return
24102 paranoid_restore:
24103+ pax_exit_kernel
24104 TRACE_IRQS_IRETQ_DEBUG 0
24105 RESTORE_ALL 8
24106+ pax_force_retaddr_bts
24107 jmp irq_return
24108 paranoid_userspace:
24109 GET_THREAD_INFO(%rcx)
24110@@ -1346,7 +1861,7 @@ paranoid_schedule:
24111 TRACE_IRQS_OFF
24112 jmp paranoid_userspace
24113 CFI_ENDPROC
24114-END(paranoid_exit)
24115+ENDPROC(paranoid_exit)
24116
24117 /*
24118 * Exception entry point. This expects an error code/orig_rax on the stack.
24119@@ -1373,12 +1888,23 @@ ENTRY(error_entry)
24120 movq %r14, R14+8(%rsp)
24121 movq %r15, R15+8(%rsp)
24122 xorl %ebx,%ebx
24123- testl $3,CS+8(%rsp)
24124+ testb $3,CS+8(%rsp)
24125 je error_kernelspace
24126 error_swapgs:
24127 SWAPGS
24128 error_sti:
24129+#ifdef CONFIG_PAX_MEMORY_UDEREF
24130+ testb $3, CS+8(%rsp)
24131+ jnz 1f
24132+ pax_enter_kernel
24133+ jmp 2f
24134+1: pax_enter_kernel_user
24135+2:
24136+#else
24137+ pax_enter_kernel
24138+#endif
24139 TRACE_IRQS_OFF
24140+ pax_force_retaddr
24141 ret
24142
24143 /*
24144@@ -1413,7 +1939,7 @@ error_bad_iret:
24145 decl %ebx /* Return to usergs */
24146 jmp error_sti
24147 CFI_ENDPROC
24148-END(error_entry)
24149+ENDPROC(error_entry)
24150
24151
24152 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24153@@ -1424,7 +1950,7 @@ ENTRY(error_exit)
24154 DISABLE_INTERRUPTS(CLBR_NONE)
24155 TRACE_IRQS_OFF
24156 GET_THREAD_INFO(%rcx)
24157- testl %eax,%eax
24158+ testl $1,%eax
24159 jne retint_kernel
24160 LOCKDEP_SYS_EXIT_IRQ
24161 movl TI_flags(%rcx),%edx
24162@@ -1433,7 +1959,7 @@ ENTRY(error_exit)
24163 jnz retint_careful
24164 jmp retint_swapgs
24165 CFI_ENDPROC
24166-END(error_exit)
24167+ENDPROC(error_exit)
24168
24169 /*
24170 * Test if a given stack is an NMI stack or not.
24171@@ -1491,9 +2017,11 @@ ENTRY(nmi)
24172 * If %cs was not the kernel segment, then the NMI triggered in user
24173 * space, which means it is definitely not nested.
24174 */
24175+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24176+ je 1f
24177 cmpl $__KERNEL_CS, 16(%rsp)
24178 jne first_nmi
24179-
24180+1:
24181 /*
24182 * Check the special variable on the stack to see if NMIs are
24183 * executing.
24184@@ -1527,8 +2055,7 @@ nested_nmi:
24185
24186 1:
24187 /* Set up the interrupted NMI's stack to jump to repeat_nmi */
24188- leaq -1*8(%rsp), %rdx
24189- movq %rdx, %rsp
24190+ subq $8, %rsp
24191 CFI_ADJUST_CFA_OFFSET 1*8
24192 leaq -10*8(%rsp), %rdx
24193 pushq_cfi $__KERNEL_DS
24194@@ -1546,6 +2073,7 @@ nested_nmi_out:
24195 CFI_RESTORE rdx
24196
24197 /* No need to check faults here */
24198+# pax_force_retaddr_bts
24199 INTERRUPT_RETURN
24200
24201 CFI_RESTORE_STATE
24202@@ -1642,13 +2170,13 @@ end_repeat_nmi:
24203 subq $ORIG_RAX-R15, %rsp
24204 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24205 /*
24206- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24207+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24208 * as we should not be calling schedule in NMI context,
24209 * even with normal interrupts enabled. An NMI should not be
24210 * setting NEED_RESCHED or anything that normal interrupts and
24211 * exceptions might do.
24212 */
24213- call save_paranoid
24214+ call save_paranoid_nmi
24215 DEFAULT_FRAME 0
24216
24217 /*
24218@@ -1658,9 +2186,9 @@ end_repeat_nmi:
24219 * NMI itself takes a page fault, the page fault that was preempted
24220 * will read the information from the NMI page fault and not the
24221 * original fault. Save it off and restore it if it changes.
24222- * Use the r12 callee-saved register.
24223+ * Use the r13 callee-saved register.
24224 */
24225- movq %cr2, %r12
24226+ movq %cr2, %r13
24227
24228 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24229 movq %rsp,%rdi
24230@@ -1669,29 +2197,34 @@ end_repeat_nmi:
24231
24232 /* Did the NMI take a page fault? Restore cr2 if it did */
24233 movq %cr2, %rcx
24234- cmpq %rcx, %r12
24235+ cmpq %rcx, %r13
24236 je 1f
24237- movq %r12, %cr2
24238+ movq %r13, %cr2
24239 1:
24240
24241- testl %ebx,%ebx /* swapgs needed? */
24242+ testl $1,%ebx /* swapgs needed? */
24243 jnz nmi_restore
24244 nmi_swapgs:
24245 SWAPGS_UNSAFE_STACK
24246 nmi_restore:
24247+ pax_exit_kernel_nmi
24248 /* Pop the extra iret frame at once */
24249 RESTORE_ALL 6*8
24250+ testb $3, 8(%rsp)
24251+ jnz 1f
24252+ pax_force_retaddr_bts
24253+1:
24254
24255 /* Clear the NMI executing stack variable */
24256 movq $0, 5*8(%rsp)
24257 jmp irq_return
24258 CFI_ENDPROC
24259-END(nmi)
24260+ENDPROC(nmi)
24261
24262 ENTRY(ignore_sysret)
24263 CFI_STARTPROC
24264 mov $-ENOSYS,%eax
24265 sysret
24266 CFI_ENDPROC
24267-END(ignore_sysret)
24268+ENDPROC(ignore_sysret)
24269
24270diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24271index f5d0730..5bce89c 100644
24272--- a/arch/x86/kernel/espfix_64.c
24273+++ b/arch/x86/kernel/espfix_64.c
24274@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24275 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24276 static void *espfix_pages[ESPFIX_MAX_PAGES];
24277
24278-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24279- __aligned(PAGE_SIZE);
24280+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24281
24282 static unsigned int page_random, slot_random;
24283
24284@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24285 void __init init_espfix_bsp(void)
24286 {
24287 pgd_t *pgd_p;
24288+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24289
24290 /* Install the espfix pud into the kernel page directory */
24291- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24292+ pgd_p = &init_level4_pgt[index];
24293 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24294
24295+#ifdef CONFIG_PAX_PER_CPU_PGD
24296+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24297+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24298+#endif
24299+
24300 /* Randomize the locations */
24301 init_espfix_random();
24302
24303@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24304 set_pte(&pte_p[n*PTE_STRIDE], pte);
24305
24306 /* Job is done for this CPU and any CPU which shares this page */
24307- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24308+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24309
24310 unlock_done:
24311 mutex_unlock(&espfix_init_mutex);
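
[The two clone_pgd_range() calls added to init_espfix_bsp() above keep the per-CPU kernel and user PGDs in sync with swapper_pg_dir for the espfix slot. The helper itself is essentially a typed memcpy over top-level page-directory entries; a sketch, assuming pgd_t is the usual one-word wrapper:]

    typedef struct { unsigned long pgd; } pgd_t;

    /* copy 'count' top-level entries from src to dst */
    static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
    {
        while (count-- > 0)
            *dst++ = *src++;
    }
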
24312diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24313index 8b7b0a5..2395f29 100644
24314--- a/arch/x86/kernel/ftrace.c
24315+++ b/arch/x86/kernel/ftrace.c
24316@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24317 * kernel identity mapping to modify code.
24318 */
24319 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24320- ip = (unsigned long)__va(__pa_symbol(ip));
24321+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24322
24323 return ip;
24324 }
24325@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24326 {
24327 unsigned char replaced[MCOUNT_INSN_SIZE];
24328
24329+ ip = ktla_ktva(ip);
24330+
24331 /*
24332 * Note: Due to modules and __init, code can
24333 * disappear and change, we need to protect against faulting
24334@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24335 unsigned char old[MCOUNT_INSN_SIZE];
24336 int ret;
24337
24338- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24339+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24340
24341 ftrace_update_func = ip;
24342 /* Make sure the breakpoints see the ftrace_update_func update */
24343@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24344 unsigned char replaced[MCOUNT_INSN_SIZE];
24345 unsigned char brk = BREAKPOINT_INSTRUCTION;
24346
24347- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24348+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24349 return -EFAULT;
24350
24351 /* Make sure it is what we expect it to be */
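
[Each ftrace patching path above gains a ktla_ktva() translation: under KERNEXEC the 32-bit kernel text executes from one mapping but must be read and modified through a shifted alias, so code addresses are adjusted by a constant first. A sketch of the shape of those macros; the real offset depends on the configuration, and the value shown is purely an assumption for illustration:]

    /* hypothetical constant; the real one derives from the kernel's
     * load address, and the macros are identity when KERNEXEC is off */
    #define KERNEXEC_TEXT_SHIFT 0x80000000UL

    #define ktla_ktva(addr) ((addr) + KERNEXEC_TEXT_SHIFT) /* text -> alias */
    #define ktva_ktla(addr) ((addr) - KERNEXEC_TEXT_SHIFT) /* alias -> text */
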
24352diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24353index eda1a86..8f6df48 100644
24354--- a/arch/x86/kernel/head64.c
24355+++ b/arch/x86/kernel/head64.c
24356@@ -67,12 +67,12 @@ again:
24357 pgd = *pgd_p;
24358
24359 /*
24360- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24361- * critical -- __PAGE_OFFSET would point us back into the dynamic
24362+ * The use of __early_va rather than __va here is critical:
24363+ * __va would point us back into the dynamic
24364 * range and we might end up looping forever...
24365 */
24366 if (pgd)
24367- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24368+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24369 else {
24370 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24371 reset_early_page_tables();
24372@@ -82,13 +82,13 @@ again:
24373 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24374 for (i = 0; i < PTRS_PER_PUD; i++)
24375 pud_p[i] = 0;
24376- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24377+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24378 }
24379 pud_p += pud_index(address);
24380 pud = *pud_p;
24381
24382 if (pud)
24383- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24384+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24385 else {
24386 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24387 reset_early_page_tables();
24388@@ -98,7 +98,7 @@ again:
24389 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24390 for (i = 0; i < PTRS_PER_PMD; i++)
24391 pmd_p[i] = 0;
24392- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24393+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24394 }
24395 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24396 pmd_p[pmd_index(address)] = pmd;
24397@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24398 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24399 early_printk("Kernel alive\n");
24400
24401- clear_page(init_level4_pgt);
24402 /* set init_level4_pgt kernel high mapping */
24403 init_level4_pgt[511] = early_level4_pgt[511];
24404
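
[The head64.c hunks replace the open-coded physical-to-virtual arithmetic with __early_va(); the removed lines show that both compute the same thing. A one-line C restatement of that conversion (a sketch; __early_va itself is a macro in the patched tree, and 0xffffffff80000000 is __START_KERNEL_map on x86-64):]

    /* pa + __START_KERNEL_map - phys_base: where a physical address is
     * currently visible through the kernel's high mapping */
    static inline void *early_va(unsigned long pa, unsigned long phys_base)
    {
        return (void *)(pa + 0xffffffff80000000UL - phys_base);
    }
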
24405diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24406index f36bd42..0ab4474 100644
24407--- a/arch/x86/kernel/head_32.S
24408+++ b/arch/x86/kernel/head_32.S
24409@@ -26,6 +26,12 @@
24410 /* Physical address */
24411 #define pa(X) ((X) - __PAGE_OFFSET)
24412
24413+#ifdef CONFIG_PAX_KERNEXEC
24414+#define ta(X) (X)
24415+#else
24416+#define ta(X) ((X) - __PAGE_OFFSET)
24417+#endif
24418+
24419 /*
24420 * References to members of the new_cpu_data structure.
24421 */
24422@@ -55,11 +61,7 @@
24423 * and smaller than max_low_pfn, otherwise it will waste some page table entries
24424 */
24425
24426-#if PTRS_PER_PMD > 1
24427-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24428-#else
24429-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24430-#endif
24431+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24432
24433 /* Number of possible pages in the lowmem region */
24434 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24435@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24436 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24437
24438 /*
24439+ * Real beginning of normal "text" segment
24440+ */
24441+ENTRY(stext)
24442+ENTRY(_stext)
24443+
24444+/*
24445 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24446 * %esi points to the real-mode code as a 32-bit pointer.
24447 * CS and DS must be 4 GB flat segments, but we don't depend on
24448@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24449 * can.
24450 */
24451 __HEAD
24452+
24453+#ifdef CONFIG_PAX_KERNEXEC
24454+ jmp startup_32
24455+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24456+.fill PAGE_SIZE-5,1,0xcc
24457+#endif
24458+
24459 ENTRY(startup_32)
24460 movl pa(stack_start),%ecx
24461
24462@@ -106,6 +121,59 @@ ENTRY(startup_32)
24463 2:
24464 leal -__PAGE_OFFSET(%ecx),%esp
24465
24466+#ifdef CONFIG_SMP
24467+ movl $pa(cpu_gdt_table),%edi
24468+ movl $__per_cpu_load,%eax
24469+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24470+ rorl $16,%eax
24471+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24472+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24473+ movl $__per_cpu_end - 1,%eax
24474+ subl $__per_cpu_start,%eax
24475+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24476+#endif
24477+
24478+#ifdef CONFIG_PAX_MEMORY_UDEREF
24479+ movl $NR_CPUS,%ecx
24480+ movl $pa(cpu_gdt_table),%edi
24481+1:
24482+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24483+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24484+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24485+ addl $PAGE_SIZE_asm,%edi
24486+ loop 1b
24487+#endif
24488+
24489+#ifdef CONFIG_PAX_KERNEXEC
24490+ movl $pa(boot_gdt),%edi
24491+ movl $__LOAD_PHYSICAL_ADDR,%eax
24492+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24493+ rorl $16,%eax
24494+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24495+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24496+ rorl $16,%eax
24497+
24498+ ljmp $(__BOOT_CS),$1f
24499+1:
24500+
24501+ movl $NR_CPUS,%ecx
24502+ movl $pa(cpu_gdt_table),%edi
24503+ addl $__PAGE_OFFSET,%eax
24504+1:
24505+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24506+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24507+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24508+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24509+ rorl $16,%eax
24510+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24511+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24512+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24513+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24514+ rorl $16,%eax
24515+ addl $PAGE_SIZE_asm,%edi
24516+ loop 1b
24517+#endif
24518+
24519 /*
24520 * Clear BSS first so that there are no surprises...
24521 */
24522@@ -201,8 +269,11 @@ ENTRY(startup_32)
24523 movl %eax, pa(max_pfn_mapped)
24524
24525 /* Do early initialization of the fixmap area */
24526- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24527- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24528+#ifdef CONFIG_COMPAT_VDSO
24529+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24530+#else
24531+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24532+#endif
24533 #else /* Not PAE */
24534
24535 page_pde_offset = (__PAGE_OFFSET >> 20);
24536@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24537 movl %eax, pa(max_pfn_mapped)
24538
24539 /* Do early initialization of the fixmap area */
24540- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24541- movl %eax,pa(initial_page_table+0xffc)
24542+#ifdef CONFIG_COMPAT_VDSO
24543+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24544+#else
24545+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24546+#endif
24547 #endif
24548
24549 #ifdef CONFIG_PARAVIRT
24550@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24551 cmpl $num_subarch_entries, %eax
24552 jae bad_subarch
24553
24554- movl pa(subarch_entries)(,%eax,4), %eax
24555- subl $__PAGE_OFFSET, %eax
24556- jmp *%eax
24557+ jmp *pa(subarch_entries)(,%eax,4)
24558
24559 bad_subarch:
24560 WEAK(lguest_entry)
24561@@ -261,10 +333,10 @@ WEAK(xen_entry)
24562 __INITDATA
24563
24564 subarch_entries:
24565- .long default_entry /* normal x86/PC */
24566- .long lguest_entry /* lguest hypervisor */
24567- .long xen_entry /* Xen hypervisor */
24568- .long default_entry /* Moorestown MID */
24569+ .long ta(default_entry) /* normal x86/PC */
24570+ .long ta(lguest_entry) /* lguest hypervisor */
24571+ .long ta(xen_entry) /* Xen hypervisor */
24572+ .long ta(default_entry) /* Moorestown MID */
24573 num_subarch_entries = (. - subarch_entries) / 4
24574 .previous
24575 #else
24576@@ -354,6 +426,7 @@ default_entry:
24577 movl pa(mmu_cr4_features),%eax
24578 movl %eax,%cr4
24579
24580+#ifdef CONFIG_X86_PAE
24581 testb $X86_CR4_PAE, %al # check if PAE is enabled
24582 jz enable_paging
24583
24584@@ -382,6 +455,9 @@ default_entry:
24585 /* Make changes effective */
24586 wrmsr
24587
24588+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24589+#endif
24590+
24591 enable_paging:
24592
24593 /*
24594@@ -449,14 +525,20 @@ is486:
24595 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24596 movl %eax,%ss # after changing gdt.
24597
24598- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24599+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24600 movl %eax,%ds
24601 movl %eax,%es
24602
24603 movl $(__KERNEL_PERCPU), %eax
24604 movl %eax,%fs # set this cpu's percpu
24605
24606+#ifdef CONFIG_CC_STACKPROTECTOR
24607 movl $(__KERNEL_STACK_CANARY),%eax
24608+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24609+ movl $(__USER_DS),%eax
24610+#else
24611+ xorl %eax,%eax
24612+#endif
24613 movl %eax,%gs
24614
24615 xorl %eax,%eax # Clear LDT
24616@@ -512,8 +594,11 @@ setup_once:
24617 * relocation. Manually set base address in stack canary
24618 * segment descriptor.
24619 */
24620- movl $gdt_page,%eax
24621+ movl $cpu_gdt_table,%eax
24622 movl $stack_canary,%ecx
24623+#ifdef CONFIG_SMP
24624+ addl $__per_cpu_load,%ecx
24625+#endif
24626 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24627 shrl $16, %ecx
24628 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24629@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24630 cmpl $2,(%esp) # X86_TRAP_NMI
24631 je is_nmi # Ignore NMI
24632
24633- cmpl $2,%ss:early_recursion_flag
24634+ cmpl $1,%ss:early_recursion_flag
24635 je hlt_loop
24636 incl %ss:early_recursion_flag
24637
24638@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24639 pushl (20+6*4)(%esp) /* trapno */
24640 pushl $fault_msg
24641 call printk
24642-#endif
24643 call dump_stack
24644+#endif
24645 hlt_loop:
24646 hlt
24647 jmp hlt_loop
24648@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24649 /* This is the default interrupt "handler" :-) */
24650 ALIGN
24651 ignore_int:
24652- cld
24653 #ifdef CONFIG_PRINTK
24654+ cmpl $2,%ss:early_recursion_flag
24655+ je hlt_loop
24656+ incl %ss:early_recursion_flag
24657+ cld
24658 pushl %eax
24659 pushl %ecx
24660 pushl %edx
24661@@ -617,9 +705,6 @@ ignore_int:
24662 movl $(__KERNEL_DS),%eax
24663 movl %eax,%ds
24664 movl %eax,%es
24665- cmpl $2,early_recursion_flag
24666- je hlt_loop
24667- incl early_recursion_flag
24668 pushl 16(%esp)
24669 pushl 24(%esp)
24670 pushl 32(%esp)
24671@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24672 /*
24673 * BSS section
24674 */
24675-__PAGE_ALIGNED_BSS
24676- .align PAGE_SIZE
24677 #ifdef CONFIG_X86_PAE
24678+.section .initial_pg_pmd,"a",@progbits
24679 initial_pg_pmd:
24680 .fill 1024*KPMDS,4,0
24681 #else
24682+.section .initial_page_table,"a",@progbits
24683 ENTRY(initial_page_table)
24684 .fill 1024,4,0
24685 #endif
24686+.section .initial_pg_fixmap,"a",@progbits
24687 initial_pg_fixmap:
24688 .fill 1024,4,0
24689+.section .empty_zero_page,"a",@progbits
24690 ENTRY(empty_zero_page)
24691 .fill 4096,1,0
24692+.section .swapper_pg_dir,"a",@progbits
24693 ENTRY(swapper_pg_dir)
24694+#ifdef CONFIG_X86_PAE
24695+ .fill 4,8,0
24696+#else
24697 .fill 1024,4,0
24698+#endif
24699
24700 /*
24701 * This starts the data section.
24702 */
24703 #ifdef CONFIG_X86_PAE
24704-__PAGE_ALIGNED_DATA
24705- /* Page-aligned for the benefit of paravirt? */
24706- .align PAGE_SIZE
24707+.section .initial_page_table,"a",@progbits
24708 ENTRY(initial_page_table)
24709 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24710 # if KPMDS == 3
24711@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24712 # error "Kernel PMDs should be 1, 2 or 3"
24713 # endif
24714 .align PAGE_SIZE /* needs to be page-sized too */
24715+
24716+#ifdef CONFIG_PAX_PER_CPU_PGD
24717+ENTRY(cpu_pgd)
24718+ .rept 2*NR_CPUS
24719+ .fill 4,8,0
24720+ .endr
24721+#endif
24722+
24723 #endif
24724
24725 .data
24726 .balign 4
24727 ENTRY(stack_start)
24728- .long init_thread_union+THREAD_SIZE
24729+ .long init_thread_union+THREAD_SIZE-8
24730
24731 __INITRODATA
24732 int_msg:
24733@@ -727,7 +825,7 @@ fault_msg:
24734 * segment size, and 32-bit linear address value:
24735 */
24736
24737- .data
24738+.section .rodata,"a",@progbits
24739 .globl boot_gdt_descr
24740 .globl idt_descr
24741
24742@@ -736,7 +834,7 @@ fault_msg:
24743 .word 0 # 32 bit align gdt_desc.address
24744 boot_gdt_descr:
24745 .word __BOOT_DS+7
24746- .long boot_gdt - __PAGE_OFFSET
24747+ .long pa(boot_gdt)
24748
24749 .word 0 # 32-bit align idt_desc.address
24750 idt_descr:
24751@@ -747,7 +845,7 @@ idt_descr:
24752 .word 0 # 32 bit align gdt_desc.address
24753 ENTRY(early_gdt_descr)
24754 .word GDT_ENTRIES*8-1
24755- .long gdt_page /* Overwritten for secondary CPUs */
24756+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24757
24758 /*
24759 * The boot_gdt must mirror the equivalent in setup.S and is
24760@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24761 .align L1_CACHE_BYTES
24762 ENTRY(boot_gdt)
24763 .fill GDT_ENTRY_BOOT_CS,8,0
24764- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24765- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24766+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24767+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24768+
24769+ .align PAGE_SIZE_asm
24770+ENTRY(cpu_gdt_table)
24771+ .rept NR_CPUS
24772+ .quad 0x0000000000000000 /* NULL descriptor */
24773+ .quad 0x0000000000000000 /* 0x0b reserved */
24774+ .quad 0x0000000000000000 /* 0x13 reserved */
24775+ .quad 0x0000000000000000 /* 0x1b reserved */
24776+
24777+#ifdef CONFIG_PAX_KERNEXEC
24778+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24779+#else
24780+ .quad 0x0000000000000000 /* 0x20 unused */
24781+#endif
24782+
24783+ .quad 0x0000000000000000 /* 0x28 unused */
24784+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24785+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24786+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24787+ .quad 0x0000000000000000 /* 0x4b reserved */
24788+ .quad 0x0000000000000000 /* 0x53 reserved */
24789+ .quad 0x0000000000000000 /* 0x5b reserved */
24790+
24791+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24792+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24793+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24794+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24795+
24796+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24797+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24798+
24799+ /*
24800+ * Segments used for calling PnP BIOS have byte granularity.
24801+ * The code segments and data segments have fixed 64k limits;
24802+ * the transfer segment sizes are set at run time.
24803+ */
24804+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24805+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24806+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24807+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24808+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24809+
24810+ /*
24811+ * The APM segments have byte granularity and their bases
24812+ * are set at run time. All have 64k limits.
24813+ */
24814+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24815+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24816+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24817+
24818+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24819+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24820+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24821+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24822+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24823+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24824+
24825+ /* Be sure this is zeroed to avoid false validations in Xen */
24826+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24827+ .endr
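
[The movw/rorl/movb sequences in the startup_32 additions above scatter a 32-bit base (and, for the per-CPU entry, a limit) into the split fields of an x86 segment descriptor. The same packing written out in C, which also reproduces the literal descriptors in cpu_gdt_table; a sketch, not the kernel's own pack_descriptor():]

    #include <stdint.h>

    static uint64_t pack_segment_descriptor(uint32_t base, uint32_t limit,
                                            uint8_t access, uint8_t flags)
    {
        uint64_t d = 0;
        d |= (uint64_t)(limit & 0xffff);                 /* limit 15:0   */
        d |= (uint64_t)(base  & 0xffffff) << 16;         /* base 23:0    */
        d |= (uint64_t)access << 40;                     /* type/S/DPL/P */
        d |= (uint64_t)((limit >> 16) & 0xf) << 48;      /* limit 19:16  */
        d |= (uint64_t)(flags & 0xf) << 52;              /* AVL/L/D/G    */
        d |= (uint64_t)((base >> 24) & 0xff) << 56;      /* base 31:24   */
        return d;
    }

    /* e.g. pack_segment_descriptor(0, 0xfffff, 0x9b, 0xc)
     * yields 0x00cf9b000000ffff, the kernel 4GB code descriptor above */
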
24828diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24829index a468c0a..8b5a879 100644
24830--- a/arch/x86/kernel/head_64.S
24831+++ b/arch/x86/kernel/head_64.S
24832@@ -20,6 +20,8 @@
24833 #include <asm/processor-flags.h>
24834 #include <asm/percpu.h>
24835 #include <asm/nops.h>
24836+#include <asm/cpufeature.h>
24837+#include <asm/alternative-asm.h>
24838
24839 #ifdef CONFIG_PARAVIRT
24840 #include <asm/asm-offsets.h>
24841@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24842 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24843 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24844 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24845+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24846+L3_VMALLOC_START = pud_index(VMALLOC_START)
24847+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24848+L3_VMALLOC_END = pud_index(VMALLOC_END)
24849+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24850+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24851
24852 .text
24853 __HEAD
24854@@ -89,11 +97,24 @@ startup_64:
24855 * Fixup the physical addresses in the page table
24856 */
24857 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24858+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24859+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24860+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24861+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24862+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24863
24864- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24865- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24866+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24867+#ifndef CONFIG_XEN
24868+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24869+#endif
24870+
24871+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24872+
24873+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24874+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24875
24876 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24877+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24878
24879 /*
24880 * Set up the identity mapping for the switchover. These
24881@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
24882 * after the boot processor executes this code.
24883 */
24884
24885+ orq $-1, %rbp
24886 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24887 1:
24888
24889- /* Enable PAE mode and PGE */
24890- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24891+ /* Enable PAE mode and PSE/PGE */
24892+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24893 movq %rcx, %cr4
24894
24895 /* Setup early boot stage 4 level pagetables. */
24896@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
24897 movl $MSR_EFER, %ecx
24898 rdmsr
24899 btsl $_EFER_SCE, %eax /* Enable System Call */
24900- btl $20,%edi /* No Execute supported? */
24901+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24902 jnc 1f
24903 btsl $_EFER_NX, %eax
24904+ cmpq $-1, %rbp
24905+ je 1f
24906 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24907+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24908+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24909+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24910+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24911+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24912+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24913+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24914 1: wrmsr /* Make changes effective */
24915
24916 /* Setup cr0 */
24917@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
24918 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24919 * address given in m16:64.
24920 */
24921+ pax_set_fptr_mask
24922 movq initial_code(%rip),%rax
24923 pushq $0 # fake return address to stop unwinder
24924 pushq $__KERNEL_CS # set correct cs
24925@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
24926 .quad INIT_PER_CPU_VAR(irq_stack_union)
24927
24928 GLOBAL(stack_start)
24929- .quad init_thread_union+THREAD_SIZE-8
24930+ .quad init_thread_union+THREAD_SIZE-16
24931 .word 0
24932 __FINITDATA
24933
24934@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
24935 call dump_stack
24936 #ifdef CONFIG_KALLSYMS
24937 leaq early_idt_ripmsg(%rip),%rdi
24938- movq 40(%rsp),%rsi # %rip again
24939+ movq 88(%rsp),%rsi # %rip again
24940 call __print_symbol
24941 #endif
24942 #endif /* EARLY_PRINTK */
24943@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
24944 early_recursion_flag:
24945 .long 0
24946
24947+ .section .rodata,"a",@progbits
24948 #ifdef CONFIG_EARLY_PRINTK
24949 early_idt_msg:
24950 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24951@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
24952 NEXT_PAGE(early_dynamic_pgts)
24953 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24954
24955- .data
24956+ .section .rodata,"a",@progbits
24957
24958-#ifndef CONFIG_XEN
24959 NEXT_PAGE(init_level4_pgt)
24960- .fill 512,8,0
24961-#else
24962-NEXT_PAGE(init_level4_pgt)
24963- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24964 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24965 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24966+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24967+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24968+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24969+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24970+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24971+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24972 .org init_level4_pgt + L4_START_KERNEL*8, 0
24973 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24974 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24975
24976+#ifdef CONFIG_PAX_PER_CPU_PGD
24977+NEXT_PAGE(cpu_pgd)
24978+ .rept 2*NR_CPUS
24979+ .fill 512,8,0
24980+ .endr
24981+#endif
24982+
24983 NEXT_PAGE(level3_ident_pgt)
24984 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24985+#ifdef CONFIG_XEN
24986 .fill 511, 8, 0
24987+#else
24988+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24989+ .fill 510,8,0
24990+#endif
24991+
24992+NEXT_PAGE(level3_vmalloc_start_pgt)
24993+ .fill 512,8,0
24994+
24995+NEXT_PAGE(level3_vmalloc_end_pgt)
24996+ .fill 512,8,0
24997+
24998+NEXT_PAGE(level3_vmemmap_pgt)
24999+ .fill L3_VMEMMAP_START,8,0
25000+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25001+
25002 NEXT_PAGE(level2_ident_pgt)
25003- /* Since I easily can, map the first 1G.
25004+ /* Since I easily can, map the first 2G.
25005 * Don't set NX because code runs from these pages.
25006 */
25007- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25008-#endif
25009+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25010
25011 NEXT_PAGE(level3_kernel_pgt)
25012 .fill L3_START_KERNEL,8,0
25013@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
25014 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25015 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25016
25017+NEXT_PAGE(level2_vmemmap_pgt)
25018+ .fill 512,8,0
25019+
25020 NEXT_PAGE(level2_kernel_pgt)
25021 /*
25022 * 512 MB kernel mapping. We spend a full page on this pagetable
25023@@ -494,28 +553,64 @@ NEXT_PAGE(level2_kernel_pgt)
25024 NEXT_PAGE(level2_fixmap_pgt)
25025 .fill 506,8,0
25026 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25027- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25028- .fill 5,8,0
25029+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25030+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25031+ .fill 4,8,0
25032
25033 NEXT_PAGE(level1_fixmap_pgt)
25034 .fill 512,8,0
25035
25036+NEXT_PAGE(level1_vsyscall_pgt)
25037+ .fill 512,8,0
25038+
25039 #undef PMDS
25040
25041- .data
25042+ .align PAGE_SIZE
25043+ENTRY(cpu_gdt_table)
25044+ .rept NR_CPUS
25045+ .quad 0x0000000000000000 /* NULL descriptor */
25046+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25047+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25048+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25049+ .quad 0x00cffb000000ffff /* __USER32_CS */
25050+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25051+ .quad 0x00affb000000ffff /* __USER_CS */
25052+
25053+#ifdef CONFIG_PAX_KERNEXEC
25054+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25055+#else
25056+ .quad 0x0 /* unused */
25057+#endif
25058+
25059+ .quad 0,0 /* TSS */
25060+ .quad 0,0 /* LDT */
25061+ .quad 0,0,0 /* three TLS descriptors */
25062+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25063+ /* asm/segment.h:GDT_ENTRIES must match this */
25064+
25065+#ifdef CONFIG_PAX_MEMORY_UDEREF
25066+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25067+#else
25068+ .quad 0x0 /* unused */
25069+#endif
25070+
25071+ /* zero the remaining page */
25072+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25073+ .endr
25074+
25075 .align 16
25076 .globl early_gdt_descr
25077 early_gdt_descr:
25078 .word GDT_ENTRIES*8-1
25079 early_gdt_descr_base:
25080- .quad INIT_PER_CPU_VAR(gdt_page)
25081+ .quad cpu_gdt_table
25082
25083 ENTRY(phys_base)
25084 /* This must match the first entry in level2_kernel_pgt */
25085 .quad 0x0000000000000000
25086
25087 #include "../../x86/xen/xen-head.S"
25088-
25089- __PAGE_ALIGNED_BSS
25090+
25091+ .section .rodata,"a",@progbits
25092 NEXT_PAGE(empty_zero_page)
25093 .skip PAGE_SIZE
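
[secondary_startup_64 above now sets NX not only in EFER but directly on the freshly fixed-up top-level entries with btsq $_PAGE_BIT_NX, so the vmalloc, vmemmap, and fixmap regions are non-executable from the start. The C equivalent of one of those instructions (a sketch; bit 63 is _PAGE_BIT_NX on x86-64):]

    #include <stdint.h>

    #define _PAGE_BIT_NX 63

    static inline void pgd_set_nx(uint64_t *entry)
    {
        *entry |= 1ULL << _PAGE_BIT_NX;  /* mark mapping no-execute */
    }
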
25094diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25095index 05fd74f..c3548b1 100644
25096--- a/arch/x86/kernel/i386_ksyms_32.c
25097+++ b/arch/x86/kernel/i386_ksyms_32.c
25098@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25099 EXPORT_SYMBOL(cmpxchg8b_emu);
25100 #endif
25101
25102+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25103+
25104 /* Networking helper routines. */
25105 EXPORT_SYMBOL(csum_partial_copy_generic);
25106+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25107+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25108
25109 EXPORT_SYMBOL(__get_user_1);
25110 EXPORT_SYMBOL(__get_user_2);
25111@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25112 EXPORT_SYMBOL(___preempt_schedule_context);
25113 #endif
25114 #endif
25115+
25116+#ifdef CONFIG_PAX_KERNEXEC
25117+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25118+#endif
25119+
25120+#ifdef CONFIG_PAX_PER_CPU_PGD
25121+EXPORT_SYMBOL(cpu_pgd);
25122+#endif
25123diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25124index a9a4229..6f4d476 100644
25125--- a/arch/x86/kernel/i387.c
25126+++ b/arch/x86/kernel/i387.c
25127@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25128 static inline bool interrupted_user_mode(void)
25129 {
25130 struct pt_regs *regs = get_irq_regs();
25131- return regs && user_mode_vm(regs);
25132+ return regs && user_mode(regs);
25133 }
25134
25135 /*
25136diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25137index e7cc537..67d7372 100644
25138--- a/arch/x86/kernel/i8259.c
25139+++ b/arch/x86/kernel/i8259.c
25140@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25141 static void make_8259A_irq(unsigned int irq)
25142 {
25143 disable_irq_nosync(irq);
25144- io_apic_irqs &= ~(1<<irq);
25145+ io_apic_irqs &= ~(1UL<<irq);
25146 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25147 enable_irq(irq);
25148 }
25149@@ -208,7 +208,7 @@ spurious_8259A_irq:
25150 "spurious 8259A interrupt: IRQ%d.\n", irq);
25151 spurious_irq_mask |= irqmask;
25152 }
25153- atomic_inc(&irq_err_count);
25154+ atomic_inc_unchecked(&irq_err_count);
25155 /*
25156 * Theoretically we do not have to handle this IRQ,
25157 * but in Linux this does not cause problems and is
25158@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25159 /* (slave's support for AEOI in flat mode is to be investigated) */
25160 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25161
25162+ pax_open_kernel();
25163 if (auto_eoi)
25164 /*
25165 * In AEOI mode we just have to mask the interrupt
25166 * when acking.
25167 */
25168- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25169+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25170 else
25171- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25172+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25173+ pax_close_kernel();
25174
25175 udelay(100); /* wait for 8259A to initialize */
25176
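
[The i8259 hunk wraps the irq_mask_ack assignment in pax_open_kernel()/pax_close_kernel() and writes through a void ** cast because grsecurity const-ifies the irq_chip structure; the window briefly lifts kernel write protection so the pointer can be patched. A loose userspace analogue of the pattern using mprotect(), for illustration only; the kernel mechanism toggles CR0.WP rather than page protections via syscall:]

    #include <stddef.h>
    #include <sys/mman.h>

    /* patch one pointer slot that lives in a read-only page */
    static int patch_ro_pointer(void *page, size_t len, void **slot, void *newval)
    {
        if (mprotect(page, len, PROT_READ | PROT_WRITE))   /* "pax_open_kernel" */
            return -1;
        *slot = newval;
        return mprotect(page, len, PROT_READ);             /* "pax_close_kernel" */
    }
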
25177diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25178index a979b5b..1d6db75 100644
25179--- a/arch/x86/kernel/io_delay.c
25180+++ b/arch/x86/kernel/io_delay.c
25181@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25182 * Quirk table for systems that misbehave (lock up, etc.) if port
25183 * 0x80 is used:
25184 */
25185-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25186+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25187 {
25188 .callback = dmi_io_delay_0xed_port,
25189 .ident = "Compaq Presario V6000",
25190diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25191index 4ddaf66..49d5c18 100644
25192--- a/arch/x86/kernel/ioport.c
25193+++ b/arch/x86/kernel/ioport.c
25194@@ -6,6 +6,7 @@
25195 #include <linux/sched.h>
25196 #include <linux/kernel.h>
25197 #include <linux/capability.h>
25198+#include <linux/security.h>
25199 #include <linux/errno.h>
25200 #include <linux/types.h>
25201 #include <linux/ioport.h>
25202@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25203 return -EINVAL;
25204 if (turn_on && !capable(CAP_SYS_RAWIO))
25205 return -EPERM;
25206+#ifdef CONFIG_GRKERNSEC_IO
25207+ if (turn_on && grsec_disable_privio) {
25208+ gr_handle_ioperm();
25209+ return -ENODEV;
25210+ }
25211+#endif
25212
25213 /*
25214 * If it's the first ioperm() call in this thread's lifetime, set the
25215@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25216 * because the ->io_bitmap_max value must match the bitmap
25217 * contents:
25218 */
25219- tss = &per_cpu(init_tss, get_cpu());
25220+ tss = init_tss + get_cpu();
25221
25222 if (turn_on)
25223 bitmap_clear(t->io_bitmap_ptr, from, num);
25224@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25225 if (level > old) {
25226 if (!capable(CAP_SYS_RAWIO))
25227 return -EPERM;
25228+#ifdef CONFIG_GRKERNSEC_IO
25229+ if (grsec_disable_privio) {
25230+ gr_handle_iopl();
25231+ return -ENODEV;
25232+ }
25233+#endif
25234 }
25235 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25236 t->iopl = level << 12;
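
sys_ioperm() and sys_iopl() above gain the same grsecurity gate: with CONFIG_GRKERNSEC_IO enabled and grsec_disable_privio set, raw port access is logged via gr_handle_ioperm()/gr_handle_iopl() and refused with -ENODEV even for CAP_SYS_RAWIO holders. From user space the gate shows up as ioperm() failing with ENODEV rather than the usual EPERM; an illustrative probe:

	/* Illustrative user-space probe: on a kernel with this gate active,
	 * ioperm() fails with ENODEV even when run as root. */
	#include <stdio.h>
	#include <errno.h>
	#include <string.h>
	#include <sys/io.h>

	int main(void)
	{
		if (ioperm(0x80, 1, 1) != 0)
			printf("ioperm: %s\n", strerror(errno));	/* expect ENODEV */
		else
			puts("port 0x80 access granted");
		return 0;
	}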
25237diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25238index 705ef8d..8672c9d 100644
25239--- a/arch/x86/kernel/irq.c
25240+++ b/arch/x86/kernel/irq.c
25241@@ -22,7 +22,7 @@
25242 #define CREATE_TRACE_POINTS
25243 #include <asm/trace/irq_vectors.h>
25244
25245-atomic_t irq_err_count;
25246+atomic_unchecked_t irq_err_count;
25247
25248 /* Function pointer for generic interrupt vector handling */
25249 void (*x86_platform_ipi_callback)(void) = NULL;
25250@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25251 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25252 seq_puts(p, " Hypervisor callback interrupts\n");
25253 #endif
25254- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25255+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25256 #if defined(CONFIG_X86_IO_APIC)
25257- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25258+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25259 #endif
25260 return 0;
25261 }
25262@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25263
25264 u64 arch_irq_stat(void)
25265 {
25266- u64 sum = atomic_read(&irq_err_count);
25267+ u64 sum = atomic_read_unchecked(&irq_err_count);
25268 return sum;
25269 }
25270
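
irq_err_count above becomes atomic_unchecked_t: it is a pure statistics counter, so it opts out of the PaX REFCOUNT overflow protection that plain atomic_t carries elsewhere in this patch. The intended split, as a sketch (names hypothetical):

	/* Hypothetical illustration of the atomic_t/atomic_unchecked_t split:
	 * reference counts keep overflow detection, statistics may wrap. */
	static atomic_t obj_refcount = ATOMIC_INIT(1);		/* checked: trips on overflow */
	static atomic_unchecked_t err_stats = ATOMIC_INIT(0);	/* unchecked: free-running counter */

	static void record_error(void)
	{
		atomic_inc_unchecked(&err_stats);
	}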
25271diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25272index 63ce838..2ea3e06 100644
25273--- a/arch/x86/kernel/irq_32.c
25274+++ b/arch/x86/kernel/irq_32.c
25275@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25276
25277 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25278
25279+extern void gr_handle_kernel_exploit(void);
25280+
25281 int sysctl_panic_on_stackoverflow __read_mostly;
25282
25283 /* Debugging check for stack overflow: is there less than 1KB free? */
25284@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25285 __asm__ __volatile__("andl %%esp,%0" :
25286 "=r" (sp) : "0" (THREAD_SIZE - 1));
25287
25288- return sp < (sizeof(struct thread_info) + STACK_WARN);
25289+ return sp < STACK_WARN;
25290 }
25291
25292 static void print_stack_overflow(void)
25293 {
25294 printk(KERN_WARNING "low stack detected by irq handler\n");
25295 dump_stack();
25296+ gr_handle_kernel_exploit();
25297 if (sysctl_panic_on_stackoverflow)
25298 panic("low stack detected by irq handler - check messages\n");
25299 }
25300@@ -84,10 +87,9 @@ static inline void *current_stack(void)
25301 static inline int
25302 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25303 {
25304- struct irq_stack *curstk, *irqstk;
25305+ struct irq_stack *irqstk;
25306 u32 *isp, *prev_esp, arg1, arg2;
25307
25308- curstk = (struct irq_stack *) current_stack();
25309 irqstk = __this_cpu_read(hardirq_stack);
25310
25311 /*
25312@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25313 * handler) we can't do that and just have to keep using the
25314 * current stack (which is the irq stack already after all)
25315 */
25316- if (unlikely(curstk == irqstk))
25317+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25318 return 0;
25319
25320- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25321+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25322
25323 /* Save the next esp at the bottom of the stack */
25324 prev_esp = (u32 *)irqstk;
25325 *prev_esp = current_stack_pointer;
25326
25327+#ifdef CONFIG_PAX_MEMORY_UDEREF
25328+ __set_fs(MAKE_MM_SEG(0));
25329+#endif
25330+
25331 if (unlikely(overflow))
25332 call_on_stack(print_stack_overflow, isp);
25333
25334@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25335 : "0" (irq), "1" (desc), "2" (isp),
25336 "D" (desc->handle_irq)
25337 : "memory", "cc", "ecx");
25338+
25339+#ifdef CONFIG_PAX_MEMORY_UDEREF
25340+ __set_fs(current_thread_info()->addr_limit);
25341+#endif
25342+
25343 return 1;
25344 }
25345
25346@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25347 */
25348 void irq_ctx_init(int cpu)
25349 {
25350- struct irq_stack *irqstk;
25351-
25352 if (per_cpu(hardirq_stack, cpu))
25353 return;
25354
25355- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25356- THREADINFO_GFP,
25357- THREAD_SIZE_ORDER));
25358- per_cpu(hardirq_stack, cpu) = irqstk;
25359-
25360- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25361- THREADINFO_GFP,
25362- THREAD_SIZE_ORDER));
25363- per_cpu(softirq_stack, cpu) = irqstk;
25364-
25365- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25366- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25367+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25368+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25369 }
25370
25371 void do_softirq_own_stack(void)
25372 {
25373- struct thread_info *curstk;
25374 struct irq_stack *irqstk;
25375 u32 *isp, *prev_esp;
25376
25377- curstk = current_stack();
25378 irqstk = __this_cpu_read(softirq_stack);
25379
25380 /* build the stack frame on the softirq stack */
25381@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
25382 prev_esp = (u32 *)irqstk;
25383 *prev_esp = current_stack_pointer;
25384
25385+#ifdef CONFIG_PAX_MEMORY_UDEREF
25386+ __set_fs(MAKE_MM_SEG(0));
25387+#endif
25388+
25389 call_on_stack(__do_softirq, isp);
25390+
25391+#ifdef CONFIG_PAX_MEMORY_UDEREF
25392+ __set_fs(current_thread_info()->addr_limit);
25393+#endif
25394+
25395 }
25396
25397 bool handle_irq(unsigned irq, struct pt_regs *regs)
25398@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25399 if (unlikely(!desc))
25400 return false;
25401
25402- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25403+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25404 if (unlikely(overflow))
25405 print_stack_overflow();
25406 desc->handle_irq(irq, desc);
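
Three independent changes land in irq_32.c above: the stack-overflow path now reports through gr_handle_kernel_exploit(), IRQ/softirq execution is bracketed by __set_fs() under UDEREF so handlers cannot reach userland, and the "already on the IRQ stack" test compares stack-pointer distance instead of thread_info pointers. The rewritten test as a standalone predicate (a sketch; the helper name is hypothetical):

	/* Hypothetical helper equivalent to the rewritten check above: true when
	 * the stack pointer already lies inside this CPU's hardirq stack.
	 * Unsigned wraparound rejects pointers below the stack base. */
	static inline bool on_hardirq_stack(unsigned long sp, const struct irq_stack *irqstk)
	{
		return sp - (unsigned long)irqstk < THREAD_SIZE;
	}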
25407diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25408index e4b503d..824fce8 100644
25409--- a/arch/x86/kernel/irq_64.c
25410+++ b/arch/x86/kernel/irq_64.c
25411@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25412 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25413 EXPORT_PER_CPU_SYMBOL(irq_regs);
25414
25415+extern void gr_handle_kernel_exploit(void);
25416+
25417 int sysctl_panic_on_stackoverflow;
25418
25419 /*
25420@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25421 u64 estack_top, estack_bottom;
25422 u64 curbase = (u64)task_stack_page(current);
25423
25424- if (user_mode_vm(regs))
25425+ if (user_mode(regs))
25426 return;
25427
25428 if (regs->sp >= curbase + sizeof(struct thread_info) +
25429@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25430 irq_stack_top, irq_stack_bottom,
25431 estack_top, estack_bottom);
25432
25433+ gr_handle_kernel_exploit();
25434+
25435 if (sysctl_panic_on_stackoverflow)
25436 panic("low stack detected by irq handler - check messages\n");
25437 #endif
25438diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25439index 26d5a55..a01160a 100644
25440--- a/arch/x86/kernel/jump_label.c
25441+++ b/arch/x86/kernel/jump_label.c
25442@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25443 * Jump label is enabled for the first time.
25444 * So we expect a default_nop...
25445 */
25446- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25447+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25448 != 0))
25449 bug_at((void *)entry->code, __LINE__);
25450 } else {
25451@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25452 * ...otherwise expect an ideal_nop. Otherwise
25453 * something went horribly wrong.
25454 */
25455- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25456+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25457 != 0))
25458 bug_at((void *)entry->code, __LINE__);
25459 }
25460@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25461 * are converting the default nop to the ideal nop.
25462 */
25463 if (init) {
25464- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25465+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25466 bug_at((void *)entry->code, __LINE__);
25467 } else {
25468 code.jump = 0xe9;
25469 code.offset = entry->target -
25470 (entry->code + JUMP_LABEL_NOP_SIZE);
25471- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25472+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25473 bug_at((void *)entry->code, __LINE__);
25474 }
25475 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
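
Every instruction-byte comparison above goes through ktla_ktva(): with KERNEXEC on i386 the kernel text is reachable at two addresses, and reads and writes must use the data alias rather than the execute-only linear address. Without KERNEXEC the translation pair collapses to the identity, which is why unmodified callers elsewhere keep working; a sketch of that fallback (the KERNEXEC variants offset by the distance between the two mappings):

	#ifndef CONFIG_PAX_KERNEXEC
	#define ktla_ktva(addr)	(addr)	/* text linear address -> accessible alias */
	#define ktva_ktla(addr)	(addr)	/* accessible alias -> text linear address */
	#endif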
25476diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25477index 7ec1d5f..5a7d130 100644
25478--- a/arch/x86/kernel/kgdb.c
25479+++ b/arch/x86/kernel/kgdb.c
25480@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25481 #ifdef CONFIG_X86_32
25482 switch (regno) {
25483 case GDB_SS:
25484- if (!user_mode_vm(regs))
25485+ if (!user_mode(regs))
25486 *(unsigned long *)mem = __KERNEL_DS;
25487 break;
25488 case GDB_SP:
25489- if (!user_mode_vm(regs))
25490+ if (!user_mode(regs))
25491 *(unsigned long *)mem = kernel_stack_pointer(regs);
25492 break;
25493 case GDB_GS:
25494@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25495 bp->attr.bp_addr = breakinfo[breakno].addr;
25496 bp->attr.bp_len = breakinfo[breakno].len;
25497 bp->attr.bp_type = breakinfo[breakno].type;
25498- info->address = breakinfo[breakno].addr;
25499+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25500+ info->address = ktla_ktva(breakinfo[breakno].addr);
25501+ else
25502+ info->address = breakinfo[breakno].addr;
25503 info->len = breakinfo[breakno].len;
25504 info->type = breakinfo[breakno].type;
25505 val = arch_install_hw_breakpoint(bp);
25506@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25507 case 'k':
25508 /* clear the trace bit */
25509 linux_regs->flags &= ~X86_EFLAGS_TF;
25510- atomic_set(&kgdb_cpu_doing_single_step, -1);
25511+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25512
25513 /* set the trace bit if we're stepping */
25514 if (remcomInBuffer[0] == 's') {
25515 linux_regs->flags |= X86_EFLAGS_TF;
25516- atomic_set(&kgdb_cpu_doing_single_step,
25517+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25518 raw_smp_processor_id());
25519 }
25520
25521@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25522
25523 switch (cmd) {
25524 case DIE_DEBUG:
25525- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25526+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25527 if (user_mode(regs))
25528 return single_step_cont(regs, args);
25529 break;
25530@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25531 #endif /* CONFIG_DEBUG_RODATA */
25532
25533 bpt->type = BP_BREAKPOINT;
25534- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25535+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25536 BREAK_INSTR_SIZE);
25537 if (err)
25538 return err;
25539- err = probe_kernel_write((char *)bpt->bpt_addr,
25540+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25541 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25542 #ifdef CONFIG_DEBUG_RODATA
25543 if (!err)
25544@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25545 return -EBUSY;
25546 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25547 BREAK_INSTR_SIZE);
25548- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25549+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25550 if (err)
25551 return err;
25552 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25553@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25554 if (mutex_is_locked(&text_mutex))
25555 goto knl_write;
25556 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25557- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25558+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25559 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25560 goto knl_write;
25561 return err;
25562 knl_write:
25563 #endif /* CONFIG_DEBUG_RODATA */
25564- return probe_kernel_write((char *)bpt->bpt_addr,
25565+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25566 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25567 }
25568
25569diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25570index 98f654d..ac04352 100644
25571--- a/arch/x86/kernel/kprobes/core.c
25572+++ b/arch/x86/kernel/kprobes/core.c
25573@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25574 s32 raddr;
25575 } __packed *insn;
25576
25577- insn = (struct __arch_relative_insn *)from;
25578+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25579+
25580+ pax_open_kernel();
25581 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25582 insn->op = op;
25583+ pax_close_kernel();
25584 }
25585
25586 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25587@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25588 kprobe_opcode_t opcode;
25589 kprobe_opcode_t *orig_opcodes = opcodes;
25590
25591- if (search_exception_tables((unsigned long)opcodes))
25592+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25593 return 0; /* Page fault may occur on this address. */
25594
25595 retry:
25596@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25597 * for the first byte, we can recover the original instruction
25598 * from it and kp->opcode.
25599 */
25600- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25601+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25602 buf[0] = kp->opcode;
25603- return (unsigned long)buf;
25604+ return ktva_ktla((unsigned long)buf);
25605 }
25606
25607 /*
25608@@ -338,7 +341,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25609 /* Another subsystem puts a breakpoint, failed to recover */
25610 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25611 return 0;
25612+ pax_open_kernel();
25613 memcpy(dest, insn.kaddr, insn.length);
25614+ pax_close_kernel();
25615
25616 #ifdef CONFIG_X86_64
25617 if (insn_rip_relative(&insn)) {
25618@@ -365,7 +370,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25619 return 0;
25620 }
25621 disp = (u8 *) dest + insn_offset_displacement(&insn);
25622+ pax_open_kernel();
25623 *(s32 *) disp = (s32) newdisp;
25624+ pax_close_kernel();
25625 }
25626 #endif
25627 return insn.length;
25628@@ -507,7 +514,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25629 * nor set current_kprobe, because it doesn't use single
25630 * stepping.
25631 */
25632- regs->ip = (unsigned long)p->ainsn.insn;
25633+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25634 preempt_enable_no_resched();
25635 return;
25636 }
25637@@ -524,9 +531,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25638 regs->flags &= ~X86_EFLAGS_IF;
25639 /* single step inline if the instruction is an int3 */
25640 if (p->opcode == BREAKPOINT_INSTRUCTION)
25641- regs->ip = (unsigned long)p->addr;
25642+ regs->ip = ktla_ktva((unsigned long)p->addr);
25643 else
25644- regs->ip = (unsigned long)p->ainsn.insn;
25645+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25646 }
25647 NOKPROBE_SYMBOL(setup_singlestep);
25648
25649@@ -576,7 +583,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25650 struct kprobe *p;
25651 struct kprobe_ctlblk *kcb;
25652
25653- if (user_mode_vm(regs))
25654+ if (user_mode(regs))
25655 return 0;
25656
25657 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25658@@ -611,7 +618,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25659 setup_singlestep(p, regs, kcb, 0);
25660 return 1;
25661 }
25662- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25663+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25664 /*
25665 * The breakpoint instruction was removed right
25666 * after we hit it. Another cpu has removed
25667@@ -658,6 +665,9 @@ static void __used kretprobe_trampoline_holder(void)
25668 " movq %rax, 152(%rsp)\n"
25669 RESTORE_REGS_STRING
25670 " popfq\n"
25671+#ifdef KERNEXEC_PLUGIN
25672+ " btsq $63,(%rsp)\n"
25673+#endif
25674 #else
25675 " pushf\n"
25676 SAVE_REGS_STRING
25677@@ -798,7 +808,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25678 struct kprobe_ctlblk *kcb)
25679 {
25680 unsigned long *tos = stack_addr(regs);
25681- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25682+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25683 unsigned long orig_ip = (unsigned long)p->addr;
25684 kprobe_opcode_t *insn = p->ainsn.insn;
25685
25686@@ -981,7 +991,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25687 struct die_args *args = data;
25688 int ret = NOTIFY_DONE;
25689
25690- if (args->regs && user_mode_vm(args->regs))
25691+ if (args->regs && user_mode(args->regs))
25692 return ret;
25693
25694 if (val == DIE_GPF) {
25695diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25696index 7c523bb..01b051b 100644
25697--- a/arch/x86/kernel/kprobes/opt.c
25698+++ b/arch/x86/kernel/kprobes/opt.c
25699@@ -79,6 +79,7 @@ found:
25700 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25701 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25702 {
25703+ pax_open_kernel();
25704 #ifdef CONFIG_X86_64
25705 *addr++ = 0x48;
25706 *addr++ = 0xbf;
25707@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25708 *addr++ = 0xb8;
25709 #endif
25710 *(unsigned long *)addr = val;
25711+ pax_close_kernel();
25712 }
25713
25714 asm (
25715@@ -339,7 +341,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25716 * Verify if the address gap is in 2GB range, because this uses
25717 * a relative jump.
25718 */
25719- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25720+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25721 if (abs(rel) > 0x7fffffff) {
25722 __arch_remove_optimized_kprobe(op, 0);
25723 return -ERANGE;
25724@@ -356,16 +358,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25725 op->optinsn.size = ret;
25726
25727 /* Copy arch-dep-instance from template */
25728- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25729+ pax_open_kernel();
25730+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25731+ pax_close_kernel();
25732
25733 /* Set probe information */
25734 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25735
25736 /* Set probe function call */
25737- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25738+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25739
25740 /* Set returning jmp instruction at the tail of out-of-line buffer */
25741- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25742+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25743 (u8 *)op->kp.addr + op->optinsn.size);
25744
25745 flush_icache_range((unsigned long) buf,
25746@@ -390,7 +394,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25747 WARN_ON(kprobe_disabled(&op->kp));
25748
25749 /* Backup instructions which will be replaced by jump address */
25750- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25751+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25752 RELATIVE_ADDR_SIZE);
25753
25754 insn_buf[0] = RELATIVEJUMP_OPCODE;
25755@@ -438,7 +442,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25756 /* This kprobe is really able to run optimized path. */
25757 op = container_of(p, struct optimized_kprobe, kp);
25758 /* Detour through copied instructions */
25759- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25760+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25761 if (!reenter)
25762 reset_current_kprobe();
25763 preempt_enable_no_resched();
25764diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25765index c2bedae..25e7ab60 100644
25766--- a/arch/x86/kernel/ksysfs.c
25767+++ b/arch/x86/kernel/ksysfs.c
25768@@ -184,7 +184,7 @@ out:
25769
25770 static struct kobj_attribute type_attr = __ATTR_RO(type);
25771
25772-static struct bin_attribute data_attr = {
25773+static bin_attribute_no_const data_attr __read_only = {
25774 .attr = {
25775 .name = "data",
25776 .mode = S_IRUGO,
25777diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25778index c37886d..d851d32 100644
25779--- a/arch/x86/kernel/ldt.c
25780+++ b/arch/x86/kernel/ldt.c
25781@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25782 if (reload) {
25783 #ifdef CONFIG_SMP
25784 preempt_disable();
25785- load_LDT(pc);
25786+ load_LDT_nolock(pc);
25787 if (!cpumask_equal(mm_cpumask(current->mm),
25788 cpumask_of(smp_processor_id())))
25789 smp_call_function(flush_ldt, current->mm, 1);
25790 preempt_enable();
25791 #else
25792- load_LDT(pc);
25793+ load_LDT_nolock(pc);
25794 #endif
25795 }
25796 if (oldsize) {
25797@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25798 return err;
25799
25800 for (i = 0; i < old->size; i++)
25801- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25802+ write_ldt_entry(new->ldt, i, old->ldt + i);
25803 return 0;
25804 }
25805
25806@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25807 retval = copy_ldt(&mm->context, &old_mm->context);
25808 mutex_unlock(&old_mm->context.lock);
25809 }
25810+
25811+ if (tsk == current) {
25812+ mm->context.vdso = 0;
25813+
25814+#ifdef CONFIG_X86_32
25815+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25816+ mm->context.user_cs_base = 0UL;
25817+ mm->context.user_cs_limit = ~0UL;
25818+
25819+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25820+ cpus_clear(mm->context.cpu_user_cs_mask);
25821+#endif
25822+
25823+#endif
25824+#endif
25825+
25826+ }
25827+
25828 return retval;
25829 }
25830
25831@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25832 }
25833 }
25834
25835+#ifdef CONFIG_PAX_SEGMEXEC
25836+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25837+ error = -EINVAL;
25838+ goto out_unlock;
25839+ }
25840+#endif
25841+
25842 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25843 error = -EINVAL;
25844 goto out_unlock;
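
The ldt.c changes above reset PaX per-mm state (vdso base, user code-segment bounds) when a new context is created for the current task and, under SEGMEXEC, refuse to install LDT code segments, since an attacker-chosen code descriptor could make data pages reachable through the executable half of the split address space. Seen from user space the rejected case is modify_ldt() returning EINVAL; an illustrative probe:

	/* Illustrative probe: under SEGMEXEC, installing an LDT code segment
	 * is rejected with EINVAL. */
	#include <asm/ldt.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdio.h>
	#include <errno.h>
	#include <string.h>

	int main(void)
	{
		struct user_desc d = {
			.entry_number	= 0,
			.base_addr	= 0,
			.limit		= 0xfffff,
			.seg_32bit	= 1,
			.contents	= MODIFY_LDT_CONTENTS_CODE,	/* the gated case */
			.limit_in_pages	= 1,
			.useable	= 1,
		};

		if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) < 0)
			printf("modify_ldt: %s\n", strerror(errno));	/* expect EINVAL */
		return 0;
	}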
25845diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25846index 469b23d..5449cfe 100644
25847--- a/arch/x86/kernel/machine_kexec_32.c
25848+++ b/arch/x86/kernel/machine_kexec_32.c
25849@@ -26,7 +26,7 @@
25850 #include <asm/cacheflush.h>
25851 #include <asm/debugreg.h>
25852
25853-static void set_idt(void *newidt, __u16 limit)
25854+static void set_idt(struct desc_struct *newidt, __u16 limit)
25855 {
25856 struct desc_ptr curidt;
25857
25858@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25859 }
25860
25861
25862-static void set_gdt(void *newgdt, __u16 limit)
25863+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25864 {
25865 struct desc_ptr curgdt;
25866
25867@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25868 }
25869
25870 control_page = page_address(image->control_code_page);
25871- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25872+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25873
25874 relocate_kernel_ptr = control_page;
25875 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25876diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25877index 94ea120..4154cea 100644
25878--- a/arch/x86/kernel/mcount_64.S
25879+++ b/arch/x86/kernel/mcount_64.S
25880@@ -7,7 +7,7 @@
25881 #include <linux/linkage.h>
25882 #include <asm/ptrace.h>
25883 #include <asm/ftrace.h>
25884-
25885+#include <asm/alternative-asm.h>
25886
25887 .code64
25888 .section .entry.text, "ax"
25889@@ -148,8 +148,9 @@
25890 #ifdef CONFIG_DYNAMIC_FTRACE
25891
25892 ENTRY(function_hook)
25893+ pax_force_retaddr
25894 retq
25895-END(function_hook)
25896+ENDPROC(function_hook)
25897
25898 ENTRY(ftrace_caller)
25899 /* save_mcount_regs fills in first two parameters */
25900@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25901 #endif
25902
25903 GLOBAL(ftrace_stub)
25904+ pax_force_retaddr
25905 retq
25906-END(ftrace_caller)
25907+ENDPROC(ftrace_caller)
25908
25909 ENTRY(ftrace_regs_caller)
25910 /* Save the current flags before any operations that can change them */
25911@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
25912
25913 jmp ftrace_return
25914
25915-END(ftrace_regs_caller)
25916+ENDPROC(ftrace_regs_caller)
25917
25918
25919 #else /* ! CONFIG_DYNAMIC_FTRACE */
25920@@ -272,18 +274,20 @@ fgraph_trace:
25921 #endif
25922
25923 GLOBAL(ftrace_stub)
25924+ pax_force_retaddr
25925 retq
25926
25927 trace:
25928 /* save_mcount_regs fills in first two parameters */
25929 save_mcount_regs
25930
25931+ pax_force_fptr ftrace_trace_function
25932 call *ftrace_trace_function
25933
25934 restore_mcount_regs
25935
25936 jmp fgraph_trace
25937-END(function_hook)
25938+ENDPROC(function_hook)
25939 #endif /* CONFIG_DYNAMIC_FTRACE */
25940 #endif /* CONFIG_FUNCTION_TRACER */
25941
25942@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
25943
25944 restore_mcount_regs
25945
25946+ pax_force_retaddr
25947 retq
25948-END(ftrace_graph_caller)
25949+ENDPROC(ftrace_graph_caller)
25950
25951 GLOBAL(return_to_handler)
25952 subq $24, %rsp
25953@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
25954 movq 8(%rsp), %rdx
25955 movq (%rsp), %rax
25956 addq $24, %rsp
25957+ pax_force_fptr %rdi
25958 jmp *%rdi
25959+ENDPROC(return_to_handler)
25960 #endif
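
Besides converting END to ENDPROC so the trampolines get proper function-type annotations, the mcount hunks above put pax_force_retaddr in front of every ret; under the KERNEXEC plugin this sets bit 63 of the saved return address, the same effect as the btsq $63,(%rsp) visible earlier in the kretprobe trampoline. A C analogue of the effect (illustrative helper, not part of the patch):

	/* Illustrative: for a legitimate kernel return address bit 63 is already
	 * set, so this is a no-op; an attacker-supplied userland address becomes
	 * non-canonical and faults instead of being executed. */
	static inline unsigned long pax_harden_retaddr(unsigned long ret)
	{
		return ret | (1UL << 63);
	}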
25961diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
25962index e69f988..72902b7 100644
25963--- a/arch/x86/kernel/module.c
25964+++ b/arch/x86/kernel/module.c
25965@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
25966 }
25967 #endif
25968
25969-void *module_alloc(unsigned long size)
25970+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
25971 {
25972- if (PAGE_ALIGN(size) > MODULES_LEN)
25973+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
25974 return NULL;
25975 return __vmalloc_node_range(size, 1,
25976 MODULES_VADDR + get_module_load_offset(),
25977- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
25978- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
25979+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
25980+ prot, NUMA_NO_NODE,
25981 __builtin_return_address(0));
25982 }
25983
25984+void *module_alloc(unsigned long size)
25985+{
25986+
25987+#ifdef CONFIG_PAX_KERNEXEC
25988+ return __module_alloc(size, PAGE_KERNEL);
25989+#else
25990+ return __module_alloc(size, PAGE_KERNEL_EXEC);
25991+#endif
25992+
25993+}
25994+
25995+#ifdef CONFIG_PAX_KERNEXEC
25996+#ifdef CONFIG_X86_32
25997+void *module_alloc_exec(unsigned long size)
25998+{
25999+ struct vm_struct *area;
26000+
26001+ if (size == 0)
26002+ return NULL;
26003+
26004+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
26005+	return area ? area->addr : NULL;
26006+}
26007+EXPORT_SYMBOL(module_alloc_exec);
26008+
26009+void module_memfree_exec(void *module_region)
26010+{
26011+ vunmap(module_region);
26012+}
26013+EXPORT_SYMBOL(module_memfree_exec);
26014+#else
26015+void module_memfree_exec(void *module_region)
26016+{
26017+ module_memfree(module_region);
26018+}
26019+EXPORT_SYMBOL(module_memfree_exec);
26020+
26021+void *module_alloc_exec(unsigned long size)
26022+{
26023+ return __module_alloc(size, PAGE_KERNEL_RX);
26024+}
26025+EXPORT_SYMBOL(module_alloc_exec);
26026+#endif
26027+#endif
26028+
26029 #ifdef CONFIG_X86_32
26030 int apply_relocate(Elf32_Shdr *sechdrs,
26031 const char *strtab,
26032@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26033 unsigned int i;
26034 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26035 Elf32_Sym *sym;
26036- uint32_t *location;
26037+ uint32_t *plocation, location;
26038
26039 DEBUGP("Applying relocate section %u to %u\n",
26040 relsec, sechdrs[relsec].sh_info);
26041 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26042 /* This is where to make the change */
26043- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26044- + rel[i].r_offset;
26045+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26046+ location = (uint32_t)plocation;
26047+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26048+ plocation = ktla_ktva((void *)plocation);
26049 /* This is the symbol it is referring to. Note that all
26050 undefined symbols have been resolved. */
26051 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26052@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26053 switch (ELF32_R_TYPE(rel[i].r_info)) {
26054 case R_386_32:
26055 /* We add the value into the location given */
26056- *location += sym->st_value;
26057+ pax_open_kernel();
26058+ *plocation += sym->st_value;
26059+ pax_close_kernel();
26060 break;
26061 case R_386_PC32:
26062 /* Add the value, subtract its position */
26063- *location += sym->st_value - (uint32_t)location;
26064+ pax_open_kernel();
26065+ *plocation += sym->st_value - location;
26066+ pax_close_kernel();
26067 break;
26068 default:
26069 pr_err("%s: Unknown relocation: %u\n",
26070@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26071 case R_X86_64_NONE:
26072 break;
26073 case R_X86_64_64:
26074+ pax_open_kernel();
26075 *(u64 *)loc = val;
26076+ pax_close_kernel();
26077 break;
26078 case R_X86_64_32:
26079+ pax_open_kernel();
26080 *(u32 *)loc = val;
26081+ pax_close_kernel();
26082 if (val != *(u32 *)loc)
26083 goto overflow;
26084 break;
26085 case R_X86_64_32S:
26086+ pax_open_kernel();
26087 *(s32 *)loc = val;
26088+ pax_close_kernel();
26089 if ((s64)val != *(s32 *)loc)
26090 goto overflow;
26091 break;
26092 case R_X86_64_PC32:
26093 val -= (u64)loc;
26094+ pax_open_kernel();
26095 *(u32 *)loc = val;
26096+ pax_close_kernel();
26097+
26098 #if 0
26099 if ((s64)val != *(s32 *)loc)
26100 goto overflow;
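
module.c above separates module memory by protection: with KERNEXEC, module_alloc() now returns non-executable PAGE_KERNEL memory, the new module_alloc_exec() supplies the executable region (PAGE_KERNEL_RX on amd64, a dedicated MODULES_EXEC_VADDR window on i386), and every relocation store is wrapped in pax_open_kernel()/pax_close_kernel(). A hypothetical loader step showing how the two pools combine (names and sizes illustrative):

	/* Hypothetical sketch: module text and data come from separate pools;
	 * the text pool's mapping is not writable, so stores go through the
	 * open/close bracket. */
	void *text = module_alloc_exec(text_size);	/* executable, not writable */
	void *data = module_alloc(data_size);		/* writable, not executable */

	pax_open_kernel();
	memcpy(text, text_image, text_size);		/* permitted inside the bracket */
	pax_close_kernel();
	memcpy(data, data_image, data_size);		/* ordinary store */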
26101diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26102index 113e707..0a690e1 100644
26103--- a/arch/x86/kernel/msr.c
26104+++ b/arch/x86/kernel/msr.c
26105@@ -39,6 +39,7 @@
26106 #include <linux/notifier.h>
26107 #include <linux/uaccess.h>
26108 #include <linux/gfp.h>
26109+#include <linux/grsecurity.h>
26110
26111 #include <asm/processor.h>
26112 #include <asm/msr.h>
26113@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26114 int err = 0;
26115 ssize_t bytes = 0;
26116
26117+#ifdef CONFIG_GRKERNSEC_KMEM
26118+ gr_handle_msr_write();
26119+ return -EPERM;
26120+#endif
26121+
26122 if (count % 8)
26123 return -EINVAL; /* Invalid chunk size */
26124
26125@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26126 err = -EBADF;
26127 break;
26128 }
26129+#ifdef CONFIG_GRKERNSEC_KMEM
26130+ gr_handle_msr_write();
26131+ return -EPERM;
26132+#endif
26133 if (copy_from_user(&regs, uregs, sizeof regs)) {
26134 err = -EFAULT;
26135 break;
26136@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26137 return notifier_from_errno(err);
26138 }
26139
26140-static struct notifier_block __refdata msr_class_cpu_notifier = {
26141+static struct notifier_block msr_class_cpu_notifier = {
26142 .notifier_call = msr_class_cpu_callback,
26143 };
26144
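
With CONFIG_GRKERNSEC_KMEM, msr_write() and the WRMSR ioctl path above log through gr_handle_msr_write() and then refuse with -EPERM; reads stay available. Through /dev/cpu/N/msr this surfaces as writes failing while reads succeed; an illustrative probe that writes back a value it just read (harmless even if the write were permitted):

	/* Illustrative: reads from the MSR device keep working, writes fail
	 * with EPERM under GRKERNSEC_KMEM. 0x1a0 is IA32_MISC_ENABLE. */
	#include <fcntl.h>
	#include <unistd.h>
	#include <stdio.h>
	#include <errno.h>
	#include <string.h>
	#include <stdint.h>

	int main(void)
	{
		int fd = open("/dev/cpu/0/msr", O_RDWR);
		uint64_t val;

		if (fd < 0)
			return 1;
		if (pread(fd, &val, sizeof(val), 0x1a0) == sizeof(val) &&
		    pwrite(fd, &val, sizeof(val), 0x1a0) != sizeof(val))
			printf("msr write: %s\n", strerror(errno));	/* expect EPERM */
		close(fd);
		return 0;
	}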
26145diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26146index c3e985d..110a36a 100644
26147--- a/arch/x86/kernel/nmi.c
26148+++ b/arch/x86/kernel/nmi.c
26149@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26150
26151 static void nmi_max_handler(struct irq_work *w)
26152 {
26153- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26154+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26155 int remainder_ns, decimal_msecs;
26156- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26157+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26158
26159 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26160 decimal_msecs = remainder_ns / 1000;
26161
26162 printk_ratelimited(KERN_INFO
26163 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26164- a->handler, whole_msecs, decimal_msecs);
26165+ n->action->handler, whole_msecs, decimal_msecs);
26166 }
26167
26168 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26169@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26170 delta = sched_clock() - delta;
26171 trace_nmi_handler(a->handler, (int)delta, thishandled);
26172
26173- if (delta < nmi_longest_ns || delta < a->max_duration)
26174+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26175 continue;
26176
26177- a->max_duration = delta;
26178- irq_work_queue(&a->irq_work);
26179+ a->work->max_duration = delta;
26180+ irq_work_queue(&a->work->irq_work);
26181 }
26182
26183 rcu_read_unlock();
26184@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26185 }
26186 NOKPROBE_SYMBOL(nmi_handle);
26187
26188-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26189+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26190 {
26191 struct nmi_desc *desc = nmi_to_desc(type);
26192 unsigned long flags;
26193@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26194 if (!action->handler)
26195 return -EINVAL;
26196
26197- init_irq_work(&action->irq_work, nmi_max_handler);
26198+ action->work->action = action;
26199+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26200
26201 spin_lock_irqsave(&desc->lock, flags);
26202
26203@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26204 * event confuses some handlers (kdump uses this flag)
26205 */
26206 if (action->flags & NMI_FLAG_FIRST)
26207- list_add_rcu(&action->list, &desc->head);
26208+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26209 else
26210- list_add_tail_rcu(&action->list, &desc->head);
26211+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26212
26213 spin_unlock_irqrestore(&desc->lock, flags);
26214 return 0;
26215@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26216 if (!strcmp(n->name, name)) {
26217 WARN(in_nmi(),
26218 "Trying to free NMI (%s) from NMI context!\n", n->name);
26219- list_del_rcu(&n->list);
26220+ pax_list_del_rcu((struct list_head *)&n->list);
26221 break;
26222 }
26223 }
26224@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26225 dotraplinkage notrace void
26226 do_nmi(struct pt_regs *regs, long error_code)
26227 {
26228+
26229+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26230+ if (!user_mode(regs)) {
26231+ unsigned long cs = regs->cs & 0xFFFF;
26232+ unsigned long ip = ktva_ktla(regs->ip);
26233+
26234+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26235+ regs->ip = ip;
26236+ }
26237+#endif
26238+
26239 nmi_nesting_preprocess(regs);
26240
26241 nmi_enter();
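
The nmi.c rework above pulls the mutable pieces (the irq_work and the max_duration statistic) out of struct nmiaction into a separate struct nmiwork with a back-pointer, so registrations themselves can be const; note the __register_nmi_handler() prototype change and the pax_list_*_rcu() calls that cast around the now-const list head. Abbreviated shape of the split (a sketch, not the patch's exact layout):

	/* Abbreviated sketch of the const/mutable split used above. */
	struct nmiaction;

	struct nmiwork {
		struct irq_work		irq_work;	/* queued from NMI context */
		u64			max_duration;	/* mutable statistic */
		const struct nmiaction	*action;	/* back-pointer for reporting */
	};

	struct nmiaction {
		struct list_head	list;
		nmi_handler_t		handler;
		const char		*name;
		struct nmiwork		*work;		/* the only mutable part */
	};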
26242diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26243index 6d9582e..f746287 100644
26244--- a/arch/x86/kernel/nmi_selftest.c
26245+++ b/arch/x86/kernel/nmi_selftest.c
26246@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26247 {
26248 /* trap all the unknown NMIs we may generate */
26249 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26250- __initdata);
26251+ __initconst);
26252 }
26253
26254 static void __init cleanup_nmi_testsuite(void)
26255@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26256 unsigned long timeout;
26257
26258 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26259- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26260+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26261 nmi_fail = FAILURE;
26262 return;
26263 }
26264diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26265index bbb6c73..24a58ef 100644
26266--- a/arch/x86/kernel/paravirt-spinlocks.c
26267+++ b/arch/x86/kernel/paravirt-spinlocks.c
26268@@ -8,7 +8,7 @@
26269
26270 #include <asm/paravirt.h>
26271
26272-struct pv_lock_ops pv_lock_ops = {
26273+struct pv_lock_ops pv_lock_ops __read_only = {
26274 #ifdef CONFIG_SMP
26275 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26276 .unlock_kick = paravirt_nop,
26277diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26278index 548d25f..f8fb99c 100644
26279--- a/arch/x86/kernel/paravirt.c
26280+++ b/arch/x86/kernel/paravirt.c
26281@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26282 {
26283 return x;
26284 }
26285+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26286+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26287+#endif
26288
26289 void __init default_banner(void)
26290 {
26291@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26292
26293 if (opfunc == NULL)
26294 /* If there's no function, patch it with a ud2a (BUG) */
26295- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26296- else if (opfunc == _paravirt_nop)
26297+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26298+ else if (opfunc == (void *)_paravirt_nop)
26299 /* If the operation is a nop, then nop the callsite */
26300 ret = paravirt_patch_nop();
26301
26302 /* identity functions just return their single argument */
26303- else if (opfunc == _paravirt_ident_32)
26304+ else if (opfunc == (void *)_paravirt_ident_32)
26305 ret = paravirt_patch_ident_32(insnbuf, len);
26306- else if (opfunc == _paravirt_ident_64)
26307+ else if (opfunc == (void *)_paravirt_ident_64)
26308 ret = paravirt_patch_ident_64(insnbuf, len);
26309+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26310+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26311+ ret = paravirt_patch_ident_64(insnbuf, len);
26312+#endif
26313
26314 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26315 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26316@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26317 if (insn_len > len || start == NULL)
26318 insn_len = len;
26319 else
26320- memcpy(insnbuf, start, insn_len);
26321+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26322
26323 return insn_len;
26324 }
26325@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26326 return this_cpu_read(paravirt_lazy_mode);
26327 }
26328
26329-struct pv_info pv_info = {
26330+struct pv_info pv_info __read_only = {
26331 .name = "bare hardware",
26332 .paravirt_enabled = 0,
26333 .kernel_rpl = 0,
26334@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26335 #endif
26336 };
26337
26338-struct pv_init_ops pv_init_ops = {
26339+struct pv_init_ops pv_init_ops __read_only = {
26340 .patch = native_patch,
26341 };
26342
26343-struct pv_time_ops pv_time_ops = {
26344+struct pv_time_ops pv_time_ops __read_only = {
26345 .sched_clock = native_sched_clock,
26346 .steal_clock = native_steal_clock,
26347 };
26348
26349-__visible struct pv_irq_ops pv_irq_ops = {
26350+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26351 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26352 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26353 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26354@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26355 #endif
26356 };
26357
26358-__visible struct pv_cpu_ops pv_cpu_ops = {
26359+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26360 .cpuid = native_cpuid,
26361 .get_debugreg = native_get_debugreg,
26362 .set_debugreg = native_set_debugreg,
26363@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26364 NOKPROBE_SYMBOL(native_set_debugreg);
26365 NOKPROBE_SYMBOL(native_load_idt);
26366
26367-struct pv_apic_ops pv_apic_ops = {
26368+struct pv_apic_ops pv_apic_ops __read_only = {
26369 #ifdef CONFIG_X86_LOCAL_APIC
26370 .startup_ipi_hook = paravirt_nop,
26371 #endif
26372 };
26373
26374-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26375+#ifdef CONFIG_X86_32
26376+#ifdef CONFIG_X86_PAE
26377+/* 64-bit pagetable entries */
26378+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26379+#else
26380 /* 32-bit pagetable entries */
26381 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26382+#endif
26383 #else
26384 /* 64-bit pagetable entries */
26385 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26386 #endif
26387
26388-struct pv_mmu_ops pv_mmu_ops = {
26389+struct pv_mmu_ops pv_mmu_ops __read_only = {
26390
26391 .read_cr2 = native_read_cr2,
26392 .write_cr2 = native_write_cr2,
26393@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26394 .make_pud = PTE_IDENT,
26395
26396 .set_pgd = native_set_pgd,
26397+ .set_pgd_batched = native_set_pgd_batched,
26398 #endif
26399 #endif /* PAGETABLE_LEVELS >= 3 */
26400
26401@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26402 },
26403
26404 .set_fixmap = native_set_fixmap,
26405+
26406+#ifdef CONFIG_PAX_KERNEXEC
26407+ .pax_open_kernel = native_pax_open_kernel,
26408+ .pax_close_kernel = native_pax_close_kernel,
26409+#endif
26410+
26411 };
26412
26413 EXPORT_SYMBOL_GPL(pv_time_ops);
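
paravirt.c above marks every pv_*_ops table __read_only: the tables are patched during early boot, before write protection is armed, and never legitimately change afterwards, so leaving them writable would expose a kernel-wide table of function pointers. (The PAE-specific hunks also give _paravirt_ident_64 a callee-save thunk so 64-bit pte values survive the identity patching on 32-bit.) The annotation pattern generalizes to any boot-time-only ops table (sketch; names hypothetical):

	/* Hypothetical example of the annotation pattern: statically initialised,
	 * possibly patched during early boot, immutable at runtime. */
	static struct example_ops {
		void (*enter)(void);
		void (*leave)(void);
	} example_ops __read_only = {
		.enter = native_enter,
		.leave = native_leave,
	};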
26414diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26415index a1da673..b6f5831 100644
26416--- a/arch/x86/kernel/paravirt_patch_64.c
26417+++ b/arch/x86/kernel/paravirt_patch_64.c
26418@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26419 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26420 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26421 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26422+
26423+#ifndef CONFIG_PAX_MEMORY_UDEREF
26424 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26425+#endif
26426+
26427 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26428 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26429
26430@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26431 PATCH_SITE(pv_mmu_ops, read_cr3);
26432 PATCH_SITE(pv_mmu_ops, write_cr3);
26433 PATCH_SITE(pv_cpu_ops, clts);
26434+
26435+#ifndef CONFIG_PAX_MEMORY_UDEREF
26436 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26437+#endif
26438+
26439 PATCH_SITE(pv_cpu_ops, wbinvd);
26440
26441 patch_site:
26442diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26443index 0497f71..7186c0d 100644
26444--- a/arch/x86/kernel/pci-calgary_64.c
26445+++ b/arch/x86/kernel/pci-calgary_64.c
26446@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26447 tce_space = be64_to_cpu(readq(target));
26448 tce_space = tce_space & TAR_SW_BITS;
26449
26450- tce_space = tce_space & (~specified_table_size);
26451+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26452 info->tce_space = (u64 *)__va(tce_space);
26453 }
26454 }
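
The one-character Calgary fix above addresses an integer-promotion bug: if specified_table_size is a 32-bit quantity, ~specified_table_size is computed in 32 bits and zero-extended, so the mask silently clears the upper half of the 64-bit tce_space. A worked illustration (types assumed for the example):

	/* Illustrative: why the cast matters when masking a 64-bit value. */
	u64 tce_space = 0x0000123480000000ULL;
	u32 size = 0x1fff;

	u64 wrong = tce_space & ~size;			/* ~size zero-extends: high 32 bits lost */
	u64 right = tce_space & ~(unsigned long)size;	/* promote first: high bits preserved */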
26455diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26456index 35ccf75..7a15747 100644
26457--- a/arch/x86/kernel/pci-iommu_table.c
26458+++ b/arch/x86/kernel/pci-iommu_table.c
26459@@ -2,7 +2,7 @@
26460 #include <asm/iommu_table.h>
26461 #include <linux/string.h>
26462 #include <linux/kallsyms.h>
26463-
26464+#include <linux/sched.h>
26465
26466 #define DEBUG 1
26467
26468diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26469index 77dd0ad..9ec4723 100644
26470--- a/arch/x86/kernel/pci-swiotlb.c
26471+++ b/arch/x86/kernel/pci-swiotlb.c
26472@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26473 struct dma_attrs *attrs)
26474 {
26475 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26476- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26477+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26478 else
26479 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26480 }
26481diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26482index e127dda..94e384d 100644
26483--- a/arch/x86/kernel/process.c
26484+++ b/arch/x86/kernel/process.c
26485@@ -36,7 +36,8 @@
26486 * section. Since TSS's are completely CPU-local, we want them
26487 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26488 */
26489-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26490+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26491+EXPORT_SYMBOL(init_tss);
26492
26493 #ifdef CONFIG_X86_64
26494 static DEFINE_PER_CPU(unsigned char, is_idle);
26495@@ -94,7 +95,7 @@ void arch_task_cache_init(void)
26496 task_xstate_cachep =
26497 kmem_cache_create("task_xstate", xstate_size,
26498 __alignof__(union thread_xstate),
26499- SLAB_PANIC | SLAB_NOTRACK, NULL);
26500+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26501 setup_xstate_comp();
26502 }
26503
26504@@ -108,7 +109,7 @@ void exit_thread(void)
26505 unsigned long *bp = t->io_bitmap_ptr;
26506
26507 if (bp) {
26508- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26509+ struct tss_struct *tss = init_tss + get_cpu();
26510
26511 t->io_bitmap_ptr = NULL;
26512 clear_thread_flag(TIF_IO_BITMAP);
26513@@ -128,6 +129,9 @@ void flush_thread(void)
26514 {
26515 struct task_struct *tsk = current;
26516
26517+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26518+ loadsegment(gs, 0);
26519+#endif
26520 flush_ptrace_hw_breakpoint(tsk);
26521 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26522 drop_init_fpu(tsk);
26523@@ -274,7 +278,7 @@ static void __exit_idle(void)
26524 void exit_idle(void)
26525 {
26526 /* idle loop has pid 0 */
26527- if (current->pid)
26528+ if (task_pid_nr(current))
26529 return;
26530 __exit_idle();
26531 }
26532@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
26533 return ret;
26534 }
26535 #endif
26536-void stop_this_cpu(void *dummy)
26537+__noreturn void stop_this_cpu(void *dummy)
26538 {
26539 local_irq_disable();
26540 /*
26541@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
26542 }
26543 early_param("idle", idle_setup);
26544
26545-unsigned long arch_align_stack(unsigned long sp)
26546+#ifdef CONFIG_PAX_RANDKSTACK
26547+void pax_randomize_kstack(struct pt_regs *regs)
26548 {
26549- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26550- sp -= get_random_int() % 8192;
26551- return sp & ~0xf;
26552-}
26553+ struct thread_struct *thread = &current->thread;
26554+ unsigned long time;
26555
26556-unsigned long arch_randomize_brk(struct mm_struct *mm)
26557-{
26558- unsigned long range_end = mm->brk + 0x02000000;
26559- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26560-}
26561+ if (!randomize_va_space)
26562+ return;
26563+
26564+ if (v8086_mode(regs))
26565+ return;
26566
26567+ rdtscl(time);
26568+
26569+ /* P4 seems to return a 0 LSB, ignore it */
26570+#ifdef CONFIG_MPENTIUM4
26571+ time &= 0x3EUL;
26572+ time <<= 2;
26573+#elif defined(CONFIG_X86_64)
26574+ time &= 0xFUL;
26575+ time <<= 4;
26576+#else
26577+ time &= 0x1FUL;
26578+ time <<= 3;
26579+#endif
26580+
26581+ thread->sp0 ^= time;
26582+ load_sp0(init_tss + smp_processor_id(), thread);
26583+
26584+#ifdef CONFIG_X86_64
26585+ this_cpu_write(kernel_stack, thread->sp0);
26586+#endif
26587+}
26588+#endif
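
pax_randomize_kstack() above, added under CONFIG_PAX_RANDKSTACK, XORs a few low TSC bits into thread.sp0 so the kernel stack top varies between entries (its caller lives in the entry code elsewhere in this patch). The masks keep the perturbation aligned and bounded: on x86_64, 4 bits shifted left by 4 give a 16-byte-aligned offset in [0, 240]; on Pentium 4 the TSC LSB reads as zero, so bits 1-5 are taken instead. A worked reading of the 64-bit case (illustrative values):

	/* Illustrative: how the x86_64 masking turns raw TSC bits into a
	 * 16-byte-aligned stack perturbation. */
	unsigned long time = 0x12345678;	/* pretend rdtscl() result */

	time &= 0xFUL;	/* keep 4 low bits   -> 0x8  */
	time <<= 4;	/* 16-byte alignment -> 0x80 */
	/* thread->sp0 ^= time: sp0 moves by 0..240 bytes per entry. */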
26589diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26590index 8f3ebfe..cbc731b 100644
26591--- a/arch/x86/kernel/process_32.c
26592+++ b/arch/x86/kernel/process_32.c
26593@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26594 unsigned long thread_saved_pc(struct task_struct *tsk)
26595 {
26596 return ((unsigned long *)tsk->thread.sp)[3];
26597+//XXX return tsk->thread.eip;
26598 }
26599
26600 void __show_regs(struct pt_regs *regs, int all)
26601@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26602 unsigned long sp;
26603 unsigned short ss, gs;
26604
26605- if (user_mode_vm(regs)) {
26606+ if (user_mode(regs)) {
26607 sp = regs->sp;
26608 ss = regs->ss & 0xffff;
26609- gs = get_user_gs(regs);
26610 } else {
26611 sp = kernel_stack_pointer(regs);
26612 savesegment(ss, ss);
26613- savesegment(gs, gs);
26614 }
26615+ gs = get_user_gs(regs);
26616
26617 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26618 (u16)regs->cs, regs->ip, regs->flags,
26619- smp_processor_id());
26620+ raw_smp_processor_id());
26621 print_symbol("EIP is at %s\n", regs->ip);
26622
26623 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26624@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26625 int copy_thread(unsigned long clone_flags, unsigned long sp,
26626 unsigned long arg, struct task_struct *p)
26627 {
26628- struct pt_regs *childregs = task_pt_regs(p);
26629+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26630 struct task_struct *tsk;
26631 int err;
26632
26633 p->thread.sp = (unsigned long) childregs;
26634 p->thread.sp0 = (unsigned long) (childregs+1);
26635+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26636 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26637
26638 if (unlikely(p->flags & PF_KTHREAD)) {
26639 /* kernel thread */
26640 memset(childregs, 0, sizeof(struct pt_regs));
26641 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26642- task_user_gs(p) = __KERNEL_STACK_CANARY;
26643- childregs->ds = __USER_DS;
26644- childregs->es = __USER_DS;
26645+ savesegment(gs, childregs->gs);
26646+ childregs->ds = __KERNEL_DS;
26647+ childregs->es = __KERNEL_DS;
26648 childregs->fs = __KERNEL_PERCPU;
26649 childregs->bx = sp; /* function */
26650 childregs->bp = arg;
26651@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26652 struct thread_struct *prev = &prev_p->thread,
26653 *next = &next_p->thread;
26654 int cpu = smp_processor_id();
26655- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26656+ struct tss_struct *tss = init_tss + cpu;
26657 fpu_switch_t fpu;
26658
26659 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26660@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26661 */
26662 lazy_save_gs(prev->gs);
26663
26664+#ifdef CONFIG_PAX_MEMORY_UDEREF
26665+ __set_fs(task_thread_info(next_p)->addr_limit);
26666+#endif
26667+
26668 /*
26669 * Load the per-thread Thread-Local Storage descriptor.
26670 */
26671@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26672 */
26673 arch_end_context_switch(next_p);
26674
26675- this_cpu_write(kernel_stack,
26676- (unsigned long)task_stack_page(next_p) +
26677- THREAD_SIZE - KERNEL_STACK_OFFSET);
26678+ this_cpu_write(current_task, next_p);
26679+ this_cpu_write(current_tinfo, &next_p->tinfo);
26680+ this_cpu_write(kernel_stack, next->sp0);
26681
26682 /*
26683 * Restore %gs if needed (which is common)
26684@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26685
26686 switch_fpu_finish(next_p, fpu);
26687
26688- this_cpu_write(current_task, next_p);
26689-
26690 return prev_p;
26691 }
26692
26693@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26694 } while (count++ < 16);
26695 return 0;
26696 }
26697-
26698diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26699index 5a2c029..ec8611d 100644
26700--- a/arch/x86/kernel/process_64.c
26701+++ b/arch/x86/kernel/process_64.c
26702@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26703 struct pt_regs *childregs;
26704 struct task_struct *me = current;
26705
26706- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26707+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26708 childregs = task_pt_regs(p);
26709 p->thread.sp = (unsigned long) childregs;
26710 p->thread.usersp = me->thread.usersp;
26711+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26712 set_tsk_thread_flag(p, TIF_FORK);
26713 p->thread.io_bitmap_ptr = NULL;
26714
26715@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26716 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26717 savesegment(es, p->thread.es);
26718 savesegment(ds, p->thread.ds);
26719+ savesegment(ss, p->thread.ss);
26720+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26721 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26722
26723 if (unlikely(p->flags & PF_KTHREAD)) {
26724@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26725 struct thread_struct *prev = &prev_p->thread;
26726 struct thread_struct *next = &next_p->thread;
26727 int cpu = smp_processor_id();
26728- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26729+ struct tss_struct *tss = init_tss + cpu;
26730 unsigned fsindex, gsindex;
26731 fpu_switch_t fpu;
26732
26733@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26734 if (unlikely(next->ds | prev->ds))
26735 loadsegment(ds, next->ds);
26736
26737+ savesegment(ss, prev->ss);
26738+ if (unlikely(next->ss != prev->ss))
26739+ loadsegment(ss, next->ss);
26740+
26741 /*
26742 * Switch FS and GS.
26743 *
26744@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26745 prev->usersp = this_cpu_read(old_rsp);
26746 this_cpu_write(old_rsp, next->usersp);
26747 this_cpu_write(current_task, next_p);
26748+ this_cpu_write(current_tinfo, &next_p->tinfo);
26749
26750 /*
26751 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26752@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26753 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26754 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26755
26756- this_cpu_write(kernel_stack,
26757- (unsigned long)task_stack_page(next_p) +
26758- THREAD_SIZE - KERNEL_STACK_OFFSET);
26759+ this_cpu_write(kernel_stack, next->sp0);
26760
26761 /*
26762 * Now maybe reload the debug registers and handle I/O bitmaps
26763@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26764 if (!p || p == current || p->state == TASK_RUNNING)
26765 return 0;
26766 stack = (unsigned long)task_stack_page(p);
26767- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26768+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26769 return 0;
26770 fp = *(u64 *)(p->thread.sp);
26771 do {
26772- if (fp < (unsigned long)stack ||
26773- fp >= (unsigned long)stack+THREAD_SIZE)
26774+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26775 return 0;
26776 ip = *(u64 *)(fp+8);
26777 if (!in_sched_functions(ip))
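
The process_64.c hunks above all follow from one layout decision: thread.sp0 now sits 16 bytes below the true top of the stack allocation, so the per-CPU kernel_stack can simply mirror next->sp0, and get_wchan()'s plausibility window shrinks by the same 16 bytes. A standalone model of the adjusted bounds check (plain userspace C, with THREAD_SIZE assumed to be the 16KB x86_64 value; this is a sketch, not the kernel's own code):

#include <assert.h>
#include <stdint.h>

#define THREAD_SIZE (16UL * 1024)   /* assumed x86_64 value */

/* mirrors: sp < stack || sp > stack + THREAD_SIZE - 16 - sizeof(u64) */
static int sp_plausible(uintptr_t stack, uintptr_t sp)
{
    return sp >= stack &&
           sp <= stack + THREAD_SIZE - 16 - sizeof(uint64_t);
}

int main(void)
{
    uintptr_t stack = 0x7f0000000000UL;   /* illustrative, aligned base */

    assert(sp_plausible(stack, stack + 0x100));
    /* the reserved 16 bytes at the top are no longer acceptable */
    assert(!sp_plausible(stack, stack + THREAD_SIZE - 8));
    return 0;
}
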
26778diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26779index e510618..5165ac0 100644
26780--- a/arch/x86/kernel/ptrace.c
26781+++ b/arch/x86/kernel/ptrace.c
26782@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26783 unsigned long sp = (unsigned long)&regs->sp;
26784 u32 *prev_esp;
26785
26786- if (context == (sp & ~(THREAD_SIZE - 1)))
26787+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26788 return sp;
26789
26790- prev_esp = (u32 *)(context);
26791+ prev_esp = *(u32 **)(context);
26792 if (prev_esp)
26793 return (unsigned long)prev_esp;
26794
26795@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26796 if (child->thread.gs != value)
26797 return do_arch_prctl(child, ARCH_SET_GS, value);
26798 return 0;
26799+
26800+ case offsetof(struct user_regs_struct,ip):
26801+ /*
26802+ * Protect against any attempt to set ip to an
26803+ * impossible address. There are dragons lurking if the
26804+ * address is noncanonical. (This explicitly allows
26805+ * setting ip to TASK_SIZE_MAX, because user code can do
26806+ * that all by itself by running off the end of its
26807+ * address space.
26808+ */
26809+ if (value > TASK_SIZE_MAX)
26810+ return -EIO;
26811+ break;
26812+
26813 #endif
26814 }
26815
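
A userspace sketch of what the new putreg() bound rejects (x86_64 assumed): a tracer poking a noncanonical value into the tracee's instruction pointer now gets EIO back instead of planting a return address that would fault unpredictably. The target value below is arbitrary; any address above TASK_SIZE_MAX behaves the same.

#include <errno.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    pid_t child = fork();

    if (child == 0) {
        ptrace(PTRACE_TRACEME, 0, NULL, NULL);
        raise(SIGSTOP);                 /* let the parent attach */
        _exit(0);
    }
    waitpid(child, NULL, 0);

    /* rip's offset in the user area, as PTRACE_POKEUSER expects */
    unsigned long off = offsetof(struct user_regs_struct, rip);
    long ret = ptrace(PTRACE_POKEUSER, child,
                      (void *)off, (void *)0xffff800000000000UL);

    printf("poke rip: %ld (%s)\n", ret, ret ? strerror(errno) : "ok");

    kill(child, SIGKILL);
    return 0;
}
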
26816@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26817 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26818 {
26819 int i;
26820- int dr7 = 0;
26821+ unsigned long dr7 = 0;
26822 struct arch_hw_breakpoint *info;
26823
26824 for (i = 0; i < HBP_NUM; i++) {
26825@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26826 unsigned long addr, unsigned long data)
26827 {
26828 int ret;
26829- unsigned long __user *datap = (unsigned long __user *)data;
26830+ unsigned long __user *datap = (__force unsigned long __user *)data;
26831
26832 switch (request) {
26833 /* read the word at location addr in the USER area. */
26834@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26835 if ((int) addr < 0)
26836 return -EIO;
26837 ret = do_get_thread_area(child, addr,
26838- (struct user_desc __user *)data);
26839+ (__force struct user_desc __user *) data);
26840 break;
26841
26842 case PTRACE_SET_THREAD_AREA:
26843 if ((int) addr < 0)
26844 return -EIO;
26845 ret = do_set_thread_area(child, addr,
26846- (struct user_desc __user *)data, 0);
26847+ (__force struct user_desc __user *) data, 0);
26848 break;
26849 #endif
26850
26851@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26852
26853 #ifdef CONFIG_X86_64
26854
26855-static struct user_regset x86_64_regsets[] __read_mostly = {
26856+static user_regset_no_const x86_64_regsets[] __read_only = {
26857 [REGSET_GENERAL] = {
26858 .core_note_type = NT_PRSTATUS,
26859 .n = sizeof(struct user_regs_struct) / sizeof(long),
26860@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26861 #endif /* CONFIG_X86_64 */
26862
26863 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26864-static struct user_regset x86_32_regsets[] __read_mostly = {
26865+static user_regset_no_const x86_32_regsets[] __read_only = {
26866 [REGSET_GENERAL] = {
26867 .core_note_type = NT_PRSTATUS,
26868 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26869@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26870 */
26871 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26872
26873-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26874+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26875 {
26876 #ifdef CONFIG_X86_64
26877 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26878@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26879 memset(info, 0, sizeof(*info));
26880 info->si_signo = SIGTRAP;
26881 info->si_code = si_code;
26882- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26883+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26884 }
26885
26886 void user_single_step_siginfo(struct task_struct *tsk,
26887@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26888 }
26889 }
26890
26891+#ifdef CONFIG_GRKERNSEC_SETXID
26892+extern void gr_delayed_cred_worker(void);
26893+#endif
26894+
26895 /*
26896 * We can return 0 to resume the syscall or anything else to go to phase
26897 * 2. If we resume the syscall, we need to put something appropriate in
26898@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26899
26900 BUG_ON(regs != task_pt_regs(current));
26901
26902+#ifdef CONFIG_GRKERNSEC_SETXID
26903+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26904+ gr_delayed_cred_worker();
26905+#endif
26906+
26907 /*
26908 * If we stepped into a sysenter/syscall insn, it trapped in
26909 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26910@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26911 */
26912 user_exit();
26913
26914+#ifdef CONFIG_GRKERNSEC_SETXID
26915+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26916+ gr_delayed_cred_worker();
26917+#endif
26918+
26919 audit_syscall_exit(regs);
26920
26921 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
26922diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26923index 2f355d2..e75ed0a 100644
26924--- a/arch/x86/kernel/pvclock.c
26925+++ b/arch/x86/kernel/pvclock.c
26926@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26927 reset_hung_task_detector();
26928 }
26929
26930-static atomic64_t last_value = ATOMIC64_INIT(0);
26931+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26932
26933 void pvclock_resume(void)
26934 {
26935- atomic64_set(&last_value, 0);
26936+ atomic64_set_unchecked(&last_value, 0);
26937 }
26938
26939 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26940@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26941 * updating at the same time, and one of them could be slightly behind,
26942 * making the assumption that last_value always goes forward fail to hold.
26943 */
26944- last = atomic64_read(&last_value);
26945+ last = atomic64_read_unchecked(&last_value);
26946 do {
26947 if (ret < last)
26948 return last;
26949- last = atomic64_cmpxchg(&last_value, last, ret);
26950+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
26951 } while (unlikely(last != ret));
26952
26953 return ret;
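
The _unchecked conversions above are PaX/grsecurity's opt-out from REFCOUNT overflow instrumentation: a clock accumulator is not a reference count, so trapping on wraparound would be wrong here. The clamp loop itself, which guarantees monotonic reads across racing CPUs, can be modelled with C11 atomics (a sketch standing in for the kernel's atomic64 helpers):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t last_value;

static uint64_t monotonic_read(uint64_t raw)
{
    uint64_t last = atomic_load(&last_value);

    do {
        /* someone already published a later value: return theirs */
        if (raw < last)
            return last;
        /* try to publish raw; on failure, last is reloaded */
    } while (!atomic_compare_exchange_weak(&last_value, &last, raw));

    return raw;
}
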
26954diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
26955index bae6c60..b438619 100644
26956--- a/arch/x86/kernel/reboot.c
26957+++ b/arch/x86/kernel/reboot.c
26958@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
26959
26960 void __noreturn machine_real_restart(unsigned int type)
26961 {
26962+
26963+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
26964+ struct desc_struct *gdt;
26965+#endif
26966+
26967 local_irq_disable();
26968
26969 /*
26970@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
26971
26972 /* Jump to the identity-mapped low memory code */
26973 #ifdef CONFIG_X86_32
26974- asm volatile("jmpl *%0" : :
26975+
26976+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
26977+ gdt = get_cpu_gdt_table(smp_processor_id());
26978+ pax_open_kernel();
26979+#ifdef CONFIG_PAX_MEMORY_UDEREF
26980+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
26981+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
26982+ loadsegment(ds, __KERNEL_DS);
26983+ loadsegment(es, __KERNEL_DS);
26984+ loadsegment(ss, __KERNEL_DS);
26985+#endif
26986+#ifdef CONFIG_PAX_KERNEXEC
26987+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
26988+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
26989+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
26990+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
26991+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
26992+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
26993+#endif
26994+ pax_close_kernel();
26995+#endif
26996+
26997+ asm volatile("ljmpl *%0" : :
26998 "rm" (real_mode_header->machine_real_restart_asm),
26999 "a" (type));
27000 #else
27001@@ -501,7 +528,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27002 * This means that this function can never return; it can misbehave
27003 * by not rebooting properly and hanging.
27004 */
27005-static void native_machine_emergency_restart(void)
27006+static void __noreturn native_machine_emergency_restart(void)
27007 {
27008 int i;
27009 int attempt = 0;
27010@@ -621,13 +648,13 @@ void native_machine_shutdown(void)
27011 #endif
27012 }
27013
27014-static void __machine_emergency_restart(int emergency)
27015+static void __noreturn __machine_emergency_restart(int emergency)
27016 {
27017 reboot_emergency = emergency;
27018 machine_ops.emergency_restart();
27019 }
27020
27021-static void native_machine_restart(char *__unused)
27022+static void __noreturn native_machine_restart(char *__unused)
27023 {
27024 pr_notice("machine restart\n");
27025
27026@@ -636,7 +663,7 @@ static void native_machine_restart(char *__unused)
27027 __machine_emergency_restart(0);
27028 }
27029
27030-static void native_machine_halt(void)
27031+static void __noreturn native_machine_halt(void)
27032 {
27033 /* Stop other cpus and apics */
27034 machine_shutdown();
27035@@ -646,7 +673,7 @@ static void native_machine_halt(void)
27036 stop_this_cpu(NULL);
27037 }
27038
27039-static void native_machine_power_off(void)
27040+static void __noreturn native_machine_power_off(void)
27041 {
27042 if (pm_power_off) {
27043 if (!reboot_force)
27044@@ -655,9 +682,10 @@ static void native_machine_power_off(void)
27045 }
27046 /* A fallback in case there is no PM info available */
27047 tboot_shutdown(TB_SHUTDOWN_HALT);
27048+ unreachable();
27049 }
27050
27051-struct machine_ops machine_ops = {
27052+struct machine_ops machine_ops __read_only = {
27053 .power_off = native_machine_power_off,
27054 .shutdown = native_machine_shutdown,
27055 .emergency_restart = native_machine_emergency_restart,
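
The __read_only annotation on machine_ops (a PaX/grsecurity attribute, not an upstream one) targets a classic exploit pattern: tables of function pointers are prime overwrite targets, and moving them into read-only memory turns an arbitrary-write primitive into a crash rather than a control-flow hijack. A minimal model of the same idea using plain const:

#include <stdio.h>

struct machine_ops_model {
    void (*power_off)(void);
    void (*restart)(char *cmd);
};

static void model_power_off(void) { puts("power off"); }
static void model_restart(char *cmd) { (void)cmd; puts("restart"); }

/* const places the table in .rodata: a stray write faults instead of
 * silently redirecting control flow */
static const struct machine_ops_model ops = {
    .power_off = model_power_off,
    .restart   = model_restart,
};

int main(void)
{
    ops.power_off();    /* dispatches through read-only memory */
    return 0;
}
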
27056diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27057index c8e41e9..64049ef 100644
27058--- a/arch/x86/kernel/reboot_fixups_32.c
27059+++ b/arch/x86/kernel/reboot_fixups_32.c
27060@@ -57,7 +57,7 @@ struct device_fixup {
27061 unsigned int vendor;
27062 unsigned int device;
27063 void (*reboot_fixup)(struct pci_dev *);
27064-};
27065+} __do_const;
27066
27067 /*
27068 * PCI ids solely used for fixups_table go here
27069diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27070index 3fd2c69..a444264 100644
27071--- a/arch/x86/kernel/relocate_kernel_64.S
27072+++ b/arch/x86/kernel/relocate_kernel_64.S
27073@@ -96,8 +96,7 @@ relocate_kernel:
27074
27075 /* jump to identity mapped page */
27076 addq $(identity_mapped - relocate_kernel), %r8
27077- pushq %r8
27078- ret
27079+ jmp *%r8
27080
27081 identity_mapped:
27082 /* set return address to 0 if not preserving context */
27083diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27084index ab4734e..c4ca0eb 100644
27085--- a/arch/x86/kernel/setup.c
27086+++ b/arch/x86/kernel/setup.c
27087@@ -110,6 +110,7 @@
27088 #include <asm/mce.h>
27089 #include <asm/alternative.h>
27090 #include <asm/prom.h>
27091+#include <asm/boot.h>
27092
27093 /*
27094 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27095@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27096 #endif
27097
27098
27099-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27100-__visible unsigned long mmu_cr4_features;
27101+#ifdef CONFIG_X86_64
27102+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27103+#elif defined(CONFIG_X86_PAE)
27104+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27105 #else
27106-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27107+__visible unsigned long mmu_cr4_features __read_only;
27108 #endif
27109
27110+void set_in_cr4(unsigned long mask)
27111+{
27112+ unsigned long cr4 = read_cr4();
27113+
27114+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27115+ return;
27116+
27117+ pax_open_kernel();
27118+ mmu_cr4_features |= mask;
27119+ pax_close_kernel();
27120+
27121+ if (trampoline_cr4_features)
27122+ *trampoline_cr4_features = mmu_cr4_features;
27123+ cr4 |= mask;
27124+ write_cr4(cr4);
27125+}
27126+EXPORT_SYMBOL(set_in_cr4);
27127+
27128+void clear_in_cr4(unsigned long mask)
27129+{
27130+ unsigned long cr4 = read_cr4();
27131+
27132+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27133+ return;
27134+
27135+ pax_open_kernel();
27136+ mmu_cr4_features &= ~mask;
27137+ pax_close_kernel();
27138+
27139+ if (trampoline_cr4_features)
27140+ *trampoline_cr4_features = mmu_cr4_features;
27141+ cr4 &= ~mask;
27142+ write_cr4(cr4);
27143+}
27144+EXPORT_SYMBOL(clear_in_cr4);
27145+
27146 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27147 int bootloader_type, bootloader_version;
27148
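
The helpers added above keep three things in sync: the live CR4 register, the cached mmu_cr4_features (now in read-only memory, writable only inside a pax_open_kernel() window), and the trampoline's copy. A standalone model of the bookkeeping, with write_cr4()/pax_open_kernel() reduced to plain variables and the trampoline sync omitted:

#include <assert.h>
#include <stdint.h>

static uint64_t cr4;              /* stands in for the CR4 register */
static uint64_t mmu_cr4_features; /* the cached, kernel-read-only copy */

static void set_in_cr4_model(uint64_t mask)
{
    if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
        return;                   /* already set and cache in sync */
    mmu_cr4_features |= mask;     /* kernel: under pax_open_kernel() */
    cr4 |= mask;                  /* kernel: write_cr4(cr4) */
}

static void clear_in_cr4_model(uint64_t mask)
{
    if (!(cr4 & mask) && cr4 == mmu_cr4_features)
        return;
    mmu_cr4_features &= ~mask;
    cr4 &= ~mask;
}

int main(void)
{
    uint64_t X86_CR4_SMEP = 1UL << 20;   /* real bit position */

    set_in_cr4_model(X86_CR4_SMEP);
    assert(cr4 == mmu_cr4_features && (cr4 & X86_CR4_SMEP));
    clear_in_cr4_model(X86_CR4_SMEP);
    assert(!(cr4 & X86_CR4_SMEP));
    return 0;
}
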
27149@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27150 * area (640->1Mb) as ram even though it is not.
27151 * take them out.
27152 */
27153- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27154+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27155
27156 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27157 }
27158@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27159 /* called before trim_bios_range() to spare extra sanitize */
27160 static void __init e820_add_kernel_range(void)
27161 {
27162- u64 start = __pa_symbol(_text);
27163+ u64 start = __pa_symbol(ktla_ktva(_text));
27164 u64 size = __pa_symbol(_end) - start;
27165
27166 /*
27167@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27168
27169 void __init setup_arch(char **cmdline_p)
27170 {
27171+#ifdef CONFIG_X86_32
27172+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27173+#else
27174 memblock_reserve(__pa_symbol(_text),
27175 (unsigned long)__bss_stop - (unsigned long)_text);
27176+#endif
27177
27178 early_reserve_initrd();
27179
27180@@ -955,16 +998,16 @@ void __init setup_arch(char **cmdline_p)
27181
27182 if (!boot_params.hdr.root_flags)
27183 root_mountflags &= ~MS_RDONLY;
27184- init_mm.start_code = (unsigned long) _text;
27185- init_mm.end_code = (unsigned long) _etext;
27186+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27187+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27188 init_mm.end_data = (unsigned long) _edata;
27189 init_mm.brk = _brk_end;
27190
27191 mpx_mm_init(&init_mm);
27192
27193- code_resource.start = __pa_symbol(_text);
27194- code_resource.end = __pa_symbol(_etext)-1;
27195- data_resource.start = __pa_symbol(_etext);
27196+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27197+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27198+ data_resource.start = __pa_symbol(_sdata);
27199 data_resource.end = __pa_symbol(_edata)-1;
27200 bss_resource.start = __pa_symbol(__bss_start);
27201 bss_resource.end = __pa_symbol(__bss_stop)-1;
27202diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27203index e4fcb87..9c06c55 100644
27204--- a/arch/x86/kernel/setup_percpu.c
27205+++ b/arch/x86/kernel/setup_percpu.c
27206@@ -21,19 +21,17 @@
27207 #include <asm/cpu.h>
27208 #include <asm/stackprotector.h>
27209
27210-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27211+#ifdef CONFIG_SMP
27212+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27213 EXPORT_PER_CPU_SYMBOL(cpu_number);
27214+#endif
27215
27216-#ifdef CONFIG_X86_64
27217 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27218-#else
27219-#define BOOT_PERCPU_OFFSET 0
27220-#endif
27221
27222 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27223 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27224
27225-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27226+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27227 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27228 };
27229 EXPORT_SYMBOL(__per_cpu_offset);
27230@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27231 {
27232 #ifdef CONFIG_NEED_MULTIPLE_NODES
27233 pg_data_t *last = NULL;
27234- unsigned int cpu;
27235+ int cpu;
27236
27237 for_each_possible_cpu(cpu) {
27238 int node = early_cpu_to_node(cpu);
27239@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27240 {
27241 #ifdef CONFIG_X86_32
27242 struct desc_struct gdt;
27243+ unsigned long base = per_cpu_offset(cpu);
27244
27245- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27246- 0x2 | DESCTYPE_S, 0x8);
27247- gdt.s = 1;
27248+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27249+ 0x83 | DESCTYPE_S, 0xC);
27250 write_gdt_entry(get_cpu_gdt_table(cpu),
27251 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27252 #endif
27253@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27254 /* alrighty, percpu areas up and running */
27255 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27256 for_each_possible_cpu(cpu) {
27257+#ifdef CONFIG_CC_STACKPROTECTOR
27258+#ifdef CONFIG_X86_32
27259+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27260+#endif
27261+#endif
27262 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27263 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27264 per_cpu(cpu_number, cpu) = cpu;
27265@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27266 */
27267 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27268 #endif
27269+#ifdef CONFIG_CC_STACKPROTECTOR
27270+#ifdef CONFIG_X86_32
27271+ if (!cpu)
27272+ per_cpu(stack_canary.canary, cpu) = canary;
27273+#endif
27274+#endif
27275 /*
27276 * Up to this point, the boot CPU has been using .init.data
27277 * area. Reload any changed state for the boot CPU.
27278diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27279index ed37a76..39f936e 100644
27280--- a/arch/x86/kernel/signal.c
27281+++ b/arch/x86/kernel/signal.c
27282@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27283 * Align the stack pointer according to the i386 ABI,
27284 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27285 */
27286- sp = ((sp + 4) & -16ul) - 4;
27287+ sp = ((sp - 12) & -16ul) - 4;
27288 #else /* !CONFIG_X86_32 */
27289 sp = round_down(sp, 16) - 8;
27290 #endif
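
Both alignment expressions satisfy the ABI rule stated in the comment, since a handler is entered as if by call and esp points at the return address. The difference is that the old form could leave the frame at exactly the incoming sp when (sp + 4) was already 16-byte aligned, while the patched form always lands strictly below it. A quick standalone check of both properties:

#include <stdio.h>

int main(void)
{
    for (unsigned long sp = 0xbffff000UL; sp < 0xbffff010UL; sp++) {
        unsigned long old_sp = ((sp + 4) & -16UL) - 4;
        unsigned long new_sp = ((sp - 12) & -16UL) - 4;

        /* both keep the ABI rule: ((sp + 4) & 15) == 0 at entry */
        if (((old_sp + 4) & 15) || ((new_sp + 4) & 15))
            puts("ABI violation");
        /* the patched form also lands strictly below the incoming sp */
        if (new_sp >= sp)
            puts("no cushion");
    }
    puts("ok");
    return 0;
}
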
27291@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27292 }
27293
27294 if (current->mm->context.vdso)
27295- restorer = current->mm->context.vdso +
27296- selected_vdso32->sym___kernel_sigreturn;
27297+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27298 else
27299- restorer = &frame->retcode;
27300+ restorer = (void __user *)&frame->retcode;
27301 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27302 restorer = ksig->ka.sa.sa_restorer;
27303
27304@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27305 * reasons and because gdb uses it as a signature to notice
27306 * signal handler stack frames.
27307 */
27308- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27309+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27310
27311 if (err)
27312 return -EFAULT;
27313@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27314 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27315
27316 /* Set up to return from userspace. */
27317- restorer = current->mm->context.vdso +
27318- selected_vdso32->sym___kernel_rt_sigreturn;
27319+ if (current->mm->context.vdso)
27320+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27321+ else
27322+ restorer = (void __user *)&frame->retcode;
27323 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27324 restorer = ksig->ka.sa.sa_restorer;
27325 put_user_ex(restorer, &frame->pretcode);
27326@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27327 * reasons and because gdb uses it as a signature to notice
27328 * signal handler stack frames.
27329 */
27330- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27331+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27332 } put_user_catch(err);
27333
27334 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27335@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27336 {
27337 int usig = signr_convert(ksig->sig);
27338 sigset_t *set = sigmask_to_save();
27339- compat_sigset_t *cset = (compat_sigset_t *) set;
27340+ sigset_t sigcopy;
27341+ compat_sigset_t *cset;
27342+
27343+ sigcopy = *set;
27344+
27345+ cset = (compat_sigset_t *) &sigcopy;
27346
27347 /* Set up the stack frame */
27348 if (is_ia32_frame()) {
27349@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27350 } else if (is_x32_frame()) {
27351 return x32_setup_rt_frame(ksig, cset, regs);
27352 } else {
27353- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27354+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27355 }
27356 }
27357
27358diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27359index be8e1bd..a3d93fa 100644
27360--- a/arch/x86/kernel/smp.c
27361+++ b/arch/x86/kernel/smp.c
27362@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27363
27364 __setup("nonmi_ipi", nonmi_ipi_setup);
27365
27366-struct smp_ops smp_ops = {
27367+struct smp_ops smp_ops __read_only = {
27368 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27369 .smp_prepare_cpus = native_smp_prepare_cpus,
27370 .smp_cpus_done = native_smp_cpus_done,
27371diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27372index 6d7022c..4feb6be 100644
27373--- a/arch/x86/kernel/smpboot.c
27374+++ b/arch/x86/kernel/smpboot.c
27375@@ -194,14 +194,17 @@ static void notrace start_secondary(void *unused)
27376
27377 enable_start_cpu0 = 0;
27378
27379-#ifdef CONFIG_X86_32
27380+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27381+ barrier();
27382+
27383 /* switch away from the initial page table */
27384+#ifdef CONFIG_PAX_PER_CPU_PGD
27385+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27386+#else
27387 load_cr3(swapper_pg_dir);
27388+#endif
27389 __flush_tlb_all();
27390-#endif
27391
27392- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27393- barrier();
27394 /*
27395 * Check TSC synchronization with the BP:
27396 */
27397@@ -765,8 +768,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27398 alternatives_enable_smp();
27399
27400 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27401- (THREAD_SIZE + task_stack_page(idle))) - 1);
27402+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27403 per_cpu(current_task, cpu) = idle;
27404+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27405
27406 #ifdef CONFIG_X86_32
27407 /* Stack for startup_32 can be just as for start_secondary onwards */
27408@@ -775,10 +779,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27409 clear_tsk_thread_flag(idle, TIF_FORK);
27410 initial_gs = per_cpu_offset(cpu);
27411 #endif
27412- per_cpu(kernel_stack, cpu) =
27413- (unsigned long)task_stack_page(idle) -
27414- KERNEL_STACK_OFFSET + THREAD_SIZE;
27415+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27416+ pax_open_kernel();
27417 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27418+ pax_close_kernel();
27419 initial_code = (unsigned long)start_secondary;
27420 stack_start = idle->thread.sp;
27421
27422@@ -918,6 +922,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27423 /* the FPU context is blank, nobody can own it */
27424 __cpu_disable_lazy_restore(cpu);
27425
27426+#ifdef CONFIG_PAX_PER_CPU_PGD
27427+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27428+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27429+ KERNEL_PGD_PTRS);
27430+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27431+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27432+ KERNEL_PGD_PTRS);
27433+#endif
27434+
27435 err = do_boot_cpu(apicid, cpu, tidle);
27436 if (err) {
27437 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
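
The PAX_PER_CPU_PGD hunk above seeds the kernel half of a hotplugged CPU's private page directories from swapper_pg_dir before the CPU is booted; clone_pgd_range() in the real kernel is essentially a memcpy of pgd slots. A standalone model (the boundary constant is derived from the 3.19-era x86_64 PAGE_OFFSET; taken as an assumption here):

#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef uint64_t pgd_t;

#define PTRS_PER_PGD        512
/* pgd_index(PAGE_OFFSET) with PAGE_OFFSET = 0xffff880000000000 */
#define KERNEL_PGD_BOUNDARY ((0xffff880000000000UL >> 39) & (PTRS_PER_PGD - 1))
#define KERNEL_PGD_PTRS     (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

static pgd_t swapper_pg_dir[PTRS_PER_PGD];
static pgd_t cpu_pgd[PTRS_PER_PGD];

/* clone_pgd_range() is likewise a memcpy of pgd slots in the kernel */
static void seed_kernel_half(pgd_t *dst, const pgd_t *src)
{
    memcpy(dst + KERNEL_PGD_BOUNDARY, src + KERNEL_PGD_BOUNDARY,
           KERNEL_PGD_PTRS * sizeof(pgd_t));
}

int main(void)
{
    swapper_pg_dir[KERNEL_PGD_BOUNDARY] = 0xdead;
    seed_kernel_half(cpu_pgd, swapper_pg_dir);
    assert(cpu_pgd[KERNEL_PGD_BOUNDARY] == 0xdead);
    assert(cpu_pgd[0] == 0);    /* the user half stays private */
    return 0;
}
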
27438diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27439index 9b4d51d..5d28b58 100644
27440--- a/arch/x86/kernel/step.c
27441+++ b/arch/x86/kernel/step.c
27442@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27443 struct desc_struct *desc;
27444 unsigned long base;
27445
27446- seg &= ~7UL;
27447+ seg >>= 3;
27448
27449 mutex_lock(&child->mm->context.lock);
27450- if (unlikely((seg >> 3) >= child->mm->context.size))
27451+ if (unlikely(seg >= child->mm->context.size))
27452 addr = -1L; /* bogus selector, access would fault */
27453 else {
27454 desc = child->mm->context.ldt + seg;
27455@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27456 addr += base;
27457 }
27458 mutex_unlock(&child->mm->context.lock);
27459- }
27460+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27461+ addr = ktla_ktva(addr);
27462
27463 return addr;
27464 }
27465@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27466 unsigned char opcode[15];
27467 unsigned long addr = convert_ip_to_linear(child, regs);
27468
27469+ if (addr == -EINVAL)
27470+ return 0;
27471+
27472 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27473 for (i = 0; i < copied; i++) {
27474 switch (opcode[i]) {
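
The step.c change above is a units fix in selector decoding: an x86 segment selector packs a descriptor-table index in bits 15..3, a table indicator in bit 2, and the RPL in bits 1..0. The old code kept a byte offset into the table (seg &= ~7), while the patched code wants an array index (seg >>= 3) because context.ldt is indexed as an array of desc_struct. A quick standalone illustration:

#include <assert.h>

struct selector {
    unsigned index;   /* descriptor index   */
    unsigned ti;      /* 0 = GDT, 1 = LDT   */
    unsigned rpl;     /* requested privilege */
};

static struct selector decode(unsigned short sel)
{
    return (struct selector){ sel >> 3, (sel >> 2) & 1, sel & 3 };
}

int main(void)
{
    struct selector s = decode(0x2b);   /* __USER_DS on x86_64 */
    assert(s.index == 5 && s.ti == 0 && s.rpl == 3);
    return 0;
}
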
27475diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27476new file mode 100644
27477index 0000000..5877189
27478--- /dev/null
27479+++ b/arch/x86/kernel/sys_i386_32.c
27480@@ -0,0 +1,189 @@
27481+/*
27482+ * This file contains various random system calls that
27483+ * have a non-standard calling sequence on the Linux/i386
27484+ * platform.
27485+ */
27486+
27487+#include <linux/errno.h>
27488+#include <linux/sched.h>
27489+#include <linux/mm.h>
27490+#include <linux/fs.h>
27491+#include <linux/smp.h>
27492+#include <linux/sem.h>
27493+#include <linux/msg.h>
27494+#include <linux/shm.h>
27495+#include <linux/stat.h>
27496+#include <linux/syscalls.h>
27497+#include <linux/mman.h>
27498+#include <linux/file.h>
27499+#include <linux/utsname.h>
27500+#include <linux/ipc.h>
27501+#include <linux/elf.h>
27502+
27503+#include <linux/uaccess.h>
27504+#include <linux/unistd.h>
27505+
27506+#include <asm/syscalls.h>
27507+
27508+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27509+{
27510+ unsigned long pax_task_size = TASK_SIZE;
27511+
27512+#ifdef CONFIG_PAX_SEGMEXEC
27513+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27514+ pax_task_size = SEGMEXEC_TASK_SIZE;
27515+#endif
27516+
27517+ if (flags & MAP_FIXED)
27518+ if (len > pax_task_size || addr > pax_task_size - len)
27519+ return -EINVAL;
27520+
27521+ return 0;
27522+}
27523+
27524+/*
27525+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27526+ */
27527+static unsigned long get_align_mask(void)
27528+{
27529+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27530+ return 0;
27531+
27532+ if (!(current->flags & PF_RANDOMIZE))
27533+ return 0;
27534+
27535+ return va_align.mask;
27536+}
27537+
27538+unsigned long
27539+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27540+ unsigned long len, unsigned long pgoff, unsigned long flags)
27541+{
27542+ struct mm_struct *mm = current->mm;
27543+ struct vm_area_struct *vma;
27544+ unsigned long pax_task_size = TASK_SIZE;
27545+ struct vm_unmapped_area_info info;
27546+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27547+
27548+#ifdef CONFIG_PAX_SEGMEXEC
27549+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27550+ pax_task_size = SEGMEXEC_TASK_SIZE;
27551+#endif
27552+
27553+ pax_task_size -= PAGE_SIZE;
27554+
27555+ if (len > pax_task_size)
27556+ return -ENOMEM;
27557+
27558+ if (flags & MAP_FIXED)
27559+ return addr;
27560+
27561+#ifdef CONFIG_PAX_RANDMMAP
27562+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27563+#endif
27564+
27565+ if (addr) {
27566+ addr = PAGE_ALIGN(addr);
27567+ if (pax_task_size - len >= addr) {
27568+ vma = find_vma(mm, addr);
27569+ if (check_heap_stack_gap(vma, addr, len, offset))
27570+ return addr;
27571+ }
27572+ }
27573+
27574+ info.flags = 0;
27575+ info.length = len;
27576+ info.align_mask = filp ? get_align_mask() : 0;
27577+ info.align_offset = pgoff << PAGE_SHIFT;
27578+ info.threadstack_offset = offset;
27579+
27580+#ifdef CONFIG_PAX_PAGEEXEC
27581+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27582+ info.low_limit = 0x00110000UL;
27583+ info.high_limit = mm->start_code;
27584+
27585+#ifdef CONFIG_PAX_RANDMMAP
27586+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27587+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27588+#endif
27589+
27590+ if (info.low_limit < info.high_limit) {
27591+ addr = vm_unmapped_area(&info);
27592+ if (!IS_ERR_VALUE(addr))
27593+ return addr;
27594+ }
27595+ } else
27596+#endif
27597+
27598+ info.low_limit = mm->mmap_base;
27599+ info.high_limit = pax_task_size;
27600+
27601+ return vm_unmapped_area(&info);
27602+}
27603+
27604+unsigned long
27605+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27606+ const unsigned long len, const unsigned long pgoff,
27607+ const unsigned long flags)
27608+{
27609+ struct vm_area_struct *vma;
27610+ struct mm_struct *mm = current->mm;
27611+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27612+ struct vm_unmapped_area_info info;
27613+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27614+
27615+#ifdef CONFIG_PAX_SEGMEXEC
27616+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27617+ pax_task_size = SEGMEXEC_TASK_SIZE;
27618+#endif
27619+
27620+ pax_task_size -= PAGE_SIZE;
27621+
27622+ /* requested length too big for entire address space */
27623+ if (len > pax_task_size)
27624+ return -ENOMEM;
27625+
27626+ if (flags & MAP_FIXED)
27627+ return addr;
27628+
27629+#ifdef CONFIG_PAX_PAGEEXEC
27630+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27631+ goto bottomup;
27632+#endif
27633+
27634+#ifdef CONFIG_PAX_RANDMMAP
27635+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27636+#endif
27637+
27638+ /* requesting a specific address */
27639+ if (addr) {
27640+ addr = PAGE_ALIGN(addr);
27641+ if (pax_task_size - len >= addr) {
27642+ vma = find_vma(mm, addr);
27643+ if (check_heap_stack_gap(vma, addr, len, offset))
27644+ return addr;
27645+ }
27646+ }
27647+
27648+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27649+ info.length = len;
27650+ info.low_limit = PAGE_SIZE;
27651+ info.high_limit = mm->mmap_base;
27652+ info.align_mask = filp ? get_align_mask() : 0;
27653+ info.align_offset = pgoff << PAGE_SHIFT;
27654+ info.threadstack_offset = offset;
27655+
27656+ addr = vm_unmapped_area(&info);
27657+ if (!(addr & ~PAGE_MASK))
27658+ return addr;
27659+ VM_BUG_ON(addr != -ENOMEM);
27660+
27661+bottomup:
27662+ /*
27663+ * A failed mmap() very likely causes application failure,
27664+ * so fall back to the bottom-up function here. This scenario
27665+ * can happen with large stack limits and large mmap()
27666+ * allocations.
27667+ */
27668+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27669+}
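
Throughout the new file, check_heap_stack_gap() replaces the upstream "!vma || addr + len <= vma->vm_start" test. The real helper is grsecurity-specific, enforcing a guard gap below stack-like mappings plus the per-thread random slack from gr_rand_threadstack_offset(); the sketch below only models the shape of that check, and the gap size is illustrative:

#include <assert.h>
#include <stdbool.h>

struct vma_model {
    unsigned long start, end;
    bool grows_down;               /* stack-like mapping */
};

#define STACK_GAP (256UL * 4096)   /* assumed guard gap below a stack */

static bool fits_below(const struct vma_model *next,
                       unsigned long addr, unsigned long len,
                       unsigned long rand_off)
{
    if (!next)
        return true;               /* no mapping above us */
    if (next->grows_down)          /* keep the gap plus random slack */
        return addr + len + STACK_GAP + rand_off <= next->start;
    return addr + len <= next->start;   /* ordinary neighbour */
}

int main(void)
{
    struct vma_model stack = { 0x7fff0000UL, 0x80000000UL, true };

    assert(fits_below(&stack, 0x70000000UL, 4096, 0));
    assert(!fits_below(&stack, 0x7ffe0000UL, 4096, 0));
    return 0;
}
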
27670diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27671index 30277e2..5664a29 100644
27672--- a/arch/x86/kernel/sys_x86_64.c
27673+++ b/arch/x86/kernel/sys_x86_64.c
27674@@ -81,8 +81,8 @@ out:
27675 return error;
27676 }
27677
27678-static void find_start_end(unsigned long flags, unsigned long *begin,
27679- unsigned long *end)
27680+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27681+ unsigned long *begin, unsigned long *end)
27682 {
27683 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27684 unsigned long new_begin;
27685@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27686 *begin = new_begin;
27687 }
27688 } else {
27689- *begin = current->mm->mmap_legacy_base;
27690+ *begin = mm->mmap_legacy_base;
27691 *end = TASK_SIZE;
27692 }
27693 }
27694@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27695 struct vm_area_struct *vma;
27696 struct vm_unmapped_area_info info;
27697 unsigned long begin, end;
27698+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27699
27700 if (flags & MAP_FIXED)
27701 return addr;
27702
27703- find_start_end(flags, &begin, &end);
27704+ find_start_end(mm, flags, &begin, &end);
27705
27706 if (len > end)
27707 return -ENOMEM;
27708
27709+#ifdef CONFIG_PAX_RANDMMAP
27710+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27711+#endif
27712+
27713 if (addr) {
27714 addr = PAGE_ALIGN(addr);
27715 vma = find_vma(mm, addr);
27716- if (end - len >= addr &&
27717- (!vma || addr + len <= vma->vm_start))
27718+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27719 return addr;
27720 }
27721
27722@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27723 info.high_limit = end;
27724 info.align_mask = filp ? get_align_mask() : 0;
27725 info.align_offset = pgoff << PAGE_SHIFT;
27726+ info.threadstack_offset = offset;
27727 return vm_unmapped_area(&info);
27728 }
27729
27730@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27731 struct mm_struct *mm = current->mm;
27732 unsigned long addr = addr0;
27733 struct vm_unmapped_area_info info;
27734+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27735
27736 /* requested length too big for entire address space */
27737 if (len > TASK_SIZE)
27738@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27739 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27740 goto bottomup;
27741
27742+#ifdef CONFIG_PAX_RANDMMAP
27743+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27744+#endif
27745+
27746 /* requesting a specific address */
27747 if (addr) {
27748 addr = PAGE_ALIGN(addr);
27749 vma = find_vma(mm, addr);
27750- if (TASK_SIZE - len >= addr &&
27751- (!vma || addr + len <= vma->vm_start))
27752+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27753 return addr;
27754 }
27755
27756@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27757 info.high_limit = mm->mmap_base;
27758 info.align_mask = filp ? get_align_mask() : 0;
27759 info.align_offset = pgoff << PAGE_SHIFT;
27760+ info.threadstack_offset = offset;
27761 addr = vm_unmapped_area(&info);
27762 if (!(addr & ~PAGE_MASK))
27763 return addr;
27764diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27765index 91a4496..bb87552 100644
27766--- a/arch/x86/kernel/tboot.c
27767+++ b/arch/x86/kernel/tboot.c
27768@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27769
27770 void tboot_shutdown(u32 shutdown_type)
27771 {
27772- void (*shutdown)(void);
27773+ void (* __noreturn shutdown)(void);
27774
27775 if (!tboot_enabled())
27776 return;
27777@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27778
27779 switch_to_tboot_pt();
27780
27781- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27782+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27783 shutdown();
27784
27785 /* should not reach here */
27786@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27787 return -ENODEV;
27788 }
27789
27790-static atomic_t ap_wfs_count;
27791+static atomic_unchecked_t ap_wfs_count;
27792
27793 static int tboot_wait_for_aps(int num_aps)
27794 {
27795@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27796 {
27797 switch (action) {
27798 case CPU_DYING:
27799- atomic_inc(&ap_wfs_count);
27800+ atomic_inc_unchecked(&ap_wfs_count);
27801 if (num_online_cpus() == 1)
27802- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27803+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27804 return NOTIFY_BAD;
27805 break;
27806 }
27807@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27808
27809 tboot_create_trampoline();
27810
27811- atomic_set(&ap_wfs_count, 0);
27812+ atomic_set_unchecked(&ap_wfs_count, 0);
27813 register_hotcpu_notifier(&tboot_cpu_notifier);
27814
27815 #ifdef CONFIG_DEBUG_FS
27816diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27817index 25adc0e..1df4349 100644
27818--- a/arch/x86/kernel/time.c
27819+++ b/arch/x86/kernel/time.c
27820@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27821 {
27822 unsigned long pc = instruction_pointer(regs);
27823
27824- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27825+ if (!user_mode(regs) && in_lock_functions(pc)) {
27826 #ifdef CONFIG_FRAME_POINTER
27827- return *(unsigned long *)(regs->bp + sizeof(long));
27828+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27829 #else
27830 unsigned long *sp =
27831 (unsigned long *)kernel_stack_pointer(regs);
27832@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27833 * or above a saved flags. Eflags has bits 22-31 zero,
27834 * kernel addresses don't.
27835 */
27836+
27837+#ifdef CONFIG_PAX_KERNEXEC
27838+ return ktla_ktva(sp[0]);
27839+#else
27840 if (sp[0] >> 22)
27841 return sp[0];
27842 if (sp[1] >> 22)
27843 return sp[1];
27844 #endif
27845+
27846+#endif
27847 }
27848 return pc;
27849 }
27850diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27851index 7fc5e84..c6e445a 100644
27852--- a/arch/x86/kernel/tls.c
27853+++ b/arch/x86/kernel/tls.c
27854@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27855 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27856 return -EINVAL;
27857
27858+#ifdef CONFIG_PAX_SEGMEXEC
27859+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27860+ return -EINVAL;
27861+#endif
27862+
27863 set_tls_desc(p, idx, &info, 1);
27864
27865 return 0;
27866@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27867
27868 if (kbuf)
27869 info = kbuf;
27870- else if (__copy_from_user(infobuf, ubuf, count))
27871+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27872 return -EFAULT;
27873 else
27874 info = infobuf;
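
The tls.c change above is the classic "bound the copy by the destination, not the caller" fix: regset_tls_set() previously trusted count when filling a fixed-size infobuf. A minimal userspace analogue of the corrected pattern:

#include <errno.h>
#include <string.h>

static char infobuf[64];

/* reject before copying anything, bounded by the destination */
static int copy_in(const void *src, size_t count)
{
    if (count > sizeof infobuf)
        return -EFAULT;
    memcpy(infobuf, src, count);   /* stands in for __copy_from_user() */
    return 0;
}

int main(void)
{
    char big[128] = { 0 };

    return copy_in(big, sizeof big) == -EFAULT ? 0 : 1;
}
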
27875diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27876index 1c113db..287b42e 100644
27877--- a/arch/x86/kernel/tracepoint.c
27878+++ b/arch/x86/kernel/tracepoint.c
27879@@ -9,11 +9,11 @@
27880 #include <linux/atomic.h>
27881
27882 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27883-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27884+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27885 (unsigned long) trace_idt_table };
27886
27887 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27888-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27889+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27890
27891 static int trace_irq_vector_refcount;
27892 static DEFINE_MUTEX(irq_vector_mutex);
27893diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27894index 88900e2..aa4149d 100644
27895--- a/arch/x86/kernel/traps.c
27896+++ b/arch/x86/kernel/traps.c
27897@@ -68,7 +68,7 @@
27898 #include <asm/proto.h>
27899
27900 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27901-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27902+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27903 #else
27904 #include <asm/processor-flags.h>
27905 #include <asm/setup.h>
27906@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27907 #endif
27908
27909 /* Must be page-aligned because the real IDT is used in a fixmap. */
27910-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27911+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27912
27913 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27914 EXPORT_SYMBOL_GPL(used_vectors);
27915@@ -109,11 +109,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
27916 }
27917
27918 static nokprobe_inline int
27919-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27920+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27921 struct pt_regs *regs, long error_code)
27922 {
27923 #ifdef CONFIG_X86_32
27924- if (regs->flags & X86_VM_MASK) {
27925+ if (v8086_mode(regs)) {
27926 /*
27927 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27928 * On nmi (interrupt 2), do_trap should not be called.
27929@@ -126,12 +126,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27930 return -1;
27931 }
27932 #endif
27933- if (!user_mode(regs)) {
27934+ if (!user_mode_novm(regs)) {
27935 if (!fixup_exception(regs)) {
27936 tsk->thread.error_code = error_code;
27937 tsk->thread.trap_nr = trapnr;
27938+
27939+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27940+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
27941+ str = "PAX: suspicious stack segment fault";
27942+#endif
27943+
27944 die(str, regs, error_code);
27945 }
27946+
27947+#ifdef CONFIG_PAX_REFCOUNT
27948+ if (trapnr == X86_TRAP_OF)
27949+ pax_report_refcount_overflow(regs);
27950+#endif
27951+
27952 return 0;
27953 }
27954
27955@@ -170,7 +182,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
27956 }
27957
27958 static void
27959-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27960+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
27961 long error_code, siginfo_t *info)
27962 {
27963 struct task_struct *tsk = current;
27964@@ -194,7 +206,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27965 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
27966 printk_ratelimit()) {
27967 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
27968- tsk->comm, tsk->pid, str,
27969+ tsk->comm, task_pid_nr(tsk), str,
27970 regs->ip, regs->sp, error_code);
27971 print_vma_addr(" in ", regs->ip);
27972 pr_cont("\n");
27973@@ -274,6 +286,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
27974 tsk->thread.error_code = error_code;
27975 tsk->thread.trap_nr = X86_TRAP_DF;
27976
27977+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
27978+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
27979+ die("grsec: kernel stack overflow detected", regs, error_code);
27980+#endif
27981+
27982 #ifdef CONFIG_DOUBLEFAULT
27983 df_debug(regs, error_code);
27984 #endif
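
The GRKERNSEC_KSTACKOVERFLOW check above is a heuristic: at double-fault time, if the faulting stack pointer has descended to within a page of the lowest address of the task's stack allocation (tsk->stack), the fault is attributed to a kernel stack overflow. Note the deliberate use of unsigned subtraction, which wraps to a huge value for a healthy in-range sp. A standalone model:

#include <assert.h>

#define PAGE_SIZE 4096UL

/* stack_base is the lowest address of the task's stack allocation */
static int looks_like_stack_overflow(unsigned long stack_base,
                                     unsigned long sp)
{
    /* unsigned wrap defeats the test for sp well above the base */
    return stack_base - sp <= PAGE_SIZE;
}

int main(void)
{
    unsigned long base = 0x100000UL;

    assert(looks_like_stack_overflow(base, base - 64));    /* ran off the end */
    assert(!looks_like_stack_overflow(base, base + 8192)); /* healthy sp */
    return 0;
}
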
27985@@ -379,7 +396,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
27986 conditional_sti(regs);
27987
27988 #ifdef CONFIG_X86_32
27989- if (regs->flags & X86_VM_MASK) {
27990+ if (v8086_mode(regs)) {
27991 local_irq_enable();
27992 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
27993 goto exit;
27994@@ -387,18 +404,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
27995 #endif
27996
27997 tsk = current;
27998- if (!user_mode(regs)) {
27999+ if (!user_mode_novm(regs)) {
28000 if (fixup_exception(regs))
28001 goto exit;
28002
28003 tsk->thread.error_code = error_code;
28004 tsk->thread.trap_nr = X86_TRAP_GP;
28005 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28006- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28007+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28008+
28009+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28010+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28011+ die("PAX: suspicious general protection fault", regs, error_code);
28012+ else
28013+#endif
28014+
28015 die("general protection fault", regs, error_code);
28016+ }
28017 goto exit;
28018 }
28019
28020+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28021+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28022+ struct mm_struct *mm = tsk->mm;
28023+ unsigned long limit;
28024+
28025+ down_write(&mm->mmap_sem);
28026+ limit = mm->context.user_cs_limit;
28027+ if (limit < TASK_SIZE) {
28028+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28029+ up_write(&mm->mmap_sem);
28030+ return;
28031+ }
28032+ up_write(&mm->mmap_sem);
28033+ }
28034+#endif
28035+
28036 tsk->thread.error_code = error_code;
28037 tsk->thread.trap_nr = X86_TRAP_GP;
28038
28039@@ -510,13 +551,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28040 container_of(task_pt_regs(current),
28041 struct bad_iret_stack, regs);
28042
28043+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28044+ new_stack = s;
28045+
28046 /* Copy the IRET target to the new stack. */
28047 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28048
28049 /* Copy the remainder of the stack from the current stack. */
28050 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28051
28052- BUG_ON(!user_mode_vm(&new_stack->regs));
28053+ BUG_ON(!user_mode(&new_stack->regs));
28054 return new_stack;
28055 }
28056 NOKPROBE_SYMBOL(fixup_bad_iret);
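
The XOR test added to fixup_bad_iret() relies on THREAD_SIZE being a power of two and stacks being THREAD_SIZE-aligned: (a ^ b) < THREAD_SIZE holds exactly when a and b agree on every bit above the offset bits, i.e. when both addresses fall inside the same aligned stack. A quick standalone check (THREAD_SIZE assumed 16KB as on x86_64):

#include <assert.h>

#define THREAD_SIZE (16UL * 1024)   /* power of two, as on x86_64 */

static int same_stack(unsigned long a, unsigned long b)
{
    return (a ^ b) < THREAD_SIZE;
}

int main(void)
{
    unsigned long base = 0xffff880012340000UL & ~(THREAD_SIZE - 1);

    assert(same_stack(base + 0x10, base + THREAD_SIZE - 8));
    assert(!same_stack(base, base + THREAD_SIZE));   /* next stack over */
    return 0;
}
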
28057@@ -602,7 +646,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28058 /* It's safe to allow irq's after DR6 has been saved */
28059 preempt_conditional_sti(regs);
28060
28061- if (regs->flags & X86_VM_MASK) {
28062+ if (v8086_mode(regs)) {
28063 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28064 X86_TRAP_DB);
28065 preempt_conditional_cli(regs);
28066@@ -617,7 +661,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28067 * We already checked v86 mode above, so we can check for kernel mode
28068 * by just checking the CPL of CS.
28069 */
28070- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28071+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28072 tsk->thread.debugreg6 &= ~DR_STEP;
28073 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28074 regs->flags &= ~X86_EFLAGS_TF;
28075@@ -650,7 +694,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28076 return;
28077 conditional_sti(regs);
28078
28079- if (!user_mode_vm(regs))
28080+ if (!user_mode(regs))
28081 {
28082 if (!fixup_exception(regs)) {
28083 task->thread.error_code = error_code;
28084diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28085index 5054497..139f8f8 100644
28086--- a/arch/x86/kernel/tsc.c
28087+++ b/arch/x86/kernel/tsc.c
28088@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28089 */
28090 smp_wmb();
28091
28092- ACCESS_ONCE(c2n->head) = data;
28093+ ACCESS_ONCE_RW(c2n->head) = data;
28094 }
28095
28096 /*
28097diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28098index 8b96a94..792b410 100644
28099--- a/arch/x86/kernel/uprobes.c
28100+++ b/arch/x86/kernel/uprobes.c
28101@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28102 int ret = NOTIFY_DONE;
28103
28104 /* We are only interested in userspace traps */
28105- if (regs && !user_mode_vm(regs))
28106+ if (regs && !user_mode(regs))
28107 return NOTIFY_DONE;
28108
28109 switch (val) {
28110@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28111
28112 if (nleft != rasize) {
28113 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28114- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28115+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28116
28117 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28118 }
28119diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28120index b9242ba..50c5edd 100644
28121--- a/arch/x86/kernel/verify_cpu.S
28122+++ b/arch/x86/kernel/verify_cpu.S
28123@@ -20,6 +20,7 @@
28124 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28125 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28126 * arch/x86/kernel/head_32.S: processor startup
28127+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28128 *
28129 * verify_cpu, returns the status of longmode and SSE in register %eax.
28130 * 0: Success 1: Failure
28131diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28132index e8edcf5..27f9344 100644
28133--- a/arch/x86/kernel/vm86_32.c
28134+++ b/arch/x86/kernel/vm86_32.c
28135@@ -44,6 +44,7 @@
28136 #include <linux/ptrace.h>
28137 #include <linux/audit.h>
28138 #include <linux/stddef.h>
28139+#include <linux/grsecurity.h>
28140
28141 #include <asm/uaccess.h>
28142 #include <asm/io.h>
28143@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28144 do_exit(SIGSEGV);
28145 }
28146
28147- tss = &per_cpu(init_tss, get_cpu());
28148+ tss = init_tss + get_cpu();
28149 current->thread.sp0 = current->thread.saved_sp0;
28150 current->thread.sysenter_cs = __KERNEL_CS;
28151 load_sp0(tss, &current->thread);
28152@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28153
28154 if (tsk->thread.saved_sp0)
28155 return -EPERM;
28156+
28157+#ifdef CONFIG_GRKERNSEC_VM86
28158+ if (!capable(CAP_SYS_RAWIO)) {
28159+ gr_handle_vm86();
28160+ return -EPERM;
28161+ }
28162+#endif
28163+
28164 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28165 offsetof(struct kernel_vm86_struct, vm86plus) -
28166 sizeof(info.regs));
28167@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28168 int tmp;
28169 struct vm86plus_struct __user *v86;
28170
28171+#ifdef CONFIG_GRKERNSEC_VM86
28172+ if (!capable(CAP_SYS_RAWIO)) {
28173+ gr_handle_vm86();
28174+ return -EPERM;
28175+ }
28176+#endif
28177+
28178 tsk = current;
28179 switch (cmd) {
28180 case VM86_REQUEST_IRQ:
28181@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28182 tsk->thread.saved_fs = info->regs32->fs;
28183 tsk->thread.saved_gs = get_user_gs(info->regs32);
28184
28185- tss = &per_cpu(init_tss, get_cpu());
28186+ tss = init_tss + get_cpu();
28187 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28188 if (cpu_has_sep)
28189 tsk->thread.sysenter_cs = 0;
28190@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28191 goto cannot_handle;
28192 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28193 goto cannot_handle;
28194- intr_ptr = (unsigned long __user *) (i << 2);
28195+ intr_ptr = (__force unsigned long __user *) (i << 2);
28196 if (get_user(segoffs, intr_ptr))
28197 goto cannot_handle;
28198 if ((segoffs >> 16) == BIOSSEG)
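
A userspace probe for the GRKERNSEC_VM86 gate added above: without CAP_SYS_RAWIO, the vm86(2)/vm86old(2) entry points now fail with EPERM before touching any of the caller's state. The sketch assumes an x86-32 build (these syscalls only exist there) and a kernel with the option enabled:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/vm86.h>

int main(void)
{
    struct vm86plus_struct v86;

    memset(&v86, 0, sizeof(v86));
    if (syscall(SYS_vm86, VM86_ENTER, &v86) < 0)
        printf("vm86: %s\n", strerror(errno));   /* EPERM when gated */
    return 0;
}
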
28199diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28200index 00bf300..129df8e 100644
28201--- a/arch/x86/kernel/vmlinux.lds.S
28202+++ b/arch/x86/kernel/vmlinux.lds.S
28203@@ -26,6 +26,13 @@
28204 #include <asm/page_types.h>
28205 #include <asm/cache.h>
28206 #include <asm/boot.h>
28207+#include <asm/segment.h>
28208+
28209+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28210+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28211+#else
28212+#define __KERNEL_TEXT_OFFSET 0
28213+#endif
28214
28215 #undef i386 /* in case the preprocessor is a 32bit one */
28216
28217@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28218
28219 PHDRS {
28220 text PT_LOAD FLAGS(5); /* R_E */
28221+#ifdef CONFIG_X86_32
28222+ module PT_LOAD FLAGS(5); /* R_E */
28223+#endif
28224+#ifdef CONFIG_XEN
28225+ rodata PT_LOAD FLAGS(5); /* R_E */
28226+#else
28227+ rodata PT_LOAD FLAGS(4); /* R__ */
28228+#endif
28229 data PT_LOAD FLAGS(6); /* RW_ */
28230-#ifdef CONFIG_X86_64
28231+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28232 #ifdef CONFIG_SMP
28233 percpu PT_LOAD FLAGS(6); /* RW_ */
28234 #endif
28235+ text.init PT_LOAD FLAGS(5); /* R_E */
28236+ text.exit PT_LOAD FLAGS(5); /* R_E */
28237 init PT_LOAD FLAGS(7); /* RWE */
28238-#endif
28239 note PT_NOTE FLAGS(0); /* ___ */
28240 }
28241
28242 SECTIONS
28243 {
28244 #ifdef CONFIG_X86_32
28245- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28246- phys_startup_32 = startup_32 - LOAD_OFFSET;
28247+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28248 #else
28249- . = __START_KERNEL;
28250- phys_startup_64 = startup_64 - LOAD_OFFSET;
28251+ . = __START_KERNEL;
28252 #endif
28253
28254 /* Text and read-only data */
28255- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28256- _text = .;
28257+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28258 /* bootstrapping code */
28259+#ifdef CONFIG_X86_32
28260+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28261+#else
28262+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28263+#endif
28264+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28265+ _text = .;
28266 HEAD_TEXT
28267 . = ALIGN(8);
28268 _stext = .;
28269@@ -104,13 +124,47 @@ SECTIONS
28270 IRQENTRY_TEXT
28271 *(.fixup)
28272 *(.gnu.warning)
28273- /* End of text section */
28274- _etext = .;
28275 } :text = 0x9090
28276
28277- NOTES :text :note
28278+ . += __KERNEL_TEXT_OFFSET;
28279
28280- EXCEPTION_TABLE(16) :text = 0x9090
28281+#ifdef CONFIG_X86_32
28282+ . = ALIGN(PAGE_SIZE);
28283+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28284+
28285+#ifdef CONFIG_PAX_KERNEXEC
28286+ MODULES_EXEC_VADDR = .;
28287+ BYTE(0)
28288+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28289+ . = ALIGN(HPAGE_SIZE) - 1;
28290+ MODULES_EXEC_END = .;
28291+#endif
28292+
28293+ } :module
28294+#endif
28295+
28296+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28297+ /* End of text section */
28298+ BYTE(0)
28299+ _etext = . - __KERNEL_TEXT_OFFSET;
28300+ }
28301+
28302+#ifdef CONFIG_X86_32
28303+ . = ALIGN(PAGE_SIZE);
28304+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28305+ . = ALIGN(PAGE_SIZE);
28306+ *(.empty_zero_page)
28307+ *(.initial_pg_fixmap)
28308+ *(.initial_pg_pmd)
28309+ *(.initial_page_table)
28310+ *(.swapper_pg_dir)
28311+ } :rodata
28312+#endif
28313+
28314+ . = ALIGN(PAGE_SIZE);
28315+ NOTES :rodata :note
28316+
28317+ EXCEPTION_TABLE(16) :rodata
28318
28319 #if defined(CONFIG_DEBUG_RODATA)
28320 /* .text should occupy whole number of pages */
28321@@ -122,16 +176,20 @@ SECTIONS
28322
28323 /* Data */
28324 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28325+
28326+#ifdef CONFIG_PAX_KERNEXEC
28327+ . = ALIGN(HPAGE_SIZE);
28328+#else
28329+ . = ALIGN(PAGE_SIZE);
28330+#endif
28331+
28332 /* Start of data section */
28333 _sdata = .;
28334
28335 /* init_task */
28336 INIT_TASK_DATA(THREAD_SIZE)
28337
28338-#ifdef CONFIG_X86_32
28339- /* 32 bit has nosave before _edata */
28340 NOSAVE_DATA
28341-#endif
28342
28343 PAGE_ALIGNED_DATA(PAGE_SIZE)
28344
28345@@ -174,12 +232,19 @@ SECTIONS
28346 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28347
28348 /* Init code and data - will be freed after init */
28349- . = ALIGN(PAGE_SIZE);
28350 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28351+ BYTE(0)
28352+
28353+#ifdef CONFIG_PAX_KERNEXEC
28354+ . = ALIGN(HPAGE_SIZE);
28355+#else
28356+ . = ALIGN(PAGE_SIZE);
28357+#endif
28358+
28359 __init_begin = .; /* paired with __init_end */
28360- }
28361+ } :init.begin
28362
28363-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28364+#ifdef CONFIG_SMP
28365 /*
28366 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28367 * output PHDR, so the next output section - .init.text - should
28368@@ -190,12 +255,27 @@ SECTIONS
28369 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28370 #endif
28371
28372- INIT_TEXT_SECTION(PAGE_SIZE)
28373-#ifdef CONFIG_X86_64
28374- :init
28375-#endif
28376+ . = ALIGN(PAGE_SIZE);
28377+ init_begin = .;
28378+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28379+ VMLINUX_SYMBOL(_sinittext) = .;
28380+ INIT_TEXT
28381+ . = ALIGN(PAGE_SIZE);
28382+ } :text.init
28383
28384- INIT_DATA_SECTION(16)
28385+ /*
28386+	 * .exit.text is discarded at runtime, not link time, to deal with
28387+ * references from .altinstructions and .eh_frame
28388+ */
28389+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28390+ EXIT_TEXT
28391+ VMLINUX_SYMBOL(_einittext) = .;
28392+ . = ALIGN(16);
28393+ } :text.exit
28394+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28395+
28396+ . = ALIGN(PAGE_SIZE);
28397+ INIT_DATA_SECTION(16) :init
28398
28399 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28400 __x86_cpu_dev_start = .;
28401@@ -266,19 +346,12 @@ SECTIONS
28402 }
28403
28404 . = ALIGN(8);
28405- /*
28406- * .exit.text is discard at runtime, not link time, to deal with
28407- * references from .altinstructions and .eh_frame
28408- */
28409- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28410- EXIT_TEXT
28411- }
28412
28413 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28414 EXIT_DATA
28415 }
28416
28417-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28418+#ifndef CONFIG_SMP
28419 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28420 #endif
28421
28422@@ -297,16 +370,10 @@ SECTIONS
28423 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28424 __smp_locks = .;
28425 *(.smp_locks)
28426- . = ALIGN(PAGE_SIZE);
28427 __smp_locks_end = .;
28428+ . = ALIGN(PAGE_SIZE);
28429 }
28430
28431-#ifdef CONFIG_X86_64
28432- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28433- NOSAVE_DATA
28434- }
28435-#endif
28436-
28437 /* BSS */
28438 . = ALIGN(PAGE_SIZE);
28439 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28440@@ -322,6 +389,7 @@ SECTIONS
28441 __brk_base = .;
28442 . += 64 * 1024; /* 64k alignment slop space */
28443 *(.brk_reservation) /* areas brk users have reserved */
28444+ . = ALIGN(HPAGE_SIZE);
28445 __brk_limit = .;
28446 }
28447
28448@@ -348,13 +416,12 @@ SECTIONS
28449 * for the boot processor.
28450 */
28451 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28452-INIT_PER_CPU(gdt_page);
28453 INIT_PER_CPU(irq_stack_union);
28454
28455 /*
28456 * Build-time check on the image size:
28457 */
28458-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28459+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28460 "kernel image bigger than KERNEL_IMAGE_SIZE");
28461
28462 #ifdef CONFIG_SMP
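
The vmlinux.lds.S hunks above are the KERNEXEC layout change: .text is emitted __KERNEL_TEXT_OFFSET below its link address so the kernel ends up with a separate executable alias of its text, module space (MODULES_EXEC_VADDR..MODULES_EXEC_END) is carved out on i386, and NOTES plus the exception table move into the read-only :rodata program header. A minimal sketch of the address translation this layout enables; the macro names exist in PaX, but the exact definitions shown here are an illustration, not a quote:

    /* Sketch only: convert between the writable ("linear") alias and the
     * executable alias of kernel text under CONFIG_PAX_KERNEXEC (i386).
     * The real definitions live in PaX's arch/x86 pgtable headers. */
    #define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET) /* linear -> exec */
    #define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET) /* exec -> linear */

The same ktla_ktva() shows up later in this patch (vmx.c's HOST_RIP, insn.c's decoder) wherever code needs the address the CPU actually executes rather than the writable alias.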
28463diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28464index 2dcc6ff..082dc7a 100644
28465--- a/arch/x86/kernel/vsyscall_64.c
28466+++ b/arch/x86/kernel/vsyscall_64.c
28467@@ -38,15 +38,13 @@
28468 #define CREATE_TRACE_POINTS
28469 #include "vsyscall_trace.h"
28470
28471-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28472+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28473
28474 static int __init vsyscall_setup(char *str)
28475 {
28476 if (str) {
28477 if (!strcmp("emulate", str))
28478 vsyscall_mode = EMULATE;
28479- else if (!strcmp("native", str))
28480- vsyscall_mode = NATIVE;
28481 else if (!strcmp("none", str))
28482 vsyscall_mode = NONE;
28483 else
28484@@ -264,8 +262,7 @@ do_ret:
28485 return true;
28486
28487 sigsegv:
28488- force_sig(SIGSEGV, current);
28489- return true;
28490+ do_group_exit(SIGKILL);
28491 }
28492
28493 /*
28494@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28495 static struct vm_area_struct gate_vma = {
28496 .vm_start = VSYSCALL_ADDR,
28497 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28498- .vm_page_prot = PAGE_READONLY_EXEC,
28499- .vm_flags = VM_READ | VM_EXEC,
28500+ .vm_page_prot = PAGE_READONLY,
28501+ .vm_flags = VM_READ,
28502 .vm_ops = &gate_vma_ops,
28503 };
28504
28505@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28506 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28507
28508 if (vsyscall_mode != NONE)
28509- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28510- vsyscall_mode == NATIVE
28511- ? PAGE_KERNEL_VSYSCALL
28512- : PAGE_KERNEL_VVAR);
28513+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28514
28515 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28516 (unsigned long)VSYSCALL_ADDR);
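
With the NATIVE mode removed above, the legacy vsyscall page is never mapped executable: vsyscall=emulate (the default) traps and emulates the fixed entry points, vsyscall=none disables them, and a malformed call now terminates the task via do_group_exit(SIGKILL) instead of a catchable SIGSEGV, so the fixed-address page cannot be probed as a gadget source. Modern userspace is unaffected because it enters the kernel through the vDSO; a quick userspace check (ordinary C, no assumptions about the patch):

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        /* glibc resolves time() via the vDSO, not the fixed vsyscall
         * page, so this behaves identically under vsyscall=emulate and
         * vsyscall=none. Only pre-vDSO static binaries hit emulation. */
        time_t t = time(NULL);
        printf("time() = %ld\n", (long)t);
        return 0;
    }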
28517diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28518index 04068192..4d75aa6 100644
28519--- a/arch/x86/kernel/x8664_ksyms_64.c
28520+++ b/arch/x86/kernel/x8664_ksyms_64.c
28521@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28522 EXPORT_SYMBOL(copy_user_generic_unrolled);
28523 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28524 EXPORT_SYMBOL(__copy_user_nocache);
28525-EXPORT_SYMBOL(_copy_from_user);
28526-EXPORT_SYMBOL(_copy_to_user);
28527
28528 EXPORT_SYMBOL(copy_page);
28529 EXPORT_SYMBOL(clear_page);
28530@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28531 EXPORT_SYMBOL(___preempt_schedule_context);
28532 #endif
28533 #endif
28534+
28535+#ifdef CONFIG_PAX_PER_CPU_PGD
28536+EXPORT_SYMBOL(cpu_pgd);
28537+#endif
28538diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28539index 234b072..b7ab191 100644
28540--- a/arch/x86/kernel/x86_init.c
28541+++ b/arch/x86/kernel/x86_init.c
28542@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28543 static void default_nmi_init(void) { };
28544 static int default_i8042_detect(void) { return 1; };
28545
28546-struct x86_platform_ops x86_platform = {
28547+struct x86_platform_ops x86_platform __read_only = {
28548 .calibrate_tsc = native_calibrate_tsc,
28549 .get_wallclock = mach_get_cmos_time,
28550 .set_wallclock = mach_set_rtc_mmss,
28551@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28552 EXPORT_SYMBOL_GPL(x86_platform);
28553
28554 #if defined(CONFIG_PCI_MSI)
28555-struct x86_msi_ops x86_msi = {
28556+struct x86_msi_ops x86_msi __read_only = {
28557 .setup_msi_irqs = native_setup_msi_irqs,
28558 .compose_msi_msg = native_compose_msi_msg,
28559 .teardown_msi_irq = native_teardown_msi_irq,
28560@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28561 }
28562 #endif
28563
28564-struct x86_io_apic_ops x86_io_apic_ops = {
28565+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28566 .init = native_io_apic_init_mappings,
28567 .read = native_io_apic_read,
28568 .write = native_io_apic_write,
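
Marking x86_platform, x86_msi and x86_io_apic_ops __read_only moves these function-pointer tables into memory that is write-protected after boot, so they cannot be repointed by an attacker. Any legitimate late write must be bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lifts the protection (on x86, as far as this patch shows, by briefly allowing kernel writes to read-only data, e.g. via CR0.WP). A hedged sketch of the pattern; my_setup_msi_irqs is hypothetical:

    /* Sketch, assuming the pax_open_kernel()/pax_close_kernel() pair
     * introduced by this patch; my_setup_msi_irqs is hypothetical. */
    static int my_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);

    static void install_msi_override(void)
    {
        pax_open_kernel();   /* lift write protection on __read_only data */
        *(void **)&x86_msi.setup_msi_irqs = my_setup_msi_irqs;
        pax_close_kernel();  /* restore protection immediately */
    }

Note the *(void **)& cast to strip the read-only qualification for the one sanctioned store; the vmx.c hunks later in this patch use exactly the same idiom on kvm_x86_ops.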
28569diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28570index 0de1fae..298d037 100644
28571--- a/arch/x86/kernel/xsave.c
28572+++ b/arch/x86/kernel/xsave.c
28573@@ -167,18 +167,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28574
28575 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28576 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28577- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28578+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28579
28580 if (!use_xsave())
28581 return err;
28582
28583- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28584+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28585
28586 /*
28587 * Read the xstate_bv which we copied (directly from the cpu or
28588 * from the state in task struct) to the user buffers.
28589 */
28590- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28591+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28592
28593 /*
28594 * For legacy compatible, we always set FP/SSE bits in the bit
28595@@ -193,7 +193,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28596 */
28597 xstate_bv |= XSTATE_FPSSE;
28598
28599- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28600+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28601
28602 return err;
28603 }
28604@@ -202,6 +202,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28605 {
28606 int err;
28607
28608+ buf = (struct xsave_struct __user *)____m(buf);
28609 if (use_xsave())
28610 err = xsave_user(buf);
28611 else if (use_fxsr())
28612@@ -312,6 +313,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28613 */
28614 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28615 {
28616+ buf = (void __user *)____m(buf);
28617 if (use_xsave()) {
28618 if ((unsigned long)buf % 64 || fx_only) {
28619 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28620diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28621index 8a80737..bac4961 100644
28622--- a/arch/x86/kvm/cpuid.c
28623+++ b/arch/x86/kvm/cpuid.c
28624@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28625 struct kvm_cpuid2 *cpuid,
28626 struct kvm_cpuid_entry2 __user *entries)
28627 {
28628- int r;
28629+ int r, i;
28630
28631 r = -E2BIG;
28632 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28633 goto out;
28634 r = -EFAULT;
28635- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28636- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28637+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28638 goto out;
28639+ for (i = 0; i < cpuid->nent; ++i) {
28640+ struct kvm_cpuid_entry2 cpuid_entry;
28641+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28642+ goto out;
28643+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28644+ }
28645 vcpu->arch.cpuid_nent = cpuid->nent;
28646 kvm_apic_set_version(vcpu);
28647 kvm_x86_ops->cpuid_update(vcpu);
28648@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28649 struct kvm_cpuid2 *cpuid,
28650 struct kvm_cpuid_entry2 __user *entries)
28651 {
28652- int r;
28653+ int r, i;
28654
28655 r = -E2BIG;
28656 if (cpuid->nent < vcpu->arch.cpuid_nent)
28657 goto out;
28658 r = -EFAULT;
28659- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28660- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28661+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28662 goto out;
28663+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28664+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28665+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28666+ goto out;
28667+ }
28668 return 0;
28669
28670 out:
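
Instead of a single copy_from_user()/copy_to_user() whose length is nent * sizeof(struct kvm_cpuid_entry2), the two ioctls now validate the whole user range once with access_ok() and then move one fixed-size entry at a time through a stack-local bounce buffer. Every __copy_from_user()/__copy_to_user() therefore has a compile-time-constant size, which is the shape PaX's usercopy and size-overflow checking can verify. The idiom in generic form (names hypothetical):

    /* Sketch of the chunked-copy idiom used above. */
    static int copy_entries_in(struct kvm_cpuid_entry2 *dst,
                               const struct kvm_cpuid_entry2 __user *src,
                               unsigned int nent)
    {
        unsigned int i;

        if (!access_ok(VERIFY_READ, src, nent * sizeof(*src)))
            return -EFAULT;
        for (i = 0; i < nent; i++) {
            struct kvm_cpuid_entry2 tmp;    /* fixed, known size */

            if (__copy_from_user(&tmp, src + i, sizeof(tmp)))
                return -EFAULT;
            dst[i] = tmp;
        }
        return 0;
    }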
28671diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28672index de12c1d..4031e2a 100644
28673--- a/arch/x86/kvm/emulate.c
28674+++ b/arch/x86/kvm/emulate.c
28675@@ -3503,7 +3503,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28676 int cr = ctxt->modrm_reg;
28677 u64 efer = 0;
28678
28679- static u64 cr_reserved_bits[] = {
28680+ static const u64 cr_reserved_bits[] = {
28681 0xffffffff00000000ULL,
28682 0, 0, 0, /* CR3 checked later */
28683 CR4_RESERVED_BITS,
28684diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28685index d52dcf0..cec7e84 100644
28686--- a/arch/x86/kvm/lapic.c
28687+++ b/arch/x86/kvm/lapic.c
28688@@ -55,7 +55,7 @@
28689 #define APIC_BUS_CYCLE_NS 1
28690
28691 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28692-#define apic_debug(fmt, arg...)
28693+#define apic_debug(fmt, arg...) do {} while (0)
28694
28695 #define APIC_LVT_NUM 6
28696 /* 14 is the version for Xeon and Pentium 8.4.8*/
28697diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28698index fd49c86..77e1aa0 100644
28699--- a/arch/x86/kvm/paging_tmpl.h
28700+++ b/arch/x86/kvm/paging_tmpl.h
28701@@ -343,7 +343,7 @@ retry_walk:
28702 if (unlikely(kvm_is_error_hva(host_addr)))
28703 goto error;
28704
28705- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28706+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28707 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28708 goto error;
28709 walker->ptep_user[walker->level - 1] = ptep_user;
28710diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28711index 41dd038..de331cf 100644
28712--- a/arch/x86/kvm/svm.c
28713+++ b/arch/x86/kvm/svm.c
28714@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28715 int cpu = raw_smp_processor_id();
28716
28717 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28718+
28719+ pax_open_kernel();
28720 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28721+ pax_close_kernel();
28722+
28723 load_TR_desc();
28724 }
28725
28726@@ -3969,6 +3973,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28727 #endif
28728 #endif
28729
28730+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28731+ __set_fs(current_thread_info()->addr_limit);
28732+#endif
28733+
28734 reload_tss(vcpu);
28735
28736 local_irq_disable();
28737diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28738index d4c58d8..eaf2568 100644
28739--- a/arch/x86/kvm/vmx.c
28740+++ b/arch/x86/kvm/vmx.c
28741@@ -1380,12 +1380,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28742 #endif
28743 }
28744
28745-static void vmcs_clear_bits(unsigned long field, u32 mask)
28746+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28747 {
28748 vmcs_writel(field, vmcs_readl(field) & ~mask);
28749 }
28750
28751-static void vmcs_set_bits(unsigned long field, u32 mask)
28752+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28753 {
28754 vmcs_writel(field, vmcs_readl(field) | mask);
28755 }
28756@@ -1645,7 +1645,11 @@ static void reload_tss(void)
28757 struct desc_struct *descs;
28758
28759 descs = (void *)gdt->address;
28760+
28761+ pax_open_kernel();
28762 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28763+ pax_close_kernel();
28764+
28765 load_TR_desc();
28766 }
28767
28768@@ -1881,6 +1885,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28769 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28770 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28771
28772+#ifdef CONFIG_PAX_PER_CPU_PGD
28773+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28774+#endif
28775+
28776 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28777 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28778 vmx->loaded_vmcs->cpu = cpu;
28779@@ -2170,7 +2178,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28780 * reads and returns guest's timestamp counter "register"
28781 * guest_tsc = host_tsc + tsc_offset -- 21.3
28782 */
28783-static u64 guest_read_tsc(void)
28784+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28785 {
28786 u64 host_tsc, tsc_offset;
28787
28788@@ -4252,7 +4260,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28789 unsigned long cr4;
28790
28791 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28792+
28793+#ifndef CONFIG_PAX_PER_CPU_PGD
28794 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28795+#endif
28796
28797 /* Save the most likely value for this task's CR4 in the VMCS. */
28798 cr4 = read_cr4();
28799@@ -4279,7 +4290,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28800 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28801 vmx->host_idt_base = dt.address;
28802
28803- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28804+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28805
28806 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28807 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28808@@ -5876,11 +5887,16 @@ static __init int hardware_setup(void)
28809 * page upon invalidation. No need to do anything if the
28810 * processor does not have the APIC_ACCESS_ADDR VMCS field.
28811 */
28812- kvm_x86_ops->set_apic_access_page_addr = NULL;
28813+ pax_open_kernel();
28814+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28815+ pax_close_kernel();
28816 }
28817
28818- if (!cpu_has_vmx_tpr_shadow())
28819- kvm_x86_ops->update_cr8_intercept = NULL;
28820+ if (!cpu_has_vmx_tpr_shadow()) {
28821+ pax_open_kernel();
28822+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28823+ pax_close_kernel();
28824+ }
28825
28826 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28827 kvm_disable_largepages();
28828@@ -5891,13 +5907,15 @@ static __init int hardware_setup(void)
28829 if (!cpu_has_vmx_apicv())
28830 enable_apicv = 0;
28831
28832+ pax_open_kernel();
28833 if (enable_apicv)
28834- kvm_x86_ops->update_cr8_intercept = NULL;
28835+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28836 else {
28837- kvm_x86_ops->hwapic_irr_update = NULL;
28838- kvm_x86_ops->deliver_posted_interrupt = NULL;
28839- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28840+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28841+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28842+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28843 }
28844+ pax_close_kernel();
28845
28846 if (nested)
28847 nested_vmx_setup_ctls_msrs();
28848@@ -7846,6 +7864,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28849 "jmp 2f \n\t"
28850 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28851 "2: "
28852+
28853+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28854+ "ljmp %[cs],$3f\n\t"
28855+ "3: "
28856+#endif
28857+
28858 /* Save guest registers, load host registers, keep flags */
28859 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28860 "pop %0 \n\t"
28861@@ -7898,6 +7922,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28862 #endif
28863 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28864 [wordsize]"i"(sizeof(ulong))
28865+
28866+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28867+ ,[cs]"i"(__KERNEL_CS)
28868+#endif
28869+
28870 : "cc", "memory"
28871 #ifdef CONFIG_X86_64
28872 , "rax", "rbx", "rdi", "rsi"
28873@@ -7911,7 +7940,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28874 if (debugctlmsr)
28875 update_debugctlmsr(debugctlmsr);
28876
28877-#ifndef CONFIG_X86_64
28878+#ifdef CONFIG_X86_32
28879 /*
28880 * The sysexit path does not restore ds/es, so we must set them to
28881 * a reasonable value ourselves.
28882@@ -7920,8 +7949,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28883 * may be executed in interrupt context, which saves and restore segments
28884 * around it, nullifying its effect.
28885 */
28886- loadsegment(ds, __USER_DS);
28887- loadsegment(es, __USER_DS);
28888+ loadsegment(ds, __KERNEL_DS);
28889+ loadsegment(es, __KERNEL_DS);
28890+ loadsegment(ss, __KERNEL_DS);
28891+
28892+#ifdef CONFIG_PAX_KERNEXEC
28893+ loadsegment(fs, __KERNEL_PERCPU);
28894+#endif
28895+
28896+#ifdef CONFIG_PAX_MEMORY_UDEREF
28897+ __set_fs(current_thread_info()->addr_limit);
28898+#endif
28899+
28900 #endif
28901
28902 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
28903diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28904index c259814..9a0345b 100644
28905--- a/arch/x86/kvm/x86.c
28906+++ b/arch/x86/kvm/x86.c
28907@@ -1882,8 +1882,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28908 {
28909 struct kvm *kvm = vcpu->kvm;
28910 int lm = is_long_mode(vcpu);
28911- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28912- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28913+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28914+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28915 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28916 : kvm->arch.xen_hvm_config.blob_size_32;
28917 u32 page_num = data & ~PAGE_MASK;
28918@@ -2810,6 +2810,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28919 if (n < msr_list.nmsrs)
28920 goto out;
28921 r = -EFAULT;
28922+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28923+ goto out;
28924 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28925 num_msrs_to_save * sizeof(u32)))
28926 goto out;
28927@@ -5746,7 +5748,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28928 };
28929 #endif
28930
28931-int kvm_arch_init(void *opaque)
28932+int kvm_arch_init(const void *opaque)
28933 {
28934 int r;
28935 struct kvm_x86_ops *ops = opaque;
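
The KVM_GET_MSR_INDEX_LIST hunk above adds a defensive re-check that num_msrs_to_save still fits within msrs_to_save before the copy_to_user(), so a miscounted or corrupted element count can never turn the copy into an out-of-bounds kernel read. The shape of the clamp-then-copy pattern, sketched in isolation:

    /* Sketch: bound the element count by the backing array before
     * copying kernel data out to userspace. */
    if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
        return -EFAULT;            /* never copy past the array */
    if (copy_to_user(user_msr_list->indices, msrs_to_save,
                     num_msrs_to_save * sizeof(u32)))
        return -EFAULT;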
28936diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28937index c1c1544..f90c9d5 100644
28938--- a/arch/x86/lguest/boot.c
28939+++ b/arch/x86/lguest/boot.c
28940@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28941 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28942 * Launcher to reboot us.
28943 */
28944-static void lguest_restart(char *reason)
28945+static __noreturn void lguest_restart(char *reason)
28946 {
28947 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
28948+ BUG();
28949 }
28950
28951 /*G:050
28952diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
28953index 00933d5..3a64af9 100644
28954--- a/arch/x86/lib/atomic64_386_32.S
28955+++ b/arch/x86/lib/atomic64_386_32.S
28956@@ -48,6 +48,10 @@ BEGIN(read)
28957 movl (v), %eax
28958 movl 4(v), %edx
28959 RET_ENDP
28960+BEGIN(read_unchecked)
28961+ movl (v), %eax
28962+ movl 4(v), %edx
28963+RET_ENDP
28964 #undef v
28965
28966 #define v %esi
28967@@ -55,6 +59,10 @@ BEGIN(set)
28968 movl %ebx, (v)
28969 movl %ecx, 4(v)
28970 RET_ENDP
28971+BEGIN(set_unchecked)
28972+ movl %ebx, (v)
28973+ movl %ecx, 4(v)
28974+RET_ENDP
28975 #undef v
28976
28977 #define v %esi
28978@@ -70,6 +78,20 @@ RET_ENDP
28979 BEGIN(add)
28980 addl %eax, (v)
28981 adcl %edx, 4(v)
28982+
28983+#ifdef CONFIG_PAX_REFCOUNT
28984+ jno 0f
28985+ subl %eax, (v)
28986+ sbbl %edx, 4(v)
28987+ int $4
28988+0:
28989+ _ASM_EXTABLE(0b, 0b)
28990+#endif
28991+
28992+RET_ENDP
28993+BEGIN(add_unchecked)
28994+ addl %eax, (v)
28995+ adcl %edx, 4(v)
28996 RET_ENDP
28997 #undef v
28998
28999@@ -77,6 +99,24 @@ RET_ENDP
29000 BEGIN(add_return)
29001 addl (v), %eax
29002 adcl 4(v), %edx
29003+
29004+#ifdef CONFIG_PAX_REFCOUNT
29005+ into
29006+1234:
29007+ _ASM_EXTABLE(1234b, 2f)
29008+#endif
29009+
29010+ movl %eax, (v)
29011+ movl %edx, 4(v)
29012+
29013+#ifdef CONFIG_PAX_REFCOUNT
29014+2:
29015+#endif
29016+
29017+RET_ENDP
29018+BEGIN(add_return_unchecked)
29019+ addl (v), %eax
29020+ adcl 4(v), %edx
29021 movl %eax, (v)
29022 movl %edx, 4(v)
29023 RET_ENDP
29024@@ -86,6 +126,20 @@ RET_ENDP
29025 BEGIN(sub)
29026 subl %eax, (v)
29027 sbbl %edx, 4(v)
29028+
29029+#ifdef CONFIG_PAX_REFCOUNT
29030+ jno 0f
29031+ addl %eax, (v)
29032+ adcl %edx, 4(v)
29033+ int $4
29034+0:
29035+ _ASM_EXTABLE(0b, 0b)
29036+#endif
29037+
29038+RET_ENDP
29039+BEGIN(sub_unchecked)
29040+ subl %eax, (v)
29041+ sbbl %edx, 4(v)
29042 RET_ENDP
29043 #undef v
29044
29045@@ -96,6 +150,27 @@ BEGIN(sub_return)
29046 sbbl $0, %edx
29047 addl (v), %eax
29048 adcl 4(v), %edx
29049+
29050+#ifdef CONFIG_PAX_REFCOUNT
29051+ into
29052+1234:
29053+ _ASM_EXTABLE(1234b, 2f)
29054+#endif
29055+
29056+ movl %eax, (v)
29057+ movl %edx, 4(v)
29058+
29059+#ifdef CONFIG_PAX_REFCOUNT
29060+2:
29061+#endif
29062+
29063+RET_ENDP
29064+BEGIN(sub_return_unchecked)
29065+ negl %edx
29066+ negl %eax
29067+ sbbl $0, %edx
29068+ addl (v), %eax
29069+ adcl 4(v), %edx
29070 movl %eax, (v)
29071 movl %edx, 4(v)
29072 RET_ENDP
29073@@ -105,6 +180,20 @@ RET_ENDP
29074 BEGIN(inc)
29075 addl $1, (v)
29076 adcl $0, 4(v)
29077+
29078+#ifdef CONFIG_PAX_REFCOUNT
29079+ jno 0f
29080+ subl $1, (v)
29081+ sbbl $0, 4(v)
29082+ int $4
29083+0:
29084+ _ASM_EXTABLE(0b, 0b)
29085+#endif
29086+
29087+RET_ENDP
29088+BEGIN(inc_unchecked)
29089+ addl $1, (v)
29090+ adcl $0, 4(v)
29091 RET_ENDP
29092 #undef v
29093
29094@@ -114,6 +203,26 @@ BEGIN(inc_return)
29095 movl 4(v), %edx
29096 addl $1, %eax
29097 adcl $0, %edx
29098+
29099+#ifdef CONFIG_PAX_REFCOUNT
29100+ into
29101+1234:
29102+ _ASM_EXTABLE(1234b, 2f)
29103+#endif
29104+
29105+ movl %eax, (v)
29106+ movl %edx, 4(v)
29107+
29108+#ifdef CONFIG_PAX_REFCOUNT
29109+2:
29110+#endif
29111+
29112+RET_ENDP
29113+BEGIN(inc_return_unchecked)
29114+ movl (v), %eax
29115+ movl 4(v), %edx
29116+ addl $1, %eax
29117+ adcl $0, %edx
29118 movl %eax, (v)
29119 movl %edx, 4(v)
29120 RET_ENDP
29121@@ -123,6 +232,20 @@ RET_ENDP
29122 BEGIN(dec)
29123 subl $1, (v)
29124 sbbl $0, 4(v)
29125+
29126+#ifdef CONFIG_PAX_REFCOUNT
29127+ jno 0f
29128+ addl $1, (v)
29129+ adcl $0, 4(v)
29130+ int $4
29131+0:
29132+ _ASM_EXTABLE(0b, 0b)
29133+#endif
29134+
29135+RET_ENDP
29136+BEGIN(dec_unchecked)
29137+ subl $1, (v)
29138+ sbbl $0, 4(v)
29139 RET_ENDP
29140 #undef v
29141
29142@@ -132,6 +255,26 @@ BEGIN(dec_return)
29143 movl 4(v), %edx
29144 subl $1, %eax
29145 sbbl $0, %edx
29146+
29147+#ifdef CONFIG_PAX_REFCOUNT
29148+ into
29149+1234:
29150+ _ASM_EXTABLE(1234b, 2f)
29151+#endif
29152+
29153+ movl %eax, (v)
29154+ movl %edx, 4(v)
29155+
29156+#ifdef CONFIG_PAX_REFCOUNT
29157+2:
29158+#endif
29159+
29160+RET_ENDP
29161+BEGIN(dec_return_unchecked)
29162+ movl (v), %eax
29163+ movl 4(v), %edx
29164+ subl $1, %eax
29165+ sbbl $0, %edx
29166 movl %eax, (v)
29167 movl %edx, 4(v)
29168 RET_ENDP
29169@@ -143,6 +286,13 @@ BEGIN(add_unless)
29170 adcl %edx, %edi
29171 addl (v), %eax
29172 adcl 4(v), %edx
29173+
29174+#ifdef CONFIG_PAX_REFCOUNT
29175+ into
29176+1234:
29177+ _ASM_EXTABLE(1234b, 2f)
29178+#endif
29179+
29180 cmpl %eax, %ecx
29181 je 3f
29182 1:
29183@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29184 1:
29185 addl $1, %eax
29186 adcl $0, %edx
29187+
29188+#ifdef CONFIG_PAX_REFCOUNT
29189+ into
29190+1234:
29191+ _ASM_EXTABLE(1234b, 2f)
29192+#endif
29193+
29194 movl %eax, (v)
29195 movl %edx, 4(v)
29196 movl $1, %eax
29197@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29198 movl 4(v), %edx
29199 subl $1, %eax
29200 sbbl $0, %edx
29201+
29202+#ifdef CONFIG_PAX_REFCOUNT
29203+ into
29204+1234:
29205+ _ASM_EXTABLE(1234b, 1f)
29206+#endif
29207+
29208 js 1f
29209 movl %eax, (v)
29210 movl %edx, 4(v)
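
The pattern added to every checked atomic64 operation above is: perform the 64-bit add/sub, test the overflow flag (jno, or into), and on signed overflow undo the operation and raise int $4 (#OF), which PaX's trap handler turns into a refcount-overflow report rather than letting the counter wrap. The C-level semantics, sketched with a compiler builtin (the kernel code is the assembly above; this is only an illustration):

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of PAX_REFCOUNT semantics: trap instead of wrapping. */
    static bool checked_inc64(long long *v)
    {
        long long nv;

        if (__builtin_saddll_overflow(*v, 1, &nv))
            return false;           /* kernel: undo and raise #OF */
        *v = nv;
        return true;
    }

    int main(void)
    {
        long long v = 0x7fffffffffffffffLL;   /* LLONG_MAX */

        printf("inc ok? %d (v=%lld)\n", checked_inc64(&v), v);
        return 0;
    }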
29211diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29212index f5cc9eb..51fa319 100644
29213--- a/arch/x86/lib/atomic64_cx8_32.S
29214+++ b/arch/x86/lib/atomic64_cx8_32.S
29215@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29216 CFI_STARTPROC
29217
29218 read64 %ecx
29219+ pax_force_retaddr
29220 ret
29221 CFI_ENDPROC
29222 ENDPROC(atomic64_read_cx8)
29223
29224+ENTRY(atomic64_read_unchecked_cx8)
29225+ CFI_STARTPROC
29226+
29227+ read64 %ecx
29228+ pax_force_retaddr
29229+ ret
29230+ CFI_ENDPROC
29231+ENDPROC(atomic64_read_unchecked_cx8)
29232+
29233 ENTRY(atomic64_set_cx8)
29234 CFI_STARTPROC
29235
29236@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29237 cmpxchg8b (%esi)
29238 jne 1b
29239
29240+ pax_force_retaddr
29241 ret
29242 CFI_ENDPROC
29243 ENDPROC(atomic64_set_cx8)
29244
29245+ENTRY(atomic64_set_unchecked_cx8)
29246+ CFI_STARTPROC
29247+
29248+1:
29249+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29250+ * are atomic on 586 and newer */
29251+ cmpxchg8b (%esi)
29252+ jne 1b
29253+
29254+ pax_force_retaddr
29255+ ret
29256+ CFI_ENDPROC
29257+ENDPROC(atomic64_set_unchecked_cx8)
29258+
29259 ENTRY(atomic64_xchg_cx8)
29260 CFI_STARTPROC
29261
29262@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29263 cmpxchg8b (%esi)
29264 jne 1b
29265
29266+ pax_force_retaddr
29267 ret
29268 CFI_ENDPROC
29269 ENDPROC(atomic64_xchg_cx8)
29270
29271-.macro addsub_return func ins insc
29272-ENTRY(atomic64_\func\()_return_cx8)
29273+.macro addsub_return func ins insc unchecked=""
29274+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29275 CFI_STARTPROC
29276 SAVE ebp
29277 SAVE ebx
29278@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29279 movl %edx, %ecx
29280 \ins\()l %esi, %ebx
29281 \insc\()l %edi, %ecx
29282+
29283+.ifb \unchecked
29284+#ifdef CONFIG_PAX_REFCOUNT
29285+ into
29286+2:
29287+ _ASM_EXTABLE(2b, 3f)
29288+#endif
29289+.endif
29290+
29291 LOCK_PREFIX
29292 cmpxchg8b (%ebp)
29293 jne 1b
29294-
29295-10:
29296 movl %ebx, %eax
29297 movl %ecx, %edx
29298+
29299+.ifb \unchecked
29300+#ifdef CONFIG_PAX_REFCOUNT
29301+3:
29302+#endif
29303+.endif
29304+
29305 RESTORE edi
29306 RESTORE esi
29307 RESTORE ebx
29308 RESTORE ebp
29309+ pax_force_retaddr
29310 ret
29311 CFI_ENDPROC
29312-ENDPROC(atomic64_\func\()_return_cx8)
29313+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29314 .endm
29315
29316 addsub_return add add adc
29317 addsub_return sub sub sbb
29318+addsub_return add add adc _unchecked
29319+addsub_return sub sub sbb _unchecked
29320
29321-.macro incdec_return func ins insc
29322-ENTRY(atomic64_\func\()_return_cx8)
29323+.macro incdec_return func ins insc unchecked=""
29324+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29325 CFI_STARTPROC
29326 SAVE ebx
29327
29328@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29329 movl %edx, %ecx
29330 \ins\()l $1, %ebx
29331 \insc\()l $0, %ecx
29332+
29333+.ifb \unchecked
29334+#ifdef CONFIG_PAX_REFCOUNT
29335+ into
29336+2:
29337+ _ASM_EXTABLE(2b, 3f)
29338+#endif
29339+.endif
29340+
29341 LOCK_PREFIX
29342 cmpxchg8b (%esi)
29343 jne 1b
29344
29345-10:
29346 movl %ebx, %eax
29347 movl %ecx, %edx
29348+
29349+.ifb \unchecked
29350+#ifdef CONFIG_PAX_REFCOUNT
29351+3:
29352+#endif
29353+.endif
29354+
29355 RESTORE ebx
29356+ pax_force_retaddr
29357 ret
29358 CFI_ENDPROC
29359-ENDPROC(atomic64_\func\()_return_cx8)
29360+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29361 .endm
29362
29363 incdec_return inc add adc
29364 incdec_return dec sub sbb
29365+incdec_return inc add adc _unchecked
29366+incdec_return dec sub sbb _unchecked
29367
29368 ENTRY(atomic64_dec_if_positive_cx8)
29369 CFI_STARTPROC
29370@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29371 movl %edx, %ecx
29372 subl $1, %ebx
29373 sbb $0, %ecx
29374+
29375+#ifdef CONFIG_PAX_REFCOUNT
29376+ into
29377+1234:
29378+ _ASM_EXTABLE(1234b, 2f)
29379+#endif
29380+
29381 js 2f
29382 LOCK_PREFIX
29383 cmpxchg8b (%esi)
29384@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29385 movl %ebx, %eax
29386 movl %ecx, %edx
29387 RESTORE ebx
29388+ pax_force_retaddr
29389 ret
29390 CFI_ENDPROC
29391 ENDPROC(atomic64_dec_if_positive_cx8)
29392@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29393 movl %edx, %ecx
29394 addl %ebp, %ebx
29395 adcl %edi, %ecx
29396+
29397+#ifdef CONFIG_PAX_REFCOUNT
29398+ into
29399+1234:
29400+ _ASM_EXTABLE(1234b, 3f)
29401+#endif
29402+
29403 LOCK_PREFIX
29404 cmpxchg8b (%esi)
29405 jne 1b
29406@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29407 CFI_ADJUST_CFA_OFFSET -8
29408 RESTORE ebx
29409 RESTORE ebp
29410+ pax_force_retaddr
29411 ret
29412 4:
29413 cmpl %edx, 4(%esp)
29414@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29415 xorl %ecx, %ecx
29416 addl $1, %ebx
29417 adcl %edx, %ecx
29418+
29419+#ifdef CONFIG_PAX_REFCOUNT
29420+ into
29421+1234:
29422+ _ASM_EXTABLE(1234b, 3f)
29423+#endif
29424+
29425 LOCK_PREFIX
29426 cmpxchg8b (%esi)
29427 jne 1b
29428@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29429 movl $1, %eax
29430 3:
29431 RESTORE ebx
29432+ pax_force_retaddr
29433 ret
29434 CFI_ENDPROC
29435 ENDPROC(atomic64_inc_not_zero_cx8)
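
The _unchecked entry points generated by the macro suffix exist because not every 64-bit counter is a reference count: statistics and sequence counters may legitimately wrap. PaX therefore splits the API into atomic64_t (overflow traps, as above) and atomic64_unchecked_t (raw wrapping semantics), and each call site picks explicitly. A usage sketch; the type and helper names follow the PaX convention, but treat the declarations as assumptions:

    /* Sketch: the two counter flavours under PAX_REFCOUNT. */
    static atomic64_t           obj_refs;    /* lifetime-critical: traps on overflow */
    static atomic64_unchecked_t rx_packets;  /* statistic: wrapping is harmless */

    static void on_packet(void)
    {
        atomic64_inc(&obj_refs);              /* checked path */
        atomic64_inc_unchecked(&rx_packets);  /* raw cmpxchg8b path above */
    }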
29436diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29437index e78b8eee..7e173a8 100644
29438--- a/arch/x86/lib/checksum_32.S
29439+++ b/arch/x86/lib/checksum_32.S
29440@@ -29,7 +29,8 @@
29441 #include <asm/dwarf2.h>
29442 #include <asm/errno.h>
29443 #include <asm/asm.h>
29444-
29445+#include <asm/segment.h>
29446+
29447 /*
29448 * computes a partial checksum, e.g. for TCP/UDP fragments
29449 */
29450@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29451
29452 #define ARGBASE 16
29453 #define FP 12
29454-
29455-ENTRY(csum_partial_copy_generic)
29456+
29457+ENTRY(csum_partial_copy_generic_to_user)
29458 CFI_STARTPROC
29459+
29460+#ifdef CONFIG_PAX_MEMORY_UDEREF
29461+ pushl_cfi %gs
29462+ popl_cfi %es
29463+ jmp csum_partial_copy_generic
29464+#endif
29465+
29466+ENTRY(csum_partial_copy_generic_from_user)
29467+
29468+#ifdef CONFIG_PAX_MEMORY_UDEREF
29469+ pushl_cfi %gs
29470+ popl_cfi %ds
29471+#endif
29472+
29473+ENTRY(csum_partial_copy_generic)
29474 subl $4,%esp
29475 CFI_ADJUST_CFA_OFFSET 4
29476 pushl_cfi %edi
29477@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29478 jmp 4f
29479 SRC(1: movw (%esi), %bx )
29480 addl $2, %esi
29481-DST( movw %bx, (%edi) )
29482+DST( movw %bx, %es:(%edi) )
29483 addl $2, %edi
29484 addw %bx, %ax
29485 adcl $0, %eax
29486@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29487 SRC(1: movl (%esi), %ebx )
29488 SRC( movl 4(%esi), %edx )
29489 adcl %ebx, %eax
29490-DST( movl %ebx, (%edi) )
29491+DST( movl %ebx, %es:(%edi) )
29492 adcl %edx, %eax
29493-DST( movl %edx, 4(%edi) )
29494+DST( movl %edx, %es:4(%edi) )
29495
29496 SRC( movl 8(%esi), %ebx )
29497 SRC( movl 12(%esi), %edx )
29498 adcl %ebx, %eax
29499-DST( movl %ebx, 8(%edi) )
29500+DST( movl %ebx, %es:8(%edi) )
29501 adcl %edx, %eax
29502-DST( movl %edx, 12(%edi) )
29503+DST( movl %edx, %es:12(%edi) )
29504
29505 SRC( movl 16(%esi), %ebx )
29506 SRC( movl 20(%esi), %edx )
29507 adcl %ebx, %eax
29508-DST( movl %ebx, 16(%edi) )
29509+DST( movl %ebx, %es:16(%edi) )
29510 adcl %edx, %eax
29511-DST( movl %edx, 20(%edi) )
29512+DST( movl %edx, %es:20(%edi) )
29513
29514 SRC( movl 24(%esi), %ebx )
29515 SRC( movl 28(%esi), %edx )
29516 adcl %ebx, %eax
29517-DST( movl %ebx, 24(%edi) )
29518+DST( movl %ebx, %es:24(%edi) )
29519 adcl %edx, %eax
29520-DST( movl %edx, 28(%edi) )
29521+DST( movl %edx, %es:28(%edi) )
29522
29523 lea 32(%esi), %esi
29524 lea 32(%edi), %edi
29525@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29526 shrl $2, %edx # This clears CF
29527 SRC(3: movl (%esi), %ebx )
29528 adcl %ebx, %eax
29529-DST( movl %ebx, (%edi) )
29530+DST( movl %ebx, %es:(%edi) )
29531 lea 4(%esi), %esi
29532 lea 4(%edi), %edi
29533 dec %edx
29534@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29535 jb 5f
29536 SRC( movw (%esi), %cx )
29537 leal 2(%esi), %esi
29538-DST( movw %cx, (%edi) )
29539+DST( movw %cx, %es:(%edi) )
29540 leal 2(%edi), %edi
29541 je 6f
29542 shll $16,%ecx
29543 SRC(5: movb (%esi), %cl )
29544-DST( movb %cl, (%edi) )
29545+DST( movb %cl, %es:(%edi) )
29546 6: addl %ecx, %eax
29547 adcl $0, %eax
29548 7:
29549@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29550
29551 6001:
29552 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29553- movl $-EFAULT, (%ebx)
29554+ movl $-EFAULT, %ss:(%ebx)
29555
29556 # zero the complete destination - computing the rest
29557 # is too much work
29558@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29559
29560 6002:
29561 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29562- movl $-EFAULT,(%ebx)
29563+ movl $-EFAULT,%ss:(%ebx)
29564 jmp 5000b
29565
29566 .previous
29567
29568+ pushl_cfi %ss
29569+ popl_cfi %ds
29570+ pushl_cfi %ss
29571+ popl_cfi %es
29572 popl_cfi %ebx
29573 CFI_RESTORE ebx
29574 popl_cfi %esi
29575@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29576 popl_cfi %ecx # equivalent to addl $4,%esp
29577 ret
29578 CFI_ENDPROC
29579-ENDPROC(csum_partial_copy_generic)
29580+ENDPROC(csum_partial_copy_generic_to_user)
29581
29582 #else
29583
29584 /* Version for PentiumII/PPro */
29585
29586 #define ROUND1(x) \
29587+ nop; nop; nop; \
29588 SRC(movl x(%esi), %ebx ) ; \
29589 addl %ebx, %eax ; \
29590- DST(movl %ebx, x(%edi) ) ;
29591+ DST(movl %ebx, %es:x(%edi)) ;
29592
29593 #define ROUND(x) \
29594+ nop; nop; nop; \
29595 SRC(movl x(%esi), %ebx ) ; \
29596 adcl %ebx, %eax ; \
29597- DST(movl %ebx, x(%edi) ) ;
29598+ DST(movl %ebx, %es:x(%edi)) ;
29599
29600 #define ARGBASE 12
29601-
29602-ENTRY(csum_partial_copy_generic)
29603+
29604+ENTRY(csum_partial_copy_generic_to_user)
29605 CFI_STARTPROC
29606+
29607+#ifdef CONFIG_PAX_MEMORY_UDEREF
29608+ pushl_cfi %gs
29609+ popl_cfi %es
29610+ jmp csum_partial_copy_generic
29611+#endif
29612+
29613+ENTRY(csum_partial_copy_generic_from_user)
29614+
29615+#ifdef CONFIG_PAX_MEMORY_UDEREF
29616+ pushl_cfi %gs
29617+ popl_cfi %ds
29618+#endif
29619+
29620+ENTRY(csum_partial_copy_generic)
29621 pushl_cfi %ebx
29622 CFI_REL_OFFSET ebx, 0
29623 pushl_cfi %edi
29624@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29625 subl %ebx, %edi
29626 lea -1(%esi),%edx
29627 andl $-32,%edx
29628- lea 3f(%ebx,%ebx), %ebx
29629+ lea 3f(%ebx,%ebx,2), %ebx
29630 testl %esi, %esi
29631 jmp *%ebx
29632 1: addl $64,%esi
29633@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29634 jb 5f
29635 SRC( movw (%esi), %dx )
29636 leal 2(%esi), %esi
29637-DST( movw %dx, (%edi) )
29638+DST( movw %dx, %es:(%edi) )
29639 leal 2(%edi), %edi
29640 je 6f
29641 shll $16,%edx
29642 5:
29643 SRC( movb (%esi), %dl )
29644-DST( movb %dl, (%edi) )
29645+DST( movb %dl, %es:(%edi) )
29646 6: addl %edx, %eax
29647 adcl $0, %eax
29648 7:
29649 .section .fixup, "ax"
29650 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29651- movl $-EFAULT, (%ebx)
29652+ movl $-EFAULT, %ss:(%ebx)
29653 # zero the complete destination (computing the rest is too much work)
29654 movl ARGBASE+8(%esp),%edi # dst
29655 movl ARGBASE+12(%esp),%ecx # len
29656@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29657 rep; stosb
29658 jmp 7b
29659 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29660- movl $-EFAULT, (%ebx)
29661+ movl $-EFAULT, %ss:(%ebx)
29662 jmp 7b
29663 .previous
29664
29665+#ifdef CONFIG_PAX_MEMORY_UDEREF
29666+ pushl_cfi %ss
29667+ popl_cfi %ds
29668+ pushl_cfi %ss
29669+ popl_cfi %es
29670+#endif
29671+
29672 popl_cfi %esi
29673 CFI_RESTORE esi
29674 popl_cfi %edi
29675@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29676 CFI_RESTORE ebx
29677 ret
29678 CFI_ENDPROC
29679-ENDPROC(csum_partial_copy_generic)
29680+ENDPROC(csum_partial_copy_generic_to_user)
29681
29682 #undef ROUND
29683 #undef ROUND1
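
On i386 with PAX_MEMORY_UDEREF, kernel and user address spaces are separated by segmentation, with %gs carrying the userland data-segment selector. The new csum_partial_copy_generic_to_user/_from_user entry points load that selector into %es or %ds before falling through to the common body, every access to the user buffer gains an explicit %es: override, and the fixup paths restore flat %ds/%es from %ss. The override prefix also lengthens each unrolled step, which appears to be why ROUND/ROUND1 gain nop padding and the computed jump stride changes from lea 3f(%ebx,%ebx) to lea 3f(%ebx,%ebx,2): every dispatch slot must stay the same size. Roughly how the direction-specific entry points are meant to be declared on the C side (a sketch, not the exact wrapper code):

    /* Sketch: direction-specific entry points replace the single
     * csum_partial_copy_generic for user-facing copies (i386 UDEREF). */
    __wsum csum_partial_copy_generic_to_user(const void *src, void __user *dst,
                                             int len, __wsum sum,
                                             int *src_err, int *dst_err);
    __wsum csum_partial_copy_generic_from_user(const void __user *src, void *dst,
                                               int len, __wsum sum,
                                               int *src_err, int *dst_err);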
29684diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29685index f2145cf..cea889d 100644
29686--- a/arch/x86/lib/clear_page_64.S
29687+++ b/arch/x86/lib/clear_page_64.S
29688@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29689 movl $4096/8,%ecx
29690 xorl %eax,%eax
29691 rep stosq
29692+ pax_force_retaddr
29693 ret
29694 CFI_ENDPROC
29695 ENDPROC(clear_page_c)
29696@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29697 movl $4096,%ecx
29698 xorl %eax,%eax
29699 rep stosb
29700+ pax_force_retaddr
29701 ret
29702 CFI_ENDPROC
29703 ENDPROC(clear_page_c_e)
29704@@ -43,6 +45,7 @@ ENTRY(clear_page)
29705 leaq 64(%rdi),%rdi
29706 jnz .Lloop
29707 nop
29708+ pax_force_retaddr
29709 ret
29710 CFI_ENDPROC
29711 .Lclear_page_end:
29712@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29713
29714 #include <asm/cpufeature.h>
29715
29716- .section .altinstr_replacement,"ax"
29717+ .section .altinstr_replacement,"a"
29718 1: .byte 0xeb /* jmp <disp8> */
29719 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29720 2: .byte 0xeb /* jmp <disp8> */
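
Two recurring edits begin here and repeat through the rest of the lib/ assembly: every ret is preceded by pax_force_retaddr, and .altinstr_replacement drops the "x" (executable) section flag. The replacement sections only feed the boot-time alternatives patcher, so they never need to be mapped executable under KERNEXEC; and pax_force_retaddr is the KERNEXEC return-address hardening, which (as an assumption from the PaX design, e.g. its "bts" method) forces the saved return address into the kernel half of the address space so a corrupted return cannot land in userland. The bit-level idea:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Sketch of the KERNEXEC "bts" idea: setting bit 63 turns any
         * userland-looking return address into a kernel-space one. */
        uint64_t ret    = 0x00007f0012345678ULL;   /* user range */
        uint64_t forced = ret | (1ULL << 63);

        printf("%#llx -> %#llx\n",
               (unsigned long long)ret, (unsigned long long)forced);
        return 0;
    }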
29721diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29722index 40a1725..5d12ac4 100644
29723--- a/arch/x86/lib/cmpxchg16b_emu.S
29724+++ b/arch/x86/lib/cmpxchg16b_emu.S
29725@@ -8,6 +8,7 @@
29726 #include <linux/linkage.h>
29727 #include <asm/dwarf2.h>
29728 #include <asm/percpu.h>
29729+#include <asm/alternative-asm.h>
29730
29731 .text
29732
29733@@ -46,12 +47,14 @@ CFI_STARTPROC
29734 CFI_REMEMBER_STATE
29735 popfq_cfi
29736 mov $1, %al
29737+ pax_force_retaddr
29738 ret
29739
29740 CFI_RESTORE_STATE
29741 .Lnot_same:
29742 popfq_cfi
29743 xor %al,%al
29744+ pax_force_retaddr
29745 ret
29746
29747 CFI_ENDPROC
29748diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29749index 176cca6..e0d658e 100644
29750--- a/arch/x86/lib/copy_page_64.S
29751+++ b/arch/x86/lib/copy_page_64.S
29752@@ -9,6 +9,7 @@ copy_page_rep:
29753 CFI_STARTPROC
29754 movl $4096/8, %ecx
29755 rep movsq
29756+ pax_force_retaddr
29757 ret
29758 CFI_ENDPROC
29759 ENDPROC(copy_page_rep)
29760@@ -24,8 +25,8 @@ ENTRY(copy_page)
29761 CFI_ADJUST_CFA_OFFSET 2*8
29762 movq %rbx, (%rsp)
29763 CFI_REL_OFFSET rbx, 0
29764- movq %r12, 1*8(%rsp)
29765- CFI_REL_OFFSET r12, 1*8
29766+ movq %r13, 1*8(%rsp)
29767+ CFI_REL_OFFSET r13, 1*8
29768
29769 movl $(4096/64)-5, %ecx
29770 .p2align 4
29771@@ -38,7 +39,7 @@ ENTRY(copy_page)
29772 movq 0x8*4(%rsi), %r9
29773 movq 0x8*5(%rsi), %r10
29774 movq 0x8*6(%rsi), %r11
29775- movq 0x8*7(%rsi), %r12
29776+ movq 0x8*7(%rsi), %r13
29777
29778 prefetcht0 5*64(%rsi)
29779
29780@@ -49,7 +50,7 @@ ENTRY(copy_page)
29781 movq %r9, 0x8*4(%rdi)
29782 movq %r10, 0x8*5(%rdi)
29783 movq %r11, 0x8*6(%rdi)
29784- movq %r12, 0x8*7(%rdi)
29785+ movq %r13, 0x8*7(%rdi)
29786
29787 leaq 64 (%rsi), %rsi
29788 leaq 64 (%rdi), %rdi
29789@@ -68,7 +69,7 @@ ENTRY(copy_page)
29790 movq 0x8*4(%rsi), %r9
29791 movq 0x8*5(%rsi), %r10
29792 movq 0x8*6(%rsi), %r11
29793- movq 0x8*7(%rsi), %r12
29794+ movq 0x8*7(%rsi), %r13
29795
29796 movq %rax, 0x8*0(%rdi)
29797 movq %rbx, 0x8*1(%rdi)
29798@@ -77,7 +78,7 @@ ENTRY(copy_page)
29799 movq %r9, 0x8*4(%rdi)
29800 movq %r10, 0x8*5(%rdi)
29801 movq %r11, 0x8*6(%rdi)
29802- movq %r12, 0x8*7(%rdi)
29803+ movq %r13, 0x8*7(%rdi)
29804
29805 leaq 64(%rdi), %rdi
29806 leaq 64(%rsi), %rsi
29807@@ -85,10 +86,11 @@ ENTRY(copy_page)
29808
29809 movq (%rsp), %rbx
29810 CFI_RESTORE rbx
29811- movq 1*8(%rsp), %r12
29812- CFI_RESTORE r12
29813+ movq 1*8(%rsp), %r13
29814+ CFI_RESTORE r13
29815 addq $2*8, %rsp
29816 CFI_ADJUST_CFA_OFFSET -2*8
29817+ pax_force_retaddr
29818 ret
29819 .Lcopy_page_end:
29820 CFI_ENDPROC
29821@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29822
29823 #include <asm/cpufeature.h>
29824
29825- .section .altinstr_replacement,"ax"
29826+ .section .altinstr_replacement,"a"
29827 1: .byte 0xeb /* jmp <disp8> */
29828 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29829 2:
29830diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29831index dee945d..a84067b 100644
29832--- a/arch/x86/lib/copy_user_64.S
29833+++ b/arch/x86/lib/copy_user_64.S
29834@@ -18,31 +18,7 @@
29835 #include <asm/alternative-asm.h>
29836 #include <asm/asm.h>
29837 #include <asm/smap.h>
29838-
29839-/*
29840- * By placing feature2 after feature1 in altinstructions section, we logically
29841- * implement:
29842- * If CPU has feature2, jmp to alt2 is used
29843- * else if CPU has feature1, jmp to alt1 is used
29844- * else jmp to orig is used.
29845- */
29846- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29847-0:
29848- .byte 0xe9 /* 32bit jump */
29849- .long \orig-1f /* by default jump to orig */
29850-1:
29851- .section .altinstr_replacement,"ax"
29852-2: .byte 0xe9 /* near jump with 32bit immediate */
29853- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29854-3: .byte 0xe9 /* near jump with 32bit immediate */
29855- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29856- .previous
29857-
29858- .section .altinstructions,"a"
29859- altinstruction_entry 0b,2b,\feature1,5,5
29860- altinstruction_entry 0b,3b,\feature2,5,5
29861- .previous
29862- .endm
29863+#include <asm/pgtable.h>
29864
29865 .macro ALIGN_DESTINATION
29866 #ifdef FIX_ALIGNMENT
29867@@ -70,52 +46,6 @@
29868 #endif
29869 .endm
29870
29871-/* Standard copy_to_user with segment limit checking */
29872-ENTRY(_copy_to_user)
29873- CFI_STARTPROC
29874- GET_THREAD_INFO(%rax)
29875- movq %rdi,%rcx
29876- addq %rdx,%rcx
29877- jc bad_to_user
29878- cmpq TI_addr_limit(%rax),%rcx
29879- ja bad_to_user
29880- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29881- copy_user_generic_unrolled,copy_user_generic_string, \
29882- copy_user_enhanced_fast_string
29883- CFI_ENDPROC
29884-ENDPROC(_copy_to_user)
29885-
29886-/* Standard copy_from_user with segment limit checking */
29887-ENTRY(_copy_from_user)
29888- CFI_STARTPROC
29889- GET_THREAD_INFO(%rax)
29890- movq %rsi,%rcx
29891- addq %rdx,%rcx
29892- jc bad_from_user
29893- cmpq TI_addr_limit(%rax),%rcx
29894- ja bad_from_user
29895- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29896- copy_user_generic_unrolled,copy_user_generic_string, \
29897- copy_user_enhanced_fast_string
29898- CFI_ENDPROC
29899-ENDPROC(_copy_from_user)
29900-
29901- .section .fixup,"ax"
29902- /* must zero dest */
29903-ENTRY(bad_from_user)
29904-bad_from_user:
29905- CFI_STARTPROC
29906- movl %edx,%ecx
29907- xorl %eax,%eax
29908- rep
29909- stosb
29910-bad_to_user:
29911- movl %edx,%eax
29912- ret
29913- CFI_ENDPROC
29914-ENDPROC(bad_from_user)
29915- .previous
29916-
29917 /*
29918 * copy_user_generic_unrolled - memory copy with exception handling.
29919 * This version is for CPUs like P4 that don't have efficient micro
29920@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29921 */
29922 ENTRY(copy_user_generic_unrolled)
29923 CFI_STARTPROC
29924+ ASM_PAX_OPEN_USERLAND
29925 ASM_STAC
29926 cmpl $8,%edx
29927 jb 20f /* less then 8 bytes, go to byte copy loop */
29928@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29929 jnz 21b
29930 23: xor %eax,%eax
29931 ASM_CLAC
29932+ ASM_PAX_CLOSE_USERLAND
29933+ pax_force_retaddr
29934 ret
29935
29936 .section .fixup,"ax"
29937@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29938 */
29939 ENTRY(copy_user_generic_string)
29940 CFI_STARTPROC
29941+ ASM_PAX_OPEN_USERLAND
29942 ASM_STAC
29943 cmpl $8,%edx
29944 jb 2f /* less than 8 bytes, go to byte copy loop */
29945@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
29946 movsb
29947 xorl %eax,%eax
29948 ASM_CLAC
29949+ ASM_PAX_CLOSE_USERLAND
29950+ pax_force_retaddr
29951 ret
29952
29953 .section .fixup,"ax"
29954@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
29955 */
29956 ENTRY(copy_user_enhanced_fast_string)
29957 CFI_STARTPROC
29958+ ASM_PAX_OPEN_USERLAND
29959 ASM_STAC
29960 movl %edx,%ecx
29961 1: rep
29962 movsb
29963 xorl %eax,%eax
29964 ASM_CLAC
29965+ ASM_PAX_CLOSE_USERLAND
29966+ pax_force_retaddr
29967 ret
29968
29969 .section .fixup,"ax"
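
Deleting the _copy_to_user/_copy_from_user assembly entry points (and their exports in x8664_ksyms_64.c earlier in this patch) moves the limit checking into C, where PaX's usercopy instrumentation can see the kernel object's size as well as the user address range. What remains in assembly are the raw copy engines, now bracketed by ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND for UDEREF. A hedged sketch of what the C-side replacement looks like; the real PaX version lives in the uaccess headers, so the details here are assumptions:

    /* Sketch: C-level _copy_to_user replacement under PaX. */
    static inline unsigned long
    my_copy_to_user(void __user *to, const void *from, unsigned long n)
    {
        if (!access_ok(VERIFY_WRITE, to, n))   /* range/limit check */
            return n;
        check_object_size(from, n, true);      /* PAX_USERCOPY-style size check */
        return copy_user_generic((void __force *)to, from, n);
    }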
29970diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
29971index 6a4f43c..c70fb52 100644
29972--- a/arch/x86/lib/copy_user_nocache_64.S
29973+++ b/arch/x86/lib/copy_user_nocache_64.S
29974@@ -8,6 +8,7 @@
29975
29976 #include <linux/linkage.h>
29977 #include <asm/dwarf2.h>
29978+#include <asm/alternative-asm.h>
29979
29980 #define FIX_ALIGNMENT 1
29981
29982@@ -16,6 +17,7 @@
29983 #include <asm/thread_info.h>
29984 #include <asm/asm.h>
29985 #include <asm/smap.h>
29986+#include <asm/pgtable.h>
29987
29988 .macro ALIGN_DESTINATION
29989 #ifdef FIX_ALIGNMENT
29990@@ -49,6 +51,16 @@
29991 */
29992 ENTRY(__copy_user_nocache)
29993 CFI_STARTPROC
29994+
29995+#ifdef CONFIG_PAX_MEMORY_UDEREF
29996+ mov pax_user_shadow_base,%rcx
29997+ cmp %rcx,%rsi
29998+ jae 1f
29999+ add %rcx,%rsi
30000+1:
30001+#endif
30002+
30003+ ASM_PAX_OPEN_USERLAND
30004 ASM_STAC
30005 cmpl $8,%edx
30006 jb 20f /* less then 8 bytes, go to byte copy loop */
30007@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30008 jnz 21b
30009 23: xorl %eax,%eax
30010 ASM_CLAC
30011+ ASM_PAX_CLOSE_USERLAND
30012 sfence
30013+ pax_force_retaddr
30014 ret
30015
30016 .section .fixup,"ax"
30017diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30018index 2419d5f..fe52d0e 100644
30019--- a/arch/x86/lib/csum-copy_64.S
30020+++ b/arch/x86/lib/csum-copy_64.S
30021@@ -9,6 +9,7 @@
30022 #include <asm/dwarf2.h>
30023 #include <asm/errno.h>
30024 #include <asm/asm.h>
30025+#include <asm/alternative-asm.h>
30026
30027 /*
30028 * Checksum copy with exception handling.
30029@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30030 CFI_ADJUST_CFA_OFFSET 7*8
30031 movq %rbx, 2*8(%rsp)
30032 CFI_REL_OFFSET rbx, 2*8
30033- movq %r12, 3*8(%rsp)
30034- CFI_REL_OFFSET r12, 3*8
30035+ movq %r15, 3*8(%rsp)
30036+ CFI_REL_OFFSET r15, 3*8
30037 movq %r14, 4*8(%rsp)
30038 CFI_REL_OFFSET r14, 4*8
30039 movq %r13, 5*8(%rsp)
30040@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30041 movl %edx, %ecx
30042
30043 xorl %r9d, %r9d
30044- movq %rcx, %r12
30045+ movq %rcx, %r15
30046
30047- shrq $6, %r12
30048+ shrq $6, %r15
30049 jz .Lhandle_tail /* < 64 */
30050
30051 clc
30052
30053 /* main loop. clear in 64 byte blocks */
30054 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30055- /* r11: temp3, rdx: temp4, r12 loopcnt */
30056+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30057 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30058 .p2align 4
30059 .Lloop:
30060@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30061 adcq %r14, %rax
30062 adcq %r13, %rax
30063
30064- decl %r12d
30065+ decl %r15d
30066
30067 dest
30068 movq %rbx, (%rsi)
30069@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30070 .Lende:
30071 movq 2*8(%rsp), %rbx
30072 CFI_RESTORE rbx
30073- movq 3*8(%rsp), %r12
30074- CFI_RESTORE r12
30075+ movq 3*8(%rsp), %r15
30076+ CFI_RESTORE r15
30077 movq 4*8(%rsp), %r14
30078 CFI_RESTORE r14
30079 movq 5*8(%rsp), %r13
30080@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30081 CFI_RESTORE rbp
30082 addq $7*8, %rsp
30083 CFI_ADJUST_CFA_OFFSET -7*8
30084+ pax_force_retaddr
30085 ret
30086 CFI_RESTORE_STATE
30087
30088diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30089index 1318f75..44c30fd 100644
30090--- a/arch/x86/lib/csum-wrappers_64.c
30091+++ b/arch/x86/lib/csum-wrappers_64.c
30092@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30093 len -= 2;
30094 }
30095 }
30096+ pax_open_userland();
30097 stac();
30098- isum = csum_partial_copy_generic((__force const void *)src,
30099+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30100 dst, len, isum, errp, NULL);
30101 clac();
30102+ pax_close_userland();
30103 if (unlikely(*errp))
30104 goto out_err;
30105
30106@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30107 }
30108
30109 *errp = 0;
30110+ pax_open_userland();
30111 stac();
30112- ret = csum_partial_copy_generic(src, (void __force *)dst,
30113+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30114 len, isum, NULL, errp);
30115 clac();
30116+ pax_close_userland();
30117 return ret;
30118 }
30119 EXPORT_SYMBOL(csum_partial_copy_to_user);
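
The checksum wrappers now bracket the low-level copy with pax_open_userland()/pax_close_userland() in addition to stac()/clac(): under UDEREF the user address space is normally inaccessible from kernel mode, and this pair opens a window for the intended access, much as stac/clac does for SMAP. The ____m() wrapper rebases the user pointer into the mapping that window exposes. The bracket in isolation:

    /* Sketch of the UDEREF access bracket used above. */
    pax_open_userland();                      /* allow user-space access */
    stac();                                   /* and lift SMAP */
    ret = csum_partial_copy_generic((const void __force_kernel *)____m(src),
                                    dst, len, isum, errp, NULL);
    clac();
    pax_close_userland();                     /* close the window again */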
30120diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30121index a451235..1daa956 100644
30122--- a/arch/x86/lib/getuser.S
30123+++ b/arch/x86/lib/getuser.S
30124@@ -33,17 +33,40 @@
30125 #include <asm/thread_info.h>
30126 #include <asm/asm.h>
30127 #include <asm/smap.h>
30128+#include <asm/segment.h>
30129+#include <asm/pgtable.h>
30130+#include <asm/alternative-asm.h>
30131+
30132+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30133+#define __copyuser_seg gs;
30134+#else
30135+#define __copyuser_seg
30136+#endif
30137
30138 .text
30139 ENTRY(__get_user_1)
30140 CFI_STARTPROC
30141+
30142+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30143 GET_THREAD_INFO(%_ASM_DX)
30144 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30145 jae bad_get_user
30146 ASM_STAC
30147-1: movzbl (%_ASM_AX),%edx
30148+
30149+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30150+ mov pax_user_shadow_base,%_ASM_DX
30151+ cmp %_ASM_DX,%_ASM_AX
30152+ jae 1234f
30153+ add %_ASM_DX,%_ASM_AX
30154+1234:
30155+#endif
30156+
30157+#endif
30158+
30159+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30160 xor %eax,%eax
30161 ASM_CLAC
30162+ pax_force_retaddr
30163 ret
30164 CFI_ENDPROC
30165 ENDPROC(__get_user_1)
30166@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30167 ENTRY(__get_user_2)
30168 CFI_STARTPROC
30169 add $1,%_ASM_AX
30170+
30171+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30172 jc bad_get_user
30173 GET_THREAD_INFO(%_ASM_DX)
30174 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30175 jae bad_get_user
30176 ASM_STAC
30177-2: movzwl -1(%_ASM_AX),%edx
30178+
30179+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30180+ mov pax_user_shadow_base,%_ASM_DX
30181+ cmp %_ASM_DX,%_ASM_AX
30182+ jae 1234f
30183+ add %_ASM_DX,%_ASM_AX
30184+1234:
30185+#endif
30186+
30187+#endif
30188+
30189+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30190 xor %eax,%eax
30191 ASM_CLAC
30192+ pax_force_retaddr
30193 ret
30194 CFI_ENDPROC
30195 ENDPROC(__get_user_2)
30196@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30197 ENTRY(__get_user_4)
30198 CFI_STARTPROC
30199 add $3,%_ASM_AX
30200+
30201+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30202 jc bad_get_user
30203 GET_THREAD_INFO(%_ASM_DX)
30204 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30205 jae bad_get_user
30206 ASM_STAC
30207-3: movl -3(%_ASM_AX),%edx
30208+
30209+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30210+ mov pax_user_shadow_base,%_ASM_DX
30211+ cmp %_ASM_DX,%_ASM_AX
30212+ jae 1234f
30213+ add %_ASM_DX,%_ASM_AX
30214+1234:
30215+#endif
30216+
30217+#endif
30218+
30219+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30220 xor %eax,%eax
30221 ASM_CLAC
30222+ pax_force_retaddr
30223 ret
30224 CFI_ENDPROC
30225 ENDPROC(__get_user_4)
30226@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30227 GET_THREAD_INFO(%_ASM_DX)
30228 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30229 jae bad_get_user
30230+
30231+#ifdef CONFIG_PAX_MEMORY_UDEREF
30232+ mov pax_user_shadow_base,%_ASM_DX
30233+ cmp %_ASM_DX,%_ASM_AX
30234+ jae 1234f
30235+ add %_ASM_DX,%_ASM_AX
30236+1234:
30237+#endif
30238+
30239 ASM_STAC
30240 4: movq -7(%_ASM_AX),%rdx
30241 xor %eax,%eax
30242 ASM_CLAC
30243+ pax_force_retaddr
30244 ret
30245 #else
30246 add $7,%_ASM_AX
30247@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30248 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30249 jae bad_get_user_8
30250 ASM_STAC
30251-4: movl -7(%_ASM_AX),%edx
30252-5: movl -3(%_ASM_AX),%ecx
30253+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30254+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30255 xor %eax,%eax
30256 ASM_CLAC
30257+ pax_force_retaddr
30258 ret
30259 #endif
30260 CFI_ENDPROC
30261@@ -113,6 +175,7 @@ bad_get_user:
30262 xor %edx,%edx
30263 mov $(-EFAULT),%_ASM_AX
30264 ASM_CLAC
30265+ pax_force_retaddr
30266 ret
30267 CFI_ENDPROC
30268 END(bad_get_user)
30269@@ -124,6 +187,7 @@ bad_get_user_8:
30270 xor %ecx,%ecx
30271 mov $(-EFAULT),%_ASM_AX
30272 ASM_CLAC
30273+ pax_force_retaddr
30274 ret
30275 CFI_ENDPROC
30276 END(bad_get_user_8)
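
The getuser fast paths show the amd64 UDEREF address rebase in the raw: if the incoming pointer is below pax_user_shadow_base it is shifted up by that base, so the access goes through the shadow user mapping; pointers already at or above the base (kernel-initiated accesses through ____m(), as in the csum wrappers above) pass unchanged. On i386 the same goal is met with the gs segment override instead (__copyuser_seg). Equivalent C for the amd64 check, as a sketch:

    /* Sketch of the pax_user_shadow_base rebase from the asm above. */
    extern unsigned long pax_user_shadow_base;  /* kernel symbol from this patch */

    static inline unsigned long uderef_rebase(unsigned long addr)
    {
        if (addr < pax_user_shadow_base)    /* genuine userland pointer */
            addr += pax_user_shadow_base;   /* go through the shadow map */
        return addr;                        /* already rebased: leave as-is */
    }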
30277diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30278index 1313ae6..84f25ea 100644
30279--- a/arch/x86/lib/insn.c
30280+++ b/arch/x86/lib/insn.c
30281@@ -20,8 +20,10 @@
30282
30283 #ifdef __KERNEL__
30284 #include <linux/string.h>
30285+#include <asm/pgtable_types.h>
30286 #else
30287 #include <string.h>
30288+#define ktla_ktva(addr) addr
30289 #endif
30290 #include <asm/inat.h>
30291 #include <asm/insn.h>
30292@@ -53,9 +55,9 @@
30293 void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30294 {
30295 memset(insn, 0, sizeof(*insn));
30296- insn->kaddr = kaddr;
30297- insn->end_kaddr = kaddr + buf_len;
30298- insn->next_byte = kaddr;
30299+ insn->kaddr = ktla_ktva(kaddr);
30300+ insn->end_kaddr = insn->kaddr + buf_len;
30301+ insn->next_byte = insn->kaddr;
30302 insn->x86_64 = x86_64 ? 1 : 0;
30303 insn->opnd_bytes = 4;
30304 if (x86_64)
30305diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30306index 05a95e7..326f2fa 100644
30307--- a/arch/x86/lib/iomap_copy_64.S
30308+++ b/arch/x86/lib/iomap_copy_64.S
30309@@ -17,6 +17,7 @@
30310
30311 #include <linux/linkage.h>
30312 #include <asm/dwarf2.h>
30313+#include <asm/alternative-asm.h>
30314
30315 /*
30316 * override generic version in lib/iomap_copy.c
30317@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30318 CFI_STARTPROC
30319 movl %edx,%ecx
30320 rep movsd
30321+ pax_force_retaddr
30322 ret
30323 CFI_ENDPROC
30324 ENDPROC(__iowrite32_copy)
30325diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30326index 56313a3..0db417e 100644
30327--- a/arch/x86/lib/memcpy_64.S
30328+++ b/arch/x86/lib/memcpy_64.S
30329@@ -24,7 +24,7 @@
30330 * This gets patched over the unrolled variant (below) via the
30331 * alternative instructions framework:
30332 */
30333- .section .altinstr_replacement, "ax", @progbits
30334+ .section .altinstr_replacement, "a", @progbits
30335 .Lmemcpy_c:
30336 movq %rdi, %rax
30337 movq %rdx, %rcx
30338@@ -33,6 +33,7 @@
30339 rep movsq
30340 movl %edx, %ecx
30341 rep movsb
30342+ pax_force_retaddr
30343 ret
30344 .Lmemcpy_e:
30345 .previous
30346@@ -44,11 +45,12 @@
30347 * This gets patched over the unrolled variant (below) via the
30348 * alternative instructions framework:
30349 */
30350- .section .altinstr_replacement, "ax", @progbits
30351+ .section .altinstr_replacement, "a", @progbits
30352 .Lmemcpy_c_e:
30353 movq %rdi, %rax
30354 movq %rdx, %rcx
30355 rep movsb
30356+ pax_force_retaddr
30357 ret
30358 .Lmemcpy_e_e:
30359 .previous
30360@@ -136,6 +138,7 @@ ENTRY(memcpy)
30361 movq %r9, 1*8(%rdi)
30362 movq %r10, -2*8(%rdi, %rdx)
30363 movq %r11, -1*8(%rdi, %rdx)
30364+ pax_force_retaddr
30365 retq
30366 .p2align 4
30367 .Lless_16bytes:
30368@@ -148,6 +151,7 @@ ENTRY(memcpy)
30369 movq -1*8(%rsi, %rdx), %r9
30370 movq %r8, 0*8(%rdi)
30371 movq %r9, -1*8(%rdi, %rdx)
30372+ pax_force_retaddr
30373 retq
30374 .p2align 4
30375 .Lless_8bytes:
30376@@ -161,6 +165,7 @@ ENTRY(memcpy)
30377 movl -4(%rsi, %rdx), %r8d
30378 movl %ecx, (%rdi)
30379 movl %r8d, -4(%rdi, %rdx)
30380+ pax_force_retaddr
30381 retq
30382 .p2align 4
30383 .Lless_3bytes:
30384@@ -179,6 +184,7 @@ ENTRY(memcpy)
30385 movb %cl, (%rdi)
30386
30387 .Lend:
30388+ pax_force_retaddr
30389 retq
30390 CFI_ENDPROC
30391 ENDPROC(memcpy)
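Alongside the `pax_force_retaddr` insertions, these hunks flip `.altinstr_replacement` from `"ax"` (allocatable and executable) to just `"a"`: the replacement bytes are only ever copied over the patch site by the alternatives framework, never executed where they sit, so under KERNEXEC there is no reason to keep them in an executable mapping. A C-level sketch of the same idea, with purely illustrative byte values:

/*
 * Sketch: replacement instructions as plain data in an allocatable,
 * non-executable section; apply_alternatives() copies such bytes over
 * the original site at boot.
 */
static const unsigned char memcpy_erms_replacement[]
	__attribute__((used, section(".altinstr_replacement"))) = {
	0x48, 0x89, 0xf8,	/* mov %rdi,%rax */
	0x48, 0x89, 0xd1,	/* mov %rdx,%rcx */
	0xf3, 0xa4,		/* rep movsb */
	0xc3,			/* ret */
};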
30392diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30393index 65268a6..dd1de11 100644
30394--- a/arch/x86/lib/memmove_64.S
30395+++ b/arch/x86/lib/memmove_64.S
30396@@ -202,14 +202,16 @@ ENTRY(memmove)
30397 movb (%rsi), %r11b
30398 movb %r11b, (%rdi)
30399 13:
30400+ pax_force_retaddr
30401 retq
30402 CFI_ENDPROC
30403
30404- .section .altinstr_replacement,"ax"
30405+ .section .altinstr_replacement,"a"
30406 .Lmemmove_begin_forward_efs:
30407 /* Forward moving data. */
30408 movq %rdx, %rcx
30409 rep movsb
30410+ pax_force_retaddr
30411 retq
30412 .Lmemmove_end_forward_efs:
30413 .previous
30414diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30415index 2dcb380..2eb79fe 100644
30416--- a/arch/x86/lib/memset_64.S
30417+++ b/arch/x86/lib/memset_64.S
30418@@ -16,7 +16,7 @@
30419 *
30420 * rax original destination
30421 */
30422- .section .altinstr_replacement, "ax", @progbits
30423+ .section .altinstr_replacement, "a", @progbits
30424 .Lmemset_c:
30425 movq %rdi,%r9
30426 movq %rdx,%rcx
30427@@ -30,6 +30,7 @@
30428 movl %edx,%ecx
30429 rep stosb
30430 movq %r9,%rax
30431+ pax_force_retaddr
30432 ret
30433 .Lmemset_e:
30434 .previous
30435@@ -45,13 +46,14 @@
30436 *
30437 * rax original destination
30438 */
30439- .section .altinstr_replacement, "ax", @progbits
30440+ .section .altinstr_replacement, "a", @progbits
30441 .Lmemset_c_e:
30442 movq %rdi,%r9
30443 movb %sil,%al
30444 movq %rdx,%rcx
30445 rep stosb
30446 movq %r9,%rax
30447+ pax_force_retaddr
30448 ret
30449 .Lmemset_e_e:
30450 .previous
30451@@ -118,6 +120,7 @@ ENTRY(__memset)
30452
30453 .Lende:
30454 movq %r10,%rax
30455+ pax_force_retaddr
30456 ret
30457
30458 CFI_RESTORE_STATE
30459diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30460index c9f2d9b..e7fd2c0 100644
30461--- a/arch/x86/lib/mmx_32.c
30462+++ b/arch/x86/lib/mmx_32.c
30463@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30464 {
30465 void *p;
30466 int i;
30467+ unsigned long cr0;
30468
30469 if (unlikely(in_interrupt()))
30470 return __memcpy(to, from, len);
30471@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30472 kernel_fpu_begin();
30473
30474 __asm__ __volatile__ (
30475- "1: prefetch (%0)\n" /* This set is 28 bytes */
30476- " prefetch 64(%0)\n"
30477- " prefetch 128(%0)\n"
30478- " prefetch 192(%0)\n"
30479- " prefetch 256(%0)\n"
30480+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30481+ " prefetch 64(%1)\n"
30482+ " prefetch 128(%1)\n"
30483+ " prefetch 192(%1)\n"
30484+ " prefetch 256(%1)\n"
30485 "2: \n"
30486 ".section .fixup, \"ax\"\n"
30487- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30488+ "3: \n"
30489+
30490+#ifdef CONFIG_PAX_KERNEXEC
30491+ " movl %%cr0, %0\n"
30492+ " movl %0, %%eax\n"
30493+ " andl $0xFFFEFFFF, %%eax\n"
30494+ " movl %%eax, %%cr0\n"
30495+#endif
30496+
30497+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30498+
30499+#ifdef CONFIG_PAX_KERNEXEC
30500+ " movl %0, %%cr0\n"
30501+#endif
30502+
30503 " jmp 2b\n"
30504 ".previous\n"
30505 _ASM_EXTABLE(1b, 3b)
30506- : : "r" (from));
30507+ : "=&r" (cr0) : "r" (from) : "ax");
30508
30509 for ( ; i > 5; i--) {
30510 __asm__ __volatile__ (
30511- "1: prefetch 320(%0)\n"
30512- "2: movq (%0), %%mm0\n"
30513- " movq 8(%0), %%mm1\n"
30514- " movq 16(%0), %%mm2\n"
30515- " movq 24(%0), %%mm3\n"
30516- " movq %%mm0, (%1)\n"
30517- " movq %%mm1, 8(%1)\n"
30518- " movq %%mm2, 16(%1)\n"
30519- " movq %%mm3, 24(%1)\n"
30520- " movq 32(%0), %%mm0\n"
30521- " movq 40(%0), %%mm1\n"
30522- " movq 48(%0), %%mm2\n"
30523- " movq 56(%0), %%mm3\n"
30524- " movq %%mm0, 32(%1)\n"
30525- " movq %%mm1, 40(%1)\n"
30526- " movq %%mm2, 48(%1)\n"
30527- " movq %%mm3, 56(%1)\n"
30528+ "1: prefetch 320(%1)\n"
30529+ "2: movq (%1), %%mm0\n"
30530+ " movq 8(%1), %%mm1\n"
30531+ " movq 16(%1), %%mm2\n"
30532+ " movq 24(%1), %%mm3\n"
30533+ " movq %%mm0, (%2)\n"
30534+ " movq %%mm1, 8(%2)\n"
30535+ " movq %%mm2, 16(%2)\n"
30536+ " movq %%mm3, 24(%2)\n"
30537+ " movq 32(%1), %%mm0\n"
30538+ " movq 40(%1), %%mm1\n"
30539+ " movq 48(%1), %%mm2\n"
30540+ " movq 56(%1), %%mm3\n"
30541+ " movq %%mm0, 32(%2)\n"
30542+ " movq %%mm1, 40(%2)\n"
30543+ " movq %%mm2, 48(%2)\n"
30544+ " movq %%mm3, 56(%2)\n"
30545 ".section .fixup, \"ax\"\n"
30546- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30547+ "3:\n"
30548+
30549+#ifdef CONFIG_PAX_KERNEXEC
30550+ " movl %%cr0, %0\n"
30551+ " movl %0, %%eax\n"
30552+ " andl $0xFFFEFFFF, %%eax\n"
30553+ " movl %%eax, %%cr0\n"
30554+#endif
30555+
30556+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30557+
30558+#ifdef CONFIG_PAX_KERNEXEC
30559+ " movl %0, %%cr0\n"
30560+#endif
30561+
30562 " jmp 2b\n"
30563 ".previous\n"
30564 _ASM_EXTABLE(1b, 3b)
30565- : : "r" (from), "r" (to) : "memory");
30566+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30567
30568 from += 64;
30569 to += 64;
30570@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30571 static void fast_copy_page(void *to, void *from)
30572 {
30573 int i;
30574+ unsigned long cr0;
30575
30576 kernel_fpu_begin();
30577
30578@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30579 * but that is for later. -AV
30580 */
30581 __asm__ __volatile__(
30582- "1: prefetch (%0)\n"
30583- " prefetch 64(%0)\n"
30584- " prefetch 128(%0)\n"
30585- " prefetch 192(%0)\n"
30586- " prefetch 256(%0)\n"
30587+ "1: prefetch (%1)\n"
30588+ " prefetch 64(%1)\n"
30589+ " prefetch 128(%1)\n"
30590+ " prefetch 192(%1)\n"
30591+ " prefetch 256(%1)\n"
30592 "2: \n"
30593 ".section .fixup, \"ax\"\n"
30594- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30595+ "3: \n"
30596+
30597+#ifdef CONFIG_PAX_KERNEXEC
30598+ " movl %%cr0, %0\n"
30599+ " movl %0, %%eax\n"
30600+ " andl $0xFFFEFFFF, %%eax\n"
30601+ " movl %%eax, %%cr0\n"
30602+#endif
30603+
30604+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30605+
30606+#ifdef CONFIG_PAX_KERNEXEC
30607+ " movl %0, %%cr0\n"
30608+#endif
30609+
30610 " jmp 2b\n"
30611 ".previous\n"
30612- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30613+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30614
30615 for (i = 0; i < (4096-320)/64; i++) {
30616 __asm__ __volatile__ (
30617- "1: prefetch 320(%0)\n"
30618- "2: movq (%0), %%mm0\n"
30619- " movntq %%mm0, (%1)\n"
30620- " movq 8(%0), %%mm1\n"
30621- " movntq %%mm1, 8(%1)\n"
30622- " movq 16(%0), %%mm2\n"
30623- " movntq %%mm2, 16(%1)\n"
30624- " movq 24(%0), %%mm3\n"
30625- " movntq %%mm3, 24(%1)\n"
30626- " movq 32(%0), %%mm4\n"
30627- " movntq %%mm4, 32(%1)\n"
30628- " movq 40(%0), %%mm5\n"
30629- " movntq %%mm5, 40(%1)\n"
30630- " movq 48(%0), %%mm6\n"
30631- " movntq %%mm6, 48(%1)\n"
30632- " movq 56(%0), %%mm7\n"
30633- " movntq %%mm7, 56(%1)\n"
30634+ "1: prefetch 320(%1)\n"
30635+ "2: movq (%1), %%mm0\n"
30636+ " movntq %%mm0, (%2)\n"
30637+ " movq 8(%1), %%mm1\n"
30638+ " movntq %%mm1, 8(%2)\n"
30639+ " movq 16(%1), %%mm2\n"
30640+ " movntq %%mm2, 16(%2)\n"
30641+ " movq 24(%1), %%mm3\n"
30642+ " movntq %%mm3, 24(%2)\n"
30643+ " movq 32(%1), %%mm4\n"
30644+ " movntq %%mm4, 32(%2)\n"
30645+ " movq 40(%1), %%mm5\n"
30646+ " movntq %%mm5, 40(%2)\n"
30647+ " movq 48(%1), %%mm6\n"
30648+ " movntq %%mm6, 48(%2)\n"
30649+ " movq 56(%1), %%mm7\n"
30650+ " movntq %%mm7, 56(%2)\n"
30651 ".section .fixup, \"ax\"\n"
30652- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30653+ "3:\n"
30654+
30655+#ifdef CONFIG_PAX_KERNEXEC
30656+ " movl %%cr0, %0\n"
30657+ " movl %0, %%eax\n"
30658+ " andl $0xFFFEFFFF, %%eax\n"
30659+ " movl %%eax, %%cr0\n"
30660+#endif
30661+
30662+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30663+
30664+#ifdef CONFIG_PAX_KERNEXEC
30665+ " movl %0, %%cr0\n"
30666+#endif
30667+
30668 " jmp 2b\n"
30669 ".previous\n"
30670- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30671+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30672
30673 from += 64;
30674 to += 64;
30675@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30676 static void fast_copy_page(void *to, void *from)
30677 {
30678 int i;
30679+ unsigned long cr0;
30680
30681 kernel_fpu_begin();
30682
30683 __asm__ __volatile__ (
30684- "1: prefetch (%0)\n"
30685- " prefetch 64(%0)\n"
30686- " prefetch 128(%0)\n"
30687- " prefetch 192(%0)\n"
30688- " prefetch 256(%0)\n"
30689+ "1: prefetch (%1)\n"
30690+ " prefetch 64(%1)\n"
30691+ " prefetch 128(%1)\n"
30692+ " prefetch 192(%1)\n"
30693+ " prefetch 256(%1)\n"
30694 "2: \n"
30695 ".section .fixup, \"ax\"\n"
30696- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30697+ "3: \n"
30698+
30699+#ifdef CONFIG_PAX_KERNEXEC
30700+ " movl %%cr0, %0\n"
30701+ " movl %0, %%eax\n"
30702+ " andl $0xFFFEFFFF, %%eax\n"
30703+ " movl %%eax, %%cr0\n"
30704+#endif
30705+
30706+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30707+
30708+#ifdef CONFIG_PAX_KERNEXEC
30709+ " movl %0, %%cr0\n"
30710+#endif
30711+
30712 " jmp 2b\n"
30713 ".previous\n"
30714- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30715+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30716
30717 for (i = 0; i < 4096/64; i++) {
30718 __asm__ __volatile__ (
30719- "1: prefetch 320(%0)\n"
30720- "2: movq (%0), %%mm0\n"
30721- " movq 8(%0), %%mm1\n"
30722- " movq 16(%0), %%mm2\n"
30723- " movq 24(%0), %%mm3\n"
30724- " movq %%mm0, (%1)\n"
30725- " movq %%mm1, 8(%1)\n"
30726- " movq %%mm2, 16(%1)\n"
30727- " movq %%mm3, 24(%1)\n"
30728- " movq 32(%0), %%mm0\n"
30729- " movq 40(%0), %%mm1\n"
30730- " movq 48(%0), %%mm2\n"
30731- " movq 56(%0), %%mm3\n"
30732- " movq %%mm0, 32(%1)\n"
30733- " movq %%mm1, 40(%1)\n"
30734- " movq %%mm2, 48(%1)\n"
30735- " movq %%mm3, 56(%1)\n"
30736+ "1: prefetch 320(%1)\n"
30737+ "2: movq (%1), %%mm0\n"
30738+ " movq 8(%1), %%mm1\n"
30739+ " movq 16(%1), %%mm2\n"
30740+ " movq 24(%1), %%mm3\n"
30741+ " movq %%mm0, (%2)\n"
30742+ " movq %%mm1, 8(%2)\n"
30743+ " movq %%mm2, 16(%2)\n"
30744+ " movq %%mm3, 24(%2)\n"
30745+ " movq 32(%1), %%mm0\n"
30746+ " movq 40(%1), %%mm1\n"
30747+ " movq 48(%1), %%mm2\n"
30748+ " movq 56(%1), %%mm3\n"
30749+ " movq %%mm0, 32(%2)\n"
30750+ " movq %%mm1, 40(%2)\n"
30751+ " movq %%mm2, 48(%2)\n"
30752+ " movq %%mm3, 56(%2)\n"
30753 ".section .fixup, \"ax\"\n"
30754- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30755+ "3:\n"
30756+
30757+#ifdef CONFIG_PAX_KERNEXEC
30758+ " movl %%cr0, %0\n"
30759+ " movl %0, %%eax\n"
30760+ " andl $0xFFFEFFFF, %%eax\n"
30761+ " movl %%eax, %%cr0\n"
30762+#endif
30763+
30764+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30765+
30766+#ifdef CONFIG_PAX_KERNEXEC
30767+ " movl %0, %%cr0\n"
30768+#endif
30769+
30770 " jmp 2b\n"
30771 ".previous\n"
30772 _ASM_EXTABLE(1b, 3b)
30773- : : "r" (from), "r" (to) : "memory");
30774+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30775
30776 from += 64;
30777 to += 64;
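The mmx_32.c fixup paths self-patch: `movw $0x1AEB, 1b` overwrites the faulting prefetch at label 1 with a two-byte short jump (stored little-endian as EB 1A, jumping over the 26-byte prefetch block). With KERNEXEC the kernel text is write-protected even from ring 0 through CR0.WP, so each fixup now brackets that write with code that clears and restores bit 16 of CR0: `andl $0xFFFEFFFF` masks exactly that bit, the saved value travels in the new `cr0` output operand, and `%eax` joins the clobbers as scratch. A hedged C equivalent of the open/close pair, which the patch keeps inside the asm so nothing can run in between with WP off:

#include <asm/special_insns.h>		/* read_cr0()/write_cr0() */
#include <asm/processor-flags.h>	/* X86_CR0_WP is bit 16 */

static inline unsigned long kernexec_open_sketch(void)
{
	unsigned long cr0 = read_cr0();

	write_cr0(cr0 & ~X86_CR0_WP);	/* the andl $0xFFFEFFFF step */
	return cr0;
}

static inline void kernexec_close_sketch(unsigned long cr0)
{
	write_cr0(cr0);			/* restore WP */
}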
30778diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30779index f6d13ee..d789440 100644
30780--- a/arch/x86/lib/msr-reg.S
30781+++ b/arch/x86/lib/msr-reg.S
30782@@ -3,6 +3,7 @@
30783 #include <asm/dwarf2.h>
30784 #include <asm/asm.h>
30785 #include <asm/msr.h>
30786+#include <asm/alternative-asm.h>
30787
30788 #ifdef CONFIG_X86_64
30789 /*
30790@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30791 movl %edi, 28(%r10)
30792 popq_cfi %rbp
30793 popq_cfi %rbx
30794+ pax_force_retaddr
30795 ret
30796 3:
30797 CFI_RESTORE_STATE
30798diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30799index fc6ba17..d4d989d 100644
30800--- a/arch/x86/lib/putuser.S
30801+++ b/arch/x86/lib/putuser.S
30802@@ -16,7 +16,9 @@
30803 #include <asm/errno.h>
30804 #include <asm/asm.h>
30805 #include <asm/smap.h>
30806-
30807+#include <asm/segment.h>
30808+#include <asm/pgtable.h>
30809+#include <asm/alternative-asm.h>
30810
30811 /*
30812 * __put_user_X
30813@@ -30,57 +32,125 @@
30814 * as they get called from within inline assembly.
30815 */
30816
30817-#define ENTER CFI_STARTPROC ; \
30818- GET_THREAD_INFO(%_ASM_BX)
30819-#define EXIT ASM_CLAC ; \
30820- ret ; \
30821+#define ENTER CFI_STARTPROC
30822+#define EXIT ASM_CLAC ; \
30823+ pax_force_retaddr ; \
30824+ ret ; \
30825 CFI_ENDPROC
30826
30827+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30828+#define _DEST %_ASM_CX,%_ASM_BX
30829+#else
30830+#define _DEST %_ASM_CX
30831+#endif
30832+
30833+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30834+#define __copyuser_seg gs;
30835+#else
30836+#define __copyuser_seg
30837+#endif
30838+
30839 .text
30840 ENTRY(__put_user_1)
30841 ENTER
30842+
30843+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30844+ GET_THREAD_INFO(%_ASM_BX)
30845 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30846 jae bad_put_user
30847 ASM_STAC
30848-1: movb %al,(%_ASM_CX)
30849+
30850+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30851+ mov pax_user_shadow_base,%_ASM_BX
30852+ cmp %_ASM_BX,%_ASM_CX
30853+ jb 1234f
30854+ xor %ebx,%ebx
30855+1234:
30856+#endif
30857+
30858+#endif
30859+
30860+1: __copyuser_seg movb %al,(_DEST)
30861 xor %eax,%eax
30862 EXIT
30863 ENDPROC(__put_user_1)
30864
30865 ENTRY(__put_user_2)
30866 ENTER
30867+
30868+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30869+ GET_THREAD_INFO(%_ASM_BX)
30870 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30871 sub $1,%_ASM_BX
30872 cmp %_ASM_BX,%_ASM_CX
30873 jae bad_put_user
30874 ASM_STAC
30875-2: movw %ax,(%_ASM_CX)
30876+
30877+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30878+ mov pax_user_shadow_base,%_ASM_BX
30879+ cmp %_ASM_BX,%_ASM_CX
30880+ jb 1234f
30881+ xor %ebx,%ebx
30882+1234:
30883+#endif
30884+
30885+#endif
30886+
30887+2: __copyuser_seg movw %ax,(_DEST)
30888 xor %eax,%eax
30889 EXIT
30890 ENDPROC(__put_user_2)
30891
30892 ENTRY(__put_user_4)
30893 ENTER
30894+
30895+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30896+ GET_THREAD_INFO(%_ASM_BX)
30897 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30898 sub $3,%_ASM_BX
30899 cmp %_ASM_BX,%_ASM_CX
30900 jae bad_put_user
30901 ASM_STAC
30902-3: movl %eax,(%_ASM_CX)
30903+
30904+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30905+ mov pax_user_shadow_base,%_ASM_BX
30906+ cmp %_ASM_BX,%_ASM_CX
30907+ jb 1234f
30908+ xor %ebx,%ebx
30909+1234:
30910+#endif
30911+
30912+#endif
30913+
30914+3: __copyuser_seg movl %eax,(_DEST)
30915 xor %eax,%eax
30916 EXIT
30917 ENDPROC(__put_user_4)
30918
30919 ENTRY(__put_user_8)
30920 ENTER
30921+
30922+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30923+ GET_THREAD_INFO(%_ASM_BX)
30924 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30925 sub $7,%_ASM_BX
30926 cmp %_ASM_BX,%_ASM_CX
30927 jae bad_put_user
30928 ASM_STAC
30929-4: mov %_ASM_AX,(%_ASM_CX)
30930+
30931+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30932+ mov pax_user_shadow_base,%_ASM_BX
30933+ cmp %_ASM_BX,%_ASM_CX
30934+ jb 1234f
30935+ xor %ebx,%ebx
30936+1234:
30937+#endif
30938+
30939+#endif
30940+
30941+4: __copyuser_seg mov %_ASM_AX,(_DEST)
30942 #ifdef CONFIG_X86_32
30943-5: movl %edx,4(%_ASM_CX)
30944+5: __copyuser_seg movl %edx,4(_DEST)
30945 #endif
30946 xor %eax,%eax
30947 EXIT
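On amd64 with UDEREF these stubs stop dereferencing the raw user pointer: userland is also mapped at `pax_user_shadow_base`, the new prologue loads that base into `%_ASM_BX` when the target lies below it (and zeroes it otherwise), and the store goes through `_DEST`, i.e. `(%_ASM_CX,%_ASM_BX)`, the shadow alias. On i386 the same isolation comes from the `gs` override selected by `__copyuser_seg`. A pointer-translation sketch under those assumptions:

/* Sketch of the prologue's address computation; pax_user_shadow_base
 * is the patch's own symbol. */
static inline void __user *uderef_alias_sketch(void __user *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	if (addr < pax_user_shadow_base)	/* raw user pointer: shift it */
		addr += pax_user_shadow_base;
	return (void __user *)addr;		/* already aliased: offset 0 */
}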
30948diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
30949index 5dff5f0..cadebf4 100644
30950--- a/arch/x86/lib/rwsem.S
30951+++ b/arch/x86/lib/rwsem.S
30952@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
30953 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30954 CFI_RESTORE __ASM_REG(dx)
30955 restore_common_regs
30956+ pax_force_retaddr
30957 ret
30958 CFI_ENDPROC
30959 ENDPROC(call_rwsem_down_read_failed)
30960@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
30961 movq %rax,%rdi
30962 call rwsem_down_write_failed
30963 restore_common_regs
30964+ pax_force_retaddr
30965 ret
30966 CFI_ENDPROC
30967 ENDPROC(call_rwsem_down_write_failed)
30968@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
30969 movq %rax,%rdi
30970 call rwsem_wake
30971 restore_common_regs
30972-1: ret
30973+1: pax_force_retaddr
30974+ ret
30975 CFI_ENDPROC
30976 ENDPROC(call_rwsem_wake)
30977
30978@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
30979 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30980 CFI_RESTORE __ASM_REG(dx)
30981 restore_common_regs
30982+ pax_force_retaddr
30983 ret
30984 CFI_ENDPROC
30985 ENDPROC(call_rwsem_downgrade_wake)
30986diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
30987index b30b5eb..2b57052 100644
30988--- a/arch/x86/lib/thunk_64.S
30989+++ b/arch/x86/lib/thunk_64.S
30990@@ -9,6 +9,7 @@
30991 #include <asm/dwarf2.h>
30992 #include <asm/calling.h>
30993 #include <asm/asm.h>
30994+#include <asm/alternative-asm.h>
30995
30996 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
30997 .macro THUNK name, func, put_ret_addr_in_rdi=0
30998@@ -16,11 +17,11 @@
30999 \name:
31000 CFI_STARTPROC
31001
31002- /* this one pushes 9 elems, the next one would be %rIP */
31003- SAVE_ARGS
31004+ /* this one pushes 15+1 elems, the next one would be %rIP */
31005+ SAVE_ARGS 8
31006
31007 .if \put_ret_addr_in_rdi
31008- movq_cfi_restore 9*8, rdi
31009+ movq_cfi_restore RIP, rdi
31010 .endif
31011
31012 call \func
31013@@ -47,9 +48,10 @@
31014
31015 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31016 CFI_STARTPROC
31017- SAVE_ARGS
31018+ SAVE_ARGS 8
31019 restore:
31020- RESTORE_ARGS
31021+ RESTORE_ARGS 1,8
31022+ pax_force_retaddr
31023 ret
31024 CFI_ENDPROC
31025 _ASM_NOKPROBE(restore)
31026diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31027index e2f5e21..4b22130 100644
31028--- a/arch/x86/lib/usercopy_32.c
31029+++ b/arch/x86/lib/usercopy_32.c
31030@@ -42,11 +42,13 @@ do { \
31031 int __d0; \
31032 might_fault(); \
31033 __asm__ __volatile__( \
31034+ __COPYUSER_SET_ES \
31035 ASM_STAC "\n" \
31036 "0: rep; stosl\n" \
31037 " movl %2,%0\n" \
31038 "1: rep; stosb\n" \
31039 "2: " ASM_CLAC "\n" \
31040+ __COPYUSER_RESTORE_ES \
31041 ".section .fixup,\"ax\"\n" \
31042 "3: lea 0(%2,%0,4),%0\n" \
31043 " jmp 2b\n" \
31044@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31045
31046 #ifdef CONFIG_X86_INTEL_USERCOPY
31047 static unsigned long
31048-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31049+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31050 {
31051 int d0, d1;
31052 __asm__ __volatile__(
31053@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31054 " .align 2,0x90\n"
31055 "3: movl 0(%4), %%eax\n"
31056 "4: movl 4(%4), %%edx\n"
31057- "5: movl %%eax, 0(%3)\n"
31058- "6: movl %%edx, 4(%3)\n"
31059+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31060+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31061 "7: movl 8(%4), %%eax\n"
31062 "8: movl 12(%4),%%edx\n"
31063- "9: movl %%eax, 8(%3)\n"
31064- "10: movl %%edx, 12(%3)\n"
31065+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31066+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31067 "11: movl 16(%4), %%eax\n"
31068 "12: movl 20(%4), %%edx\n"
31069- "13: movl %%eax, 16(%3)\n"
31070- "14: movl %%edx, 20(%3)\n"
31071+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31072+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31073 "15: movl 24(%4), %%eax\n"
31074 "16: movl 28(%4), %%edx\n"
31075- "17: movl %%eax, 24(%3)\n"
31076- "18: movl %%edx, 28(%3)\n"
31077+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31078+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31079 "19: movl 32(%4), %%eax\n"
31080 "20: movl 36(%4), %%edx\n"
31081- "21: movl %%eax, 32(%3)\n"
31082- "22: movl %%edx, 36(%3)\n"
31083+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31084+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31085 "23: movl 40(%4), %%eax\n"
31086 "24: movl 44(%4), %%edx\n"
31087- "25: movl %%eax, 40(%3)\n"
31088- "26: movl %%edx, 44(%3)\n"
31089+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31090+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31091 "27: movl 48(%4), %%eax\n"
31092 "28: movl 52(%4), %%edx\n"
31093- "29: movl %%eax, 48(%3)\n"
31094- "30: movl %%edx, 52(%3)\n"
31095+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31096+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31097 "31: movl 56(%4), %%eax\n"
31098 "32: movl 60(%4), %%edx\n"
31099- "33: movl %%eax, 56(%3)\n"
31100- "34: movl %%edx, 60(%3)\n"
31101+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31102+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31103 " addl $-64, %0\n"
31104 " addl $64, %4\n"
31105 " addl $64, %3\n"
31106@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31107 " shrl $2, %0\n"
31108 " andl $3, %%eax\n"
31109 " cld\n"
31110+ __COPYUSER_SET_ES
31111 "99: rep; movsl\n"
31112 "36: movl %%eax, %0\n"
31113 "37: rep; movsb\n"
31114 "100:\n"
31115+ __COPYUSER_RESTORE_ES
31116+ ".section .fixup,\"ax\"\n"
31117+ "101: lea 0(%%eax,%0,4),%0\n"
31118+ " jmp 100b\n"
31119+ ".previous\n"
31120+ _ASM_EXTABLE(1b,100b)
31121+ _ASM_EXTABLE(2b,100b)
31122+ _ASM_EXTABLE(3b,100b)
31123+ _ASM_EXTABLE(4b,100b)
31124+ _ASM_EXTABLE(5b,100b)
31125+ _ASM_EXTABLE(6b,100b)
31126+ _ASM_EXTABLE(7b,100b)
31127+ _ASM_EXTABLE(8b,100b)
31128+ _ASM_EXTABLE(9b,100b)
31129+ _ASM_EXTABLE(10b,100b)
31130+ _ASM_EXTABLE(11b,100b)
31131+ _ASM_EXTABLE(12b,100b)
31132+ _ASM_EXTABLE(13b,100b)
31133+ _ASM_EXTABLE(14b,100b)
31134+ _ASM_EXTABLE(15b,100b)
31135+ _ASM_EXTABLE(16b,100b)
31136+ _ASM_EXTABLE(17b,100b)
31137+ _ASM_EXTABLE(18b,100b)
31138+ _ASM_EXTABLE(19b,100b)
31139+ _ASM_EXTABLE(20b,100b)
31140+ _ASM_EXTABLE(21b,100b)
31141+ _ASM_EXTABLE(22b,100b)
31142+ _ASM_EXTABLE(23b,100b)
31143+ _ASM_EXTABLE(24b,100b)
31144+ _ASM_EXTABLE(25b,100b)
31145+ _ASM_EXTABLE(26b,100b)
31146+ _ASM_EXTABLE(27b,100b)
31147+ _ASM_EXTABLE(28b,100b)
31148+ _ASM_EXTABLE(29b,100b)
31149+ _ASM_EXTABLE(30b,100b)
31150+ _ASM_EXTABLE(31b,100b)
31151+ _ASM_EXTABLE(32b,100b)
31152+ _ASM_EXTABLE(33b,100b)
31153+ _ASM_EXTABLE(34b,100b)
31154+ _ASM_EXTABLE(35b,100b)
31155+ _ASM_EXTABLE(36b,100b)
31156+ _ASM_EXTABLE(37b,100b)
31157+ _ASM_EXTABLE(99b,101b)
31158+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31159+ : "1"(to), "2"(from), "0"(size)
31160+ : "eax", "edx", "memory");
31161+ return size;
31162+}
31163+
31164+static unsigned long
31165+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31166+{
31167+ int d0, d1;
31168+ __asm__ __volatile__(
31169+ " .align 2,0x90\n"
31170+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31171+ " cmpl $67, %0\n"
31172+ " jbe 3f\n"
31173+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31174+ " .align 2,0x90\n"
31175+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31176+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31177+ "5: movl %%eax, 0(%3)\n"
31178+ "6: movl %%edx, 4(%3)\n"
31179+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31180+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31181+ "9: movl %%eax, 8(%3)\n"
31182+ "10: movl %%edx, 12(%3)\n"
31183+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31184+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31185+ "13: movl %%eax, 16(%3)\n"
31186+ "14: movl %%edx, 20(%3)\n"
31187+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31188+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31189+ "17: movl %%eax, 24(%3)\n"
31190+ "18: movl %%edx, 28(%3)\n"
31191+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31192+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31193+ "21: movl %%eax, 32(%3)\n"
31194+ "22: movl %%edx, 36(%3)\n"
31195+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31196+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31197+ "25: movl %%eax, 40(%3)\n"
31198+ "26: movl %%edx, 44(%3)\n"
31199+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31200+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31201+ "29: movl %%eax, 48(%3)\n"
31202+ "30: movl %%edx, 52(%3)\n"
31203+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31204+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31205+ "33: movl %%eax, 56(%3)\n"
31206+ "34: movl %%edx, 60(%3)\n"
31207+ " addl $-64, %0\n"
31208+ " addl $64, %4\n"
31209+ " addl $64, %3\n"
31210+ " cmpl $63, %0\n"
31211+ " ja 1b\n"
31212+ "35: movl %0, %%eax\n"
31213+ " shrl $2, %0\n"
31214+ " andl $3, %%eax\n"
31215+ " cld\n"
31216+ "99: rep; "__copyuser_seg" movsl\n"
31217+ "36: movl %%eax, %0\n"
31218+ "37: rep; "__copyuser_seg" movsb\n"
31219+ "100:\n"
31220 ".section .fixup,\"ax\"\n"
31221 "101: lea 0(%%eax,%0,4),%0\n"
31222 " jmp 100b\n"
31223@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31224 int d0, d1;
31225 __asm__ __volatile__(
31226 " .align 2,0x90\n"
31227- "0: movl 32(%4), %%eax\n"
31228+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31229 " cmpl $67, %0\n"
31230 " jbe 2f\n"
31231- "1: movl 64(%4), %%eax\n"
31232+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31233 " .align 2,0x90\n"
31234- "2: movl 0(%4), %%eax\n"
31235- "21: movl 4(%4), %%edx\n"
31236+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31237+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31238 " movl %%eax, 0(%3)\n"
31239 " movl %%edx, 4(%3)\n"
31240- "3: movl 8(%4), %%eax\n"
31241- "31: movl 12(%4),%%edx\n"
31242+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31243+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31244 " movl %%eax, 8(%3)\n"
31245 " movl %%edx, 12(%3)\n"
31246- "4: movl 16(%4), %%eax\n"
31247- "41: movl 20(%4), %%edx\n"
31248+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31249+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31250 " movl %%eax, 16(%3)\n"
31251 " movl %%edx, 20(%3)\n"
31252- "10: movl 24(%4), %%eax\n"
31253- "51: movl 28(%4), %%edx\n"
31254+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31255+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31256 " movl %%eax, 24(%3)\n"
31257 " movl %%edx, 28(%3)\n"
31258- "11: movl 32(%4), %%eax\n"
31259- "61: movl 36(%4), %%edx\n"
31260+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31261+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31262 " movl %%eax, 32(%3)\n"
31263 " movl %%edx, 36(%3)\n"
31264- "12: movl 40(%4), %%eax\n"
31265- "71: movl 44(%4), %%edx\n"
31266+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31267+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31268 " movl %%eax, 40(%3)\n"
31269 " movl %%edx, 44(%3)\n"
31270- "13: movl 48(%4), %%eax\n"
31271- "81: movl 52(%4), %%edx\n"
31272+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31273+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31274 " movl %%eax, 48(%3)\n"
31275 " movl %%edx, 52(%3)\n"
31276- "14: movl 56(%4), %%eax\n"
31277- "91: movl 60(%4), %%edx\n"
31278+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31279+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31280 " movl %%eax, 56(%3)\n"
31281 " movl %%edx, 60(%3)\n"
31282 " addl $-64, %0\n"
31283@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31284 " shrl $2, %0\n"
31285 " andl $3, %%eax\n"
31286 " cld\n"
31287- "6: rep; movsl\n"
31288+ "6: rep; "__copyuser_seg" movsl\n"
31289 " movl %%eax,%0\n"
31290- "7: rep; movsb\n"
31291+ "7: rep; "__copyuser_seg" movsb\n"
31292 "8:\n"
31293 ".section .fixup,\"ax\"\n"
31294 "9: lea 0(%%eax,%0,4),%0\n"
31295@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31296
31297 __asm__ __volatile__(
31298 " .align 2,0x90\n"
31299- "0: movl 32(%4), %%eax\n"
31300+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31301 " cmpl $67, %0\n"
31302 " jbe 2f\n"
31303- "1: movl 64(%4), %%eax\n"
31304+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31305 " .align 2,0x90\n"
31306- "2: movl 0(%4), %%eax\n"
31307- "21: movl 4(%4), %%edx\n"
31308+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31309+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31310 " movnti %%eax, 0(%3)\n"
31311 " movnti %%edx, 4(%3)\n"
31312- "3: movl 8(%4), %%eax\n"
31313- "31: movl 12(%4),%%edx\n"
31314+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31315+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31316 " movnti %%eax, 8(%3)\n"
31317 " movnti %%edx, 12(%3)\n"
31318- "4: movl 16(%4), %%eax\n"
31319- "41: movl 20(%4), %%edx\n"
31320+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31321+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31322 " movnti %%eax, 16(%3)\n"
31323 " movnti %%edx, 20(%3)\n"
31324- "10: movl 24(%4), %%eax\n"
31325- "51: movl 28(%4), %%edx\n"
31326+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31327+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31328 " movnti %%eax, 24(%3)\n"
31329 " movnti %%edx, 28(%3)\n"
31330- "11: movl 32(%4), %%eax\n"
31331- "61: movl 36(%4), %%edx\n"
31332+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31333+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31334 " movnti %%eax, 32(%3)\n"
31335 " movnti %%edx, 36(%3)\n"
31336- "12: movl 40(%4), %%eax\n"
31337- "71: movl 44(%4), %%edx\n"
31338+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31339+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31340 " movnti %%eax, 40(%3)\n"
31341 " movnti %%edx, 44(%3)\n"
31342- "13: movl 48(%4), %%eax\n"
31343- "81: movl 52(%4), %%edx\n"
31344+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31345+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31346 " movnti %%eax, 48(%3)\n"
31347 " movnti %%edx, 52(%3)\n"
31348- "14: movl 56(%4), %%eax\n"
31349- "91: movl 60(%4), %%edx\n"
31350+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31351+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31352 " movnti %%eax, 56(%3)\n"
31353 " movnti %%edx, 60(%3)\n"
31354 " addl $-64, %0\n"
31355@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31356 " shrl $2, %0\n"
31357 " andl $3, %%eax\n"
31358 " cld\n"
31359- "6: rep; movsl\n"
31360+ "6: rep; "__copyuser_seg" movsl\n"
31361 " movl %%eax,%0\n"
31362- "7: rep; movsb\n"
31363+ "7: rep; "__copyuser_seg" movsb\n"
31364 "8:\n"
31365 ".section .fixup,\"ax\"\n"
31366 "9: lea 0(%%eax,%0,4),%0\n"
31367@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31368
31369 __asm__ __volatile__(
31370 " .align 2,0x90\n"
31371- "0: movl 32(%4), %%eax\n"
31372+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31373 " cmpl $67, %0\n"
31374 " jbe 2f\n"
31375- "1: movl 64(%4), %%eax\n"
31376+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31377 " .align 2,0x90\n"
31378- "2: movl 0(%4), %%eax\n"
31379- "21: movl 4(%4), %%edx\n"
31380+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31381+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31382 " movnti %%eax, 0(%3)\n"
31383 " movnti %%edx, 4(%3)\n"
31384- "3: movl 8(%4), %%eax\n"
31385- "31: movl 12(%4),%%edx\n"
31386+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31387+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31388 " movnti %%eax, 8(%3)\n"
31389 " movnti %%edx, 12(%3)\n"
31390- "4: movl 16(%4), %%eax\n"
31391- "41: movl 20(%4), %%edx\n"
31392+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31393+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31394 " movnti %%eax, 16(%3)\n"
31395 " movnti %%edx, 20(%3)\n"
31396- "10: movl 24(%4), %%eax\n"
31397- "51: movl 28(%4), %%edx\n"
31398+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31399+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31400 " movnti %%eax, 24(%3)\n"
31401 " movnti %%edx, 28(%3)\n"
31402- "11: movl 32(%4), %%eax\n"
31403- "61: movl 36(%4), %%edx\n"
31404+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31405+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31406 " movnti %%eax, 32(%3)\n"
31407 " movnti %%edx, 36(%3)\n"
31408- "12: movl 40(%4), %%eax\n"
31409- "71: movl 44(%4), %%edx\n"
31410+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31411+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31412 " movnti %%eax, 40(%3)\n"
31413 " movnti %%edx, 44(%3)\n"
31414- "13: movl 48(%4), %%eax\n"
31415- "81: movl 52(%4), %%edx\n"
31416+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31417+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31418 " movnti %%eax, 48(%3)\n"
31419 " movnti %%edx, 52(%3)\n"
31420- "14: movl 56(%4), %%eax\n"
31421- "91: movl 60(%4), %%edx\n"
31422+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31423+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31424 " movnti %%eax, 56(%3)\n"
31425 " movnti %%edx, 60(%3)\n"
31426 " addl $-64, %0\n"
31427@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31428 " shrl $2, %0\n"
31429 " andl $3, %%eax\n"
31430 " cld\n"
31431- "6: rep; movsl\n"
31432+ "6: rep; "__copyuser_seg" movsl\n"
31433 " movl %%eax,%0\n"
31434- "7: rep; movsb\n"
31435+ "7: rep; "__copyuser_seg" movsb\n"
31436 "8:\n"
31437 ".section .fixup,\"ax\"\n"
31438 "9: lea 0(%%eax,%0,4),%0\n"
31439@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31440 */
31441 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31442 unsigned long size);
31443-unsigned long __copy_user_intel(void __user *to, const void *from,
31444+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31445+ unsigned long size);
31446+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31447 unsigned long size);
31448 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31449 const void __user *from, unsigned long size);
31450 #endif /* CONFIG_X86_INTEL_USERCOPY */
31451
31452 /* Generic arbitrary sized copy. */
31453-#define __copy_user(to, from, size) \
31454+#define __copy_user(to, from, size, prefix, set, restore) \
31455 do { \
31456 int __d0, __d1, __d2; \
31457 __asm__ __volatile__( \
31458+ set \
31459 " cmp $7,%0\n" \
31460 " jbe 1f\n" \
31461 " movl %1,%0\n" \
31462 " negl %0\n" \
31463 " andl $7,%0\n" \
31464 " subl %0,%3\n" \
31465- "4: rep; movsb\n" \
31466+ "4: rep; "prefix"movsb\n" \
31467 " movl %3,%0\n" \
31468 " shrl $2,%0\n" \
31469 " andl $3,%3\n" \
31470 " .align 2,0x90\n" \
31471- "0: rep; movsl\n" \
31472+ "0: rep; "prefix"movsl\n" \
31473 " movl %3,%0\n" \
31474- "1: rep; movsb\n" \
31475+ "1: rep; "prefix"movsb\n" \
31476 "2:\n" \
31477+ restore \
31478 ".section .fixup,\"ax\"\n" \
31479 "5: addl %3,%0\n" \
31480 " jmp 2b\n" \
31481@@ -538,14 +650,14 @@ do { \
31482 " negl %0\n" \
31483 " andl $7,%0\n" \
31484 " subl %0,%3\n" \
31485- "4: rep; movsb\n" \
31486+ "4: rep; "__copyuser_seg"movsb\n" \
31487 " movl %3,%0\n" \
31488 " shrl $2,%0\n" \
31489 " andl $3,%3\n" \
31490 " .align 2,0x90\n" \
31491- "0: rep; movsl\n" \
31492+ "0: rep; "__copyuser_seg"movsl\n" \
31493 " movl %3,%0\n" \
31494- "1: rep; movsb\n" \
31495+ "1: rep; "__copyuser_seg"movsb\n" \
31496 "2:\n" \
31497 ".section .fixup,\"ax\"\n" \
31498 "5: addl %3,%0\n" \
31499@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31500 {
31501 stac();
31502 if (movsl_is_ok(to, from, n))
31503- __copy_user(to, from, n);
31504+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31505 else
31506- n = __copy_user_intel(to, from, n);
31507+ n = __generic_copy_to_user_intel(to, from, n);
31508 clac();
31509 return n;
31510 }
31511@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31512 {
31513 stac();
31514 if (movsl_is_ok(to, from, n))
31515- __copy_user(to, from, n);
31516+ __copy_user(to, from, n, __copyuser_seg, "", "");
31517 else
31518- n = __copy_user_intel((void __user *)to,
31519- (const void *)from, n);
31520+ n = __generic_copy_from_user_intel(to, from, n);
31521 clac();
31522 return n;
31523 }
31524@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31525 if (n > 64 && cpu_has_xmm2)
31526 n = __copy_user_intel_nocache(to, from, n);
31527 else
31528- __copy_user(to, from, n);
31529+ __copy_user(to, from, n, __copyuser_seg, "", "");
31530 #else
31531- __copy_user(to, from, n);
31532+ __copy_user(to, from, n, __copyuser_seg, "", "");
31533 #endif
31534 clac();
31535 return n;
31536 }
31537 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31538
31539-/**
31540- * copy_to_user: - Copy a block of data into user space.
31541- * @to: Destination address, in user space.
31542- * @from: Source address, in kernel space.
31543- * @n: Number of bytes to copy.
31544- *
31545- * Context: User context only. This function may sleep.
31546- *
31547- * Copy data from kernel space to user space.
31548- *
31549- * Returns number of bytes that could not be copied.
31550- * On success, this will be zero.
31551- */
31552-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31553+#ifdef CONFIG_PAX_MEMORY_UDEREF
31554+void __set_fs(mm_segment_t x)
31555 {
31556- if (access_ok(VERIFY_WRITE, to, n))
31557- n = __copy_to_user(to, from, n);
31558- return n;
31559+ switch (x.seg) {
31560+ case 0:
31561+ loadsegment(gs, 0);
31562+ break;
31563+ case TASK_SIZE_MAX:
31564+ loadsegment(gs, __USER_DS);
31565+ break;
31566+ case -1UL:
31567+ loadsegment(gs, __KERNEL_DS);
31568+ break;
31569+ default:
31570+ BUG();
31571+ }
31572 }
31573-EXPORT_SYMBOL(_copy_to_user);
31574+EXPORT_SYMBOL(__set_fs);
31575
31576-/**
31577- * copy_from_user: - Copy a block of data from user space.
31578- * @to: Destination address, in kernel space.
31579- * @from: Source address, in user space.
31580- * @n: Number of bytes to copy.
31581- *
31582- * Context: User context only. This function may sleep.
31583- *
31584- * Copy data from user space to kernel space.
31585- *
31586- * Returns number of bytes that could not be copied.
31587- * On success, this will be zero.
31588- *
31589- * If some data could not be copied, this function will pad the copied
31590- * data to the requested size using zero bytes.
31591- */
31592-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31593+void set_fs(mm_segment_t x)
31594 {
31595- if (access_ok(VERIFY_READ, from, n))
31596- n = __copy_from_user(to, from, n);
31597- else
31598- memset(to, 0, n);
31599- return n;
31600+ current_thread_info()->addr_limit = x;
31601+ __set_fs(x);
31602 }
31603-EXPORT_SYMBOL(_copy_from_user);
31604+EXPORT_SYMBOL(set_fs);
31605+#endif
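The tail of usercopy_32.c swaps the generic `_copy_to_user`/`_copy_from_user` wrappers for UDEREF's segment plumbing: `addr_limit` no longer only bounds pointer checks, it decides which descriptor `%gs` holds, so the `gs`-prefixed moves above are the kernel's only path to user memory. The three `switch` cases mirror the possible limits: 0 (user access closed), `TASK_SIZE_MAX` (`USER_DS`), and `-1UL` (`KERNEL_DS`). Callers keep the long-standing idiom, which now reloads `%gs` underneath; a sketch with a hypothetical callee:

static int kernel_ds_call_sketch(void)
{
	mm_segment_t old_fs = get_fs();
	int err;

	set_fs(KERNEL_DS);		/* %gs now selects __KERNEL_DS */
	err = do_user_pointer_work();	/* hypothetical helper taking __user args */
	set_fs(old_fs);			/* restore the limit, reloading %gs */
	return err;
}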
31606diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31607index c905e89..01ab928 100644
31608--- a/arch/x86/lib/usercopy_64.c
31609+++ b/arch/x86/lib/usercopy_64.c
31610@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31611 might_fault();
31612 /* no memory constraint because it doesn't change any memory gcc knows
31613 about */
31614+ pax_open_userland();
31615 stac();
31616 asm volatile(
31617 " testq %[size8],%[size8]\n"
31618@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31619 _ASM_EXTABLE(0b,3b)
31620 _ASM_EXTABLE(1b,2b)
31621 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31622- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31623+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31624 [zero] "r" (0UL), [eight] "r" (8UL));
31625 clac();
31626+ pax_close_userland();
31627 return size;
31628 }
31629 EXPORT_SYMBOL(__clear_user);
31630@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31631 }
31632 EXPORT_SYMBOL(clear_user);
31633
31634-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31635+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31636 {
31637- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31638- return copy_user_generic((__force void *)to, (__force void *)from, len);
31639- }
31640- return len;
31641+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31642+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31643+ return len;
31644 }
31645 EXPORT_SYMBOL(copy_in_user);
31646
31647@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31648 * it is not necessary to optimize tail handling.
31649 */
31650 __visible unsigned long
31651-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31652+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31653 {
31654 char c;
31655 unsigned zero_len;
31656
31657+ clac();
31658+ pax_close_userland();
31659 for (; len; --len, to++) {
31660 if (__get_user_nocheck(c, from++, sizeof(char)))
31661 break;
31662@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31663 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31664 if (__put_user_nocheck(c, to++, sizeof(char)))
31665 break;
31666- clac();
31667 return len;
31668 }
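usercopy_64.c gets the matching treatment: `__clear_user()` brackets its asm with `pax_open_userland()`/`pax_close_userland()` and routes the destination through `____m()`, while `copy_user_handle_tail()` drops SMAP and userland access on entry because its per-byte `__get_user_nocheck`/`__put_user_nocheck` calls reopen them as needed. `____m()` looks like the C-side twin of the shadow-base fixup seen in putuser.S; its assumed shape, for illustration only:

/* Assumed shape of ____m(): the same below-the-shadow-base test as the
 * putuser.S prologue, as an expression macro. */
#define ____m_sketch(x) ({						\
	unsigned long ____addr = (unsigned long)(x);			\
	if (____addr < pax_user_shadow_base)				\
		____addr += pax_user_shadow_base;			\
	(typeof(x))____addr;						\
})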
31669diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31670index ecfdc46..55b9309 100644
31671--- a/arch/x86/mm/Makefile
31672+++ b/arch/x86/mm/Makefile
31673@@ -32,3 +32,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31674 obj-$(CONFIG_MEMTEST) += memtest.o
31675
31676 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31677+
31678+quote:="
31679+obj-$(CONFIG_X86_64) += uderef_64.o
31680+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31681diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31682index 903ec1e..c4166b2 100644
31683--- a/arch/x86/mm/extable.c
31684+++ b/arch/x86/mm/extable.c
31685@@ -6,12 +6,24 @@
31686 static inline unsigned long
31687 ex_insn_addr(const struct exception_table_entry *x)
31688 {
31689- return (unsigned long)&x->insn + x->insn;
31690+ unsigned long reloc = 0;
31691+
31692+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31693+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31694+#endif
31695+
31696+ return (unsigned long)&x->insn + x->insn + reloc;
31697 }
31698 static inline unsigned long
31699 ex_fixup_addr(const struct exception_table_entry *x)
31700 {
31701- return (unsigned long)&x->fixup + x->fixup;
31702+ unsigned long reloc = 0;
31703+
31704+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31705+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31706+#endif
31707+
31708+ return (unsigned long)&x->fixup + x->fixup + reloc;
31709 }
31710
31711 int fixup_exception(struct pt_regs *regs)
31712@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31713 unsigned long new_ip;
31714
31715 #ifdef CONFIG_PNPBIOS
31716- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31717+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31718 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31719 extern u32 pnp_bios_is_utter_crap;
31720 pnp_bios_is_utter_crap = 1;
31721@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31722 i += 4;
31723 p->fixup -= i;
31724 i += 4;
31725+
31726+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31727+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31728+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31729+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31730+#endif
31731+
31732 }
31733 }
31734
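The extable helpers decode 32-bit self-relative entries, so when i386 KERNEXEC moves the kernel physically (`____LOAD_PHYSICAL_ADDR` differing from the link-time `LOAD_PHYSICAL_ADDR`) every decoded address must be biased by that delta; `sort_extable()` subtracts the same delta from the stored offsets, presumably so the runtime addition in the decoders does not double-count it for tables built from already-relocated addresses. A worked example with made-up numbers:

static unsigned long decode_extable_sketch(void)
{
	unsigned long entry_addr = 0xc1002000UL;	/* &x->insn, hypothetical */
	int insn_off = -0x1000;				/* stored relative field */
	unsigned long delta = 0x1000000UL;		/* hypothetical relocation */

	return entry_addr + insn_off + delta;		/* 0xc2001000 */
}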
31735diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31736index e3ff27a..f38f7c0 100644
31737--- a/arch/x86/mm/fault.c
31738+++ b/arch/x86/mm/fault.c
31739@@ -13,12 +13,19 @@
31740 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31741 #include <linux/prefetch.h> /* prefetchw */
31742 #include <linux/context_tracking.h> /* exception_enter(), ... */
31743+#include <linux/unistd.h>
31744+#include <linux/compiler.h>
31745
31746 #include <asm/traps.h> /* dotraplinkage, ... */
31747 #include <asm/pgalloc.h> /* pgd_*(), ... */
31748 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31749 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31750 #include <asm/vsyscall.h> /* emulate_vsyscall */
31751+#include <asm/tlbflush.h>
31752+
31753+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31754+#include <asm/stacktrace.h>
31755+#endif
31756
31757 #define CREATE_TRACE_POINTS
31758 #include <asm/trace/exceptions.h>
31759@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31760 int ret = 0;
31761
31762 /* kprobe_running() needs smp_processor_id() */
31763- if (kprobes_built_in() && !user_mode_vm(regs)) {
31764+ if (kprobes_built_in() && !user_mode(regs)) {
31765 preempt_disable();
31766 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31767 ret = 1;
31768@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31769 return !instr_lo || (instr_lo>>1) == 1;
31770 case 0x00:
31771 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31772- if (probe_kernel_address(instr, opcode))
31773+ if (user_mode(regs)) {
31774+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31775+ return 0;
31776+ } else if (probe_kernel_address(instr, opcode))
31777 return 0;
31778
31779 *prefetch = (instr_lo == 0xF) &&
31780@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31781 while (instr < max_instr) {
31782 unsigned char opcode;
31783
31784- if (probe_kernel_address(instr, opcode))
31785+ if (user_mode(regs)) {
31786+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31787+ break;
31788+ } else if (probe_kernel_address(instr, opcode))
31789 break;
31790
31791 instr++;
31792@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31793 force_sig_info(si_signo, &info, tsk);
31794 }
31795
31796+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31797+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31798+#endif
31799+
31800+#ifdef CONFIG_PAX_EMUTRAMP
31801+static int pax_handle_fetch_fault(struct pt_regs *regs);
31802+#endif
31803+
31804+#ifdef CONFIG_PAX_PAGEEXEC
31805+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31806+{
31807+ pgd_t *pgd;
31808+ pud_t *pud;
31809+ pmd_t *pmd;
31810+
31811+ pgd = pgd_offset(mm, address);
31812+ if (!pgd_present(*pgd))
31813+ return NULL;
31814+ pud = pud_offset(pgd, address);
31815+ if (!pud_present(*pud))
31816+ return NULL;
31817+ pmd = pmd_offset(pud, address);
31818+ if (!pmd_present(*pmd))
31819+ return NULL;
31820+ return pmd;
31821+}
31822+#endif
31823+
31824 DEFINE_SPINLOCK(pgd_lock);
31825 LIST_HEAD(pgd_list);
31826
31827@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31828 for (address = VMALLOC_START & PMD_MASK;
31829 address >= TASK_SIZE && address < FIXADDR_TOP;
31830 address += PMD_SIZE) {
31831+
31832+#ifdef CONFIG_PAX_PER_CPU_PGD
31833+ unsigned long cpu;
31834+#else
31835 struct page *page;
31836+#endif
31837
31838 spin_lock(&pgd_lock);
31839+
31840+#ifdef CONFIG_PAX_PER_CPU_PGD
31841+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31842+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31843+ pmd_t *ret;
31844+
31845+ ret = vmalloc_sync_one(pgd, address);
31846+ if (!ret)
31847+ break;
31848+ pgd = get_cpu_pgd(cpu, kernel);
31849+#else
31850 list_for_each_entry(page, &pgd_list, lru) {
31851+ pgd_t *pgd;
31852 spinlock_t *pgt_lock;
31853 pmd_t *ret;
31854
31855@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31856 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31857
31858 spin_lock(pgt_lock);
31859- ret = vmalloc_sync_one(page_address(page), address);
31860+ pgd = page_address(page);
31861+#endif
31862+
31863+ ret = vmalloc_sync_one(pgd, address);
31864+
31865+#ifndef CONFIG_PAX_PER_CPU_PGD
31866 spin_unlock(pgt_lock);
31867+#endif
31868
31869 if (!ret)
31870 break;
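With `PAX_PER_CPU_PGD` there is no global `pgd_list` to walk: every CPU owns a user/kernel pair of top-level page tables, so `vmalloc_sync_all()` iterates CPUs and pushes the new mapping into both pgds of each one. The patched loop, condensed to its control flow (a sketch with the locking stripped, using the patch's own `get_cpu_pgd()` accessor):

static void vmalloc_sync_percpu_sketch(unsigned long address)
{
	unsigned long cpu;

	for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
		if (!vmalloc_sync_one(get_cpu_pgd(cpu, user), address))
			break;
		if (!vmalloc_sync_one(get_cpu_pgd(cpu, kernel), address))
			break;
	}
}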
31871@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
31872 * an interrupt in the middle of a task switch..
31873 */
31874 pgd_paddr = read_cr3();
31875+
31876+#ifdef CONFIG_PAX_PER_CPU_PGD
31877+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31878+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31879+#endif
31880+
31881 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31882 if (!pmd_k)
31883 return -1;
31884@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
31885 * happen within a race in page table update. In the later
31886 * case just flush:
31887 */
31888- pgd = pgd_offset(current->active_mm, address);
31889+
31890 pgd_ref = pgd_offset_k(address);
31891 if (pgd_none(*pgd_ref))
31892 return -1;
31893
31894+#ifdef CONFIG_PAX_PER_CPU_PGD
31895+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31896+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31897+ if (pgd_none(*pgd)) {
31898+ set_pgd(pgd, *pgd_ref);
31899+ arch_flush_lazy_mmu_mode();
31900+ } else {
31901+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31902+ }
31903+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31904+#else
31905+ pgd = pgd_offset(current->active_mm, address);
31906+#endif
31907+
31908 if (pgd_none(*pgd)) {
31909 set_pgd(pgd, *pgd_ref);
31910 arch_flush_lazy_mmu_mode();
31911@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31912 static int is_errata100(struct pt_regs *regs, unsigned long address)
31913 {
31914 #ifdef CONFIG_X86_64
31915- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31916+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31917 return 1;
31918 #endif
31919 return 0;
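A small readability fix rides along in `is_errata100()`: the magic `(1<<2)` is the TI (table indicator) bit of an x86 segment selector, for which the kernel already defines `SEGMENT_LDT`. For reference:

/* Selector layout: bits 15..3 index, bit 2 TI (0 = GDT, 1 = LDT),
 * bits 1..0 RPL; SEGMENT_LDT names the TI bit. */
static inline bool selector_references_ldt(unsigned short sel)
{
	return sel & SEGMENT_LDT;	/* equivalent to sel & (1 << 2) */
}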
31920@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
31921 }
31922
31923 static const char nx_warning[] = KERN_CRIT
31924-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
31925+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
31926 static const char smep_warning[] = KERN_CRIT
31927-"unable to execute userspace code (SMEP?) (uid: %d)\n";
31928+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
31929
31930 static void
31931 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31932@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31933 if (!oops_may_print())
31934 return;
31935
31936- if (error_code & PF_INSTR) {
31937+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
31938 unsigned int level;
31939 pgd_t *pgd;
31940 pte_t *pte;
31941@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31942 pte = lookup_address_in_pgd(pgd, address, &level);
31943
31944 if (pte && pte_present(*pte) && !pte_exec(*pte))
31945- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
31946+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31947 if (pte && pte_present(*pte) && pte_exec(*pte) &&
31948 (pgd_flags(*pgd) & _PAGE_USER) &&
31949 (read_cr4() & X86_CR4_SMEP))
31950- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
31951+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31952 }
31953
31954+#ifdef CONFIG_PAX_KERNEXEC
31955+ if (init_mm.start_code <= address && address < init_mm.end_code) {
31956+ if (current->signal->curr_ip)
31957+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
31958+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
31959+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31960+ else
31961+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
31962+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31963+ }
31964+#endif
31965+
31966 printk(KERN_ALERT "BUG: unable to handle kernel ");
31967 if (address < PAGE_SIZE)
31968 printk(KERN_CONT "NULL pointer dereference");
31969@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
31970 return;
31971 }
31972 #endif
31973+
31974+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31975+ if (pax_is_fetch_fault(regs, error_code, address)) {
31976+
31977+#ifdef CONFIG_PAX_EMUTRAMP
31978+ switch (pax_handle_fetch_fault(regs)) {
31979+ case 2:
31980+ return;
31981+ }
31982+#endif
31983+
31984+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
31985+ do_group_exit(SIGKILL);
31986+ }
31987+#endif
31988+
31989 /* Kernel addresses are always protection faults: */
31990 if (address >= TASK_SIZE)
31991 error_code |= PF_PROT;
31992@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
31993 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
31994 printk(KERN_ERR
31995 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
31996- tsk->comm, tsk->pid, address);
31997+ tsk->comm, task_pid_nr(tsk), address);
31998 code = BUS_MCEERR_AR;
31999 }
32000 #endif
32001@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32002 return 1;
32003 }
32004
32005+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32006+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32007+{
32008+ pte_t *pte;
32009+ pmd_t *pmd;
32010+ spinlock_t *ptl;
32011+ unsigned char pte_mask;
32012+
32013+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32014+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32015+ return 0;
32016+
32017+ /* PaX: it's our fault, let's handle it if we can */
32018+
32019+ /* PaX: take a look at read faults before acquiring any locks */
32020+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32021+ /* instruction fetch attempt from a protected page in user mode */
32022+ up_read(&mm->mmap_sem);
32023+
32024+#ifdef CONFIG_PAX_EMUTRAMP
32025+ switch (pax_handle_fetch_fault(regs)) {
32026+ case 2:
32027+ return 1;
32028+ }
32029+#endif
32030+
32031+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32032+ do_group_exit(SIGKILL);
32033+ }
32034+
32035+ pmd = pax_get_pmd(mm, address);
32036+ if (unlikely(!pmd))
32037+ return 0;
32038+
32039+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32040+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32041+ pte_unmap_unlock(pte, ptl);
32042+ return 0;
32043+ }
32044+
32045+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32046+ /* write attempt to a protected page in user mode */
32047+ pte_unmap_unlock(pte, ptl);
32048+ return 0;
32049+ }
32050+
32051+#ifdef CONFIG_SMP
32052+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32053+#else
32054+ if (likely(address > get_limit(regs->cs)))
32055+#endif
32056+ {
32057+ set_pte(pte, pte_mkread(*pte));
32058+ __flush_tlb_one(address);
32059+ pte_unmap_unlock(pte, ptl);
32060+ up_read(&mm->mmap_sem);
32061+ return 1;
32062+ }
32063+
32064+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32065+
32066+ /*
32067+ * PaX: fill DTLB with user rights and retry
32068+ */
32069+ __asm__ __volatile__ (
32070+ "orb %2,(%1)\n"
32071+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32072+/*
32073+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
32074+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32075+ * page fault when examined during a TLB load attempt. this is true not only
32076+ * for PTEs holding a non-present entry but also present entries that will
32077+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32078+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32079+ * for our target pages since their PTEs are simply not in the TLBs at all.
32080+
32081+ * the best thing in omitting it is that we gain around 15-20% speed in the
32082+ * fast path of the page fault handler and can get rid of tracing since we
32083+ * can no longer flush unintended entries.
32084+ */
32085+ "invlpg (%0)\n"
32086+#endif
32087+ __copyuser_seg"testb $0,(%0)\n"
32088+ "xorb %3,(%1)\n"
32089+ :
32090+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32091+ : "memory", "cc");
32092+ pte_unmap_unlock(pte, ptl);
32093+ up_read(&mm->mmap_sem);
32094+ return 1;
32095+}
32096+#endif
32097+
32098 /*
32099 * Handle a spurious fault caused by a stale TLB entry.
32100 *
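`pax_handle_pageexec_fault()` above is the core of 32-bit PAGEEXEC on hardware without NX: a read fault with `regs->ip == address` is an instruction fetch from a protected page and is killed, while a legitimate data access is serviced by briefly granting the PTE user rights, touching the page through the user segment so the hardware walker caches a user-rights entry in the data TLB, and then revoking the bit; the stale DTLB entry keeps serving data while fetches, which go through the ITLB, still fault. Note also that `(error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1)` turns PF_WRITE (bit 1) into `_PAGE_DIRTY` (bit 6). An illustrative C decomposition of the three asm steps; the real sequence must stay in one asm block so no fault, preemption, or flush can land in between:

static void dtlb_prime_sketch(unsigned long address, pte_t *pte,
			      unsigned char pte_mask)
{
	/* 1: orb, pte_mask = _PAGE_ACCESSED | _PAGE_USER (| _PAGE_DIRTY on writes) */
	set_pte(pte, __pte(pte_val(*pte) | pte_mask));
	/* 2: gs-prefixed testb, the walker loads a user-rights DTLB entry */
	(void)*(volatile unsigned char __force_user *)address;
	/* 3: xorb, the PTE is kernel-only again but the DTLB entry lives on */
	set_pte(pte, __pte(pte_val(*pte) ^ _PAGE_USER));
}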
32101@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32102 static inline int
32103 access_error(unsigned long error_code, struct vm_area_struct *vma)
32104 {
32105+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32106+ return 1;
32107+
32108 if (error_code & PF_WRITE) {
32109 /* write, present and write, not present: */
32110 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32111@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32112 if (error_code & PF_USER)
32113 return false;
32114
32115- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32116+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32117 return false;
32118
32119 return true;
32120@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32121 tsk = current;
32122 mm = tsk->mm;
32123
32124+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32125+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32126+ if (!search_exception_tables(regs->ip)) {
32127+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32128+ bad_area_nosemaphore(regs, error_code, address);
32129+ return;
32130+ }
32131+ if (address < pax_user_shadow_base) {
32132+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32133+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32134+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32135+ } else
32136+ address -= pax_user_shadow_base;
32137+ }
32138+#endif
32139+
32140 /*
32141 * Detect and handle instructions that would cause a page fault for
32142 * both a tracked kernel page and a userspace page.
32143@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32144 * User-mode registers count as a user access even for any
32145 * potential system fault or CPU buglet:
32146 */
32147- if (user_mode_vm(regs)) {
32148+ if (user_mode(regs)) {
32149 local_irq_enable();
32150 error_code |= PF_USER;
32151 flags |= FAULT_FLAG_USER;
32152@@ -1187,6 +1411,11 @@ retry:
32153 might_sleep();
32154 }
32155
32156+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32157+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32158+ return;
32159+#endif
32160+
32161 vma = find_vma(mm, address);
32162 if (unlikely(!vma)) {
32163 bad_area(regs, error_code, address);
32164@@ -1198,18 +1427,24 @@ retry:
32165 bad_area(regs, error_code, address);
32166 return;
32167 }
32168- if (error_code & PF_USER) {
32169- /*
32170- * Accessing the stack below %sp is always a bug.
32171- * The large cushion allows instructions like enter
32172- * and pusha to work. ("enter $65535, $31" pushes
32173- * 32 pointers and then decrements %sp by 65535.)
32174- */
32175- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32176- bad_area(regs, error_code, address);
32177- return;
32178- }
32179+ /*
32180+ * Accessing the stack below %sp is always a bug.
32181+ * The large cushion allows instructions like enter
32182+ * and pusha to work. ("enter $65535, $31" pushes
32183+ * 32 pointers and then decrements %sp by 65535.)
32184+ */
32185+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32186+ bad_area(regs, error_code, address);
32187+ return;
32188 }
32189+
32190+#ifdef CONFIG_PAX_SEGMEXEC
32191+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32192+ bad_area(regs, error_code, address);
32193+ return;
32194+ }
32195+#endif
32196+
32197 if (unlikely(expand_stack(vma, address))) {
32198 bad_area(regs, error_code, address);
32199 return;
32200@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32201 }
32202 NOKPROBE_SYMBOL(trace_do_page_fault);
32203 #endif /* CONFIG_TRACING */
32204+
32205+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32206+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32207+{
32208+ struct mm_struct *mm = current->mm;
32209+ unsigned long ip = regs->ip;
32210+
32211+ if (v8086_mode(regs))
32212+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32213+
32214+#ifdef CONFIG_PAX_PAGEEXEC
32215+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32216+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32217+ return true;
32218+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32219+ return true;
32220+ return false;
32221+ }
32222+#endif
32223+
32224+#ifdef CONFIG_PAX_SEGMEXEC
32225+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32226+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32227+ return true;
32228+ return false;
32229+ }
32230+#endif
32231+
32232+ return false;
32233+}
32234+#endif
32235+
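pax_is_fetch_fault() above classifies faults two ways: NX-capable hardware reports instruction fetches directly via PF_INSTR, while non-NX PAGEEXEC infers a fetch when the fault was neither a protection nor a write fault and the faulting address equals the instruction pointer (SEGMEXEC compares against the mirrored address instead). A condensed sketch of that test, with the usual x86 error-code bits assumed (PF_PROT bit 0, PF_WRITE bit 1, PF_INSTR bit 4):

#include <stdbool.h>

#define PF_PROT  (1 << 0)
#define PF_WRITE (1 << 1)
#define PF_INSTR (1 << 4)

/* sketch only: have_nx stands for (__supported_pte_mask & _PAGE_NX) */
static bool is_fetch_fault(bool have_nx, unsigned long error_code,
                           unsigned long ip, unsigned long address)
{
        if (have_nx && (error_code & PF_INSTR))
                return true;            /* hardware says so */
        return !(error_code & (PF_PROT | PF_WRITE)) && ip == address;
}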
32236+#ifdef CONFIG_PAX_EMUTRAMP
32237+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32238+{
32239+ int err;
32240+
32241+ do { /* PaX: libffi trampoline emulation */
32242+ unsigned char mov, jmp;
32243+ unsigned int addr1, addr2;
32244+
32245+#ifdef CONFIG_X86_64
32246+ if ((regs->ip + 9) >> 32)
32247+ break;
32248+#endif
32249+
32250+ err = get_user(mov, (unsigned char __user *)regs->ip);
32251+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32252+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32253+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32254+
32255+ if (err)
32256+ break;
32257+
32258+ if (mov == 0xB8 && jmp == 0xE9) {
32259+ regs->ax = addr1;
32260+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32261+ return 2;
32262+ }
32263+ } while (0);
32264+
32265+ do { /* PaX: gcc trampoline emulation #1 */
32266+ unsigned char mov1, mov2;
32267+ unsigned short jmp;
32268+ unsigned int addr1, addr2;
32269+
32270+#ifdef CONFIG_X86_64
32271+ if ((regs->ip + 11) >> 32)
32272+ break;
32273+#endif
32274+
32275+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32276+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32277+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32278+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32279+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32280+
32281+ if (err)
32282+ break;
32283+
32284+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32285+ regs->cx = addr1;
32286+ regs->ax = addr2;
32287+ regs->ip = addr2;
32288+ return 2;
32289+ }
32290+ } while (0);
32291+
32292+ do { /* PaX: gcc trampoline emulation #2 */
32293+ unsigned char mov, jmp;
32294+ unsigned int addr1, addr2;
32295+
32296+#ifdef CONFIG_X86_64
32297+ if ((regs->ip + 9) >> 32)
32298+ break;
32299+#endif
32300+
32301+ err = get_user(mov, (unsigned char __user *)regs->ip);
32302+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32303+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32304+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32305+
32306+ if (err)
32307+ break;
32308+
32309+ if (mov == 0xB9 && jmp == 0xE9) {
32310+ regs->cx = addr1;
32311+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32312+ return 2;
32313+ }
32314+ } while (0);
32315+
32316+ return 1; /* PaX in action */
32317+}
32318+
32319+#ifdef CONFIG_X86_64
32320+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32321+{
32322+ int err;
32323+
32324+ do { /* PaX: libffi trampoline emulation */
32325+ unsigned short mov1, mov2, jmp1;
32326+ unsigned char stcclc, jmp2;
32327+ unsigned long addr1, addr2;
32328+
32329+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32330+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32331+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32332+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32333+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32334+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32335+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32336+
32337+ if (err)
32338+ break;
32339+
32340+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32341+ regs->r11 = addr1;
32342+ regs->r10 = addr2;
32343+ if (stcclc == 0xF8)
32344+ regs->flags &= ~X86_EFLAGS_CF;
32345+ else
32346+ regs->flags |= X86_EFLAGS_CF;
32347+ regs->ip = addr1;
32348+ return 2;
32349+ }
32350+ } while (0);
32351+
32352+ do { /* PaX: gcc trampoline emulation #1 */
32353+ unsigned short mov1, mov2, jmp1;
32354+ unsigned char jmp2;
32355+ unsigned int addr1;
32356+ unsigned long addr2;
32357+
32358+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32359+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32360+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32361+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32362+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32363+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32364+
32365+ if (err)
32366+ break;
32367+
32368+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32369+ regs->r11 = addr1;
32370+ regs->r10 = addr2;
32371+ regs->ip = addr1;
32372+ return 2;
32373+ }
32374+ } while (0);
32375+
32376+ do { /* PaX: gcc trampoline emulation #2 */
32377+ unsigned short mov1, mov2, jmp1;
32378+ unsigned char jmp2;
32379+ unsigned long addr1, addr2;
32380+
32381+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32382+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32383+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32384+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32385+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32386+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32387+
32388+ if (err)
32389+ break;
32390+
32391+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32392+ regs->r11 = addr1;
32393+ regs->r10 = addr2;
32394+ regs->ip = addr1;
32395+ return 2;
32396+ }
32397+ } while (0);
32398+
32399+ return 1; /* PaX in action */
32400+}
32401+#endif
32402+
32403+/*
32404+ * PaX: decide what to do with offenders (regs->ip = fault address)
32405+ *
32406+ * returns 1 when task should be killed
32407+ * 2 when gcc trampoline was detected
32408+ */
32409+static int pax_handle_fetch_fault(struct pt_regs *regs)
32410+{
32411+ if (v8086_mode(regs))
32412+ return 1;
32413+
32414+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32415+ return 1;
32416+
32417+#ifdef CONFIG_X86_32
32418+ return pax_handle_fetch_fault_32(regs);
32419+#else
32420+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32421+ return pax_handle_fetch_fault_32(regs);
32422+ else
32423+ return pax_handle_fetch_fault_64(regs);
32424+#endif
32425+}
32426+#endif
32427+
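For reference, the 32-bit libffi pattern matched first above is mov $imm32,%eax (opcode 0xB8 plus a 4-byte immediate) followed by jmp rel32 (0xE9 plus a 4-byte displacement), ten bytes in all; rel32 is relative to the end of the sequence, which is why the emulation sets ip to ip + addr2 + 10. A hedged userspace sketch of that decode (struct emu and emulate_libffi_tramp are illustrative names, not kernel API):

#include <stdint.h>
#include <string.h>

struct emu { uint32_t ax, ip; };

/* returns 1 if the 10-byte window matched and the register state was
 * advanced past the trampoline, 0 otherwise (the kernel code returns 2/1) */
static int emulate_libffi_tramp(const uint8_t insn[10], struct emu *r)
{
        uint32_t addr1, addr2;

        if (insn[0] != 0xB8 || insn[5] != 0xE9)
                return 0;
        memcpy(&addr1, insn + 1, 4);    /* mov immediate */
        memcpy(&addr2, insn + 6, 4);    /* jmp displacement */
        r->ax = addr1;
        r->ip = r->ip + addr2 + 10;     /* rel32 counts from sequence end */
        return 1;
}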
32428+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32429+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32430+{
32431+ long i;
32432+
32433+ printk(KERN_ERR "PAX: bytes at PC: ");
32434+ for (i = 0; i < 20; i++) {
32435+ unsigned char c;
32436+ if (get_user(c, (unsigned char __force_user *)pc+i))
32437+ printk(KERN_CONT "?? ");
32438+ else
32439+ printk(KERN_CONT "%02x ", c);
32440+ }
32441+ printk("\n");
32442+
32443+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32444+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32445+ unsigned long c;
32446+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32447+#ifdef CONFIG_X86_32
32448+ printk(KERN_CONT "???????? ");
32449+#else
32450+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32451+ printk(KERN_CONT "???????? ???????? ");
32452+ else
32453+ printk(KERN_CONT "???????????????? ");
32454+#endif
32455+ } else {
32456+#ifdef CONFIG_X86_64
32457+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32458+ printk(KERN_CONT "%08x ", (unsigned int)c);
32459+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32460+ } else
32461+#endif
32462+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32463+ }
32464+ }
32465+ printk("\n");
32466+}
32467+#endif
32468+
32469+/**
32470+ * probe_kernel_write(): safely attempt to write to a location
32471+ * @dst: address to write to
32472+ * @src: pointer to the data that shall be written
32473+ * @size: size of the data chunk
32474+ *
32475+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32476+ * happens, handle that and return -EFAULT.
32477+ */
32478+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32479+{
32480+ long ret;
32481+ mm_segment_t old_fs = get_fs();
32482+
32483+ set_fs(KERNEL_DS);
32484+ pagefault_disable();
32485+ pax_open_kernel();
32486+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32487+ pax_close_kernel();
32488+ pagefault_enable();
32489+ set_fs(old_fs);
32490+
32491+ return ret ? -EFAULT : 0;
32492+}
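probe_kernel_write() above combines three guards: set_fs(KERNEL_DS) widens the address limit so a kernel destination passes the access checks, pagefault_disable() turns a faulting copy into a clean -EFAULT instead of a sleep, and pax_open_kernel()/pax_close_kernel() lift KERNEXEC's write protection for the duration. A hedged usage sketch in kernel context (patch_word and patch_site are hypothetical, not from this patch):

/* returns 0 on success, -EFAULT if the destination faults */
static int patch_word(void *patch_site, unsigned long val)
{
        return probe_kernel_write(patch_site, &val, sizeof(val));
}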
32493diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32494index 224b142..c2c9423 100644
32495--- a/arch/x86/mm/gup.c
32496+++ b/arch/x86/mm/gup.c
32497@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32498 addr = start;
32499 len = (unsigned long) nr_pages << PAGE_SHIFT;
32500 end = start + len;
32501- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32502+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32503 (void __user *)start, len)))
32504 return 0;
32505
32506@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32507 goto slow_irqon;
32508 #endif
32509
32510+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32511+ (void __user *)start, len)))
32512+ return 0;
32513+
32514 /*
32515 * XXX: batch / limit 'nr', to avoid large irq off latency
32516 * needs some instrumenting to determine the common sizes used by
32517diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32518index 4500142..53a363c 100644
32519--- a/arch/x86/mm/highmem_32.c
32520+++ b/arch/x86/mm/highmem_32.c
32521@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32522 idx = type + KM_TYPE_NR*smp_processor_id();
32523 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32524 BUG_ON(!pte_none(*(kmap_pte-idx)));
32525+
32526+ pax_open_kernel();
32527 set_pte(kmap_pte-idx, mk_pte(page, prot));
32528+ pax_close_kernel();
32529+
32530 arch_flush_lazy_mmu_mode();
32531
32532 return (void *)vaddr;
32533diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32534index 006cc91..bf05a83 100644
32535--- a/arch/x86/mm/hugetlbpage.c
32536+++ b/arch/x86/mm/hugetlbpage.c
32537@@ -86,23 +86,24 @@ int pud_huge(pud_t pud)
32538 #ifdef CONFIG_HUGETLB_PAGE
32539 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32540 unsigned long addr, unsigned long len,
32541- unsigned long pgoff, unsigned long flags)
32542+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32543 {
32544 struct hstate *h = hstate_file(file);
32545 struct vm_unmapped_area_info info;
32546-
32547+
32548 info.flags = 0;
32549 info.length = len;
32550 info.low_limit = current->mm->mmap_legacy_base;
32551 info.high_limit = TASK_SIZE;
32552 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32553 info.align_offset = 0;
32554+ info.threadstack_offset = offset;
32555 return vm_unmapped_area(&info);
32556 }
32557
32558 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32559 unsigned long addr0, unsigned long len,
32560- unsigned long pgoff, unsigned long flags)
32561+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32562 {
32563 struct hstate *h = hstate_file(file);
32564 struct vm_unmapped_area_info info;
32565@@ -114,6 +115,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32566 info.high_limit = current->mm->mmap_base;
32567 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32568 info.align_offset = 0;
32569+ info.threadstack_offset = offset;
32570 addr = vm_unmapped_area(&info);
32571
32572 /*
32573@@ -126,6 +128,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32574 VM_BUG_ON(addr != -ENOMEM);
32575 info.flags = 0;
32576 info.low_limit = TASK_UNMAPPED_BASE;
32577+
32578+#ifdef CONFIG_PAX_RANDMMAP
32579+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32580+ info.low_limit += current->mm->delta_mmap;
32581+#endif
32582+
32583 info.high_limit = TASK_SIZE;
32584 addr = vm_unmapped_area(&info);
32585 }
32586@@ -140,10 +148,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32587 struct hstate *h = hstate_file(file);
32588 struct mm_struct *mm = current->mm;
32589 struct vm_area_struct *vma;
32590+ unsigned long pax_task_size = TASK_SIZE;
32591+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32592
32593 if (len & ~huge_page_mask(h))
32594 return -EINVAL;
32595- if (len > TASK_SIZE)
32596+
32597+#ifdef CONFIG_PAX_SEGMEXEC
32598+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32599+ pax_task_size = SEGMEXEC_TASK_SIZE;
32600+#endif
32601+
32602+ pax_task_size -= PAGE_SIZE;
32603+
32604+ if (len > pax_task_size)
32605 return -ENOMEM;
32606
32607 if (flags & MAP_FIXED) {
32608@@ -152,19 +170,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32609 return addr;
32610 }
32611
32612+#ifdef CONFIG_PAX_RANDMMAP
32613+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32614+#endif
32615+
32616 if (addr) {
32617 addr = ALIGN(addr, huge_page_size(h));
32618 vma = find_vma(mm, addr);
32619- if (TASK_SIZE - len >= addr &&
32620- (!vma || addr + len <= vma->vm_start))
32621+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32622 return addr;
32623 }
32624 if (mm->get_unmapped_area == arch_get_unmapped_area)
32625 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32626- pgoff, flags);
32627+ pgoff, flags, offset);
32628 else
32629 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32630- pgoff, flags);
32631+ pgoff, flags, offset);
32632 }
32633 #endif /* CONFIG_HUGETLB_PAGE */
32634
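The hugetlb hunk above swaps the fixed TASK_SIZE bound for a per-mm limit: under SEGMEXEC only the lower half of the address space carries data mappings, and one page is always shaved off as a guard. Back-of-the-envelope numbers, assuming the classic i386 3/1 split (TASK_SIZE 3 GiB, SEGMEXEC_TASK_SIZE half of that):

#include <stdio.h>

#define TASK_SIZE          0xC0000000UL    /* 3 GiB user space (assumed) */
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2) /* lower, data half */
#define PAGE_SIZE          4096UL

int main(void)
{
        unsigned long limit = SEGMEXEC_TASK_SIZE - PAGE_SIZE;

        printf("mappable under SEGMEXEC: %#lx\n", limit); /* 0x5ffff000 */
        return 0;
}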
32635diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32636index 079c3b6..7069023 100644
32637--- a/arch/x86/mm/init.c
32638+++ b/arch/x86/mm/init.c
32639@@ -4,6 +4,7 @@
32640 #include <linux/swap.h>
32641 #include <linux/memblock.h>
32642 #include <linux/bootmem.h> /* for max_low_pfn */
32643+#include <linux/tboot.h>
32644
32645 #include <asm/cacheflush.h>
32646 #include <asm/e820.h>
32647@@ -17,6 +18,8 @@
32648 #include <asm/proto.h>
32649 #include <asm/dma.h> /* for MAX_DMA_PFN */
32650 #include <asm/microcode.h>
32651+#include <asm/desc.h>
32652+#include <asm/bios_ebda.h>
32653
32654 /*
32655 * We need to define the tracepoints somewhere, and tlb.c
32656@@ -596,7 +599,18 @@ void __init init_mem_mapping(void)
32657 early_ioremap_page_table_range_init();
32658 #endif
32659
32660+#ifdef CONFIG_PAX_PER_CPU_PGD
32661+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32662+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32663+ KERNEL_PGD_PTRS);
32664+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32665+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32666+ KERNEL_PGD_PTRS);
32667+ load_cr3(get_cpu_pgd(0, kernel));
32668+#else
32669 load_cr3(swapper_pg_dir);
32670+#endif
32671+
32672 __flush_tlb_all();
32673
32674 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32675@@ -612,10 +626,40 @@ void __init init_mem_mapping(void)
32676 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32677 * mmio resources as well as potential bios/acpi data regions.
32678 */
32679+
32680+#ifdef CONFIG_GRKERNSEC_KMEM
32681+static unsigned int ebda_start __read_only;
32682+static unsigned int ebda_end __read_only;
32683+#endif
32684+
32685 int devmem_is_allowed(unsigned long pagenr)
32686 {
32687- if (pagenr < 256)
32688+#ifdef CONFIG_GRKERNSEC_KMEM
32689+ /* allow BDA */
32690+ if (!pagenr)
32691 return 1;
32692+ /* allow EBDA */
32693+ if (pagenr >= ebda_start && pagenr < ebda_end)
32694+ return 1;
32695+ /* if tboot is in use, allow access to its hardcoded serial log range */
32696+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32697+ return 1;
32698+#else
32699+ if (!pagenr)
32700+ return 1;
32701+#ifdef CONFIG_VM86
32702+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32703+ return 1;
32704+#endif
32705+#endif
32706+
32707+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32708+ return 1;
32709+#ifdef CONFIG_GRKERNSEC_KMEM
32710+ /* throw out everything else below 1MB */
32711+ if (pagenr <= 256)
32712+ return 0;
32713+#endif
32714 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32715 return 0;
32716 if (!page_is_ram(pagenr))
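Summarizing the CONFIG_GRKERNSEC_KMEM policy devmem_is_allowed() now enforces for low memory: page 0 (the BIOS data area), the EBDA window, tboot's hardcoded serial-log range at 0x60000-0x68000 when tboot is active, and the legacy ISA hole stay readable; every other page below 1 MiB is refused. A hedged restatement with 4 KiB pages assumed (low_mem_allowed is an illustrative name; pages above 1 MiB fall through to the iomem checks, not shown):

#include <stdbool.h>

#define PAGE_SHIFT        12
#define ISA_START_ADDRESS 0xA0000UL
#define ISA_END_ADDRESS   0x100000UL

static bool low_mem_allowed(unsigned long pagenr,
                            unsigned long ebda_start, unsigned long ebda_end,
                            bool tboot)
{
        if (!pagenr)
                return true;                            /* BDA: page 0 */
        if (pagenr >= ebda_start && pagenr < ebda_end)
                return true;                            /* EBDA window */
        if (tboot && pagenr >= (0x60000UL >> PAGE_SHIFT) &&
            pagenr < (0x68000UL >> PAGE_SHIFT))
                return true;                            /* tboot serial log */
        if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
            pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
                return true;                            /* legacy ISA hole */
        return false;                                   /* rest of low 1 MiB */
}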
32717@@ -661,8 +705,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32718 #endif
32719 }
32720
32721+#ifdef CONFIG_GRKERNSEC_KMEM
32722+static inline void gr_init_ebda(void)
32723+{
32724+ unsigned int ebda_addr;
32725+ unsigned int ebda_size = 0;
32726+
32727+ ebda_addr = get_bios_ebda();
32728+ if (ebda_addr) {
32729+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32730+ ebda_size <<= 10;
32731+ }
32732+ if (ebda_addr && ebda_size) {
32733+ ebda_start = ebda_addr >> PAGE_SHIFT;
32734+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32735+ } else {
32736+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32737+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32738+ }
32739+}
32740+#else
32741+static inline void gr_init_ebda(void) { }
32742+#endif
32743+
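gr_init_ebda() above relies on the BIOS convention (assumed here) that the first byte of the EBDA segment holds its size in KiB, hence the shift by 10 to convert to bytes; when no EBDA is reported, it falls back to the 4 KiB window just below 0xA0000. The conversion in isolation:

/* EBDA length byte is in KiB by BIOS convention; << 10 converts to bytes */
static unsigned int ebda_bytes(unsigned char len_kib)
{
        return (unsigned int)len_kib << 10;
}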
32744 void free_initmem(void)
32745 {
32746+#ifdef CONFIG_PAX_KERNEXEC
32747+#ifdef CONFIG_X86_32
32748+ /* PaX: limit KERNEL_CS to actual size */
32749+ unsigned long addr, limit;
32750+ struct desc_struct d;
32751+ int cpu;
32752+#else
32753+ pgd_t *pgd;
32754+ pud_t *pud;
32755+ pmd_t *pmd;
32756+ unsigned long addr, end;
32757+#endif
32758+#endif
32759+
32760+ gr_init_ebda();
32761+
32762+#ifdef CONFIG_PAX_KERNEXEC
32763+#ifdef CONFIG_X86_32
32764+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32765+ limit = (limit - 1UL) >> PAGE_SHIFT;
32766+
32767+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32768+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32769+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32770+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32771+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32772+ }
32773+
32774+ /* PaX: make KERNEL_CS read-only */
32775+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32776+ if (!paravirt_enabled())
32777+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32778+/*
32779+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32780+ pgd = pgd_offset_k(addr);
32781+ pud = pud_offset(pgd, addr);
32782+ pmd = pmd_offset(pud, addr);
32783+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32784+ }
32785+*/
32786+#ifdef CONFIG_X86_PAE
32787+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32788+/*
32789+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32790+ pgd = pgd_offset_k(addr);
32791+ pud = pud_offset(pgd, addr);
32792+ pmd = pmd_offset(pud, addr);
32793+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32794+ }
32795+*/
32796+#endif
32797+
32798+#ifdef CONFIG_MODULES
32799+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32800+#endif
32801+
32802+#else
32803+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32804+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32805+ pgd = pgd_offset_k(addr);
32806+ pud = pud_offset(pgd, addr);
32807+ pmd = pmd_offset(pud, addr);
32808+ if (!pmd_present(*pmd))
32809+ continue;
32810+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32811+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32812+ else
32813+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32814+ }
32815+
32816+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32817+ end = addr + KERNEL_IMAGE_SIZE;
32818+ for (; addr < end; addr += PMD_SIZE) {
32819+ pgd = pgd_offset_k(addr);
32820+ pud = pud_offset(pgd, addr);
32821+ pmd = pmd_offset(pud, addr);
32822+ if (!pmd_present(*pmd))
32823+ continue;
32824+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32825+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32826+ }
32827+#endif
32828+
32829+ flush_tlb_all();
32830+#endif
32831+
32832 free_init_pages("unused kernel",
32833 (unsigned long)(&__init_begin),
32834 (unsigned long)(&__init_end));
32835diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32836index c8140e1..59257fc 100644
32837--- a/arch/x86/mm/init_32.c
32838+++ b/arch/x86/mm/init_32.c
32839@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32840 bool __read_mostly __vmalloc_start_set = false;
32841
32842 /*
32843- * Creates a middle page table and puts a pointer to it in the
32844- * given global directory entry. This only returns the gd entry
32845- * in non-PAE compilation mode, since the middle layer is folded.
32846- */
32847-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32848-{
32849- pud_t *pud;
32850- pmd_t *pmd_table;
32851-
32852-#ifdef CONFIG_X86_PAE
32853- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32854- pmd_table = (pmd_t *)alloc_low_page();
32855- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32856- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32857- pud = pud_offset(pgd, 0);
32858- BUG_ON(pmd_table != pmd_offset(pud, 0));
32859-
32860- return pmd_table;
32861- }
32862-#endif
32863- pud = pud_offset(pgd, 0);
32864- pmd_table = pmd_offset(pud, 0);
32865-
32866- return pmd_table;
32867-}
32868-
32869-/*
32870 * Create a page table and place a pointer to it in a middle page
32871 * directory entry:
32872 */
32873@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32874 pte_t *page_table = (pte_t *)alloc_low_page();
32875
32876 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32877+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32878+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32879+#else
32880 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32881+#endif
32882 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32883 }
32884
32885 return pte_offset_kernel(pmd, 0);
32886 }
32887
32888+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32889+{
32890+ pud_t *pud;
32891+ pmd_t *pmd_table;
32892+
32893+ pud = pud_offset(pgd, 0);
32894+ pmd_table = pmd_offset(pud, 0);
32895+
32896+ return pmd_table;
32897+}
32898+
32899 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32900 {
32901 int pgd_idx = pgd_index(vaddr);
32902@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32903 int pgd_idx, pmd_idx;
32904 unsigned long vaddr;
32905 pgd_t *pgd;
32906+ pud_t *pud;
32907 pmd_t *pmd;
32908 pte_t *pte = NULL;
32909 unsigned long count = page_table_range_init_count(start, end);
32910@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32911 pgd = pgd_base + pgd_idx;
32912
32913 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32914- pmd = one_md_table_init(pgd);
32915- pmd = pmd + pmd_index(vaddr);
32916+ pud = pud_offset(pgd, vaddr);
32917+ pmd = pmd_offset(pud, vaddr);
32918+
32919+#ifdef CONFIG_X86_PAE
32920+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32921+#endif
32922+
32923 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
32924 pmd++, pmd_idx++) {
32925 pte = page_table_kmap_check(one_page_table_init(pmd),
32926@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32927 }
32928 }
32929
32930-static inline int is_kernel_text(unsigned long addr)
32931+static inline int is_kernel_text(unsigned long start, unsigned long end)
32932 {
32933- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
32934- return 1;
32935- return 0;
32936+ if ((start >= ktla_ktva((unsigned long)_etext) ||
32937+ end <= ktla_ktva((unsigned long)_stext)) &&
32938+ (start >= ktla_ktva((unsigned long)_einittext) ||
32939+ end <= ktla_ktva((unsigned long)_sinittext)) &&
32940+
32941+#ifdef CONFIG_ACPI_SLEEP
32942+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
32943+#endif
32944+
32945+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
32946+ return 0;
32947+ return 1;
32948 }
32949
32950 /*
32951@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
32952 unsigned long last_map_addr = end;
32953 unsigned long start_pfn, end_pfn;
32954 pgd_t *pgd_base = swapper_pg_dir;
32955- int pgd_idx, pmd_idx, pte_ofs;
32956+ unsigned int pgd_idx, pmd_idx, pte_ofs;
32957 unsigned long pfn;
32958 pgd_t *pgd;
32959+ pud_t *pud;
32960 pmd_t *pmd;
32961 pte_t *pte;
32962 unsigned pages_2m, pages_4k;
32963@@ -291,8 +295,13 @@ repeat:
32964 pfn = start_pfn;
32965 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
32966 pgd = pgd_base + pgd_idx;
32967- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
32968- pmd = one_md_table_init(pgd);
32969+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
32970+ pud = pud_offset(pgd, 0);
32971+ pmd = pmd_offset(pud, 0);
32972+
32973+#ifdef CONFIG_X86_PAE
32974+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32975+#endif
32976
32977 if (pfn >= end_pfn)
32978 continue;
32979@@ -304,14 +313,13 @@ repeat:
32980 #endif
32981 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
32982 pmd++, pmd_idx++) {
32983- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
32984+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
32985
32986 /*
32987 * Map with big pages if possible, otherwise
32988 * create normal page tables:
32989 */
32990 if (use_pse) {
32991- unsigned int addr2;
32992 pgprot_t prot = PAGE_KERNEL_LARGE;
32993 /*
32994 * first pass will use the same initial
32995@@ -322,11 +330,7 @@ repeat:
32996 _PAGE_PSE);
32997
32998 pfn &= PMD_MASK >> PAGE_SHIFT;
32999- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33000- PAGE_OFFSET + PAGE_SIZE-1;
33001-
33002- if (is_kernel_text(addr) ||
33003- is_kernel_text(addr2))
33004+ if (is_kernel_text(address, address + PMD_SIZE))
33005 prot = PAGE_KERNEL_LARGE_EXEC;
33006
33007 pages_2m++;
33008@@ -343,7 +347,7 @@ repeat:
33009 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33010 pte += pte_ofs;
33011 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33012- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33013+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33014 pgprot_t prot = PAGE_KERNEL;
33015 /*
33016 * first pass will use the same initial
33017@@ -351,7 +355,7 @@ repeat:
33018 */
33019 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33020
33021- if (is_kernel_text(addr))
33022+ if (is_kernel_text(address, address + PAGE_SIZE))
33023 prot = PAGE_KERNEL_EXEC;
33024
33025 pages_4k++;
33026@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33027
33028 pud = pud_offset(pgd, va);
33029 pmd = pmd_offset(pud, va);
33030- if (!pmd_present(*pmd))
33031+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33032 break;
33033
33034 /* should not be large page here */
33035@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33036
33037 static void __init pagetable_init(void)
33038 {
33039- pgd_t *pgd_base = swapper_pg_dir;
33040-
33041- permanent_kmaps_init(pgd_base);
33042+ permanent_kmaps_init(swapper_pg_dir);
33043 }
33044
33045-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33046+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33047 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33048
33049 /* user-defined highmem size */
33050@@ -787,10 +789,10 @@ void __init mem_init(void)
33051 ((unsigned long)&__init_end -
33052 (unsigned long)&__init_begin) >> 10,
33053
33054- (unsigned long)&_etext, (unsigned long)&_edata,
33055- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33056+ (unsigned long)&_sdata, (unsigned long)&_edata,
33057+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33058
33059- (unsigned long)&_text, (unsigned long)&_etext,
33060+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33061 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33062
33063 /*
33064@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33065 if (!kernel_set_to_readonly)
33066 return;
33067
33068+ start = ktla_ktva(start);
33069 pr_debug("Set kernel text: %lx - %lx for read write\n",
33070 start, start+size);
33071
33072@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33073 if (!kernel_set_to_readonly)
33074 return;
33075
33076+ start = ktla_ktva(start);
33077 pr_debug("Set kernel text: %lx - %lx for read only\n",
33078 start, start+size);
33079
33080@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33081 unsigned long start = PFN_ALIGN(_text);
33082 unsigned long size = PFN_ALIGN(_etext) - start;
33083
33084+ start = ktla_ktva(start);
33085 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33086 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33087 size >> 10);
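The rewritten is_kernel_text() in the init_32.c hunk above is a range-overlap test applied to each protected region (kernel text, init text, the ACPI wakeup area, the low BIOS range): a candidate [start, end) counts as text unless it misses all of them. The underlying idiom, as a minimal sketch:

#include <stdbool.h>

/* [a, b) overlaps [lo, hi) iff neither interval lies wholly past the other;
 * the patch's negated comparisons are exactly the "no overlap" form of this */
static bool overlaps(unsigned long a, unsigned long b,
                     unsigned long lo, unsigned long hi)
{
        return a < hi && b > lo;
}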
33088diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33089index 30eb05a..ae671ac 100644
33090--- a/arch/x86/mm/init_64.c
33091+++ b/arch/x86/mm/init_64.c
33092@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33093 * around without checking the pgd every time.
33094 */
33095
33096-pteval_t __supported_pte_mask __read_mostly = ~0;
33097+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33098 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33099
33100 int force_personality32;
33101@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33102
33103 for (address = start; address <= end; address += PGDIR_SIZE) {
33104 const pgd_t *pgd_ref = pgd_offset_k(address);
33105+
33106+#ifdef CONFIG_PAX_PER_CPU_PGD
33107+ unsigned long cpu;
33108+#else
33109 struct page *page;
33110+#endif
33111
33112 /*
33113 * When it is called after memory hot remove, pgd_none()
33114@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33115 continue;
33116
33117 spin_lock(&pgd_lock);
33118+
33119+#ifdef CONFIG_PAX_PER_CPU_PGD
33120+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33121+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33122+
33123+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33124+ BUG_ON(pgd_page_vaddr(*pgd)
33125+ != pgd_page_vaddr(*pgd_ref));
33126+
33127+ if (removed) {
33128+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33129+ pgd_clear(pgd);
33130+ } else {
33131+ if (pgd_none(*pgd))
33132+ set_pgd(pgd, *pgd_ref);
33133+ }
33134+
33135+ pgd = pgd_offset_cpu(cpu, kernel, address);
33136+#else
33137 list_for_each_entry(page, &pgd_list, lru) {
33138 pgd_t *pgd;
33139 spinlock_t *pgt_lock;
33140@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33141 /* the pgt_lock only for Xen */
33142 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33143 spin_lock(pgt_lock);
33144+#endif
33145
33146 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33147 BUG_ON(pgd_page_vaddr(*pgd)
33148@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33149 set_pgd(pgd, *pgd_ref);
33150 }
33151
33152+#ifndef CONFIG_PAX_PER_CPU_PGD
33153 spin_unlock(pgt_lock);
33154+#endif
33155+
33156 }
33157 spin_unlock(&pgd_lock);
33158 }
33159@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33160 {
33161 if (pgd_none(*pgd)) {
33162 pud_t *pud = (pud_t *)spp_getpage();
33163- pgd_populate(&init_mm, pgd, pud);
33164+ pgd_populate_kernel(&init_mm, pgd, pud);
33165 if (pud != pud_offset(pgd, 0))
33166 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33167 pud, pud_offset(pgd, 0));
33168@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33169 {
33170 if (pud_none(*pud)) {
33171 pmd_t *pmd = (pmd_t *) spp_getpage();
33172- pud_populate(&init_mm, pud, pmd);
33173+ pud_populate_kernel(&init_mm, pud, pmd);
33174 if (pmd != pmd_offset(pud, 0))
33175 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33176 pmd, pmd_offset(pud, 0));
33177@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33178 pmd = fill_pmd(pud, vaddr);
33179 pte = fill_pte(pmd, vaddr);
33180
33181+ pax_open_kernel();
33182 set_pte(pte, new_pte);
33183+ pax_close_kernel();
33184
33185 /*
33186 * It's enough to flush this one mapping.
33187@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33188 pgd = pgd_offset_k((unsigned long)__va(phys));
33189 if (pgd_none(*pgd)) {
33190 pud = (pud_t *) spp_getpage();
33191- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33192- _PAGE_USER));
33193+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33194 }
33195 pud = pud_offset(pgd, (unsigned long)__va(phys));
33196 if (pud_none(*pud)) {
33197 pmd = (pmd_t *) spp_getpage();
33198- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33199- _PAGE_USER));
33200+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33201 }
33202 pmd = pmd_offset(pud, phys);
33203 BUG_ON(!pmd_none(*pmd));
33204@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33205 prot);
33206
33207 spin_lock(&init_mm.page_table_lock);
33208- pud_populate(&init_mm, pud, pmd);
33209+ pud_populate_kernel(&init_mm, pud, pmd);
33210 spin_unlock(&init_mm.page_table_lock);
33211 }
33212 __flush_tlb_all();
33213@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33214 page_size_mask);
33215
33216 spin_lock(&init_mm.page_table_lock);
33217- pgd_populate(&init_mm, pgd, pud);
33218+ pgd_populate_kernel(&init_mm, pgd, pud);
33219 spin_unlock(&init_mm.page_table_lock);
33220 pgd_changed = true;
33221 }
33222diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33223index 9ca35fc..4b2b7b7 100644
33224--- a/arch/x86/mm/iomap_32.c
33225+++ b/arch/x86/mm/iomap_32.c
33226@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33227 type = kmap_atomic_idx_push();
33228 idx = type + KM_TYPE_NR * smp_processor_id();
33229 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33230+
33231+ pax_open_kernel();
33232 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33233+ pax_close_kernel();
33234+
33235 arch_flush_lazy_mmu_mode();
33236
33237 return (void *)vaddr;
33238diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33239index fdf617c..b9e85bc 100644
33240--- a/arch/x86/mm/ioremap.c
33241+++ b/arch/x86/mm/ioremap.c
33242@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33243 unsigned long i;
33244
33245 for (i = 0; i < nr_pages; ++i)
33246- if (pfn_valid(start_pfn + i) &&
33247- !PageReserved(pfn_to_page(start_pfn + i)))
33248+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33249+ !PageReserved(pfn_to_page(start_pfn + i))))
33250 return 1;
33251
33252 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33253@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33254 *
33255 * Caller must ensure there is only one unmapping for the same pointer.
33256 */
33257-void iounmap(volatile void __iomem *addr)
33258+void iounmap(const volatile void __iomem *addr)
33259 {
33260 struct vm_struct *p, *o;
33261
33262@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33263 */
33264 void *xlate_dev_mem_ptr(phys_addr_t phys)
33265 {
33266- void *addr;
33267- unsigned long start = phys & PAGE_MASK;
33268-
33269 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33270- if (page_is_ram(start >> PAGE_SHIFT))
33271+ if (page_is_ram(phys >> PAGE_SHIFT))
33272+#ifdef CONFIG_HIGHMEM
33273+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33274+#endif
33275 return __va(phys);
33276
33277- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33278- if (addr)
33279- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33280-
33281- return addr;
33282+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33283 }
33284
33285 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33286 {
33287 if (page_is_ram(phys >> PAGE_SHIFT))
33288+#ifdef CONFIG_HIGHMEM
33289+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33290+#endif
33291 return;
33292
33293 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33294 return;
33295 }
33296
33297-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33298+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33299
33300 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33301 {
33302@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33303 early_ioremap_setup();
33304
33305 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33306- memset(bm_pte, 0, sizeof(bm_pte));
33307- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33308+ pmd_populate_user(&init_mm, pmd, bm_pte);
33309
33310 /*
33311 * The boot-ioremap range spans multiple pmds, for which
33312diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33313index b4f2e7e..96c9c3e 100644
33314--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33315+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33316@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33317 * memory (e.g. tracked pages)? For now, we need this to avoid
33318 * invoking kmemcheck for PnP BIOS calls.
33319 */
33320- if (regs->flags & X86_VM_MASK)
33321+ if (v8086_mode(regs))
33322 return false;
33323- if (regs->cs != __KERNEL_CS)
33324+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33325 return false;
33326
33327 pte = kmemcheck_pte_lookup(address);
33328diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33329index df4552b..12c129c 100644
33330--- a/arch/x86/mm/mmap.c
33331+++ b/arch/x86/mm/mmap.c
33332@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33333 * Leave an at least ~128 MB hole with possible stack randomization.
33334 */
33335 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33336-#define MAX_GAP (TASK_SIZE/6*5)
33337+#define MAX_GAP (pax_task_size/6*5)
33338
33339 static int mmap_is_legacy(void)
33340 {
33341@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33342 return rnd << PAGE_SHIFT;
33343 }
33344
33345-static unsigned long mmap_base(void)
33346+static unsigned long mmap_base(struct mm_struct *mm)
33347 {
33348 unsigned long gap = rlimit(RLIMIT_STACK);
33349+ unsigned long pax_task_size = TASK_SIZE;
33350+
33351+#ifdef CONFIG_PAX_SEGMEXEC
33352+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33353+ pax_task_size = SEGMEXEC_TASK_SIZE;
33354+#endif
33355
33356 if (gap < MIN_GAP)
33357 gap = MIN_GAP;
33358 else if (gap > MAX_GAP)
33359 gap = MAX_GAP;
33360
33361- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33362+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33363 }
33364
33365 /*
33366 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33367 * does, but not when emulating X86_32
33368 */
33369-static unsigned long mmap_legacy_base(void)
33370+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33371 {
33372- if (mmap_is_ia32())
33373+ if (mmap_is_ia32()) {
33374+
33375+#ifdef CONFIG_PAX_SEGMEXEC
33376+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33377+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33378+ else
33379+#endif
33380+
33381 return TASK_UNMAPPED_BASE;
33382- else
33383+ } else
33384 return TASK_UNMAPPED_BASE + mmap_rnd();
33385 }
33386
33387@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33388 */
33389 void arch_pick_mmap_layout(struct mm_struct *mm)
33390 {
33391- mm->mmap_legacy_base = mmap_legacy_base();
33392- mm->mmap_base = mmap_base();
33393+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33394+ mm->mmap_base = mmap_base(mm);
33395+
33396+#ifdef CONFIG_PAX_RANDMMAP
33397+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33398+ mm->mmap_legacy_base += mm->delta_mmap;
33399+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33400+ }
33401+#endif
33402
33403 if (mmap_is_legacy()) {
33404 mm->mmap_base = mm->mmap_legacy_base;
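The mmap.c changes shift both layout bases when RANDMMAP is active: the bottom-up (legacy) base slides up by delta_mmap, while the top-down base slides down by delta_mmap + delta_stack so the randomized stack keeps its gap. Hedged arithmetic with made-up deltas (the kernel derives the real values from per-exec entropy):

#include <stdio.h>

int main(void)
{
        unsigned long mmap_legacy_base = 0x40000000UL; /* hypothetical */
        unsigned long mmap_base        = 0xb7000000UL; /* hypothetical */
        unsigned long delta_mmap       = 0x01230000UL; /* hypothetical */
        unsigned long delta_stack      = 0x00045000UL; /* hypothetical */

        mmap_legacy_base += delta_mmap;          /* bottom-up: slide up */
        mmap_base -= delta_mmap + delta_stack;   /* top-down: slide down */
        printf("%#lx %#lx\n", mmap_legacy_base, mmap_base);
        return 0;
}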
33405diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33406index 0057a7a..95c7edd 100644
33407--- a/arch/x86/mm/mmio-mod.c
33408+++ b/arch/x86/mm/mmio-mod.c
33409@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33410 break;
33411 default:
33412 {
33413- unsigned char *ip = (unsigned char *)instptr;
33414+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33415 my_trace->opcode = MMIO_UNKNOWN_OP;
33416 my_trace->width = 0;
33417 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33418@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33419 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33420 void __iomem *addr)
33421 {
33422- static atomic_t next_id;
33423+ static atomic_unchecked_t next_id;
33424 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33425 /* These are page-unaligned. */
33426 struct mmiotrace_map map = {
33427@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33428 .private = trace
33429 },
33430 .phys = offset,
33431- .id = atomic_inc_return(&next_id)
33432+ .id = atomic_inc_return_unchecked(&next_id)
33433 };
33434 map.map_id = trace->id;
33435
33436@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33437 ioremap_trace_core(offset, size, addr);
33438 }
33439
33440-static void iounmap_trace_core(volatile void __iomem *addr)
33441+static void iounmap_trace_core(const volatile void __iomem *addr)
33442 {
33443 struct mmiotrace_map map = {
33444 .phys = 0,
33445@@ -328,7 +328,7 @@ not_enabled:
33446 }
33447 }
33448
33449-void mmiotrace_iounmap(volatile void __iomem *addr)
33450+void mmiotrace_iounmap(const volatile void __iomem *addr)
33451 {
33452 might_sleep();
33453 if (is_enabled()) /* recheck and proper locking in *_core() */
33454diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33455index 1a88370..3f598b5 100644
33456--- a/arch/x86/mm/numa.c
33457+++ b/arch/x86/mm/numa.c
33458@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33459 }
33460 }
33461
33462-static int __init numa_register_memblks(struct numa_meminfo *mi)
33463+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33464 {
33465 unsigned long uninitialized_var(pfn_align);
33466 int i, nid;
33467diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33468index 536ea2f..f42c293 100644
33469--- a/arch/x86/mm/pageattr.c
33470+++ b/arch/x86/mm/pageattr.c
33471@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33472 */
33473 #ifdef CONFIG_PCI_BIOS
33474 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33475- pgprot_val(forbidden) |= _PAGE_NX;
33476+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33477 #endif
33478
33479 /*
33480@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33481 * Does not cover __inittext since that is gone later on. On
33482 * 64bit we do not enforce !NX on the low mapping
33483 */
33484- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33485- pgprot_val(forbidden) |= _PAGE_NX;
33486+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33487+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33488
33489+#ifdef CONFIG_DEBUG_RODATA
33490 /*
33491 * The .rodata section needs to be read-only. Using the pfn
33492 * catches all aliases.
33493@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33494 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33495 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33496 pgprot_val(forbidden) |= _PAGE_RW;
33497+#endif
33498
33499 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33500 /*
33501@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33502 }
33503 #endif
33504
33505+#ifdef CONFIG_PAX_KERNEXEC
33506+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33507+ pgprot_val(forbidden) |= _PAGE_RW;
33508+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33509+ }
33510+#endif
33511+
33512 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33513
33514 return prot;
33515@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33516 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33517 {
33518 /* change init_mm */
33519+ pax_open_kernel();
33520 set_pte_atomic(kpte, pte);
33521+
33522 #ifdef CONFIG_X86_32
33523 if (!SHARED_KERNEL_PMD) {
33524+
33525+#ifdef CONFIG_PAX_PER_CPU_PGD
33526+ unsigned long cpu;
33527+#else
33528 struct page *page;
33529+#endif
33530
33531+#ifdef CONFIG_PAX_PER_CPU_PGD
33532+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33533+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33534+#else
33535 list_for_each_entry(page, &pgd_list, lru) {
33536- pgd_t *pgd;
33537+ pgd_t *pgd = (pgd_t *)page_address(page);
33538+#endif
33539+
33540 pud_t *pud;
33541 pmd_t *pmd;
33542
33543- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33544+ pgd += pgd_index(address);
33545 pud = pud_offset(pgd, address);
33546 pmd = pmd_offset(pud, address);
33547 set_pte_atomic((pte_t *)pmd, pte);
33548 }
33549 }
33550 #endif
33551+ pax_close_kernel();
33552 }
33553
33554 static int
33555diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33556index 7ac6869..c0ba541 100644
33557--- a/arch/x86/mm/pat.c
33558+++ b/arch/x86/mm/pat.c
33559@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33560 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33561
33562 if (pg_flags == _PGMT_DEFAULT)
33563- return -1;
33564+ return _PAGE_CACHE_MODE_NUM;
33565 else if (pg_flags == _PGMT_WC)
33566 return _PAGE_CACHE_MODE_WC;
33567 else if (pg_flags == _PGMT_UC_MINUS)
33568@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33569
33570 page = pfn_to_page(pfn);
33571 type = get_page_memtype(page);
33572- if (type != -1) {
33573+ if (type != _PAGE_CACHE_MODE_NUM) {
33574 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33575 start, end - 1, type, req_type);
33576 if (new_type)
33577@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33578
33579 if (!entry) {
33580 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33581- current->comm, current->pid, start, end - 1);
33582+ current->comm, task_pid_nr(current), start, end - 1);
33583 return -EINVAL;
33584 }
33585
33586@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33587 page = pfn_to_page(paddr >> PAGE_SHIFT);
33588 rettype = get_page_memtype(page);
33589 /*
33590- * -1 from get_page_memtype() implies RAM page is in its
33591+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33592 * default state and not reserved, and hence of type WB
33593 */
33594- if (rettype == -1)
33595+ if (rettype == _PAGE_CACHE_MODE_NUM)
33596 rettype = _PAGE_CACHE_MODE_WB;
33597
33598 return rettype;
33599@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33600
33601 while (cursor < to) {
33602 if (!devmem_is_allowed(pfn)) {
33603- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33604- current->comm, from, to - 1);
33605+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33606+ current->comm, from, to - 1, cursor);
33607 return 0;
33608 }
33609 cursor += PAGE_SIZE;
33610@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33611 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33612 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33613 "for [mem %#010Lx-%#010Lx]\n",
33614- current->comm, current->pid,
33615+ current->comm, task_pid_nr(current),
33616 cattr_name(pcm),
33617 base, (unsigned long long)(base + size-1));
33618 return -EINVAL;
33619@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33620 pcm = lookup_memtype(paddr);
33621 if (want_pcm != pcm) {
33622 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33623- current->comm, current->pid,
33624+ current->comm, task_pid_nr(current),
33625 cattr_name(want_pcm),
33626 (unsigned long long)paddr,
33627 (unsigned long long)(paddr + size - 1),
33628@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33629 free_memtype(paddr, paddr + size);
33630 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33631 " for [mem %#010Lx-%#010Lx], got %s\n",
33632- current->comm, current->pid,
33633+ current->comm, task_pid_nr(current),
33634 cattr_name(want_pcm),
33635 (unsigned long long)paddr,
33636 (unsigned long long)(paddr + size - 1),
33637diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33638index 6582adc..fcc5d0b 100644
33639--- a/arch/x86/mm/pat_rbtree.c
33640+++ b/arch/x86/mm/pat_rbtree.c
33641@@ -161,7 +161,7 @@ success:
33642
33643 failure:
33644 printk(KERN_INFO "%s:%d conflicting memory types "
33645- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33646+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33647 end, cattr_name(found_type), cattr_name(match->type));
33648 return -EBUSY;
33649 }
33650diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33651index 9f0614d..92ae64a 100644
33652--- a/arch/x86/mm/pf_in.c
33653+++ b/arch/x86/mm/pf_in.c
33654@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33655 int i;
33656 enum reason_type rv = OTHERS;
33657
33658- p = (unsigned char *)ins_addr;
33659+ p = (unsigned char *)ktla_ktva(ins_addr);
33660 p += skip_prefix(p, &prf);
33661 p += get_opcode(p, &opcode);
33662
33663@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33664 struct prefix_bits prf;
33665 int i;
33666
33667- p = (unsigned char *)ins_addr;
33668+ p = (unsigned char *)ktla_ktva(ins_addr);
33669 p += skip_prefix(p, &prf);
33670 p += get_opcode(p, &opcode);
33671
33672@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33673 struct prefix_bits prf;
33674 int i;
33675
33676- p = (unsigned char *)ins_addr;
33677+ p = (unsigned char *)ktla_ktva(ins_addr);
33678 p += skip_prefix(p, &prf);
33679 p += get_opcode(p, &opcode);
33680
33681@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33682 struct prefix_bits prf;
33683 int i;
33684
33685- p = (unsigned char *)ins_addr;
33686+ p = (unsigned char *)ktla_ktva(ins_addr);
33687 p += skip_prefix(p, &prf);
33688 p += get_opcode(p, &opcode);
33689 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33690@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33691 struct prefix_bits prf;
33692 int i;
33693
33694- p = (unsigned char *)ins_addr;
33695+ p = (unsigned char *)ktla_ktva(ins_addr);
33696 p += skip_prefix(p, &prf);
33697 p += get_opcode(p, &opcode);
33698 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
33699diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33700index 6fb6927..4fc13c0 100644
33701--- a/arch/x86/mm/pgtable.c
33702+++ b/arch/x86/mm/pgtable.c
33703@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33704 list_del(&page->lru);
33705 }
33706
33707-#define UNSHARED_PTRS_PER_PGD \
33708- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33709+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33710+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33711
33712+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33713+{
33714+ unsigned int count = USER_PGD_PTRS;
33715
33716+ if (!pax_user_shadow_base)
33717+ return;
33718+
33719+ while (count--)
33720+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33721+}
33722+#endif
33723+
33724+#ifdef CONFIG_PAX_PER_CPU_PGD
33725+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33726+{
33727+ unsigned int count = USER_PGD_PTRS;
33728+
33729+ while (count--) {
33730+ pgd_t pgd;
33731+
33732+#ifdef CONFIG_X86_64
33733+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33734+#else
33735+ pgd = *src++;
33736+#endif
33737+
33738+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33739+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33740+#endif
33741+
33742+ *dst++ = pgd;
33743+ }
33744+
33745+}
33746+#endif
33747+
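The UDEREF shadow above rewrites each user pgd entry for the kernel's own view: NX is set (where the hardware supports it) and _PAGE_USER is cleared, so the same translations remain reachable but only in supervisor mode and never executable. The transform in isolation, assuming the usual 64-bit bit positions (_PAGE_USER bit 2, _PAGE_NX bit 63):

#include <stdint.h>

typedef uint64_t pgdval_t;

#define PGD_USER (1ULL << 2)   /* stand-in for _PAGE_USER */
#define PGD_NX   (1ULL << 63)  /* stand-in for _PAGE_NX */

/* supervisor-only, non-executable view of a user pgd entry */
static pgdval_t shadow_entry(pgdval_t e, pgdval_t supported_mask)
{
        return (e | (PGD_NX & supported_mask)) & ~PGD_USER;
}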
33748+#ifdef CONFIG_X86_64
33749+#define pxd_t pud_t
33750+#define pyd_t pgd_t
33751+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33752+#define pgtable_pxd_page_ctor(page) true
33753+#define pgtable_pxd_page_dtor(page)
33754+#define pxd_free(mm, pud) pud_free((mm), (pud))
33755+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33756+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33757+#define PYD_SIZE PGDIR_SIZE
33758+#else
33759+#define pxd_t pmd_t
33760+#define pyd_t pud_t
33761+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33762+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33763+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33764+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33765+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33766+#define pyd_offset(mm, address) pud_offset((mm), (address))
33767+#define PYD_SIZE PUD_SIZE
33768+#endif
33769+
33770+#ifdef CONFIG_PAX_PER_CPU_PGD
33771+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33772+static inline void pgd_dtor(pgd_t *pgd) {}
33773+#else
33774 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33775 {
33776 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33777@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33778 pgd_list_del(pgd);
33779 spin_unlock(&pgd_lock);
33780 }
33781+#endif
33782
33783 /*
33784 * List of all pgd's needed for non-PAE so it can invalidate entries
33785@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33786 * -- nyc
33787 */
33788
33789-#ifdef CONFIG_X86_PAE
33790+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33791 /*
33792 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33793 * updating the top-level pagetable entries to guarantee the
33794@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33795 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33796 * and initialize the kernel pmds here.
33797 */
33798-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33799+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33800
33801 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33802 {
33803@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33804 */
33805 flush_tlb_mm(mm);
33806 }
33807+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33808+#define PREALLOCATED_PXDS USER_PGD_PTRS
33809 #else /* !CONFIG_X86_PAE */
33810
33811 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33812-#define PREALLOCATED_PMDS 0
33813+#define PREALLOCATED_PXDS 0
33814
33815 #endif /* CONFIG_X86_PAE */
33816
33817-static void free_pmds(pmd_t *pmds[])
33818+static void free_pxds(pxd_t *pxds[])
33819 {
33820 int i;
33821
33822- for(i = 0; i < PREALLOCATED_PMDS; i++)
33823- if (pmds[i]) {
33824- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33825- free_page((unsigned long)pmds[i]);
33826+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33827+ if (pxds[i]) {
33828+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33829+ free_page((unsigned long)pxds[i]);
33830 }
33831 }
33832
33833-static int preallocate_pmds(pmd_t *pmds[])
33834+static int preallocate_pxds(pxd_t *pxds[])
33835 {
33836 int i;
33837 bool failed = false;
33838
33839- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33840- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33841- if (!pmd)
33842+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33843+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33844+ if (!pxd)
33845 failed = true;
33846- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33847- free_page((unsigned long)pmd);
33848- pmd = NULL;
33849+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33850+ free_page((unsigned long)pxd);
33851+ pxd = NULL;
33852 failed = true;
33853 }
33854- pmds[i] = pmd;
33855+ pxds[i] = pxd;
33856 }
33857
33858 if (failed) {
33859- free_pmds(pmds);
33860+ free_pxds(pxds);
33861 return -ENOMEM;
33862 }
33863
33864@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
33865 * preallocate which never got a corresponding vma will need to be
33866 * freed manually.
33867 */
33868-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33869+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33870 {
33871 int i;
33872
33873- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33874+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33875 pgd_t pgd = pgdp[i];
33876
33877 if (pgd_val(pgd) != 0) {
33878- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33879+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33880
33881- pgdp[i] = native_make_pgd(0);
33882+ set_pgd(pgdp + i, native_make_pgd(0));
33883
33884- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33885- pmd_free(mm, pmd);
33886+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33887+ pxd_free(mm, pxd);
33888 }
33889 }
33890 }
33891
33892-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33893+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33894 {
33895- pud_t *pud;
33896+ pyd_t *pyd;
33897 int i;
33898
33899- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33900+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33901 return;
33902
33903- pud = pud_offset(pgd, 0);
33904-
33905- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33906- pmd_t *pmd = pmds[i];
33907+#ifdef CONFIG_X86_64
33908+ pyd = pyd_offset(mm, 0L);
33909+#else
33910+ pyd = pyd_offset(pgd, 0L);
33911+#endif
33912
33913+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33914+ pxd_t *pxd = pxds[i];
33915 if (i >= KERNEL_PGD_BOUNDARY)
33916- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33917- sizeof(pmd_t) * PTRS_PER_PMD);
33918+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33919+ sizeof(pxd_t) * PTRS_PER_PMD);
33920
33921- pud_populate(mm, pud, pmd);
33922+ pyd_populate(mm, pyd, pxd);
33923 }
33924 }
33925
33926 pgd_t *pgd_alloc(struct mm_struct *mm)
33927 {
33928 pgd_t *pgd;
33929- pmd_t *pmds[PREALLOCATED_PMDS];
33930+ pxd_t *pxds[PREALLOCATED_PXDS];
33931
33932 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
33933
33934@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33935
33936 mm->pgd = pgd;
33937
33938- if (preallocate_pmds(pmds) != 0)
33939+ if (preallocate_pxds(pxds) != 0)
33940 goto out_free_pgd;
33941
33942 if (paravirt_pgd_alloc(mm) != 0)
33943- goto out_free_pmds;
33944+ goto out_free_pxds;
33945
33946 /*
33947 * Make sure that pre-populating the pmds is atomic with
33948@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33949 spin_lock(&pgd_lock);
33950
33951 pgd_ctor(mm, pgd);
33952- pgd_prepopulate_pmd(mm, pgd, pmds);
33953+ pgd_prepopulate_pxd(mm, pgd, pxds);
33954
33955 spin_unlock(&pgd_lock);
33956
33957 return pgd;
33958
33959-out_free_pmds:
33960- free_pmds(pmds);
33961+out_free_pxds:
33962+ free_pxds(pxds);
33963 out_free_pgd:
33964 free_page((unsigned long)pgd);
33965 out:
33966@@ -313,7 +380,7 @@ out:
33967
33968 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
33969 {
33970- pgd_mop_up_pmds(mm, pgd);
33971+ pgd_mop_up_pxds(mm, pgd);
33972 pgd_dtor(pgd);
33973 paravirt_pgd_free(mm, pgd);
33974 free_page((unsigned long)pgd);
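
The pgtable.c rewrite above folds two preallocation cases into one body by renaming the two adjacent page-table levels pxd/pyd through macros: pmds under puds on 32-bit PAE, puds under pgds on 64-bit with per-CPU PGDs. A compile-time sketch of the renaming trick, with toy types and counts standing in for the kernel's:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long pmd_t;
typedef unsigned long pud_t;

#ifdef MODEL_64                 /* pud-under-pgd flavour */
#define pxd_t             pud_t
#define PREALLOCATED_PXDS 4     /* stands in for USER_PGD_PTRS */
#else                           /* pmd-under-pud flavour */
#define pxd_t             pmd_t
#define PREALLOCATED_PXDS 3     /* stands in for KERNEL_PGD_BOUNDARY */
#endif

static int preallocate_pxds(pxd_t *pxds[])
{
    for (int i = 0; i < PREALLOCATED_PXDS; i++) {
        pxds[i] = malloc(sizeof(pxd_t));   /* kernel: __get_free_page() */
        if (!pxds[i])
            return -1;
    }
    return 0;
}

int main(void)
{
    pxd_t *pxds[PREALLOCATED_PXDS];
    if (preallocate_pxds(pxds) != 0)
        return 1;
    printf("preallocated %d entries\n", PREALLOCATED_PXDS);
    return 0;
}
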
33975diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
33976index 75cc097..79a097f 100644
33977--- a/arch/x86/mm/pgtable_32.c
33978+++ b/arch/x86/mm/pgtable_32.c
33979@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
33980 return;
33981 }
33982 pte = pte_offset_kernel(pmd, vaddr);
33983+
33984+ pax_open_kernel();
33985 if (pte_val(pteval))
33986 set_pte_at(&init_mm, vaddr, pte, pteval);
33987 else
33988 pte_clear(&init_mm, vaddr, pte);
33989+ pax_close_kernel();
33990
33991 /*
33992 * It's enough to flush this one mapping.
33993diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
33994index e666cbb..61788c45 100644
33995--- a/arch/x86/mm/physaddr.c
33996+++ b/arch/x86/mm/physaddr.c
33997@@ -10,7 +10,7 @@
33998 #ifdef CONFIG_X86_64
33999
34000 #ifdef CONFIG_DEBUG_VIRTUAL
34001-unsigned long __phys_addr(unsigned long x)
34002+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34003 {
34004 unsigned long y = x - __START_KERNEL_map;
34005
34006@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34007 #else
34008
34009 #ifdef CONFIG_DEBUG_VIRTUAL
34010-unsigned long __phys_addr(unsigned long x)
34011+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34012 {
34013 unsigned long phys_addr = x - PAGE_OFFSET;
34014 /* VMALLOC_* aren't constants */
34015diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34016index 90555bf..f5f1828 100644
34017--- a/arch/x86/mm/setup_nx.c
34018+++ b/arch/x86/mm/setup_nx.c
34019@@ -5,8 +5,10 @@
34020 #include <asm/pgtable.h>
34021 #include <asm/proto.h>
34022
34023+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34024 static int disable_nx;
34025
34026+#ifndef CONFIG_PAX_PAGEEXEC
34027 /*
34028 * noexec = on|off
34029 *
34030@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34031 return 0;
34032 }
34033 early_param("noexec", noexec_setup);
34034+#endif
34035+
34036+#endif
34037
34038 void x86_configure_nx(void)
34039 {
34040+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34041 if (cpu_has_nx && !disable_nx)
34042 __supported_pte_mask |= _PAGE_NX;
34043 else
34044+#endif
34045 __supported_pte_mask &= ~_PAGE_NX;
34046 }
34047
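
x86_configure_nx() above now only consults disable_nx when the build can have NX at all (64-bit or PAE); otherwise the NX bit is unconditionally stripped from the global PTE mask. A self-contained model of that decision, with the CPUID probe and command line faked:

#include <stdio.h>
#include <stdint.h>

#define _PAGE_NX (1ULL << 63)

int main(void)
{
    uint64_t supported_pte_mask = ~0ULL;
    int cpu_has_nx = 1, disable_nx = 0;  /* fake the CPUID probe and cmdline */

    if (cpu_has_nx && !disable_nx)
        supported_pte_mask |= _PAGE_NX;
    else
        supported_pte_mask &= ~_PAGE_NX;

    printf("NX %s\n", (supported_pte_mask & _PAGE_NX) ? "kept" : "stripped");
    return 0;
}
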
34048diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34049index ee61c36..e6fedeb 100644
34050--- a/arch/x86/mm/tlb.c
34051+++ b/arch/x86/mm/tlb.c
34052@@ -48,7 +48,11 @@ void leave_mm(int cpu)
34053 BUG();
34054 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34055 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34056+
34057+#ifndef CONFIG_PAX_PER_CPU_PGD
34058 load_cr3(swapper_pg_dir);
34059+#endif
34060+
34061 /*
34062 * This gets called in the idle path where RCU
34063 * functions differently. Tracing normally
34064diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34065new file mode 100644
34066index 0000000..dace51c
34067--- /dev/null
34068+++ b/arch/x86/mm/uderef_64.c
34069@@ -0,0 +1,37 @@
34070+#include <linux/mm.h>
34071+#include <asm/pgtable.h>
34072+#include <asm/uaccess.h>
34073+
34074+#ifdef CONFIG_PAX_MEMORY_UDEREF
34075+/* PaX: due to the special call convention these functions must
34076+ * - remain leaf functions under all configurations,
34077+ * - never be called directly, only dereferenced from the wrappers.
34078+ */
34079+void __pax_open_userland(void)
34080+{
34081+ unsigned int cpu;
34082+
34083+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34084+ return;
34085+
34086+ cpu = raw_get_cpu();
34087+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34088+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34089+ raw_put_cpu_no_resched();
34090+}
34091+EXPORT_SYMBOL(__pax_open_userland);
34092+
34093+void __pax_close_userland(void)
34094+{
34095+ unsigned int cpu;
34096+
34097+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34098+ return;
34099+
34100+ cpu = raw_get_cpu();
34101+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34102+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34103+ raw_put_cpu_no_resched();
34104+}
34105+EXPORT_SYMBOL(__pax_close_userland);
34106+#endif
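
The new uderef_64.c swaps CR3 between the per-CPU kernel and user-shadow PGDs, tagging each with a PCID and setting the no-flush bit so the switch does not dump the TLB. A userland sketch of how those CR3 values are composed; the PCID assignments and PGD addresses here are stand-ins, not the kernel's actual values.

#include <stdio.h>
#include <stdint.h>

#define PCID_KERNEL  0ULL          /* assumed PCID assignments */
#define PCID_USER    1ULL
#define PCID_NOFLUSH (1ULL << 63)  /* CR3 bit 63: keep TLB entries */

int main(void)
{
    uint64_t kernel_pgd_pa = 0x1000, user_pgd_pa = 0x2000;  /* placeholders */

    uint64_t open_cr3  = user_pgd_pa   | PCID_USER   | PCID_NOFLUSH;
    uint64_t close_cr3 = kernel_pgd_pa | PCID_KERNEL | PCID_NOFLUSH;

    printf("open userland:  cr3 <- %#llx\n", (unsigned long long)open_cr3);
    printf("close userland: cr3 <- %#llx\n", (unsigned long long)close_cr3);
    return 0;
}
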
34107diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34108index 6440221..f84b5c7 100644
34109--- a/arch/x86/net/bpf_jit.S
34110+++ b/arch/x86/net/bpf_jit.S
34111@@ -9,6 +9,7 @@
34112 */
34113 #include <linux/linkage.h>
34114 #include <asm/dwarf2.h>
34115+#include <asm/alternative-asm.h>
34116
34117 /*
34118 * Calling convention :
34119@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34120 jle bpf_slow_path_word
34121 mov (SKBDATA,%rsi),%eax
34122 bswap %eax /* ntohl() */
34123+ pax_force_retaddr
34124 ret
34125
34126 sk_load_half:
34127@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34128 jle bpf_slow_path_half
34129 movzwl (SKBDATA,%rsi),%eax
34130 rol $8,%ax # ntohs()
34131+ pax_force_retaddr
34132 ret
34133
34134 sk_load_byte:
34135@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34136 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34137 jle bpf_slow_path_byte
34138 movzbl (SKBDATA,%rsi),%eax
34139+ pax_force_retaddr
34140 ret
34141
34142 /* rsi contains offset and can be scratched */
34143@@ -90,6 +94,7 @@ bpf_slow_path_word:
34144 js bpf_error
34145 mov - MAX_BPF_STACK + 32(%rbp),%eax
34146 bswap %eax
34147+ pax_force_retaddr
34148 ret
34149
34150 bpf_slow_path_half:
34151@@ -98,12 +103,14 @@ bpf_slow_path_half:
34152 mov - MAX_BPF_STACK + 32(%rbp),%ax
34153 rol $8,%ax
34154 movzwl %ax,%eax
34155+ pax_force_retaddr
34156 ret
34157
34158 bpf_slow_path_byte:
34159 bpf_slow_path_common(1)
34160 js bpf_error
34161 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34162+ pax_force_retaddr
34163 ret
34164
34165 #define sk_negative_common(SIZE) \
34166@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34167 sk_negative_common(4)
34168 mov (%rax), %eax
34169 bswap %eax
34170+ pax_force_retaddr
34171 ret
34172
34173 bpf_slow_path_half_neg:
34174@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34175 mov (%rax),%ax
34176 rol $8,%ax
34177 movzwl %ax,%eax
34178+ pax_force_retaddr
34179 ret
34180
34181 bpf_slow_path_byte_neg:
34182@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34183 .globl sk_load_byte_negative_offset
34184 sk_negative_common(1)
34185 movzbl (%rax), %eax
34186+ pax_force_retaddr
34187 ret
34188
34189 bpf_error:
34190@@ -156,4 +166,5 @@ bpf_error:
34191 mov - MAX_BPF_STACK + 16(%rbp),%r14
34192 mov - MAX_BPF_STACK + 24(%rbp),%r15
34193 leaveq
34194+ pax_force_retaddr
34195 ret
34196diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34197index 9875143..00f6656 100644
34198--- a/arch/x86/net/bpf_jit_comp.c
34199+++ b/arch/x86/net/bpf_jit_comp.c
34200@@ -13,7 +13,11 @@
34201 #include <linux/if_vlan.h>
34202 #include <asm/cacheflush.h>
34203
34204+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34205+int bpf_jit_enable __read_only;
34206+#else
34207 int bpf_jit_enable __read_mostly;
34208+#endif
34209
34210 /*
34211 * assembly code in arch/x86/net/bpf_jit.S
34212@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34213 static void jit_fill_hole(void *area, unsigned int size)
34214 {
34215 /* fill whole space with int3 instructions */
34216+ pax_open_kernel();
34217 memset(area, 0xcc, size);
34218+ pax_close_kernel();
34219 }
34220
34221 struct jit_context {
34222@@ -896,7 +902,9 @@ common_load:
34223 pr_err("bpf_jit_compile fatal error\n");
34224 return -EFAULT;
34225 }
34226+ pax_open_kernel();
34227 memcpy(image + proglen, temp, ilen);
34228+ pax_close_kernel();
34229 }
34230 proglen += ilen;
34231 addrs[i] = proglen;
34232@@ -968,7 +976,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34233
34234 if (image) {
34235 bpf_flush_icache(header, image + proglen);
34236- set_memory_ro((unsigned long)header, header->pages);
34237 prog->bpf_func = (void *)image;
34238 prog->jited = true;
34239 }
34240@@ -981,12 +988,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34241 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34242 struct bpf_binary_header *header = (void *)addr;
34243
34244- if (!fp->jited)
34245- goto free_filter;
34246+ if (fp->jited)
34247+ bpf_jit_binary_free(header);
34248
34249- set_memory_rw(addr, header->pages);
34250- bpf_jit_binary_free(header);
34251-
34252-free_filter:
34253 bpf_prog_unlock_free(fp);
34254 }
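
In bpf_jit_comp.c the JIT image is treated as read-only throughout, so the remaining writes (the int3 fill and the final instruction copy) are bracketed with pax_open_kernel()/pax_close_kernel(). A rough userland analogue using mprotect() in place of the kernel's write-protect toggle:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;
    unsigned char *image = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (image == MAP_FAILED)
        return 1;
    mprotect(image, len, PROT_READ);       /* JIT image is normally RO */

    mprotect(image, len, PROT_READ | PROT_WRITE);  /* ~ pax_open_kernel() */
    memset(image, 0xcc, len);              /* jit_fill_hole(): all int3 */
    mprotect(image, len, PROT_READ);               /* ~ pax_close_kernel() */

    printf("first byte: %#x\n", image[0]);
    return 0;
}
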
34255diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34256index 5d04be5..2beeaa2 100644
34257--- a/arch/x86/oprofile/backtrace.c
34258+++ b/arch/x86/oprofile/backtrace.c
34259@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34260 struct stack_frame_ia32 *fp;
34261 unsigned long bytes;
34262
34263- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34264+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34265 if (bytes != 0)
34266 return NULL;
34267
34268- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34269+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34270
34271 oprofile_add_trace(bufhead[0].return_address);
34272
34273@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34274 struct stack_frame bufhead[2];
34275 unsigned long bytes;
34276
34277- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34278+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34279 if (bytes != 0)
34280 return NULL;
34281
34282@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34283 {
34284 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34285
34286- if (!user_mode_vm(regs)) {
34287+ if (!user_mode(regs)) {
34288 unsigned long stack = kernel_stack_pointer(regs);
34289 if (depth)
34290 dump_trace(NULL, regs, (unsigned long *)stack, 0,
34291diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34292index 1d2e639..f6ef82a 100644
34293--- a/arch/x86/oprofile/nmi_int.c
34294+++ b/arch/x86/oprofile/nmi_int.c
34295@@ -23,6 +23,7 @@
34296 #include <asm/nmi.h>
34297 #include <asm/msr.h>
34298 #include <asm/apic.h>
34299+#include <asm/pgtable.h>
34300
34301 #include "op_counter.h"
34302 #include "op_x86_model.h"
34303@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34304 if (ret)
34305 return ret;
34306
34307- if (!model->num_virt_counters)
34308- model->num_virt_counters = model->num_counters;
34309+ if (!model->num_virt_counters) {
34310+ pax_open_kernel();
34311+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34312+ pax_close_kernel();
34313+ }
34314
34315 mux_init(ops);
34316
34317diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34318index 50d86c0..7985318 100644
34319--- a/arch/x86/oprofile/op_model_amd.c
34320+++ b/arch/x86/oprofile/op_model_amd.c
34321@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34322 num_counters = AMD64_NUM_COUNTERS;
34323 }
34324
34325- op_amd_spec.num_counters = num_counters;
34326- op_amd_spec.num_controls = num_counters;
34327- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34328+ pax_open_kernel();
34329+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34330+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34331+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34332+ pax_close_kernel();
34333
34334 return 0;
34335 }
34336diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34337index d90528e..0127e2b 100644
34338--- a/arch/x86/oprofile/op_model_ppro.c
34339+++ b/arch/x86/oprofile/op_model_ppro.c
34340@@ -19,6 +19,7 @@
34341 #include <asm/msr.h>
34342 #include <asm/apic.h>
34343 #include <asm/nmi.h>
34344+#include <asm/pgtable.h>
34345
34346 #include "op_x86_model.h"
34347 #include "op_counter.h"
34348@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34349
34350 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34351
34352- op_arch_perfmon_spec.num_counters = num_counters;
34353- op_arch_perfmon_spec.num_controls = num_counters;
34354+ pax_open_kernel();
34355+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34356+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34357+ pax_close_kernel();
34358 }
34359
34360 static int arch_perfmon_init(struct oprofile_operations *ignore)
34361diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34362index 71e8a67..6a313bb 100644
34363--- a/arch/x86/oprofile/op_x86_model.h
34364+++ b/arch/x86/oprofile/op_x86_model.h
34365@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34366 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34367 struct op_msrs const * const msrs);
34368 #endif
34369-};
34370+} __do_const;
34371
34372 struct op_counter_config;
34373
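
Marking op_x86_model_spec __do_const moves these ops tables into read-only memory, which is why the earlier oprofile hunks write the counter counts through explicit casts inside an open/close window. A small sketch of that idiom; here the window is only a comment and the backing object is ordinary writable memory:

#include <stdio.h>

struct op_spec {
    unsigned int num_counters;
    unsigned int num_controls;
};

static struct op_spec spec;                 /* kernel: read-only section */
static const struct op_spec *model = &spec; /* consumers see it as const */

int main(void)
{
    unsigned int probed = 4;                /* pretend counters were probed */

    /* pax_open_kernel(); */
    *(unsigned int *)&model->num_counters = probed;
    *(unsigned int *)&model->num_controls = probed;
    /* pax_close_kernel(); */

    printf("counters=%u controls=%u\n",
           model->num_counters, model->num_controls);
    return 0;
}
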
34374diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34375index 44b9271..4c5a988 100644
34376--- a/arch/x86/pci/intel_mid_pci.c
34377+++ b/arch/x86/pci/intel_mid_pci.c
34378@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34379 pci_mmcfg_late_init();
34380 pcibios_enable_irq = intel_mid_pci_irq_enable;
34381 pcibios_disable_irq = intel_mid_pci_irq_disable;
34382- pci_root_ops = intel_mid_pci_ops;
34383+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34384 pci_soc_mode = 1;
34385 /* Continue with standard init */
34386 return 1;
34387diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34388index 5dc6ca5..25c03f5 100644
34389--- a/arch/x86/pci/irq.c
34390+++ b/arch/x86/pci/irq.c
34391@@ -51,7 +51,7 @@ struct irq_router {
34392 struct irq_router_handler {
34393 u16 vendor;
34394 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34395-};
34396+} __do_const;
34397
34398 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34399 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34400@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34401 return 0;
34402 }
34403
34404-static __initdata struct irq_router_handler pirq_routers[] = {
34405+static __initconst const struct irq_router_handler pirq_routers[] = {
34406 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34407 { PCI_VENDOR_ID_AL, ali_router_probe },
34408 { PCI_VENDOR_ID_ITE, ite_router_probe },
34409@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34410 static void __init pirq_find_router(struct irq_router *r)
34411 {
34412 struct irq_routing_table *rt = pirq_table;
34413- struct irq_router_handler *h;
34414+ const struct irq_router_handler *h;
34415
34416 #ifdef CONFIG_PCI_BIOS
34417 if (!rt->signature) {
34418@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34419 return 0;
34420 }
34421
34422-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34423+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34424 {
34425 .callback = fix_broken_hp_bios_irq9,
34426 .ident = "HP Pavilion N5400 Series Laptop",
34427diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34428index 9b83b90..4112152 100644
34429--- a/arch/x86/pci/pcbios.c
34430+++ b/arch/x86/pci/pcbios.c
34431@@ -79,7 +79,7 @@ union bios32 {
34432 static struct {
34433 unsigned long address;
34434 unsigned short segment;
34435-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34436+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34437
34438 /*
34439 * Returns the entry point for the given service, NULL on error
34440@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34441 unsigned long length; /* %ecx */
34442 unsigned long entry; /* %edx */
34443 unsigned long flags;
34444+ struct desc_struct d, *gdt;
34445
34446 local_irq_save(flags);
34447- __asm__("lcall *(%%edi); cld"
34448+
34449+ gdt = get_cpu_gdt_table(smp_processor_id());
34450+
34451+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34452+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34453+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34454+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34455+
34456+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34457 : "=a" (return_code),
34458 "=b" (address),
34459 "=c" (length),
34460 "=d" (entry)
34461 : "0" (service),
34462 "1" (0),
34463- "D" (&bios32_indirect));
34464+ "D" (&bios32_indirect),
34465+ "r"(__PCIBIOS_DS)
34466+ : "memory");
34467+
34468+ pax_open_kernel();
34469+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34470+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34471+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34472+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34473+ pax_close_kernel();
34474+
34475 local_irq_restore(flags);
34476
34477 switch (return_code) {
34478- case 0:
34479- return address + entry;
34480- case 0x80: /* Not present */
34481- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34482- return 0;
34483- default: /* Shouldn't happen */
34484- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34485- service, return_code);
34486+ case 0: {
34487+ int cpu;
34488+ unsigned char flags;
34489+
34490+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34491+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34492+ printk(KERN_WARNING "bios32_service: not valid\n");
34493 return 0;
34494+ }
34495+ address = address + PAGE_OFFSET;
34496+ length += 16UL; /* some BIOSs underreport this... */
34497+ flags = 4;
34498+ if (length >= 64*1024*1024) {
34499+ length >>= PAGE_SHIFT;
34500+ flags |= 8;
34501+ }
34502+
34503+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34504+ gdt = get_cpu_gdt_table(cpu);
34505+ pack_descriptor(&d, address, length, 0x9b, flags);
34506+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34507+ pack_descriptor(&d, address, length, 0x93, flags);
34508+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34509+ }
34510+ return entry;
34511+ }
34512+ case 0x80: /* Not present */
34513+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34514+ return 0;
34515+ default: /* Shouldn't happen */
34516+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34517+ service, return_code);
34518+ return 0;
34519 }
34520 }
34521
34522 static struct {
34523 unsigned long address;
34524 unsigned short segment;
34525-} pci_indirect = { 0, __KERNEL_CS };
34526+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34527
34528-static int pci_bios_present;
34529+static int pci_bios_present __read_only;
34530
34531 static int __init check_pcibios(void)
34532 {
34533@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34534 unsigned long flags, pcibios_entry;
34535
34536 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34537- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34538+ pci_indirect.address = pcibios_entry;
34539
34540 local_irq_save(flags);
34541- __asm__(
34542- "lcall *(%%edi); cld\n\t"
34543+ __asm__("movw %w6, %%ds\n\t"
34544+ "lcall *%%ss:(%%edi); cld\n\t"
34545+ "push %%ss\n\t"
34546+ "pop %%ds\n\t"
34547 "jc 1f\n\t"
34548 "xor %%ah, %%ah\n"
34549 "1:"
34550@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34551 "=b" (ebx),
34552 "=c" (ecx)
34553 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34554- "D" (&pci_indirect)
34555+ "D" (&pci_indirect),
34556+ "r" (__PCIBIOS_DS)
34557 : "memory");
34558 local_irq_restore(flags);
34559
34560@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34561
34562 switch (len) {
34563 case 1:
34564- __asm__("lcall *(%%esi); cld\n\t"
34565+ __asm__("movw %w6, %%ds\n\t"
34566+ "lcall *%%ss:(%%esi); cld\n\t"
34567+ "push %%ss\n\t"
34568+ "pop %%ds\n\t"
34569 "jc 1f\n\t"
34570 "xor %%ah, %%ah\n"
34571 "1:"
34572@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34573 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34574 "b" (bx),
34575 "D" ((long)reg),
34576- "S" (&pci_indirect));
34577+ "S" (&pci_indirect),
34578+ "r" (__PCIBIOS_DS));
34579 /*
34580 * Zero-extend the result beyond 8 bits, do not trust the
34581 * BIOS having done it:
34582@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34583 *value &= 0xff;
34584 break;
34585 case 2:
34586- __asm__("lcall *(%%esi); cld\n\t"
34587+ __asm__("movw %w6, %%ds\n\t"
34588+ "lcall *%%ss:(%%esi); cld\n\t"
34589+ "push %%ss\n\t"
34590+ "pop %%ds\n\t"
34591 "jc 1f\n\t"
34592 "xor %%ah, %%ah\n"
34593 "1:"
34594@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34595 : "1" (PCIBIOS_READ_CONFIG_WORD),
34596 "b" (bx),
34597 "D" ((long)reg),
34598- "S" (&pci_indirect));
34599+ "S" (&pci_indirect),
34600+ "r" (__PCIBIOS_DS));
34601 /*
34602 * Zero-extend the result beyond 16 bits, do not trust the
34603 * BIOS having done it:
34604@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34605 *value &= 0xffff;
34606 break;
34607 case 4:
34608- __asm__("lcall *(%%esi); cld\n\t"
34609+ __asm__("movw %w6, %%ds\n\t"
34610+ "lcall *%%ss:(%%esi); cld\n\t"
34611+ "push %%ss\n\t"
34612+ "pop %%ds\n\t"
34613 "jc 1f\n\t"
34614 "xor %%ah, %%ah\n"
34615 "1:"
34616@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34617 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34618 "b" (bx),
34619 "D" ((long)reg),
34620- "S" (&pci_indirect));
34621+ "S" (&pci_indirect),
34622+ "r" (__PCIBIOS_DS));
34623 break;
34624 }
34625
34626@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34627
34628 switch (len) {
34629 case 1:
34630- __asm__("lcall *(%%esi); cld\n\t"
34631+ __asm__("movw %w6, %%ds\n\t"
34632+ "lcall *%%ss:(%%esi); cld\n\t"
34633+ "push %%ss\n\t"
34634+ "pop %%ds\n\t"
34635 "jc 1f\n\t"
34636 "xor %%ah, %%ah\n"
34637 "1:"
34638@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34639 "c" (value),
34640 "b" (bx),
34641 "D" ((long)reg),
34642- "S" (&pci_indirect));
34643+ "S" (&pci_indirect),
34644+ "r" (__PCIBIOS_DS));
34645 break;
34646 case 2:
34647- __asm__("lcall *(%%esi); cld\n\t"
34648+ __asm__("movw %w6, %%ds\n\t"
34649+ "lcall *%%ss:(%%esi); cld\n\t"
34650+ "push %%ss\n\t"
34651+ "pop %%ds\n\t"
34652 "jc 1f\n\t"
34653 "xor %%ah, %%ah\n"
34654 "1:"
34655@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34656 "c" (value),
34657 "b" (bx),
34658 "D" ((long)reg),
34659- "S" (&pci_indirect));
34660+ "S" (&pci_indirect),
34661+ "r" (__PCIBIOS_DS));
34662 break;
34663 case 4:
34664- __asm__("lcall *(%%esi); cld\n\t"
34665+ __asm__("movw %w6, %%ds\n\t"
34666+ "lcall *%%ss:(%%esi); cld\n\t"
34667+ "push %%ss\n\t"
34668+ "pop %%ds\n\t"
34669 "jc 1f\n\t"
34670 "xor %%ah, %%ah\n"
34671 "1:"
34672@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34673 "c" (value),
34674 "b" (bx),
34675 "D" ((long)reg),
34676- "S" (&pci_indirect));
34677+ "S" (&pci_indirect),
34678+ "r" (__PCIBIOS_DS));
34679 break;
34680 }
34681
34682@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34683
34684 DBG("PCI: Fetching IRQ routing table... ");
34685 __asm__("push %%es\n\t"
34686+ "movw %w8, %%ds\n\t"
34687 "push %%ds\n\t"
34688 "pop %%es\n\t"
34689- "lcall *(%%esi); cld\n\t"
34690+ "lcall *%%ss:(%%esi); cld\n\t"
34691 "pop %%es\n\t"
34692+ "push %%ss\n\t"
34693+ "pop %%ds\n"
34694 "jc 1f\n\t"
34695 "xor %%ah, %%ah\n"
34696 "1:"
34697@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34698 "1" (0),
34699 "D" ((long) &opt),
34700 "S" (&pci_indirect),
34701- "m" (opt)
34702+ "m" (opt),
34703+ "r" (__PCIBIOS_DS)
34704 : "memory");
34705 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34706 if (ret & 0xff00)
34707@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34708 {
34709 int ret;
34710
34711- __asm__("lcall *(%%esi); cld\n\t"
34712+ __asm__("movw %w5, %%ds\n\t"
34713+ "lcall *%%ss:(%%esi); cld\n\t"
34714+ "push %%ss\n\t"
34715+ "pop %%ds\n"
34716 "jc 1f\n\t"
34717 "xor %%ah, %%ah\n"
34718 "1:"
34719@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34720 : "0" (PCIBIOS_SET_PCI_HW_INT),
34721 "b" ((dev->bus->number << 8) | dev->devfn),
34722 "c" ((irq << 8) | (pin + 10)),
34723- "S" (&pci_indirect));
34724+ "S" (&pci_indirect),
34725+ "r" (__PCIBIOS_DS));
34726 return !(ret & 0xff00);
34727 }
34728 EXPORT_SYMBOL(pcibios_set_irq_routing);
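
The pcbios.c changes stop calling the BIOS through the kernel's own segments and instead build dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors sized to the BIOS32 service area. A standalone packer for the same 8-byte descriptor layout pack_descriptor() produces (0x9B = execute/read code, 0x93 = read/write data, flag 0xC = 4K granularity, 32-bit):

#include <stdio.h>
#include <stdint.h>

static uint64_t pack_descriptor(uint32_t base, uint32_t limit,
                                uint8_t type, uint8_t flags)
{
    uint64_t d = 0;
    d |=  (uint64_t)(limit & 0xFFFF);            /* limit 15:0  */
    d |= ((uint64_t)(base  & 0xFFFFFF)) << 16;   /* base  23:0  */
    d |= ((uint64_t)type)               << 40;   /* access byte */
    d |= ((uint64_t)(limit >> 16 & 0xF)) << 48;  /* limit 19:16 */
    d |= ((uint64_t)(flags & 0xF))      << 52;   /* G/DB/L/AVL  */
    d |= ((uint64_t)(base >> 24))       << 56;   /* base  31:24 */
    return d;
}

int main(void)
{
    /* the flat 4 GiB code segment from bios32_service() */
    printf("%#018llx\n",
           (unsigned long long)pack_descriptor(0, 0xFFFFF, 0x9B, 0xC));
    return 0;
}
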
34729diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34730index 40e7cda..c7e6672 100644
34731--- a/arch/x86/platform/efi/efi_32.c
34732+++ b/arch/x86/platform/efi/efi_32.c
34733@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34734 {
34735 struct desc_ptr gdt_descr;
34736
34737+#ifdef CONFIG_PAX_KERNEXEC
34738+ struct desc_struct d;
34739+#endif
34740+
34741 local_irq_save(efi_rt_eflags);
34742
34743 load_cr3(initial_page_table);
34744 __flush_tlb_all();
34745
34746+#ifdef CONFIG_PAX_KERNEXEC
34747+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34748+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34749+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34750+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34751+#endif
34752+
34753 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34754 gdt_descr.size = GDT_SIZE - 1;
34755 load_gdt(&gdt_descr);
34756@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34757 {
34758 struct desc_ptr gdt_descr;
34759
34760+#ifdef CONFIG_PAX_KERNEXEC
34761+ struct desc_struct d;
34762+
34763+ memset(&d, 0, sizeof d);
34764+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34765+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34766+#endif
34767+
34768 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34769 gdt_descr.size = GDT_SIZE - 1;
34770 load_gdt(&gdt_descr);
34771
34772+#ifdef CONFIG_PAX_PER_CPU_PGD
34773+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34774+#else
34775 load_cr3(swapper_pg_dir);
34776+#endif
34777+
34778 __flush_tlb_all();
34779
34780 local_irq_restore(efi_rt_eflags);
34781diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34782index 17e80d8..9fa6e41 100644
34783--- a/arch/x86/platform/efi/efi_64.c
34784+++ b/arch/x86/platform/efi/efi_64.c
34785@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34786 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34787 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34788 }
34789+
34790+#ifdef CONFIG_PAX_PER_CPU_PGD
34791+ load_cr3(swapper_pg_dir);
34792+#endif
34793+
34794 __flush_tlb_all();
34795 }
34796
34797@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34798 for (pgd = 0; pgd < n_pgds; pgd++)
34799 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34800 kfree(save_pgd);
34801+
34802+#ifdef CONFIG_PAX_PER_CPU_PGD
34803+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34804+#endif
34805+
34806 __flush_tlb_all();
34807 local_irq_restore(efi_flags);
34808 early_code_mapping_set_exec(0);
34809@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34810 unsigned npages;
34811 pgd_t *pgd;
34812
34813- if (efi_enabled(EFI_OLD_MEMMAP))
34814+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34815+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34816+ * able to execute the EFI services.
34817+ */
34818+ if (__supported_pte_mask & _PAGE_NX) {
34819+ unsigned long addr = (unsigned long) __va(0);
34820+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34821+
34822+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34823+#ifdef CONFIG_PAX_PER_CPU_PGD
34824+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34825+#endif
34826+ set_pgd(pgd_offset_k(addr), pe);
34827+ }
34828+
34829 return 0;
34830+ }
34831
34832 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34833 pgd = __va(efi_scratch.efi_pgt);
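
For the efi=old_map path, efi_setup_page_tables() above clears NX from the PGD entry covering the low direct map so firmware code mapped there remains executable. The same bit manipulation reduced to plain integers, with a placeholder entry value:

#include <stdio.h>
#include <stdint.h>

#define _PAGE_NX (1ULL << 63)

int main(void)
{
    uint64_t supported_pte_mask = _PAGE_NX;        /* pretend NX is in use */
    uint64_t pgd_entry = 0x1234000ULL | _PAGE_NX;  /* placeholder entry */

    if (supported_pte_mask & _PAGE_NX)
        pgd_entry &= ~_PAGE_NX;   /* allow EFI services to execute */

    printf("entry: %#llx\n", (unsigned long long)pgd_entry);
    return 0;
}
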
34834diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34835index 040192b..7d3300f 100644
34836--- a/arch/x86/platform/efi/efi_stub_32.S
34837+++ b/arch/x86/platform/efi/efi_stub_32.S
34838@@ -6,7 +6,9 @@
34839 */
34840
34841 #include <linux/linkage.h>
34842+#include <linux/init.h>
34843 #include <asm/page_types.h>
34844+#include <asm/segment.h>
34845
34846 /*
34847 * efi_call_phys(void *, ...) is a function with variable parameters.
34848@@ -20,7 +22,7 @@
34849 * service functions will comply with gcc calling convention, too.
34850 */
34851
34852-.text
34853+__INIT
34854 ENTRY(efi_call_phys)
34855 /*
34856 * 0. The function can only be called in Linux kernel. So CS has been
34857@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34858 * The mapping of lower virtual memory has been created in prolog and
34859 * epilog.
34860 */
34861- movl $1f, %edx
34862- subl $__PAGE_OFFSET, %edx
34863- jmp *%edx
34864+#ifdef CONFIG_PAX_KERNEXEC
34865+ movl $(__KERNEXEC_EFI_DS), %edx
34866+ mov %edx, %ds
34867+ mov %edx, %es
34868+ mov %edx, %ss
34869+ addl $2f,(1f)
34870+ ljmp *(1f)
34871+
34872+__INITDATA
34873+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34874+.previous
34875+
34876+2:
34877+ subl $2b,(1b)
34878+#else
34879+ jmp 1f-__PAGE_OFFSET
34880 1:
34881+#endif
34882
34883 /*
34884 * 2. Now on the top of stack is the return
34885@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34886 * parameter 2, ..., param n. To make things easy, we save the return
34887 * address of efi_call_phys in a global variable.
34888 */
34889- popl %edx
34890- movl %edx, saved_return_addr
34891- /* get the function pointer into ECX*/
34892- popl %ecx
34893- movl %ecx, efi_rt_function_ptr
34894- movl $2f, %edx
34895- subl $__PAGE_OFFSET, %edx
34896- pushl %edx
34897+ popl (saved_return_addr)
34898+ popl (efi_rt_function_ptr)
34899
34900 /*
34901 * 3. Clear PG bit in %CR0.
34902@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34903 /*
34904 * 5. Call the physical function.
34905 */
34906- jmp *%ecx
34907+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
34908
34909-2:
34910 /*
34911 * 6. After EFI runtime service returns, control will return to
34912 * following instruction. We'd better readjust stack pointer first.
34913@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
34914 movl %cr0, %edx
34915 orl $0x80000000, %edx
34916 movl %edx, %cr0
34917- jmp 1f
34918-1:
34919+
34920 /*
34921 * 8. Now restore the virtual mode from flat mode by
34922 * adding EIP with PAGE_OFFSET.
34923 */
34924- movl $1f, %edx
34925- jmp *%edx
34926+#ifdef CONFIG_PAX_KERNEXEC
34927+ movl $(__KERNEL_DS), %edx
34928+ mov %edx, %ds
34929+ mov %edx, %es
34930+ mov %edx, %ss
34931+ ljmp $(__KERNEL_CS),$1f
34932+#else
34933+ jmp 1f+__PAGE_OFFSET
34934+#endif
34935 1:
34936
34937 /*
34938 * 9. Balance the stack. And because EAX contain the return value,
34939 * we'd better not clobber it.
34940 */
34941- leal efi_rt_function_ptr, %edx
34942- movl (%edx), %ecx
34943- pushl %ecx
34944+ pushl (efi_rt_function_ptr)
34945
34946 /*
34947- * 10. Push the saved return address onto the stack and return.
34948+ * 10. Return to the saved return address.
34949 */
34950- leal saved_return_addr, %edx
34951- movl (%edx), %ecx
34952- pushl %ecx
34953- ret
34954+ jmpl *(saved_return_addr)
34955 ENDPROC(efi_call_phys)
34956 .previous
34957
34958-.data
34959+__INITDATA
34960 saved_return_addr:
34961 .long 0
34962 efi_rt_function_ptr:
34963diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
34964index 86d0f9e..6d499f4 100644
34965--- a/arch/x86/platform/efi/efi_stub_64.S
34966+++ b/arch/x86/platform/efi/efi_stub_64.S
34967@@ -11,6 +11,7 @@
34968 #include <asm/msr.h>
34969 #include <asm/processor-flags.h>
34970 #include <asm/page_types.h>
34971+#include <asm/alternative-asm.h>
34972
34973 #define SAVE_XMM \
34974 mov %rsp, %rax; \
34975@@ -88,6 +89,7 @@ ENTRY(efi_call)
34976 RESTORE_PGT
34977 addq $48, %rsp
34978 RESTORE_XMM
34979+ pax_force_retaddr 0, 1
34980 ret
34981 ENDPROC(efi_call)
34982
34983diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
34984index 1bbedc4..eb795b5 100644
34985--- a/arch/x86/platform/intel-mid/intel-mid.c
34986+++ b/arch/x86/platform/intel-mid/intel-mid.c
34987@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
34988 {
34989 };
34990
34991-static void intel_mid_reboot(void)
34992+static void __noreturn intel_mid_reboot(void)
34993 {
34994 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
34995+ BUG();
34996 }
34997
34998 static unsigned long __init intel_mid_calibrate_tsc(void)
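
intel_mid_reboot() gains __noreturn plus a trailing BUG() so the compiler may rely on the function never returning even if the IPC command does. The same contract in standard C, with abort() standing in for BUG():

#include <stdio.h>
#include <stdlib.h>

static _Noreturn void model_reboot(void)
{
    printf("sending cold-boot IPC...\n"); /* intel_scu_ipc_simple_command() */
    abort();                              /* BUG(): never fall through */
}

int main(void)
{
    model_reboot();
}
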
34999diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35000index 3c1c386..59a68ed 100644
35001--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35002+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35003@@ -13,6 +13,6 @@
35004 /* For every CPU addition a new get_<cpuname>_ops interface needs
35005 * to be added.
35006 */
35007-extern void *get_penwell_ops(void);
35008-extern void *get_cloverview_ops(void);
35009-extern void *get_tangier_ops(void);
35010+extern const void *get_penwell_ops(void);
35011+extern const void *get_cloverview_ops(void);
35012+extern const void *get_tangier_ops(void);
35013diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35014index 23381d2..8ddc10e 100644
35015--- a/arch/x86/platform/intel-mid/mfld.c
35016+++ b/arch/x86/platform/intel-mid/mfld.c
35017@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35018 pm_power_off = mfld_power_off;
35019 }
35020
35021-void *get_penwell_ops(void)
35022+const void *get_penwell_ops(void)
35023 {
35024 return &penwell_ops;
35025 }
35026
35027-void *get_cloverview_ops(void)
35028+const void *get_cloverview_ops(void)
35029 {
35030 return &penwell_ops;
35031 }
35032diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35033index aaca917..66eadbc 100644
35034--- a/arch/x86/platform/intel-mid/mrfl.c
35035+++ b/arch/x86/platform/intel-mid/mrfl.c
35036@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35037 .arch_setup = tangier_arch_setup,
35038 };
35039
35040-void *get_tangier_ops(void)
35041+const void *get_tangier_ops(void)
35042 {
35043 return &tangier_ops;
35044 }
35045diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35046index d6ee929..3637cb5 100644
35047--- a/arch/x86/platform/olpc/olpc_dt.c
35048+++ b/arch/x86/platform/olpc/olpc_dt.c
35049@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35050 return res;
35051 }
35052
35053-static struct of_pdt_ops prom_olpc_ops __initdata = {
35054+static struct of_pdt_ops prom_olpc_ops __initconst = {
35055 .nextprop = olpc_dt_nextprop,
35056 .getproplen = olpc_dt_getproplen,
35057 .getproperty = olpc_dt_getproperty,
35058diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35059index 6ec7910..ecdbb11 100644
35060--- a/arch/x86/power/cpu.c
35061+++ b/arch/x86/power/cpu.c
35062@@ -137,11 +137,8 @@ static void do_fpu_end(void)
35063 static void fix_processor_context(void)
35064 {
35065 int cpu = smp_processor_id();
35066- struct tss_struct *t = &per_cpu(init_tss, cpu);
35067-#ifdef CONFIG_X86_64
35068- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35069- tss_desc tss;
35070-#endif
35071+ struct tss_struct *t = init_tss + cpu;
35072+
35073 set_tss_desc(cpu, t); /*
35074 * This just modifies memory; should not be
35075 * necessary. But... This is necessary, because
35076@@ -150,10 +147,6 @@ static void fix_processor_context(void)
35077 */
35078
35079 #ifdef CONFIG_X86_64
35080- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35081- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35082- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35083-
35084 syscall_init(); /* This sets MSR_*STAR and related */
35085 #endif
35086 load_TR_desc(); /* This does ltr */
35087diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35088index bad628a..a102610 100644
35089--- a/arch/x86/realmode/init.c
35090+++ b/arch/x86/realmode/init.c
35091@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35092 __va(real_mode_header->trampoline_header);
35093
35094 #ifdef CONFIG_X86_32
35095- trampoline_header->start = __pa_symbol(startup_32_smp);
35096+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35097+
35098+#ifdef CONFIG_PAX_KERNEXEC
35099+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35100+#endif
35101+
35102+ trampoline_header->boot_cs = __BOOT_CS;
35103 trampoline_header->gdt_limit = __BOOT_DS + 7;
35104 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35105 #else
35106@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35107 *trampoline_cr4_features = read_cr4();
35108
35109 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35110- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35111+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35112 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35113 #endif
35114 }
35115diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35116index 7c0d7be..d24dc88 100644
35117--- a/arch/x86/realmode/rm/Makefile
35118+++ b/arch/x86/realmode/rm/Makefile
35119@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35120
35121 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35122 -I$(srctree)/arch/x86/boot
35123+ifdef CONSTIFY_PLUGIN
35124+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35125+endif
35126 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35127 GCOV_PROFILE := n
35128diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35129index a28221d..93c40f1 100644
35130--- a/arch/x86/realmode/rm/header.S
35131+++ b/arch/x86/realmode/rm/header.S
35132@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35133 #endif
35134 /* APM/BIOS reboot */
35135 .long pa_machine_real_restart_asm
35136-#ifdef CONFIG_X86_64
35137+#ifdef CONFIG_X86_32
35138+ .long __KERNEL_CS
35139+#else
35140 .long __KERNEL32_CS
35141 #endif
35142 END(real_mode_header)
35143diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35144index 48ddd76..c26749f 100644
35145--- a/arch/x86/realmode/rm/trampoline_32.S
35146+++ b/arch/x86/realmode/rm/trampoline_32.S
35147@@ -24,6 +24,12 @@
35148 #include <asm/page_types.h>
35149 #include "realmode.h"
35150
35151+#ifdef CONFIG_PAX_KERNEXEC
35152+#define ta(X) (X)
35153+#else
35154+#define ta(X) (pa_ ## X)
35155+#endif
35156+
35157 .text
35158 .code16
35159
35160@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35161
35162 cli # We should be safe anyway
35163
35164- movl tr_start, %eax # where we need to go
35165-
35166 movl $0xA5A5A5A5, trampoline_status
35167 # write marker for master knows we're running
35168
35169@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35170 movw $1, %dx # protected mode (PE) bit
35171 lmsw %dx # into protected mode
35172
35173- ljmpl $__BOOT_CS, $pa_startup_32
35174+ ljmpl *(trampoline_header)
35175
35176 .section ".text32","ax"
35177 .code32
35178@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35179 .balign 8
35180 GLOBAL(trampoline_header)
35181 tr_start: .space 4
35182- tr_gdt_pad: .space 2
35183+ tr_boot_cs: .space 2
35184 tr_gdt: .space 6
35185 END(trampoline_header)
35186
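
trampoline_32.S above replaces the padding after tr_start with tr_boot_cs, so the header doubles as the 48-bit memory operand of `ljmpl *(trampoline_header)`. A sketch of that layout; the selector and offset values are placeholders:

#include <stdio.h>
#include <stdint.h>

struct trampoline_header {
    uint32_t start;     /* tr_start: target offset */
    uint16_t boot_cs;   /* tr_boot_cs: target code selector */
};

int main(void)
{
    struct trampoline_header th = { 0x00100000u, 0x10 }; /* placeholders */
    printf("ljmpl -> %#06x:%#010x\n", th.boot_cs, th.start);
    return 0;
}
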
35187diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35188index dac7b20..72dbaca 100644
35189--- a/arch/x86/realmode/rm/trampoline_64.S
35190+++ b/arch/x86/realmode/rm/trampoline_64.S
35191@@ -93,6 +93,7 @@ ENTRY(startup_32)
35192 movl %edx, %gs
35193
35194 movl pa_tr_cr4, %eax
35195+ andl $~X86_CR4_PCIDE, %eax
35196 movl %eax, %cr4 # Enable PAE mode
35197
35198 # Setup trampoline 4 level pagetables
35199@@ -106,7 +107,7 @@ ENTRY(startup_32)
35200 wrmsr
35201
35202 # Enable paging and in turn activate Long Mode
35203- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35204+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35205 movl %eax, %cr0
35206
35207 /*
35208diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35209index 9e7e147..25a4158 100644
35210--- a/arch/x86/realmode/rm/wakeup_asm.S
35211+++ b/arch/x86/realmode/rm/wakeup_asm.S
35212@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35213 lgdtl pmode_gdt
35214
35215 /* This really couldn't... */
35216- movl pmode_entry, %eax
35217 movl pmode_cr0, %ecx
35218 movl %ecx, %cr0
35219- ljmpl $__KERNEL_CS, $pa_startup_32
35220- /* -> jmp *%eax in trampoline_32.S */
35221+
35222+ ljmpl *pmode_entry
35223 #else
35224 jmp trampoline_start
35225 #endif
35226diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35227index 604a37e..e49702a 100644
35228--- a/arch/x86/tools/Makefile
35229+++ b/arch/x86/tools/Makefile
35230@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35231
35232 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35233
35234-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35235+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35236 hostprogs-y += relocs
35237 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35238 PHONY += relocs
35239diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35240index 0c2fae8..88036b7 100644
35241--- a/arch/x86/tools/relocs.c
35242+++ b/arch/x86/tools/relocs.c
35243@@ -1,5 +1,7 @@
35244 /* This is included from relocs_32/64.c */
35245
35246+#include "../../../include/generated/autoconf.h"
35247+
35248 #define ElfW(type) _ElfW(ELF_BITS, type)
35249 #define _ElfW(bits, type) __ElfW(bits, type)
35250 #define __ElfW(bits, type) Elf##bits##_##type
35251@@ -11,6 +13,7 @@
35252 #define Elf_Sym ElfW(Sym)
35253
35254 static Elf_Ehdr ehdr;
35255+static Elf_Phdr *phdr;
35256
35257 struct relocs {
35258 uint32_t *offset;
35259@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35260 }
35261 }
35262
35263+static void read_phdrs(FILE *fp)
35264+{
35265+ unsigned int i;
35266+
35267+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35268+ if (!phdr) {
35269+ die("Unable to allocate %d program headers\n",
35270+ ehdr.e_phnum);
35271+ }
35272+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35273+ die("Seek to %d failed: %s\n",
35274+ ehdr.e_phoff, strerror(errno));
35275+ }
35276+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35277+ die("Cannot read ELF program headers: %s\n",
35278+ strerror(errno));
35279+ }
35280+ for(i = 0; i < ehdr.e_phnum; i++) {
35281+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35282+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35283+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35284+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35285+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35286+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35287+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35288+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35289+ }
35290+
35291+}
35292+
35293 static void read_shdrs(FILE *fp)
35294 {
35295- int i;
35296+ unsigned int i;
35297 Elf_Shdr shdr;
35298
35299 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35300@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35301
35302 static void read_strtabs(FILE *fp)
35303 {
35304- int i;
35305+ unsigned int i;
35306 for (i = 0; i < ehdr.e_shnum; i++) {
35307 struct section *sec = &secs[i];
35308 if (sec->shdr.sh_type != SHT_STRTAB) {
35309@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35310
35311 static void read_symtabs(FILE *fp)
35312 {
35313- int i,j;
35314+ unsigned int i,j;
35315 for (i = 0; i < ehdr.e_shnum; i++) {
35316 struct section *sec = &secs[i];
35317 if (sec->shdr.sh_type != SHT_SYMTAB) {
35318@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35319 }
35320
35321
35322-static void read_relocs(FILE *fp)
35323+static void read_relocs(FILE *fp, int use_real_mode)
35324 {
35325- int i,j;
35326+ unsigned int i,j;
35327+ uint32_t base;
35328+
35329 for (i = 0; i < ehdr.e_shnum; i++) {
35330 struct section *sec = &secs[i];
35331 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35332@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35333 die("Cannot read symbol table: %s\n",
35334 strerror(errno));
35335 }
35336+ base = 0;
35337+
35338+#ifdef CONFIG_X86_32
35339+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35340+ if (phdr[j].p_type != PT_LOAD )
35341+ continue;
35342+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35343+ continue;
35344+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35345+ break;
35346+ }
35347+#endif
35348+
35349 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35350 Elf_Rel *rel = &sec->reltab[j];
35351- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35352+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35353 rel->r_info = elf_xword_to_cpu(rel->r_info);
35354 #if (SHT_REL_TYPE == SHT_RELA)
35355 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35356@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35357
35358 static void print_absolute_symbols(void)
35359 {
35360- int i;
35361+ unsigned int i;
35362 const char *format;
35363
35364 if (ELF_BITS == 64)
35365@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35366 for (i = 0; i < ehdr.e_shnum; i++) {
35367 struct section *sec = &secs[i];
35368 char *sym_strtab;
35369- int j;
35370+ unsigned int j;
35371
35372 if (sec->shdr.sh_type != SHT_SYMTAB) {
35373 continue;
35374@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35375
35376 static void print_absolute_relocs(void)
35377 {
35378- int i, printed = 0;
35379+ unsigned int i, printed = 0;
35380 const char *format;
35381
35382 if (ELF_BITS == 64)
35383@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35384 struct section *sec_applies, *sec_symtab;
35385 char *sym_strtab;
35386 Elf_Sym *sh_symtab;
35387- int j;
35388+ unsigned int j;
35389 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35390 continue;
35391 }
35392@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35393 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35394 Elf_Sym *sym, const char *symname))
35395 {
35396- int i;
35397+ unsigned int i;
35398 /* Walk through the relocations */
35399 for (i = 0; i < ehdr.e_shnum; i++) {
35400 char *sym_strtab;
35401 Elf_Sym *sh_symtab;
35402 struct section *sec_applies, *sec_symtab;
35403- int j;
35404+ unsigned int j;
35405 struct section *sec = &secs[i];
35406
35407 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35408@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35409 {
35410 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35411 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35412+ char *sym_strtab = sec->link->link->strtab;
35413+
35414+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35415+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35416+ return 0;
35417+
35418+#ifdef CONFIG_PAX_KERNEXEC
35419+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35420+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35421+ return 0;
35422+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35423+ return 0;
35424+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35425+ return 0;
35426+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35427+ return 0;
35428+#endif
35429
35430 switch (r_type) {
35431 case R_386_NONE:
35432@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35433
35434 static void emit_relocs(int as_text, int use_real_mode)
35435 {
35436- int i;
35437+ unsigned int i;
35438 int (*write_reloc)(uint32_t, FILE *) = write32;
35439 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35440 const char *symname);
35441@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35442 {
35443 regex_init(use_real_mode);
35444 read_ehdr(fp);
35445+ read_phdrs(fp);
35446 read_shdrs(fp);
35447 read_strtabs(fp);
35448 read_symtabs(fp);
35449- read_relocs(fp);
35450+ read_relocs(fp, use_real_mode);
35451 if (ELF_BITS == 64)
35452 percpu_init();
35453 if (show_absolute_syms) {
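
The read_phdrs() added above mirrors the existing read_shdrs(): slurp the program headers so read_relocs() can rebase 32-bit relocation offsets by CONFIG_PAGE_OFFSET + p_paddr - p_vaddr. A trimmed standalone version for 32-bit ELF files in native byte order (no elf_*_to_cpu conversion):

#include <elf.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    if (argc != 2)
        return 1;
    FILE *fp = fopen(argv[1], "rb");
    if (!fp)
        return 1;

    Elf32_Ehdr ehdr;
    if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
        return 1;

    Elf32_Phdr *phdr = calloc(ehdr.e_phnum, sizeof(*phdr));
    if (!phdr || fseek(fp, ehdr.e_phoff, SEEK_SET) < 0 ||
        fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum)
        return 1;

    for (unsigned int i = 0; i < ehdr.e_phnum; i++)
        if (phdr[i].p_type == PT_LOAD)
            printf("PT_LOAD vaddr=%#x paddr=%#x filesz=%#x\n",
                   (unsigned)phdr[i].p_vaddr, (unsigned)phdr[i].p_paddr,
                   (unsigned)phdr[i].p_filesz);
    free(phdr);
    fclose(fp);
    return 0;
}
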
35454diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35455index f40281e..92728c9 100644
35456--- a/arch/x86/um/mem_32.c
35457+++ b/arch/x86/um/mem_32.c
35458@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35459 gate_vma.vm_start = FIXADDR_USER_START;
35460 gate_vma.vm_end = FIXADDR_USER_END;
35461 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35462- gate_vma.vm_page_prot = __P101;
35463+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35464
35465 return 0;
35466 }
35467diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35468index 80ffa5b..a33bd15 100644
35469--- a/arch/x86/um/tls_32.c
35470+++ b/arch/x86/um/tls_32.c
35471@@ -260,7 +260,7 @@ out:
35472 if (unlikely(task == current &&
35473 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35474 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35475- "without flushed TLS.", current->pid);
35476+ "without flushed TLS.", task_pid_nr(current));
35477 }
35478
35479 return 0;
35480diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35481index 5a4affe..9e2d522 100644
35482--- a/arch/x86/vdso/Makefile
35483+++ b/arch/x86/vdso/Makefile
35484@@ -174,7 +174,7 @@ quiet_cmd_vdso = VDSO $@
35485 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35486 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35487
35488-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35489+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35490 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35491 GCOV_PROFILE := n
35492
35493diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35494index 0224987..c7d65a5 100644
35495--- a/arch/x86/vdso/vdso2c.h
35496+++ b/arch/x86/vdso/vdso2c.h
35497@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35498 unsigned long load_size = -1; /* Work around bogus warning */
35499 unsigned long mapping_size;
35500 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35501- int i;
35502+ unsigned int i;
35503 unsigned long j;
35504 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35505 *alt_sec = NULL;
35506diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35507index e904c27..b9eaa03 100644
35508--- a/arch/x86/vdso/vdso32-setup.c
35509+++ b/arch/x86/vdso/vdso32-setup.c
35510@@ -14,6 +14,7 @@
35511 #include <asm/cpufeature.h>
35512 #include <asm/processor.h>
35513 #include <asm/vdso.h>
35514+#include <asm/mman.h>
35515
35516 #ifdef CONFIG_COMPAT_VDSO
35517 #define VDSO_DEFAULT 0
35518diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35519index 1c9f750..cfddb1a 100644
35520--- a/arch/x86/vdso/vma.c
35521+++ b/arch/x86/vdso/vma.c
35522@@ -19,10 +19,7 @@
35523 #include <asm/page.h>
35524 #include <asm/hpet.h>
35525 #include <asm/desc.h>
35526-
35527-#if defined(CONFIG_X86_64)
35528-unsigned int __read_mostly vdso64_enabled = 1;
35529-#endif
35530+#include <asm/mman.h>
35531
35532 void __init init_vdso_image(const struct vdso_image *image)
35533 {
35534@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35535 .pages = no_pages,
35536 };
35537
35538+#ifdef CONFIG_PAX_RANDMMAP
35539+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35540+ calculate_addr = false;
35541+#endif
35542+
35543 if (calculate_addr) {
35544 addr = vdso_addr(current->mm->start_stack,
35545 image->size - image->sym_vvar_start);
35546@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35547 down_write(&mm->mmap_sem);
35548
35549 addr = get_unmapped_area(NULL, addr,
35550- image->size - image->sym_vvar_start, 0, 0);
35551+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35552 if (IS_ERR_VALUE(addr)) {
35553 ret = addr;
35554 goto up_fail;
35555 }
35556
35557 text_start = addr - image->sym_vvar_start;
35558- current->mm->context.vdso = (void __user *)text_start;
35559+ mm->context.vdso = text_start;
35560
35561 /*
35562 * MAYWRITE to allow gdb to COW and set breakpoints
35563@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35564 hpet_address >> PAGE_SHIFT,
35565 PAGE_SIZE,
35566 pgprot_noncached(PAGE_READONLY));
35567-
35568- if (ret)
35569- goto up_fail;
35570 }
35571 #endif
35572
35573 up_fail:
35574 if (ret)
35575- current->mm->context.vdso = NULL;
35576+ current->mm->context.vdso = 0;
35577
35578 up_write(&mm->mmap_sem);
35579 return ret;
35580@@ -191,8 +190,8 @@ static int load_vdso32(void)
35581
35582 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35583 current_thread_info()->sysenter_return =
35584- current->mm->context.vdso +
35585- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35586+ (void __force_user *)(current->mm->context.vdso +
35587+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35588
35589 return 0;
35590 }
35591@@ -201,9 +200,6 @@ static int load_vdso32(void)
35592 #ifdef CONFIG_X86_64
35593 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35594 {
35595- if (!vdso64_enabled)
35596- return 0;
35597-
35598 return map_vdso(&vdso_image_64, true);
35599 }
35600
35601@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35602 int uses_interp)
35603 {
35604 #ifdef CONFIG_X86_X32_ABI
35605- if (test_thread_flag(TIF_X32)) {
35606- if (!vdso64_enabled)
35607- return 0;
35608-
35609+ if (test_thread_flag(TIF_X32))
35610 return map_vdso(&vdso_image_x32, true);
35611- }
35612 #endif
35613
35614 return load_vdso32();
35615@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35616 #endif
35617
35618 #ifdef CONFIG_X86_64
35619-static __init int vdso_setup(char *s)
35620-{
35621- vdso64_enabled = simple_strtoul(s, NULL, 0);
35622- return 0;
35623-}
35624-__setup("vdso=", vdso_setup);
35625-#endif
35626-
35627-#ifdef CONFIG_X86_64
35628 static void vgetcpu_cpu_init(void *arg)
35629 {
35630 int cpu = smp_processor_id();
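The vma.c changes above do two related things: they drop the vdso64_enabled/vdso= switch entirely (the vDSO is always mapped), and under PAX_RANDMMAP they force calculate_addr to false so placement goes through the regular, randomized get_unmapped_area() path instead of the stack-relative hint. A hedged sketch of the placement decision (pick_vdso_base() is my name, not a kernel symbol):

	static unsigned long pick_vdso_base(struct mm_struct *mm, bool calculate_addr,
					    unsigned long len)
	{
		unsigned long hint = 0;

		if (calculate_addr)	/* cleared when MF_PAX_RANDMMAP is set */
			hint = vdso_addr(mm->start_stack, len);

		/* hint == 0 lets the (randomized) mmap layout choose the base */
		return get_unmapped_area(NULL, hint, len, 0, MAP_EXECUTABLE);
	}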
35631diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35632index e88fda8..76ce7ce 100644
35633--- a/arch/x86/xen/Kconfig
35634+++ b/arch/x86/xen/Kconfig
35635@@ -9,6 +9,7 @@ config XEN
35636 select XEN_HAVE_PVMMU
35637 depends on X86_64 || (X86_32 && X86_PAE)
35638 depends on X86_TSC
35639+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35640 help
35641 This is the Linux Xen port. Enabling this will allow the
35642 kernel to boot in a paravirtualized environment under the
35643diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35644index 78a881b..9994bbb 100644
35645--- a/arch/x86/xen/enlighten.c
35646+++ b/arch/x86/xen/enlighten.c
35647@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35648
35649 struct shared_info xen_dummy_shared_info;
35650
35651-void *xen_initial_gdt;
35652-
35653 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35654 __read_mostly int xen_have_vector_callback;
35655 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35656@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35657 {
35658 unsigned long va = dtr->address;
35659 unsigned int size = dtr->size + 1;
35660- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35661- unsigned long frames[pages];
35662+ unsigned long frames[65536 / PAGE_SIZE];
35663 int f;
35664
35665 /*
35666@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35667 {
35668 unsigned long va = dtr->address;
35669 unsigned int size = dtr->size + 1;
35670- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35671- unsigned long frames[pages];
35672+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35673 int f;
35674
35675 /*
35676@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35677 * 8-byte entries, or 16 4k pages..
35678 */
35679
35680- BUG_ON(size > 65536);
35681+ BUG_ON(size > GDT_SIZE);
35682 BUG_ON(va & ~PAGE_MASK);
35683
35684 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
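Both xen_load_gdt() hunks replace a C99 variable-length array with a compile-time worst-case bound; the sizing that used to happen implicitly through the VLA becomes an explicit BUG_ON(). Reduced to the pattern (DIV_ROUND_UP is the usual kernel.h helper; 'size' is dtr->size + 1 as in the function above):

	/* before: stack usage scales with a runtime value */
	unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
	unsigned long frames[pages];			/* C99 VLA */

	/* after: fixed worst case, enforced at runtime */
	unsigned long frames2[DIV_ROUND_UP(GDT_SIZE, PAGE_SIZE)];
	BUG_ON(size > GDT_SIZE);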
35685@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35686 return 0;
35687 }
35688
35689-static void set_xen_basic_apic_ops(void)
35690+static void __init set_xen_basic_apic_ops(void)
35691 {
35692 apic->read = xen_apic_read;
35693 apic->write = xen_apic_write;
35694@@ -1291,30 +1287,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35695 #endif
35696 };
35697
35698-static void xen_reboot(int reason)
35699+static __noreturn void xen_reboot(int reason)
35700 {
35701 struct sched_shutdown r = { .reason = reason };
35702
35703- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35704- BUG();
35705+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35706+ BUG();
35707 }
35708
35709-static void xen_restart(char *msg)
35710+static __noreturn void xen_restart(char *msg)
35711 {
35712 xen_reboot(SHUTDOWN_reboot);
35713 }
35714
35715-static void xen_emergency_restart(void)
35716+static __noreturn void xen_emergency_restart(void)
35717 {
35718 xen_reboot(SHUTDOWN_reboot);
35719 }
35720
35721-static void xen_machine_halt(void)
35722+static __noreturn void xen_machine_halt(void)
35723 {
35724 xen_reboot(SHUTDOWN_poweroff);
35725 }
35726
35727-static void xen_machine_power_off(void)
35728+static __noreturn void xen_machine_power_off(void)
35729 {
35730 if (pm_power_off)
35731 pm_power_off();
35732@@ -1467,8 +1463,11 @@ static void __ref xen_setup_gdt(int cpu)
35733 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35734 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35735
35736- setup_stack_canary_segment(0);
35737- switch_to_new_gdt(0);
35738+ setup_stack_canary_segment(cpu);
35739+#ifdef CONFIG_X86_64
35740+ load_percpu_segment(cpu);
35741+#endif
35742+ switch_to_new_gdt(cpu);
35743
35744 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35745 pv_cpu_ops.load_gdt = xen_load_gdt;
35746@@ -1583,7 +1582,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35747 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35748
35749 /* Work out if we support NX */
35750- x86_configure_nx();
35751+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35752+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35753+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35754+ unsigned l, h;
35755+
35756+ __supported_pte_mask |= _PAGE_NX;
35757+ rdmsr(MSR_EFER, l, h);
35758+ l |= EFER_NX;
35759+ wrmsr(MSR_EFER, l, h);
35760+ }
35761+#endif
35762
35763 /* Get mfn list */
35764 xen_build_dynamic_phys_to_machine();
35765@@ -1611,13 +1620,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35766
35767 machine_ops = xen_machine_ops;
35768
35769- /*
35770- * The only reliable way to retain the initial address of the
35771- * percpu gdt_page is to remember it here, so we can go and
35772- * mark it RW later, when the initial percpu area is freed.
35773- */
35774- xen_initial_gdt = &per_cpu(gdt_page, 0);
35775-
35776 xen_smp_init();
35777
35778 #ifdef CONFIG_ACPI_NUMA
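The xen_start_kernel() hunk open-codes NX detection instead of calling x86_configure_nx(). For reference, CPUID leaf 0x80000001 EDX bit 20 is the NX flag (X86_FEATURE_NX is word 1, bit 20, hence the "& 31" shift in the patch), and EFER.NXE is MSR bit 11. A sketch of the same test with the magic numbers spelled out (cpu_has_nx_sketch() is illustrative only):

	static bool cpu_has_nx_sketch(void)
	{
		/* are extended CPUID leaves present at all? */
		if ((cpuid_eax(0x80000000) & 0xffff0000) != 0x80000000)
			return false;
		/* leaf 0x80000001, EDX bit 20: No-eXecute page support */
		return cpuid_edx(0x80000001) & (1U << 20);
	}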
35779diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35780index 5c1f9ac..0e15f5c 100644
35781--- a/arch/x86/xen/mmu.c
35782+++ b/arch/x86/xen/mmu.c
35783@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35784 return val;
35785 }
35786
35787-static pteval_t pte_pfn_to_mfn(pteval_t val)
35788+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35789 {
35790 if (val & _PAGE_PRESENT) {
35791 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35792@@ -1836,7 +1836,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35793 * L3_k[511] -> level2_fixmap_pgt */
35794 convert_pfn_mfn(level3_kernel_pgt);
35795
35796+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35797+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35798+ convert_pfn_mfn(level3_vmemmap_pgt);
35799 /* L3_k[511][506] -> level1_fixmap_pgt */
35800+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35801 convert_pfn_mfn(level2_fixmap_pgt);
35802 }
35803 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35804@@ -1861,11 +1865,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35805 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35806 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35807 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35808+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35809+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35810+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35811 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35812 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35813+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35814 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35815 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35816 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
35817+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
35818
35819 /* Pin down new L4 */
35820 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
35821@@ -2049,6 +2058,7 @@ static void __init xen_post_allocator_init(void)
35822 pv_mmu_ops.set_pud = xen_set_pud;
35823 #if PAGETABLE_LEVELS == 4
35824 pv_mmu_ops.set_pgd = xen_set_pgd;
35825+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35826 #endif
35827
35828 /* This will work as long as patching hasn't happened yet
35829@@ -2127,6 +2137,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35830 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35831 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35832 .set_pgd = xen_set_pgd_hyper,
35833+ .set_pgd_batched = xen_set_pgd_hyper,
35834
35835 .alloc_pud = xen_alloc_pmd_init,
35836 .release_pud = xen_release_pmd_init,
35837diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35838index 4c071ae..00e7049 100644
35839--- a/arch/x86/xen/smp.c
35840+++ b/arch/x86/xen/smp.c
35841@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35842
35843 if (xen_pv_domain()) {
35844 if (!xen_feature(XENFEAT_writable_page_tables))
35845- /* We've switched to the "real" per-cpu gdt, so make
35846- * sure the old memory can be recycled. */
35847- make_lowmem_page_readwrite(xen_initial_gdt);
35848-
35849 #ifdef CONFIG_X86_32
35850 /*
35851 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35852 * expects __USER_DS
35853 */
35854- loadsegment(ds, __USER_DS);
35855- loadsegment(es, __USER_DS);
35856+ loadsegment(ds, __KERNEL_DS);
35857+ loadsegment(es, __KERNEL_DS);
35858 #endif
35859
35860 xen_filter_cpu_maps();
35861@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35862 #ifdef CONFIG_X86_32
35863 /* Note: PVH is not yet supported on x86_32. */
35864 ctxt->user_regs.fs = __KERNEL_PERCPU;
35865- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
35866+ savesegment(gs, ctxt->user_regs.gs);
35867 #endif
35868 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
35869
35870@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35871 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
35872 ctxt->flags = VGCF_IN_KERNEL;
35873 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
35874- ctxt->user_regs.ds = __USER_DS;
35875- ctxt->user_regs.es = __USER_DS;
35876+ ctxt->user_regs.ds = __KERNEL_DS;
35877+ ctxt->user_regs.es = __KERNEL_DS;
35878 ctxt->user_regs.ss = __KERNEL_DS;
35879
35880 xen_copy_trap_info(ctxt->trap_ctxt);
35881@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
35882 int rc;
35883
35884 per_cpu(current_task, cpu) = idle;
35885+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
35886 #ifdef CONFIG_X86_32
35887 irq_ctx_init(cpu);
35888 #else
35889 clear_tsk_thread_flag(idle, TIF_FORK);
35890 #endif
35891- per_cpu(kernel_stack, cpu) =
35892- (unsigned long)task_stack_page(idle) -
35893- KERNEL_STACK_OFFSET + THREAD_SIZE;
35894+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
35895
35896 xen_setup_runstate_info(cpu);
35897 xen_setup_timer(cpu);
35898@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
35899
35900 void __init xen_smp_init(void)
35901 {
35902- smp_ops = xen_smp_ops;
35903+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
35904 xen_fill_possible_map();
35905 }
35906
35907diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
35908index fd92a64..1f72641 100644
35909--- a/arch/x86/xen/xen-asm_32.S
35910+++ b/arch/x86/xen/xen-asm_32.S
35911@@ -99,7 +99,7 @@ ENTRY(xen_iret)
35912 pushw %fs
35913 movl $(__KERNEL_PERCPU), %eax
35914 movl %eax, %fs
35915- movl %fs:xen_vcpu, %eax
35916+ mov PER_CPU_VAR(xen_vcpu), %eax
35917 POP_FS
35918 #else
35919 movl %ss:xen_vcpu, %eax
35920diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
35921index 674b2225..f1f5dc1 100644
35922--- a/arch/x86/xen/xen-head.S
35923+++ b/arch/x86/xen/xen-head.S
35924@@ -39,6 +39,17 @@ ENTRY(startup_xen)
35925 #ifdef CONFIG_X86_32
35926 mov %esi,xen_start_info
35927 mov $init_thread_union+THREAD_SIZE,%esp
35928+#ifdef CONFIG_SMP
35929+ movl $cpu_gdt_table,%edi
35930+ movl $__per_cpu_load,%eax
35931+ movw %ax,__KERNEL_PERCPU + 2(%edi)
35932+ rorl $16,%eax
35933+ movb %al,__KERNEL_PERCPU + 4(%edi)
35934+ movb %ah,__KERNEL_PERCPU + 7(%edi)
35935+ movl $__per_cpu_end - 1,%eax
35936+ subl $__per_cpu_start,%eax
35937+ movw %ax,__KERNEL_PERCPU + 0(%edi)
35938+#endif
35939 #else
35940 mov %rsi,xen_start_info
35941 mov $init_thread_union+THREAD_SIZE,%rsp
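The xen-head.S addition patches the __KERNEL_PERCPU descriptor in cpu_gdt_table by hand before any C code runs. An x86 segment descriptor scatters its fields: limit[15:0] in bytes 0-1, base[15:0] in bytes 2-3, base[23:16] in byte 4, base[31:24] in byte 7. A C rendering of what the assembly stores (set_percpu_desc() is just an illustration):

	static void set_percpu_desc(u8 *desc, u32 base, u32 limit)
	{
		desc[0] = limit & 0xff;		/* limit  7:0  (movw ...,+0) */
		desc[1] = (limit >> 8) & 0xff;	/* limit 15:8               */
		desc[2] = base & 0xff;		/* base   7:0  (movw ...,+2) */
		desc[3] = (base >> 8) & 0xff;	/* base  15:8               */
		desc[4] = (base >> 16) & 0xff;	/* base  23:16 (movb ...,+4) */
		desc[7] = base >> 24;		/* base  31:24 (movb ...,+7) */
	}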
35942diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
35943index 5686bd9..0c8b6ee 100644
35944--- a/arch/x86/xen/xen-ops.h
35945+++ b/arch/x86/xen/xen-ops.h
35946@@ -10,8 +10,6 @@
35947 extern const char xen_hypervisor_callback[];
35948 extern const char xen_failsafe_callback[];
35949
35950-extern void *xen_initial_gdt;
35951-
35952 struct trap_info;
35953 void xen_copy_trap_info(struct trap_info *traps);
35954
35955diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
35956index 525bd3d..ef888b1 100644
35957--- a/arch/xtensa/variants/dc232b/include/variant/core.h
35958+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
35959@@ -119,9 +119,9 @@
35960 ----------------------------------------------------------------------*/
35961
35962 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
35963-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
35964 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
35965 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
35966+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35967
35968 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
35969 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
35970diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
35971index 2f33760..835e50a 100644
35972--- a/arch/xtensa/variants/fsf/include/variant/core.h
35973+++ b/arch/xtensa/variants/fsf/include/variant/core.h
35974@@ -11,6 +11,7 @@
35975 #ifndef _XTENSA_CORE_H
35976 #define _XTENSA_CORE_H
35977
35978+#include <linux/const.h>
35979
35980 /****************************************************************************
35981 Parameters Useful for Any Code, USER or PRIVILEGED
35982@@ -112,9 +113,9 @@
35983 ----------------------------------------------------------------------*/
35984
35985 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
35986-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
35987 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
35988 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
35989+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35990
35991 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
35992 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
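Both xtensa variants get the same treatment: the D-cache line size is now derived from its own log2 so the two constants cannot drift apart, and _AC() keeps the literal usable from both C and assembly. In effect:

	#include <linux/const.h>

	#define XCHAL_DCACHE_LINEWIDTH	4
	/* expands to (1UL << 4) == 16 in C, (1 << 4) in assembly */
	#define XCHAL_DCACHE_LINESIZE	(_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH)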
35993diff --git a/block/bio.c b/block/bio.c
35994index 471d738..bd3da0d 100644
35995--- a/block/bio.c
35996+++ b/block/bio.c
35997@@ -1169,7 +1169,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
35998 /*
35999 * Overflow, abort
36000 */
36001- if (end < start)
36002+ if (end < start || end - start > INT_MAX - nr_pages)
36003 return ERR_PTR(-EINVAL);
36004
36005 nr_pages += end - start;
36006@@ -1303,7 +1303,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
36007 /*
36008 * Overflow, abort
36009 */
36010- if (end < start)
36011+ if (end < start || end - start > INT_MAX - nr_pages)
36012 return ERR_PTR(-EINVAL);
36013
36014 nr_pages += end - start;
36015@@ -1565,7 +1565,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
36016 const int read = bio_data_dir(bio) == READ;
36017 struct bio_map_data *bmd = bio->bi_private;
36018 int i;
36019- char *p = bmd->sgvecs[0].iov_base;
36020+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
36021
36022 bio_for_each_segment_all(bvec, bio, i) {
36023 char *addr = page_address(bvec->bv_page);
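The two bio.c guards are widened from plain wraparound ("end < start") to also bound the accumulated page count, since nr_pages is later consumed as a signed int. Roughly (names follow the function's locals):

	unsigned long start = uaddr >> PAGE_SHIFT;
	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* reject address wraparound *and* int overflow of the page count */
	if (end < start || end - start > INT_MAX - nr_pages)
		return ERR_PTR(-EINVAL);
	nr_pages += end - start;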
36024diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36025index 0736729..2ec3b48 100644
36026--- a/block/blk-iopoll.c
36027+++ b/block/blk-iopoll.c
36028@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36029 }
36030 EXPORT_SYMBOL(blk_iopoll_complete);
36031
36032-static void blk_iopoll_softirq(struct softirq_action *h)
36033+static __latent_entropy void blk_iopoll_softirq(void)
36034 {
36035 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36036 int rearm = 0, budget = blk_iopoll_budget;
36037diff --git a/block/blk-map.c b/block/blk-map.c
36038index f890d43..97b0482 100644
36039--- a/block/blk-map.c
36040+++ b/block/blk-map.c
36041@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36042 if (!len || !kbuf)
36043 return -EINVAL;
36044
36045- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36046+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36047 if (do_copy)
36048 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36049 else
36050diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36051index 53b1737..08177d2e 100644
36052--- a/block/blk-softirq.c
36053+++ b/block/blk-softirq.c
36054@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36055 * Softirq action handler - move entries to local list and loop over them
36056 * while passing them to the queue registered handler.
36057 */
36058-static void blk_done_softirq(struct softirq_action *h)
36059+static __latent_entropy void blk_done_softirq(void)
36060 {
36061 struct list_head *cpu_list, local_list;
36062
36063diff --git a/block/bsg.c b/block/bsg.c
36064index 276e869..6fe4c61 100644
36065--- a/block/bsg.c
36066+++ b/block/bsg.c
36067@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36068 struct sg_io_v4 *hdr, struct bsg_device *bd,
36069 fmode_t has_write_perm)
36070 {
36071+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36072+ unsigned char *cmdptr;
36073+
36074 if (hdr->request_len > BLK_MAX_CDB) {
36075 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36076 if (!rq->cmd)
36077 return -ENOMEM;
36078- }
36079+ cmdptr = rq->cmd;
36080+ } else
36081+ cmdptr = tmpcmd;
36082
36083- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36084+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36085 hdr->request_len))
36086 return -EFAULT;
36087
36088+ if (cmdptr != rq->cmd)
36089+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36090+
36091 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36092 if (blk_verify_command(rq->cmd, has_write_perm))
36093 return -EPERM;
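The bsg change (and the matching scsi_ioctl.c hunks further down) stage the SCSI CDB in a buffer that copy_from_user() is allowed to target, then memcpy() it into rq->cmd. The point, as far as I can tell, is PaX USERCOPY: rq->__cmd is embedded in struct request, and copying from userspace straight into an embedded slab object would trip the usercopy checks. The shape of the pattern (user_cdb/cdb_len are stand-ins for the per-call source and length):

	unsigned char tmpcmd[sizeof(rq->__cmd)];
	unsigned char *cmdptr;

	/* long CDBs already live in a dedicated allocation; short ones
	 * are staged on the stack instead of hitting rq->__cmd directly */
	cmdptr = (rq->cmd != rq->__cmd) ? rq->cmd : tmpcmd;

	if (copy_from_user(cmdptr, user_cdb, cdb_len))
		return -EFAULT;
	if (cmdptr != rq->cmd)
		memcpy(rq->cmd, cmdptr, cdb_len);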
36094diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36095index f678c73..f35aa18 100644
36096--- a/block/compat_ioctl.c
36097+++ b/block/compat_ioctl.c
36098@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36099 cgc = compat_alloc_user_space(sizeof(*cgc));
36100 cgc32 = compat_ptr(arg);
36101
36102- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36103+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36104 get_user(data, &cgc32->buffer) ||
36105 put_user(compat_ptr(data), &cgc->buffer) ||
36106 copy_in_user(&cgc->buflen, &cgc32->buflen,
36107@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36108 err |= __get_user(f->spec1, &uf->spec1);
36109 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36110 err |= __get_user(name, &uf->name);
36111- f->name = compat_ptr(name);
36112+ f->name = (void __force_kernel *)compat_ptr(name);
36113 if (err) {
36114 err = -EFAULT;
36115 goto out;
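Two small type fixes in compat_ioctl.c: for an array member, &cgc->cmd and cgc->cmd are the same address with different pointer types, so passing the decayed element pointer keeps copy_in_user()'s type checking honest; the __force_kernel cast on f->name likewise satisfies the sparse/PaX address-space annotations without changing behaviour. Illustration (same_address() is mine):

	static void same_address(struct cdrom_generic_command *cgc)
	{
		void *a = &cgc->cmd;	/* pointer to the whole cmd[] array */
		void *b = cgc->cmd;	/* pointer to its first element     */

		WARN_ON(a != b);	/* identical addresses, never fires */
	}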
36116diff --git a/block/genhd.c b/block/genhd.c
36117index 0a536dc..b8f7aca 100644
36118--- a/block/genhd.c
36119+++ b/block/genhd.c
36120@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36121
36122 /*
36123 * Register device numbers dev..(dev+range-1)
36124- * range must be nonzero
36125+ * Noop if @range is zero.
36126 * The hash chain is sorted on range, so that subranges can override.
36127 */
36128 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36129 struct kobject *(*probe)(dev_t, int *, void *),
36130 int (*lock)(dev_t, void *), void *data)
36131 {
36132- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36133+ if (range)
36134+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36135 }
36136
36137 EXPORT_SYMBOL(blk_register_region);
36138
36139+/* undo blk_register_region(), noop if @range is zero */
36140 void blk_unregister_region(dev_t devt, unsigned long range)
36141 {
36142- kobj_unmap(bdev_map, devt, range);
36143+ if (range)
36144+ kobj_unmap(bdev_map, devt, range);
36145 }
36146
36147 EXPORT_SYMBOL(blk_unregister_region);
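genhd.c makes zero-length regions an explicit noop in both directions, so callers no longer need to special-case an empty range themselves (major/probe/lock/data below are stand-ins):

	/* both calls now degenerate to noops when the range is empty */
	blk_register_region(MKDEV(major, 0), 0, THIS_MODULE, probe, lock, data);
	blk_unregister_region(MKDEV(major, 0), 0);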
36148diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36149index 56d08fd..2e07090 100644
36150--- a/block/partitions/efi.c
36151+++ b/block/partitions/efi.c
36152@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36153 if (!gpt)
36154 return NULL;
36155
36156+ if (!le32_to_cpu(gpt->num_partition_entries))
36157+ return NULL;
36158+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36159+ if (!pte)
36160+ return NULL;
36161+
36162 count = le32_to_cpu(gpt->num_partition_entries) *
36163 le32_to_cpu(gpt->sizeof_partition_entry);
36164- if (!count)
36165- return NULL;
36166- pte = kmalloc(count, GFP_KERNEL);
36167- if (!pte)
36168- return NULL;
36169-
36170 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36171 (u8 *) pte, count) < count) {
36172 kfree(pte);
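The efi.c rewrite is the classic multiplication-overflow fix: num_partition_entries and sizeof_partition_entry both come from the on-disk (attacker-controllable) GPT header, so their product is computed by kcalloc(), which fails cleanly on overflow instead of under-allocating. Side by side:

	size_t n   = le32_to_cpu(gpt->num_partition_entries);
	size_t esz = le32_to_cpu(gpt->sizeof_partition_entry);

	pte = kmalloc(n * esz, GFP_KERNEL);	/* old: n * esz may wrap */
	pte = kcalloc(n, esz, GFP_KERNEL);	/* new: returns NULL on overflow */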
36173diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36174index 28163fa..07190a06 100644
36175--- a/block/scsi_ioctl.c
36176+++ b/block/scsi_ioctl.c
36177@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36178 return put_user(0, p);
36179 }
36180
36181-static int sg_get_timeout(struct request_queue *q)
36182+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36183 {
36184 return jiffies_to_clock_t(q->sg_timeout);
36185 }
36186@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36187 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36188 struct sg_io_hdr *hdr, fmode_t mode)
36189 {
36190- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36191+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36192+ unsigned char *cmdptr;
36193+
36194+ if (rq->cmd != rq->__cmd)
36195+ cmdptr = rq->cmd;
36196+ else
36197+ cmdptr = tmpcmd;
36198+
36199+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36200 return -EFAULT;
36201+
36202+ if (cmdptr != rq->cmd)
36203+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36204+
36205 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36206 return -EPERM;
36207
36208@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36209 int err;
36210 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36211 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36212+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36213+ unsigned char *cmdptr;
36214
36215 if (!sic)
36216 return -EINVAL;
36217@@ -469,9 +483,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36218 */
36219 err = -EFAULT;
36220 rq->cmd_len = cmdlen;
36221- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36222+
36223+ if (rq->cmd != rq->__cmd)
36224+ cmdptr = rq->cmd;
36225+ else
36226+ cmdptr = tmpcmd;
36227+
36228+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36229 goto error;
36230
36231+ if (rq->cmd != cmdptr)
36232+ memcpy(rq->cmd, cmdptr, cmdlen);
36233+
36234 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36235 goto error;
36236
36237diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36238index 650afac1..f3307de 100644
36239--- a/crypto/cryptd.c
36240+++ b/crypto/cryptd.c
36241@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36242
36243 struct cryptd_blkcipher_request_ctx {
36244 crypto_completion_t complete;
36245-};
36246+} __no_const;
36247
36248 struct cryptd_hash_ctx {
36249 struct crypto_shash *child;
36250@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36251
36252 struct cryptd_aead_request_ctx {
36253 crypto_completion_t complete;
36254-};
36255+} __no_const;
36256
36257 static void cryptd_queue_worker(struct work_struct *work);
36258
36259diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36260index c305d41..a96de79 100644
36261--- a/crypto/pcrypt.c
36262+++ b/crypto/pcrypt.c
36263@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36264 int ret;
36265
36266 pinst->kobj.kset = pcrypt_kset;
36267- ret = kobject_add(&pinst->kobj, NULL, name);
36268+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36269 if (!ret)
36270 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36271
36272diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36273index 6921c7f..78e1af7 100644
36274--- a/drivers/acpi/acpica/hwxfsleep.c
36275+++ b/drivers/acpi/acpica/hwxfsleep.c
36276@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36277 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36278
36279 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36280- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36281- acpi_hw_extended_sleep},
36282- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36283- acpi_hw_extended_wake_prep},
36284- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36285+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36286+ .extended_function = acpi_hw_extended_sleep},
36287+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36288+ .extended_function = acpi_hw_extended_wake_prep},
36289+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36290+ .extended_function = acpi_hw_extended_wake}
36291 };
36292
36293 /*
36294diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36295index 16129c7..8b675cd 100644
36296--- a/drivers/acpi/apei/apei-internal.h
36297+++ b/drivers/acpi/apei/apei-internal.h
36298@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36299 struct apei_exec_ins_type {
36300 u32 flags;
36301 apei_exec_ins_func_t run;
36302-};
36303+} __do_const;
36304
36305 struct apei_exec_context {
36306 u32 ip;
36307diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36308index e82d097..0c855c1 100644
36309--- a/drivers/acpi/apei/ghes.c
36310+++ b/drivers/acpi/apei/ghes.c
36311@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36312 const struct acpi_hest_generic *generic,
36313 const struct acpi_hest_generic_status *estatus)
36314 {
36315- static atomic_t seqno;
36316+ static atomic_unchecked_t seqno;
36317 unsigned int curr_seqno;
36318 char pfx_seq[64];
36319
36320@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36321 else
36322 pfx = KERN_ERR;
36323 }
36324- curr_seqno = atomic_inc_return(&seqno);
36325+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36326 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36327 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36328 pfx_seq, generic->header.source_id);
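This is the first of very many atomic_unchecked_t conversions in this patch (the ATM drivers below are converted wholesale). Under PaX REFCOUNT, plain atomic_t operations trap on overflow to stop reference-count exploits; counters that are mere statistics or sequence numbers, where wraparound is harmless, are switched to the _unchecked variants to opt out. The pattern:

	static atomic_unchecked_t seqno;	/* pure sequence number, may wrap */

	static unsigned int next_seqno(void)
	{
		/* overflow here is benign, so skip the REFCOUNT trap */
		return atomic_inc_return_unchecked(&seqno);
	}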
36329diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36330index a83e3c6..c3d617f 100644
36331--- a/drivers/acpi/bgrt.c
36332+++ b/drivers/acpi/bgrt.c
36333@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36334 if (!bgrt_image)
36335 return -ENODEV;
36336
36337- bin_attr_image.private = bgrt_image;
36338- bin_attr_image.size = bgrt_image_size;
36339+ pax_open_kernel();
36340+ *(void **)&bin_attr_image.private = bgrt_image;
36341+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36342+ pax_close_kernel();
36343
36344 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36345 if (!bgrt_kobj)
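bgrt.c shows the other recurring idiom in this patch: structures constified by PaX (here a bin_attribute) are made temporarily writable with pax_open_kernel()/pax_close_kernel(), and their fields are written through explicit casts. Generic shape (attr and set_attr are hypothetical):

	static struct bin_attribute attr;	/* read-only after constification */

	static void set_attr(void *priv, size_t size)
	{
		pax_open_kernel();			/* lift the RO mapping */
		*(void **)&attr.private = priv;		/* write through casts */
		*(size_t *)&attr.size   = size;
		pax_close_kernel();			/* restore read-only   */
	}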
36346diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36347index 9b693d5..8953d54 100644
36348--- a/drivers/acpi/blacklist.c
36349+++ b/drivers/acpi/blacklist.c
36350@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36351 u32 is_critical_error;
36352 };
36353
36354-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36355+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36356
36357 /*
36358 * POLICY: If *anything* doesn't work, put it on the blacklist.
36359@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36360 return 0;
36361 }
36362
36363-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36364+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36365 {
36366 .callback = dmi_disable_osi_vista,
36367 .ident = "Fujitsu Siemens",
36368diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36369index c68e724..e863008 100644
36370--- a/drivers/acpi/custom_method.c
36371+++ b/drivers/acpi/custom_method.c
36372@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36373 struct acpi_table_header table;
36374 acpi_status status;
36375
36376+#ifdef CONFIG_GRKERNSEC_KMEM
36377+ return -EPERM;
36378+#endif
36379+
36380 if (!(*ppos)) {
36381 /* parse the table header to get the table length */
36382 if (count <= sizeof(struct acpi_table_header))
36383diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36384index c0d44d3..5ad8f9a 100644
36385--- a/drivers/acpi/device_pm.c
36386+++ b/drivers/acpi/device_pm.c
36387@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36388
36389 #endif /* CONFIG_PM_SLEEP */
36390
36391+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36392+
36393 static struct dev_pm_domain acpi_general_pm_domain = {
36394 .ops = {
36395 #ifdef CONFIG_PM
36396@@ -1043,6 +1045,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36397 #endif
36398 #endif
36399 },
36400+ .detach = acpi_dev_pm_detach
36401 };
36402
36403 /**
36404@@ -1112,7 +1115,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36405 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36406 }
36407
36408- dev->pm_domain->detach = acpi_dev_pm_detach;
36409 return 0;
36410 }
36411 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36412diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36413index 87b704e..2d1d0c1 100644
36414--- a/drivers/acpi/processor_idle.c
36415+++ b/drivers/acpi/processor_idle.c
36416@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36417 {
36418 int i, count = CPUIDLE_DRIVER_STATE_START;
36419 struct acpi_processor_cx *cx;
36420- struct cpuidle_state *state;
36421+ cpuidle_state_no_const *state;
36422 struct cpuidle_driver *drv = &acpi_idle_driver;
36423
36424 if (!pr->flags.power_setup_done)
36425diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36426index 13e577c..cef11ee 100644
36427--- a/drivers/acpi/sysfs.c
36428+++ b/drivers/acpi/sysfs.c
36429@@ -423,11 +423,11 @@ static u32 num_counters;
36430 static struct attribute **all_attrs;
36431 static u32 acpi_gpe_count;
36432
36433-static struct attribute_group interrupt_stats_attr_group = {
36434+static attribute_group_no_const interrupt_stats_attr_group = {
36435 .name = "interrupts",
36436 };
36437
36438-static struct kobj_attribute *counter_attrs;
36439+static kobj_attribute_no_const *counter_attrs;
36440
36441 static void delete_gpe_attr_array(void)
36442 {
36443diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36444index 61a9c07..ea98fa1 100644
36445--- a/drivers/ata/libahci.c
36446+++ b/drivers/ata/libahci.c
36447@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36448 }
36449 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36450
36451-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36452+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36453 struct ata_taskfile *tf, int is_cmd, u16 flags,
36454 unsigned long timeout_msec)
36455 {
36456diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36457index d1a05f9..eb70e10 100644
36458--- a/drivers/ata/libata-core.c
36459+++ b/drivers/ata/libata-core.c
36460@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36461 static void ata_dev_xfermask(struct ata_device *dev);
36462 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36463
36464-atomic_t ata_print_id = ATOMIC_INIT(0);
36465+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36466
36467 struct ata_force_param {
36468 const char *name;
36469@@ -4831,7 +4831,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36470 struct ata_port *ap;
36471 unsigned int tag;
36472
36473- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36474+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36475 ap = qc->ap;
36476
36477 qc->flags = 0;
36478@@ -4847,7 +4847,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36479 struct ata_port *ap;
36480 struct ata_link *link;
36481
36482- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36483+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36484 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36485 ap = qc->ap;
36486 link = qc->dev->link;
36487@@ -5951,6 +5951,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36488 return;
36489
36490 spin_lock(&lock);
36491+ pax_open_kernel();
36492
36493 for (cur = ops->inherits; cur; cur = cur->inherits) {
36494 void **inherit = (void **)cur;
36495@@ -5964,8 +5965,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36496 if (IS_ERR(*pp))
36497 *pp = NULL;
36498
36499- ops->inherits = NULL;
36500+ *(struct ata_port_operations **)&ops->inherits = NULL;
36501
36502+ pax_close_kernel();
36503 spin_unlock(&lock);
36504 }
36505
36506@@ -6161,7 +6163,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36507
36508 /* give ports names and add SCSI hosts */
36509 for (i = 0; i < host->n_ports; i++) {
36510- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36511+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36512 host->ports[i]->local_port_no = i + 1;
36513 }
36514
36515diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36516index 6abd17a..9961bf7 100644
36517--- a/drivers/ata/libata-scsi.c
36518+++ b/drivers/ata/libata-scsi.c
36519@@ -4169,7 +4169,7 @@ int ata_sas_port_init(struct ata_port *ap)
36520
36521 if (rc)
36522 return rc;
36523- ap->print_id = atomic_inc_return(&ata_print_id);
36524+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36525 return 0;
36526 }
36527 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36528diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36529index 5f4e0cc..ff2c347 100644
36530--- a/drivers/ata/libata.h
36531+++ b/drivers/ata/libata.h
36532@@ -53,7 +53,7 @@ enum {
36533 ATA_DNXFER_QUIET = (1 << 31),
36534 };
36535
36536-extern atomic_t ata_print_id;
36537+extern atomic_unchecked_t ata_print_id;
36538 extern int atapi_passthru16;
36539 extern int libata_fua;
36540 extern int libata_noacpi;
36541diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36542index a9b0c82..207d97d 100644
36543--- a/drivers/ata/pata_arasan_cf.c
36544+++ b/drivers/ata/pata_arasan_cf.c
36545@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36546 /* Handle platform specific quirks */
36547 if (quirk) {
36548 if (quirk & CF_BROKEN_PIO) {
36549- ap->ops->set_piomode = NULL;
36550+ pax_open_kernel();
36551+ *(void **)&ap->ops->set_piomode = NULL;
36552+ pax_close_kernel();
36553 ap->pio_mask = 0;
36554 }
36555 if (quirk & CF_BROKEN_MWDMA)
36556diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36557index f9b983a..887b9d8 100644
36558--- a/drivers/atm/adummy.c
36559+++ b/drivers/atm/adummy.c
36560@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36561 vcc->pop(vcc, skb);
36562 else
36563 dev_kfree_skb_any(skb);
36564- atomic_inc(&vcc->stats->tx);
36565+ atomic_inc_unchecked(&vcc->stats->tx);
36566
36567 return 0;
36568 }
36569diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36570index f1a9198..f466a4a 100644
36571--- a/drivers/atm/ambassador.c
36572+++ b/drivers/atm/ambassador.c
36573@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36574 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36575
36576 // VC layer stats
36577- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36578+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36579
36580 // free the descriptor
36581 kfree (tx_descr);
36582@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36583 dump_skb ("<<<", vc, skb);
36584
36585 // VC layer stats
36586- atomic_inc(&atm_vcc->stats->rx);
36587+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36588 __net_timestamp(skb);
36589 // end of our responsibility
36590 atm_vcc->push (atm_vcc, skb);
36591@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36592 } else {
36593 PRINTK (KERN_INFO, "dropped over-size frame");
36594 // should we count this?
36595- atomic_inc(&atm_vcc->stats->rx_drop);
36596+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36597 }
36598
36599 } else {
36600@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36601 }
36602
36603 if (check_area (skb->data, skb->len)) {
36604- atomic_inc(&atm_vcc->stats->tx_err);
36605+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36606 return -ENOMEM; // ?
36607 }
36608
36609diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36610index 480fa6f..947067c 100644
36611--- a/drivers/atm/atmtcp.c
36612+++ b/drivers/atm/atmtcp.c
36613@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36614 if (vcc->pop) vcc->pop(vcc,skb);
36615 else dev_kfree_skb(skb);
36616 if (dev_data) return 0;
36617- atomic_inc(&vcc->stats->tx_err);
36618+ atomic_inc_unchecked(&vcc->stats->tx_err);
36619 return -ENOLINK;
36620 }
36621 size = skb->len+sizeof(struct atmtcp_hdr);
36622@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36623 if (!new_skb) {
36624 if (vcc->pop) vcc->pop(vcc,skb);
36625 else dev_kfree_skb(skb);
36626- atomic_inc(&vcc->stats->tx_err);
36627+ atomic_inc_unchecked(&vcc->stats->tx_err);
36628 return -ENOBUFS;
36629 }
36630 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36631@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36632 if (vcc->pop) vcc->pop(vcc,skb);
36633 else dev_kfree_skb(skb);
36634 out_vcc->push(out_vcc,new_skb);
36635- atomic_inc(&vcc->stats->tx);
36636- atomic_inc(&out_vcc->stats->rx);
36637+ atomic_inc_unchecked(&vcc->stats->tx);
36638+ atomic_inc_unchecked(&out_vcc->stats->rx);
36639 return 0;
36640 }
36641
36642@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36643 read_unlock(&vcc_sklist_lock);
36644 if (!out_vcc) {
36645 result = -EUNATCH;
36646- atomic_inc(&vcc->stats->tx_err);
36647+ atomic_inc_unchecked(&vcc->stats->tx_err);
36648 goto done;
36649 }
36650 skb_pull(skb,sizeof(struct atmtcp_hdr));
36651@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36652 __net_timestamp(new_skb);
36653 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36654 out_vcc->push(out_vcc,new_skb);
36655- atomic_inc(&vcc->stats->tx);
36656- atomic_inc(&out_vcc->stats->rx);
36657+ atomic_inc_unchecked(&vcc->stats->tx);
36658+ atomic_inc_unchecked(&out_vcc->stats->rx);
36659 done:
36660 if (vcc->pop) vcc->pop(vcc,skb);
36661 else dev_kfree_skb(skb);
36662diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36663index c7fab3e..68d0965 100644
36664--- a/drivers/atm/eni.c
36665+++ b/drivers/atm/eni.c
36666@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36667 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36668 vcc->dev->number);
36669 length = 0;
36670- atomic_inc(&vcc->stats->rx_err);
36671+ atomic_inc_unchecked(&vcc->stats->rx_err);
36672 }
36673 else {
36674 length = ATM_CELL_SIZE-1; /* no HEC */
36675@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36676 size);
36677 }
36678 eff = length = 0;
36679- atomic_inc(&vcc->stats->rx_err);
36680+ atomic_inc_unchecked(&vcc->stats->rx_err);
36681 }
36682 else {
36683 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36684@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36685 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36686 vcc->dev->number,vcc->vci,length,size << 2,descr);
36687 length = eff = 0;
36688- atomic_inc(&vcc->stats->rx_err);
36689+ atomic_inc_unchecked(&vcc->stats->rx_err);
36690 }
36691 }
36692 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36693@@ -770,7 +770,7 @@ rx_dequeued++;
36694 vcc->push(vcc,skb);
36695 pushed++;
36696 }
36697- atomic_inc(&vcc->stats->rx);
36698+ atomic_inc_unchecked(&vcc->stats->rx);
36699 }
36700 wake_up(&eni_dev->rx_wait);
36701 }
36702@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
36703 PCI_DMA_TODEVICE);
36704 if (vcc->pop) vcc->pop(vcc,skb);
36705 else dev_kfree_skb_irq(skb);
36706- atomic_inc(&vcc->stats->tx);
36707+ atomic_inc_unchecked(&vcc->stats->tx);
36708 wake_up(&eni_dev->tx_wait);
36709 dma_complete++;
36710 }
36711diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36712index 82f2ae0..f205c02 100644
36713--- a/drivers/atm/firestream.c
36714+++ b/drivers/atm/firestream.c
36715@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36716 }
36717 }
36718
36719- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36720+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36721
36722 fs_dprintk (FS_DEBUG_TXMEM, "i");
36723 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36724@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36725 #endif
36726 skb_put (skb, qe->p1 & 0xffff);
36727 ATM_SKB(skb)->vcc = atm_vcc;
36728- atomic_inc(&atm_vcc->stats->rx);
36729+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36730 __net_timestamp(skb);
36731 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36732 atm_vcc->push (atm_vcc, skb);
36733@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36734 kfree (pe);
36735 }
36736 if (atm_vcc)
36737- atomic_inc(&atm_vcc->stats->rx_drop);
36738+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36739 break;
36740 case 0x1f: /* Reassembly abort: no buffers. */
36741 /* Silently increment error counter. */
36742 if (atm_vcc)
36743- atomic_inc(&atm_vcc->stats->rx_drop);
36744+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36745 break;
36746 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
36747 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
36748diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
36749index d5d9eaf..65c0d53 100644
36750--- a/drivers/atm/fore200e.c
36751+++ b/drivers/atm/fore200e.c
36752@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
36753 #endif
36754 /* check error condition */
36755 if (*entry->status & STATUS_ERROR)
36756- atomic_inc(&vcc->stats->tx_err);
36757+ atomic_inc_unchecked(&vcc->stats->tx_err);
36758 else
36759- atomic_inc(&vcc->stats->tx);
36760+ atomic_inc_unchecked(&vcc->stats->tx);
36761 }
36762 }
36763
36764@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36765 if (skb == NULL) {
36766 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
36767
36768- atomic_inc(&vcc->stats->rx_drop);
36769+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36770 return -ENOMEM;
36771 }
36772
36773@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36774
36775 dev_kfree_skb_any(skb);
36776
36777- atomic_inc(&vcc->stats->rx_drop);
36778+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36779 return -ENOMEM;
36780 }
36781
36782 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36783
36784 vcc->push(vcc, skb);
36785- atomic_inc(&vcc->stats->rx);
36786+ atomic_inc_unchecked(&vcc->stats->rx);
36787
36788 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36789
36790@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
36791 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
36792 fore200e->atm_dev->number,
36793 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
36794- atomic_inc(&vcc->stats->rx_err);
36795+ atomic_inc_unchecked(&vcc->stats->rx_err);
36796 }
36797 }
36798
36799@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
36800 goto retry_here;
36801 }
36802
36803- atomic_inc(&vcc->stats->tx_err);
36804+ atomic_inc_unchecked(&vcc->stats->tx_err);
36805
36806 fore200e->tx_sat++;
36807 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
36808diff --git a/drivers/atm/he.c b/drivers/atm/he.c
36809index c39702b..785b73b 100644
36810--- a/drivers/atm/he.c
36811+++ b/drivers/atm/he.c
36812@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36813
36814 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
36815 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
36816- atomic_inc(&vcc->stats->rx_drop);
36817+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36818 goto return_host_buffers;
36819 }
36820
36821@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36822 RBRQ_LEN_ERR(he_dev->rbrq_head)
36823 ? "LEN_ERR" : "",
36824 vcc->vpi, vcc->vci);
36825- atomic_inc(&vcc->stats->rx_err);
36826+ atomic_inc_unchecked(&vcc->stats->rx_err);
36827 goto return_host_buffers;
36828 }
36829
36830@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36831 vcc->push(vcc, skb);
36832 spin_lock(&he_dev->global_lock);
36833
36834- atomic_inc(&vcc->stats->rx);
36835+ atomic_inc_unchecked(&vcc->stats->rx);
36836
36837 return_host_buffers:
36838 ++pdus_assembled;
36839@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
36840 tpd->vcc->pop(tpd->vcc, tpd->skb);
36841 else
36842 dev_kfree_skb_any(tpd->skb);
36843- atomic_inc(&tpd->vcc->stats->tx_err);
36844+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
36845 }
36846 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
36847 return;
36848@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36849 vcc->pop(vcc, skb);
36850 else
36851 dev_kfree_skb_any(skb);
36852- atomic_inc(&vcc->stats->tx_err);
36853+ atomic_inc_unchecked(&vcc->stats->tx_err);
36854 return -EINVAL;
36855 }
36856
36857@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36858 vcc->pop(vcc, skb);
36859 else
36860 dev_kfree_skb_any(skb);
36861- atomic_inc(&vcc->stats->tx_err);
36862+ atomic_inc_unchecked(&vcc->stats->tx_err);
36863 return -EINVAL;
36864 }
36865 #endif
36866@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36867 vcc->pop(vcc, skb);
36868 else
36869 dev_kfree_skb_any(skb);
36870- atomic_inc(&vcc->stats->tx_err);
36871+ atomic_inc_unchecked(&vcc->stats->tx_err);
36872 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36873 return -ENOMEM;
36874 }
36875@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36876 vcc->pop(vcc, skb);
36877 else
36878 dev_kfree_skb_any(skb);
36879- atomic_inc(&vcc->stats->tx_err);
36880+ atomic_inc_unchecked(&vcc->stats->tx_err);
36881 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36882 return -ENOMEM;
36883 }
36884@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36885 __enqueue_tpd(he_dev, tpd, cid);
36886 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36887
36888- atomic_inc(&vcc->stats->tx);
36889+ atomic_inc_unchecked(&vcc->stats->tx);
36890
36891 return 0;
36892 }
36893diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
36894index 1dc0519..1aadaf7 100644
36895--- a/drivers/atm/horizon.c
36896+++ b/drivers/atm/horizon.c
36897@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
36898 {
36899 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
36900 // VC layer stats
36901- atomic_inc(&vcc->stats->rx);
36902+ atomic_inc_unchecked(&vcc->stats->rx);
36903 __net_timestamp(skb);
36904 // end of our responsibility
36905 vcc->push (vcc, skb);
36906@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
36907 dev->tx_iovec = NULL;
36908
36909 // VC layer stats
36910- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36911+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36912
36913 // free the skb
36914 hrz_kfree_skb (skb);
36915diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
36916index 2b24ed0..b3d6acc 100644
36917--- a/drivers/atm/idt77252.c
36918+++ b/drivers/atm/idt77252.c
36919@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
36920 else
36921 dev_kfree_skb(skb);
36922
36923- atomic_inc(&vcc->stats->tx);
36924+ atomic_inc_unchecked(&vcc->stats->tx);
36925 }
36926
36927 atomic_dec(&scq->used);
36928@@ -1072,13 +1072,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36929 if ((sb = dev_alloc_skb(64)) == NULL) {
36930 printk("%s: Can't allocate buffers for aal0.\n",
36931 card->name);
36932- atomic_add(i, &vcc->stats->rx_drop);
36933+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
36934 break;
36935 }
36936 if (!atm_charge(vcc, sb->truesize)) {
36937 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
36938 card->name);
36939- atomic_add(i - 1, &vcc->stats->rx_drop);
36940+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
36941 dev_kfree_skb(sb);
36942 break;
36943 }
36944@@ -1095,7 +1095,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36945 ATM_SKB(sb)->vcc = vcc;
36946 __net_timestamp(sb);
36947 vcc->push(vcc, sb);
36948- atomic_inc(&vcc->stats->rx);
36949+ atomic_inc_unchecked(&vcc->stats->rx);
36950
36951 cell += ATM_CELL_PAYLOAD;
36952 }
36953@@ -1132,13 +1132,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36954 "(CDC: %08x)\n",
36955 card->name, len, rpp->len, readl(SAR_REG_CDC));
36956 recycle_rx_pool_skb(card, rpp);
36957- atomic_inc(&vcc->stats->rx_err);
36958+ atomic_inc_unchecked(&vcc->stats->rx_err);
36959 return;
36960 }
36961 if (stat & SAR_RSQE_CRC) {
36962 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
36963 recycle_rx_pool_skb(card, rpp);
36964- atomic_inc(&vcc->stats->rx_err);
36965+ atomic_inc_unchecked(&vcc->stats->rx_err);
36966 return;
36967 }
36968 if (skb_queue_len(&rpp->queue) > 1) {
36969@@ -1149,7 +1149,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36970 RXPRINTK("%s: Can't alloc RX skb.\n",
36971 card->name);
36972 recycle_rx_pool_skb(card, rpp);
36973- atomic_inc(&vcc->stats->rx_err);
36974+ atomic_inc_unchecked(&vcc->stats->rx_err);
36975 return;
36976 }
36977 if (!atm_charge(vcc, skb->truesize)) {
36978@@ -1168,7 +1168,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36979 __net_timestamp(skb);
36980
36981 vcc->push(vcc, skb);
36982- atomic_inc(&vcc->stats->rx);
36983+ atomic_inc_unchecked(&vcc->stats->rx);
36984
36985 return;
36986 }
36987@@ -1190,7 +1190,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36988 __net_timestamp(skb);
36989
36990 vcc->push(vcc, skb);
36991- atomic_inc(&vcc->stats->rx);
36992+ atomic_inc_unchecked(&vcc->stats->rx);
36993
36994 if (skb->truesize > SAR_FB_SIZE_3)
36995 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
36996@@ -1301,14 +1301,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
36997 if (vcc->qos.aal != ATM_AAL0) {
36998 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
36999 card->name, vpi, vci);
37000- atomic_inc(&vcc->stats->rx_drop);
37001+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37002 goto drop;
37003 }
37004
37005 if ((sb = dev_alloc_skb(64)) == NULL) {
37006 printk("%s: Can't allocate buffers for AAL0.\n",
37007 card->name);
37008- atomic_inc(&vcc->stats->rx_err);
37009+ atomic_inc_unchecked(&vcc->stats->rx_err);
37010 goto drop;
37011 }
37012
37013@@ -1327,7 +1327,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37014 ATM_SKB(sb)->vcc = vcc;
37015 __net_timestamp(sb);
37016 vcc->push(vcc, sb);
37017- atomic_inc(&vcc->stats->rx);
37018+ atomic_inc_unchecked(&vcc->stats->rx);
37019
37020 drop:
37021 skb_pull(queue, 64);
37022@@ -1952,13 +1952,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37023
37024 if (vc == NULL) {
37025 printk("%s: NULL connection in send().\n", card->name);
37026- atomic_inc(&vcc->stats->tx_err);
37027+ atomic_inc_unchecked(&vcc->stats->tx_err);
37028 dev_kfree_skb(skb);
37029 return -EINVAL;
37030 }
37031 if (!test_bit(VCF_TX, &vc->flags)) {
37032 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37033- atomic_inc(&vcc->stats->tx_err);
37034+ atomic_inc_unchecked(&vcc->stats->tx_err);
37035 dev_kfree_skb(skb);
37036 return -EINVAL;
37037 }
37038@@ -1970,14 +1970,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37039 break;
37040 default:
37041 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37042- atomic_inc(&vcc->stats->tx_err);
37043+ atomic_inc_unchecked(&vcc->stats->tx_err);
37044 dev_kfree_skb(skb);
37045 return -EINVAL;
37046 }
37047
37048 if (skb_shinfo(skb)->nr_frags != 0) {
37049 printk("%s: No scatter-gather yet.\n", card->name);
37050- atomic_inc(&vcc->stats->tx_err);
37051+ atomic_inc_unchecked(&vcc->stats->tx_err);
37052 dev_kfree_skb(skb);
37053 return -EINVAL;
37054 }
37055@@ -1985,7 +1985,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37056
37057 err = queue_skb(card, vc, skb, oam);
37058 if (err) {
37059- atomic_inc(&vcc->stats->tx_err);
37060+ atomic_inc_unchecked(&vcc->stats->tx_err);
37061 dev_kfree_skb(skb);
37062 return err;
37063 }
37064@@ -2008,7 +2008,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37065 skb = dev_alloc_skb(64);
37066 if (!skb) {
37067 printk("%s: Out of memory in send_oam().\n", card->name);
37068- atomic_inc(&vcc->stats->tx_err);
37069+ atomic_inc_unchecked(&vcc->stats->tx_err);
37070 return -ENOMEM;
37071 }
37072 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
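/*
 * Context for the stats conversions above (and the similar hunks that
 * follow): under PAX_REFCOUNT every atomic_inc()/atomic_add() is
 * overflow-checked, so counters that are allowed to wrap -- like these
 * ATM rx/tx statistics -- are moved to the *_unchecked API.  A hedged
 * sketch, modelled on the x86 definitions this patch adds elsewhere
 * (simplified, not a verbatim copy):
 */
typedef struct {
	int counter;
} atomic_unchecked_t;

static inline void atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0\n"
#ifdef CONFIG_PAX_REFCOUNT
		     "jno 0f\n"			/* no signed overflow? done */
		     LOCK_PREFIX "decl %0\n"	/* undo the increment */
		     "int $4\n0:\n"		/* raise the overflow trap */
		     _ASM_EXTABLE(0b, 0b)
#endif
		     : "+m" (v->counter));
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0\n"	/* plain inc, may wrap */
		     : "+m" (v->counter));
}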
37073diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37074index 4217f29..88f547a 100644
37075--- a/drivers/atm/iphase.c
37076+++ b/drivers/atm/iphase.c
37077@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37078 status = (u_short) (buf_desc_ptr->desc_mode);
37079 if (status & (RX_CER | RX_PTE | RX_OFL))
37080 {
37081- atomic_inc(&vcc->stats->rx_err);
37082+ atomic_inc_unchecked(&vcc->stats->rx_err);
37083 IF_ERR(printk("IA: bad packet, dropping it");)
37084 if (status & RX_CER) {
37085 IF_ERR(printk(" cause: packet CRC error\n");)
37086@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37087 len = dma_addr - buf_addr;
37088 if (len > iadev->rx_buf_sz) {
37089 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37090- atomic_inc(&vcc->stats->rx_err);
37091+ atomic_inc_unchecked(&vcc->stats->rx_err);
37092 goto out_free_desc;
37093 }
37094
37095@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37096 ia_vcc = INPH_IA_VCC(vcc);
37097 if (ia_vcc == NULL)
37098 {
37099- atomic_inc(&vcc->stats->rx_err);
37100+ atomic_inc_unchecked(&vcc->stats->rx_err);
37101 atm_return(vcc, skb->truesize);
37102 dev_kfree_skb_any(skb);
37103 goto INCR_DLE;
37104@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37105 if ((length > iadev->rx_buf_sz) || (length >
37106 (skb->len - sizeof(struct cpcs_trailer))))
37107 {
37108- atomic_inc(&vcc->stats->rx_err);
37109+ atomic_inc_unchecked(&vcc->stats->rx_err);
37110 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37111 length, skb->len);)
37112 atm_return(vcc, skb->truesize);
37113@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37114
37115 IF_RX(printk("rx_dle_intr: skb push");)
37116 vcc->push(vcc,skb);
37117- atomic_inc(&vcc->stats->rx);
37118+ atomic_inc_unchecked(&vcc->stats->rx);
37119 iadev->rx_pkt_cnt++;
37120 }
37121 INCR_DLE:
37122@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37123 {
37124 struct k_sonet_stats *stats;
37125 stats = &PRIV(_ia_dev[board])->sonet_stats;
37126- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37127- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37128- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37129- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37130- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37131- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37132- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37133- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37134- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37135+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37136+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37137+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37138+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37139+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37140+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37141+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37142+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37143+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37144 }
37145 ia_cmds.status = 0;
37146 break;
37147@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37148 if ((desc == 0) || (desc > iadev->num_tx_desc))
37149 {
37150 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37151- atomic_inc(&vcc->stats->tx);
37152+ atomic_inc_unchecked(&vcc->stats->tx);
37153 if (vcc->pop)
37154 vcc->pop(vcc, skb);
37155 else
37156@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37157 ATM_DESC(skb) = vcc->vci;
37158 skb_queue_tail(&iadev->tx_dma_q, skb);
37159
37160- atomic_inc(&vcc->stats->tx);
37161+ atomic_inc_unchecked(&vcc->stats->tx);
37162 iadev->tx_pkt_cnt++;
37163 /* Increment transaction counter */
37164 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37165
37166 #if 0
37167 /* add flow control logic */
37168- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37169+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37170 if (iavcc->vc_desc_cnt > 10) {
37171 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37172 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37173diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37174index 93eaf8d..b4ca7da 100644
37175--- a/drivers/atm/lanai.c
37176+++ b/drivers/atm/lanai.c
37177@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37178 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37179 lanai_endtx(lanai, lvcc);
37180 lanai_free_skb(lvcc->tx.atmvcc, skb);
37181- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37182+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37183 }
37184
37185 /* Try to fill the buffer - don't call unless there is backlog */
37186@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37187 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37188 __net_timestamp(skb);
37189 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37190- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37191+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37192 out:
37193 lvcc->rx.buf.ptr = end;
37194 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37195@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37196 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37197 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37198 lanai->stats.service_rxnotaal5++;
37199- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37200+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37201 return 0;
37202 }
37203 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37204@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37205 int bytes;
37206 read_unlock(&vcc_sklist_lock);
37207 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37208- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37209+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37210 lvcc->stats.x.aal5.service_trash++;
37211 bytes = (SERVICE_GET_END(s) * 16) -
37212 (((unsigned long) lvcc->rx.buf.ptr) -
37213@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37214 }
37215 if (s & SERVICE_STREAM) {
37216 read_unlock(&vcc_sklist_lock);
37217- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37218+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37219 lvcc->stats.x.aal5.service_stream++;
37220 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37221 "PDU on VCI %d!\n", lanai->number, vci);
37222@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37223 return 0;
37224 }
37225 DPRINTK("got rx crc error on vci %d\n", vci);
37226- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37227+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37228 lvcc->stats.x.aal5.service_rxcrc++;
37229 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37230 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37231diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37232index 9988ac9..7c52585 100644
37233--- a/drivers/atm/nicstar.c
37234+++ b/drivers/atm/nicstar.c
37235@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37236 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37237 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37238 card->index);
37239- atomic_inc(&vcc->stats->tx_err);
37240+ atomic_inc_unchecked(&vcc->stats->tx_err);
37241 dev_kfree_skb_any(skb);
37242 return -EINVAL;
37243 }
37244@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37245 if (!vc->tx) {
37246 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37247 card->index);
37248- atomic_inc(&vcc->stats->tx_err);
37249+ atomic_inc_unchecked(&vcc->stats->tx_err);
37250 dev_kfree_skb_any(skb);
37251 return -EINVAL;
37252 }
37253@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37254 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37255 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37256 card->index);
37257- atomic_inc(&vcc->stats->tx_err);
37258+ atomic_inc_unchecked(&vcc->stats->tx_err);
37259 dev_kfree_skb_any(skb);
37260 return -EINVAL;
37261 }
37262
37263 if (skb_shinfo(skb)->nr_frags != 0) {
37264 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37265- atomic_inc(&vcc->stats->tx_err);
37266+ atomic_inc_unchecked(&vcc->stats->tx_err);
37267 dev_kfree_skb_any(skb);
37268 return -EINVAL;
37269 }
37270@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37271 }
37272
37273 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37274- atomic_inc(&vcc->stats->tx_err);
37275+ atomic_inc_unchecked(&vcc->stats->tx_err);
37276 dev_kfree_skb_any(skb);
37277 return -EIO;
37278 }
37279- atomic_inc(&vcc->stats->tx);
37280+ atomic_inc_unchecked(&vcc->stats->tx);
37281
37282 return 0;
37283 }
37284@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37285 printk
37286 ("nicstar%d: Can't allocate buffers for aal0.\n",
37287 card->index);
37288- atomic_add(i, &vcc->stats->rx_drop);
37289+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37290 break;
37291 }
37292 if (!atm_charge(vcc, sb->truesize)) {
37293 RXPRINTK
37294 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37295 card->index);
37296- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37297+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37298 dev_kfree_skb_any(sb);
37299 break;
37300 }
37301@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37302 ATM_SKB(sb)->vcc = vcc;
37303 __net_timestamp(sb);
37304 vcc->push(vcc, sb);
37305- atomic_inc(&vcc->stats->rx);
37306+ atomic_inc_unchecked(&vcc->stats->rx);
37307 cell += ATM_CELL_PAYLOAD;
37308 }
37309
37310@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37311 if (iovb == NULL) {
37312 printk("nicstar%d: Out of iovec buffers.\n",
37313 card->index);
37314- atomic_inc(&vcc->stats->rx_drop);
37315+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37316 recycle_rx_buf(card, skb);
37317 return;
37318 }
37319@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37320 small or large buffer itself. */
37321 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37322 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37323- atomic_inc(&vcc->stats->rx_err);
37324+ atomic_inc_unchecked(&vcc->stats->rx_err);
37325 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37326 NS_MAX_IOVECS);
37327 NS_PRV_IOVCNT(iovb) = 0;
37328@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37329 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37330 card->index);
37331 which_list(card, skb);
37332- atomic_inc(&vcc->stats->rx_err);
37333+ atomic_inc_unchecked(&vcc->stats->rx_err);
37334 recycle_rx_buf(card, skb);
37335 vc->rx_iov = NULL;
37336 recycle_iov_buf(card, iovb);
37337@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37338 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37339 card->index);
37340 which_list(card, skb);
37341- atomic_inc(&vcc->stats->rx_err);
37342+ atomic_inc_unchecked(&vcc->stats->rx_err);
37343 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37344 NS_PRV_IOVCNT(iovb));
37345 vc->rx_iov = NULL;
37346@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37347 printk(" - PDU size mismatch.\n");
37348 else
37349 printk(".\n");
37350- atomic_inc(&vcc->stats->rx_err);
37351+ atomic_inc_unchecked(&vcc->stats->rx_err);
37352 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37353 NS_PRV_IOVCNT(iovb));
37354 vc->rx_iov = NULL;
37355@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37356 /* skb points to a small buffer */
37357 if (!atm_charge(vcc, skb->truesize)) {
37358 push_rxbufs(card, skb);
37359- atomic_inc(&vcc->stats->rx_drop);
37360+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37361 } else {
37362 skb_put(skb, len);
37363 dequeue_sm_buf(card, skb);
37364@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37365 ATM_SKB(skb)->vcc = vcc;
37366 __net_timestamp(skb);
37367 vcc->push(vcc, skb);
37368- atomic_inc(&vcc->stats->rx);
37369+ atomic_inc_unchecked(&vcc->stats->rx);
37370 }
37371 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37372 struct sk_buff *sb;
37373@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37374 if (len <= NS_SMBUFSIZE) {
37375 if (!atm_charge(vcc, sb->truesize)) {
37376 push_rxbufs(card, sb);
37377- atomic_inc(&vcc->stats->rx_drop);
37378+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37379 } else {
37380 skb_put(sb, len);
37381 dequeue_sm_buf(card, sb);
37382@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37383 ATM_SKB(sb)->vcc = vcc;
37384 __net_timestamp(sb);
37385 vcc->push(vcc, sb);
37386- atomic_inc(&vcc->stats->rx);
37387+ atomic_inc_unchecked(&vcc->stats->rx);
37388 }
37389
37390 push_rxbufs(card, skb);
37391@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37392
37393 if (!atm_charge(vcc, skb->truesize)) {
37394 push_rxbufs(card, skb);
37395- atomic_inc(&vcc->stats->rx_drop);
37396+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37397 } else {
37398 dequeue_lg_buf(card, skb);
37399 #ifdef NS_USE_DESTRUCTORS
37400@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37401 ATM_SKB(skb)->vcc = vcc;
37402 __net_timestamp(skb);
37403 vcc->push(vcc, skb);
37404- atomic_inc(&vcc->stats->rx);
37405+ atomic_inc_unchecked(&vcc->stats->rx);
37406 }
37407
37408 push_rxbufs(card, sb);
37409@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37410 printk
37411 ("nicstar%d: Out of huge buffers.\n",
37412 card->index);
37413- atomic_inc(&vcc->stats->rx_drop);
37414+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37415 recycle_iovec_rx_bufs(card,
37416 (struct iovec *)
37417 iovb->data,
37418@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37419 card->hbpool.count++;
37420 } else
37421 dev_kfree_skb_any(hb);
37422- atomic_inc(&vcc->stats->rx_drop);
37423+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37424 } else {
37425 /* Copy the small buffer to the huge buffer */
37426 sb = (struct sk_buff *)iov->iov_base;
37427@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37428 #endif /* NS_USE_DESTRUCTORS */
37429 __net_timestamp(hb);
37430 vcc->push(vcc, hb);
37431- atomic_inc(&vcc->stats->rx);
37432+ atomic_inc_unchecked(&vcc->stats->rx);
37433 }
37434 }
37435
37436diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37437index 21b0bc6..b5f40ba 100644
37438--- a/drivers/atm/solos-pci.c
37439+++ b/drivers/atm/solos-pci.c
37440@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37441 }
37442 atm_charge(vcc, skb->truesize);
37443 vcc->push(vcc, skb);
37444- atomic_inc(&vcc->stats->rx);
37445+ atomic_inc_unchecked(&vcc->stats->rx);
37446 break;
37447
37448 case PKT_STATUS:
37449@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37450 vcc = SKB_CB(oldskb)->vcc;
37451
37452 if (vcc) {
37453- atomic_inc(&vcc->stats->tx);
37454+ atomic_inc_unchecked(&vcc->stats->tx);
37455 solos_pop(vcc, oldskb);
37456 } else {
37457 dev_kfree_skb_irq(oldskb);
37458diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37459index 0215934..ce9f5b1 100644
37460--- a/drivers/atm/suni.c
37461+++ b/drivers/atm/suni.c
37462@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37463
37464
37465 #define ADD_LIMITED(s,v) \
37466- atomic_add((v),&stats->s); \
37467- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37468+ atomic_add_unchecked((v),&stats->s); \
37469+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37470
37471
37472 static void suni_hz(unsigned long from_timer)
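/*
 * ADD_LIMITED above is a saturating add: after the unchecked add, a
 * negative reading means the signed counter wrapped past INT_MAX, so it
 * is pinned there instead.  A standalone model of the semantics (the
 * name add_limited is hypothetical; the wraparound is well-defined here
 * only because the kernel builds with -fno-strict-overflow):
 */
#include <limits.h>

static void add_limited(int *ctr, int v)
{
	*ctr += v;		/* may wrap past INT_MAX... */
	if (*ctr < 0)
		*ctr = INT_MAX;	/* ...in which case saturate */
}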
37473diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37474index 5120a96..e2572bd 100644
37475--- a/drivers/atm/uPD98402.c
37476+++ b/drivers/atm/uPD98402.c
37477@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37478 struct sonet_stats tmp;
37479 int error = 0;
37480
37481- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37482+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37483 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37484 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37485 if (zero && !error) {
37486@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37487
37488
37489 #define ADD_LIMITED(s,v) \
37490- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37491- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37492- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37493+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37494+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37495+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37496
37497
37498 static void stat_event(struct atm_dev *dev)
37499@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37500 if (reason & uPD98402_INT_PFM) stat_event(dev);
37501 if (reason & uPD98402_INT_PCO) {
37502 (void) GET(PCOCR); /* clear interrupt cause */
37503- atomic_add(GET(HECCT),
37504+ atomic_add_unchecked(GET(HECCT),
37505 &PRIV(dev)->sonet_stats.uncorr_hcs);
37506 }
37507 if ((reason & uPD98402_INT_RFO) &&
37508@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37509 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37510 uPD98402_INT_LOS),PIMR); /* enable them */
37511 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37512- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37513- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37514- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37515+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37516+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37517+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37518 return 0;
37519 }
37520
37521diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37522index 969c3c2..9b72956 100644
37523--- a/drivers/atm/zatm.c
37524+++ b/drivers/atm/zatm.c
37525@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37526 }
37527 if (!size) {
37528 dev_kfree_skb_irq(skb);
37529- if (vcc) atomic_inc(&vcc->stats->rx_err);
37530+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37531 continue;
37532 }
37533 if (!atm_charge(vcc,skb->truesize)) {
37534@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37535 skb->len = size;
37536 ATM_SKB(skb)->vcc = vcc;
37537 vcc->push(vcc,skb);
37538- atomic_inc(&vcc->stats->rx);
37539+ atomic_inc_unchecked(&vcc->stats->rx);
37540 }
37541 zout(pos & 0xffff,MTA(mbx));
37542 #if 0 /* probably a stupid idea */
37543@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37544 skb_queue_head(&zatm_vcc->backlog,skb);
37545 break;
37546 }
37547- atomic_inc(&vcc->stats->tx);
37548+ atomic_inc_unchecked(&vcc->stats->tx);
37549 wake_up(&zatm_vcc->tx_wait);
37550 }
37551
37552diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37553index 876bae5..8978785 100644
37554--- a/drivers/base/bus.c
37555+++ b/drivers/base/bus.c
37556@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37557 return -EINVAL;
37558
37559 mutex_lock(&subsys->p->mutex);
37560- list_add_tail(&sif->node, &subsys->p->interfaces);
37561+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37562 if (sif->add_dev) {
37563 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37564 while ((dev = subsys_dev_iter_next(&iter)))
37565@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37566 subsys = sif->subsys;
37567
37568 mutex_lock(&subsys->p->mutex);
37569- list_del_init(&sif->node);
37570+ pax_list_del_init((struct list_head *)&sif->node);
37571 if (sif->remove_dev) {
37572 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37573 while ((dev = subsys_dev_iter_next(&iter)))
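/*
 * The pax_list_* helpers above exist because the constify plugin makes
 * structures such as subsys_interface read-only; their embedded
 * list_head can only be written with write protection lifted.  A hedged
 * sketch, modelled on the helper this patch adds elsewhere (debug
 * checks omitted):
 */
void pax_list_add_tail(struct list_head *new, struct list_head *head)
{
	pax_open_kernel();		/* allow writes to r/o kernel data */
	__list_add(new, head->prev, head);
	pax_close_kernel();		/* restore write protection */
}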
37574diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37575index 25798db..15f130e 100644
37576--- a/drivers/base/devtmpfs.c
37577+++ b/drivers/base/devtmpfs.c
37578@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37579 if (!thread)
37580 return 0;
37581
37582- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37583+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37584 if (err)
37585 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37586 else
37587@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37588 *err = sys_unshare(CLONE_NEWNS);
37589 if (*err)
37590 goto out;
37591- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37592+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37593 if (*err)
37594 goto out;
37595- sys_chdir("/.."); /* will traverse into overmounted root */
37596- sys_chroot(".");
37597+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37598+ sys_chroot((char __force_user *)".");
37599 complete(&setup_done);
37600 while (1) {
37601 spin_lock(&req_lock);
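/*
 * The __force_user casts above satisfy sparse/PaX address-space
 * checking: sys_mount()/sys_chdir()/sys_chroot() are declared to take
 * __user pointers, but devtmpfs passes kernel strings.  Assumed shape
 * of the annotation (as defined in this patch's compiler.h); it is a
 * no-op for the compiler proper:
 */
#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user	__force __user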
37602diff --git a/drivers/base/node.c b/drivers/base/node.c
37603index a3b82e9..f90a8ce 100644
37604--- a/drivers/base/node.c
37605+++ b/drivers/base/node.c
37606@@ -614,7 +614,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37607 struct node_attr {
37608 struct device_attribute attr;
37609 enum node_states state;
37610-};
37611+} __do_const;
37612
37613 static ssize_t show_node_state(struct device *dev,
37614 struct device_attribute *attr, char *buf)
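/*
 * __do_const (applied to struct node_attr above, and to several ops
 * tables later in this patch) is consumed by the constify GCC plugin,
 * which moves such function-pointer-carrying structures into read-only
 * memory.  Assumed fallback definition when the plugin is not in use:
 */
#ifdef CONSTIFY_PLUGIN
# define __do_const	__attribute__((do_const))
#else
# define __do_const	/* no-op without the plugin */
#endif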
37615diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37616index 0d8780c..0b5df3f 100644
37617--- a/drivers/base/power/domain.c
37618+++ b/drivers/base/power/domain.c
37619@@ -1725,7 +1725,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37620 {
37621 struct cpuidle_driver *cpuidle_drv;
37622 struct gpd_cpuidle_data *cpuidle_data;
37623- struct cpuidle_state *idle_state;
37624+ cpuidle_state_no_const *idle_state;
37625 int ret = 0;
37626
37627 if (IS_ERR_OR_NULL(genpd) || state < 0)
37628@@ -1793,7 +1793,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37629 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37630 {
37631 struct gpd_cpuidle_data *cpuidle_data;
37632- struct cpuidle_state *idle_state;
37633+ cpuidle_state_no_const *idle_state;
37634 int ret = 0;
37635
37636 if (IS_ERR_OR_NULL(genpd))
37637@@ -2222,7 +2222,10 @@ int genpd_dev_pm_attach(struct device *dev)
37638 return ret;
37639 }
37640
37641- dev->pm_domain->detach = genpd_dev_pm_detach;
37642+ pax_open_kernel();
37643+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37644+ pax_close_kernel();
37645+
37646 pm_genpd_poweron(pd);
37647
37648 return 0;
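/*
 * pax_open_kernel()/pax_close_kernel() bracket the function-pointer
 * write above because dev->pm_domain has been constified.  On x86 the
 * pair toggles CR0.WP; a simplified sketch of the idea only (the real
 * implementation also disables preemption around the window):
 */
static inline void pax_open_kernel(void)
{
	write_cr0(read_cr0() & ~X86_CR0_WP);	/* kernel may write r/o pages */
}

static inline void pax_close_kernel(void)
{
	write_cr0(read_cr0() | X86_CR0_WP);	/* write protection back on */
}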
37649diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37650index d2be3f9..0a3167a 100644
37651--- a/drivers/base/power/sysfs.c
37652+++ b/drivers/base/power/sysfs.c
37653@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37654 return -EIO;
37655 }
37656 }
37657- return sprintf(buf, p);
37658+ return sprintf(buf, "%s", p);
37659 }
37660
37661 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
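/*
 * The sprintf() change above is format-string hardening: the second
 * argument is parsed for %-conversions, so a non-literal must never sit
 * in that slot.  Here p is always one of a few fixed status strings, so
 * the change is defensive rather than a fix for an exploitable bug.
 * Hypothetical illustration:
 */
static void fmt_example(const char *p)
{
	char buf[64];

	sprintf(buf, p);	/* p parsed for conversions: unsafe pattern */
	sprintf(buf, "%s", p);	/* p copied as plain data: safe */
}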
37662diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37663index c2744b3..08fac19 100644
37664--- a/drivers/base/power/wakeup.c
37665+++ b/drivers/base/power/wakeup.c
37666@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37667 * They need to be modified together atomically, so it's better to use one
37668 * atomic variable to hold them both.
37669 */
37670-static atomic_t combined_event_count = ATOMIC_INIT(0);
37671+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37672
37673 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37674 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37675
37676 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37677 {
37678- unsigned int comb = atomic_read(&combined_event_count);
37679+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37680
37681 *cnt = (comb >> IN_PROGRESS_BITS);
37682 *inpr = comb & MAX_IN_PROGRESS;
37683@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37684 ws->start_prevent_time = ws->last_time;
37685
37686 /* Increment the counter of events in progress. */
37687- cec = atomic_inc_return(&combined_event_count);
37688+ cec = atomic_inc_return_unchecked(&combined_event_count);
37689
37690 trace_wakeup_source_activate(ws->name, cec);
37691 }
37692@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37693 * Increment the counter of registered wakeup events and decrement the
37694 * couter of wakeup events in progress simultaneously.
37695 */
37696- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37697+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37698 trace_wakeup_source_deactivate(ws->name, cec);
37699
37700 split_counters(&cnt, &inpr);
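/*
 * Why MAX_IN_PROGRESS is added above: combined_event_count packs two
 * fields into one word -- the low IN_PROGRESS_BITS bits count events in
 * progress, the high bits count registered events.  Adding 2^bits - 1
 * therefore bumps the high field and drops the low one in a single
 * atomic operation.  Worked example with 16 in-progress bits:
 *
 *	comb = (2 << 16) | 1 = 0x20001   (cnt = 2, inpr = 1)
 *	comb + 0xffff        = 0x30000   (cnt = 3, inpr = 0)
 *
 * The _unchecked variant is used because this packed counter wraps by
 * design and must not trip PAX_REFCOUNT.
 */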
37701diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37702index 8d98a32..61d3165 100644
37703--- a/drivers/base/syscore.c
37704+++ b/drivers/base/syscore.c
37705@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37706 void register_syscore_ops(struct syscore_ops *ops)
37707 {
37708 mutex_lock(&syscore_ops_lock);
37709- list_add_tail(&ops->node, &syscore_ops_list);
37710+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37711 mutex_unlock(&syscore_ops_lock);
37712 }
37713 EXPORT_SYMBOL_GPL(register_syscore_ops);
37714@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37715 void unregister_syscore_ops(struct syscore_ops *ops)
37716 {
37717 mutex_lock(&syscore_ops_lock);
37718- list_del(&ops->node);
37719+ pax_list_del((struct list_head *)&ops->node);
37720 mutex_unlock(&syscore_ops_lock);
37721 }
37722 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37723diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37724index ff20f19..018f1da 100644
37725--- a/drivers/block/cciss.c
37726+++ b/drivers/block/cciss.c
37727@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
37728 while (!list_empty(&h->reqQ)) {
37729 c = list_entry(h->reqQ.next, CommandList_struct, list);
37730 /* can't do anything if fifo is full */
37731- if ((h->access.fifo_full(h))) {
37732+ if ((h->access->fifo_full(h))) {
37733 dev_warn(&h->pdev->dev, "fifo full\n");
37734 break;
37735 }
37736@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
37737 h->Qdepth--;
37738
37739 /* Tell the controller execute command */
37740- h->access.submit_command(h, c);
37741+ h->access->submit_command(h, c);
37742
37743 /* Put job onto the completed Q */
37744 addQ(&h->cmpQ, c);
37745@@ -3444,17 +3444,17 @@ startio:
37746
37747 static inline unsigned long get_next_completion(ctlr_info_t *h)
37748 {
37749- return h->access.command_completed(h);
37750+ return h->access->command_completed(h);
37751 }
37752
37753 static inline int interrupt_pending(ctlr_info_t *h)
37754 {
37755- return h->access.intr_pending(h);
37756+ return h->access->intr_pending(h);
37757 }
37758
37759 static inline long interrupt_not_for_us(ctlr_info_t *h)
37760 {
37761- return ((h->access.intr_pending(h) == 0) ||
37762+ return ((h->access->intr_pending(h) == 0) ||
37763 (h->interrupts_enabled == 0));
37764 }
37765
37766@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
37767 u32 a;
37768
37769 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37770- return h->access.command_completed(h);
37771+ return h->access->command_completed(h);
37772
37773 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
37774 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
37775@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
37776 trans_support & CFGTBL_Trans_use_short_tags);
37777
37778 /* Change the access methods to the performant access methods */
37779- h->access = SA5_performant_access;
37780+ h->access = &SA5_performant_access;
37781 h->transMethod = CFGTBL_Trans_Performant;
37782
37783 return;
37784@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
37785 if (prod_index < 0)
37786 return -ENODEV;
37787 h->product_name = products[prod_index].product_name;
37788- h->access = *(products[prod_index].access);
37789+ h->access = products[prod_index].access;
37790
37791 if (cciss_board_disabled(h)) {
37792 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
37793@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
37794 }
37795
37796 /* make sure the board interrupts are off */
37797- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37798+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37799 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
37800 if (rc)
37801 goto clean2;
37802@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
37803 * fake ones to scoop up any residual completions.
37804 */
37805 spin_lock_irqsave(&h->lock, flags);
37806- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37807+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37808 spin_unlock_irqrestore(&h->lock, flags);
37809 free_irq(h->intr[h->intr_mode], h);
37810 rc = cciss_request_irq(h, cciss_msix_discard_completions,
37811@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
37812 dev_info(&h->pdev->dev, "Board READY.\n");
37813 dev_info(&h->pdev->dev,
37814 "Waiting for stale completions to drain.\n");
37815- h->access.set_intr_mask(h, CCISS_INTR_ON);
37816+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37817 msleep(10000);
37818- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37819+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37820
37821 rc = controller_reset_failed(h->cfgtable);
37822 if (rc)
37823@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
37824 cciss_scsi_setup(h);
37825
37826 /* Turn the interrupts on so we can service requests */
37827- h->access.set_intr_mask(h, CCISS_INTR_ON);
37828+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37829
37830 /* Get the firmware version */
37831 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
37832@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
37833 kfree(flush_buf);
37834 if (return_code != IO_OK)
37835 dev_warn(&h->pdev->dev, "Error flushing cache\n");
37836- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37837+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37838 free_irq(h->intr[h->intr_mode], h);
37839 }
37840
37841diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
37842index 7fda30e..2f27946 100644
37843--- a/drivers/block/cciss.h
37844+++ b/drivers/block/cciss.h
37845@@ -101,7 +101,7 @@ struct ctlr_info
37846 /* information about each logical volume */
37847 drive_info_struct *drv[CISS_MAX_LUN];
37848
37849- struct access_method access;
37850+ struct access_method *access;
37851
37852 /* queue and queue Info */
37853 struct list_head reqQ;
37854@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
37855 }
37856
37857 static struct access_method SA5_access = {
37858- SA5_submit_command,
37859- SA5_intr_mask,
37860- SA5_fifo_full,
37861- SA5_intr_pending,
37862- SA5_completed,
37863+ .submit_command = SA5_submit_command,
37864+ .set_intr_mask = SA5_intr_mask,
37865+ .fifo_full = SA5_fifo_full,
37866+ .intr_pending = SA5_intr_pending,
37867+ .command_completed = SA5_completed,
37868 };
37869
37870 static struct access_method SA5B_access = {
37871- SA5_submit_command,
37872- SA5B_intr_mask,
37873- SA5_fifo_full,
37874- SA5B_intr_pending,
37875- SA5_completed,
37876+ .submit_command = SA5_submit_command,
37877+ .set_intr_mask = SA5B_intr_mask,
37878+ .fifo_full = SA5_fifo_full,
37879+ .intr_pending = SA5B_intr_pending,
37880+ .command_completed = SA5_completed,
37881 };
37882
37883 static struct access_method SA5_performant_access = {
37884- SA5_submit_command,
37885- SA5_performant_intr_mask,
37886- SA5_fifo_full,
37887- SA5_performant_intr_pending,
37888- SA5_performant_completed,
37889+ .submit_command = SA5_submit_command,
37890+ .set_intr_mask = SA5_performant_intr_mask,
37891+ .fifo_full = SA5_fifo_full,
37892+ .intr_pending = SA5_performant_intr_pending,
37893+ .command_completed = SA5_performant_completed,
37894 };
37895
37896 struct board_type {
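/*
 * Two related changes run through the cciss/cpqarray hunks: the shared
 * access_method tables switch to designated initializers (each handler
 * is tied to its slot by name, a prerequisite for constifying the
 * struct), and ctlr_info stores a pointer to the one shared table
 * instead of a writable per-controller copy.  Generic sketch of the
 * pattern (names hypothetical):
 */
struct ops {
	void (*submit)(void);
};

static const struct ops shared_table = {
	.submit = NULL,		/* designated: survives field reordering */
};

struct ctlr {
	const struct ops *ops;	/* points at the shared r/o table */
};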
37897diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
37898index 2b94403..fd6ad1f 100644
37899--- a/drivers/block/cpqarray.c
37900+++ b/drivers/block/cpqarray.c
37901@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37902 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
37903 goto Enomem4;
37904 }
37905- hba[i]->access.set_intr_mask(hba[i], 0);
37906+ hba[i]->access->set_intr_mask(hba[i], 0);
37907 if (request_irq(hba[i]->intr, do_ida_intr,
37908 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
37909 {
37910@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37911 add_timer(&hba[i]->timer);
37912
37913 /* Enable IRQ now that spinlock and rate limit timer are set up */
37914- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37915+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37916
37917 for(j=0; j<NWD; j++) {
37918 struct gendisk *disk = ida_gendisk[i][j];
37919@@ -694,7 +694,7 @@ DBGINFO(
37920 for(i=0; i<NR_PRODUCTS; i++) {
37921 if (board_id == products[i].board_id) {
37922 c->product_name = products[i].product_name;
37923- c->access = *(products[i].access);
37924+ c->access = products[i].access;
37925 break;
37926 }
37927 }
37928@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
37929 hba[ctlr]->intr = intr;
37930 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
37931 hba[ctlr]->product_name = products[j].product_name;
37932- hba[ctlr]->access = *(products[j].access);
37933+ hba[ctlr]->access = products[j].access;
37934 hba[ctlr]->ctlr = ctlr;
37935 hba[ctlr]->board_id = board_id;
37936 hba[ctlr]->pci_dev = NULL; /* not PCI */
37937@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
37938
37939 while((c = h->reqQ) != NULL) {
37940 /* Can't do anything if we're busy */
37941- if (h->access.fifo_full(h) == 0)
37942+ if (h->access->fifo_full(h) == 0)
37943 return;
37944
37945 /* Get the first entry from the request Q */
37946@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
37947 h->Qdepth--;
37948
37949 /* Tell the controller to do our bidding */
37950- h->access.submit_command(h, c);
37951+ h->access->submit_command(h, c);
37952
37953 /* Get onto the completion Q */
37954 addQ(&h->cmpQ, c);
37955@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37956 unsigned long flags;
37957 __u32 a,a1;
37958
37959- istat = h->access.intr_pending(h);
37960+ istat = h->access->intr_pending(h);
37961 /* Is this interrupt for us? */
37962 if (istat == 0)
37963 return IRQ_NONE;
37964@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37965 */
37966 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
37967 if (istat & FIFO_NOT_EMPTY) {
37968- while((a = h->access.command_completed(h))) {
37969+ while((a = h->access->command_completed(h))) {
37970 a1 = a; a &= ~3;
37971 if ((c = h->cmpQ) == NULL)
37972 {
37973@@ -1448,11 +1448,11 @@ static int sendcmd(
37974 /*
37975 * Disable interrupt
37976 */
37977- info_p->access.set_intr_mask(info_p, 0);
37978+ info_p->access->set_intr_mask(info_p, 0);
37979 /* Make sure there is room in the command FIFO */
37980 /* Actually it should be completely empty at this time. */
37981 for (i = 200000; i > 0; i--) {
37982- temp = info_p->access.fifo_full(info_p);
37983+ temp = info_p->access->fifo_full(info_p);
37984 if (temp != 0) {
37985 break;
37986 }
37987@@ -1465,7 +1465,7 @@ DBG(
37988 /*
37989 * Send the cmd
37990 */
37991- info_p->access.submit_command(info_p, c);
37992+ info_p->access->submit_command(info_p, c);
37993 complete = pollcomplete(ctlr);
37994
37995 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
37996@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
37997 * we check the new geometry. Then turn interrupts back on when
37998 * we're done.
37999 */
38000- host->access.set_intr_mask(host, 0);
38001+ host->access->set_intr_mask(host, 0);
38002 getgeometry(ctlr);
38003- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38004+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38005
38006 for(i=0; i<NWD; i++) {
38007 struct gendisk *disk = ida_gendisk[ctlr][i];
38008@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38009 /* Wait (up to 2 seconds) for a command to complete */
38010
38011 for (i = 200000; i > 0; i--) {
38012- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38013+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38014 if (done == 0) {
38015 udelay(10); /* a short fixed delay */
38016 } else
38017diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38018index be73e9d..7fbf140 100644
38019--- a/drivers/block/cpqarray.h
38020+++ b/drivers/block/cpqarray.h
38021@@ -99,7 +99,7 @@ struct ctlr_info {
38022 drv_info_t drv[NWD];
38023 struct proc_dir_entry *proc;
38024
38025- struct access_method access;
38026+ struct access_method *access;
38027
38028 cmdlist_t *reqQ;
38029 cmdlist_t *cmpQ;
38030diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38031index 434c77d..6d3219a 100644
38032--- a/drivers/block/drbd/drbd_bitmap.c
38033+++ b/drivers/block/drbd/drbd_bitmap.c
38034@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38035 submit_bio(rw, bio);
38036 /* this should not count as user activity and cause the
38037 * resync to throttle -- see drbd_rs_should_slow_down(). */
38038- atomic_add(len >> 9, &device->rs_sect_ev);
38039+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38040 }
38041 }
38042
38043diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38044index b905e98..0812ed8 100644
38045--- a/drivers/block/drbd/drbd_int.h
38046+++ b/drivers/block/drbd/drbd_int.h
38047@@ -385,7 +385,7 @@ struct drbd_epoch {
38048 struct drbd_connection *connection;
38049 struct list_head list;
38050 unsigned int barrier_nr;
38051- atomic_t epoch_size; /* increased on every request added. */
38052+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38053 atomic_t active; /* increased on every req. added, and dec on every finished. */
38054 unsigned long flags;
38055 };
38056@@ -946,7 +946,7 @@ struct drbd_device {
38057 unsigned int al_tr_number;
38058 int al_tr_cycle;
38059 wait_queue_head_t seq_wait;
38060- atomic_t packet_seq;
38061+ atomic_unchecked_t packet_seq;
38062 unsigned int peer_seq;
38063 spinlock_t peer_seq_lock;
38064 unsigned long comm_bm_set; /* communicated number of set bits. */
38065@@ -955,8 +955,8 @@ struct drbd_device {
38066 struct mutex own_state_mutex;
38067 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38068 char congestion_reason; /* Why we where congested... */
38069- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38070- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38071+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38072+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38073 int rs_last_sect_ev; /* counter to compare with */
38074 int rs_last_events; /* counter of read or write "events" (unit sectors)
38075 * on the lower level device when we last looked. */
38076diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38077index 1fc8342..7e7742b 100644
38078--- a/drivers/block/drbd/drbd_main.c
38079+++ b/drivers/block/drbd/drbd_main.c
38080@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38081 p->sector = sector;
38082 p->block_id = block_id;
38083 p->blksize = blksize;
38084- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38085+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38086 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38087 }
38088
38089@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38090 return -EIO;
38091 p->sector = cpu_to_be64(req->i.sector);
38092 p->block_id = (unsigned long)req;
38093- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38094+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38095 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38096 if (device->state.conn >= C_SYNC_SOURCE &&
38097 device->state.conn <= C_PAUSED_SYNC_T)
38098@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38099 atomic_set(&device->unacked_cnt, 0);
38100 atomic_set(&device->local_cnt, 0);
38101 atomic_set(&device->pp_in_use_by_net, 0);
38102- atomic_set(&device->rs_sect_in, 0);
38103- atomic_set(&device->rs_sect_ev, 0);
38104+ atomic_set_unchecked(&device->rs_sect_in, 0);
38105+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38106 atomic_set(&device->ap_in_flight, 0);
38107 atomic_set(&device->md_io.in_use, 0);
38108
38109@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38110 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38111 struct drbd_resource *resource = connection->resource;
38112
38113- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38114- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38115+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38116+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38117 kfree(connection->current_epoch);
38118
38119 idr_destroy(&connection->peer_devices);
38120diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38121index 74df8cf..e41fc24 100644
38122--- a/drivers/block/drbd/drbd_nl.c
38123+++ b/drivers/block/drbd/drbd_nl.c
38124@@ -3637,13 +3637,13 @@ finish:
38125
38126 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38127 {
38128- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38129+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38130 struct sk_buff *msg;
38131 struct drbd_genlmsghdr *d_out;
38132 unsigned seq;
38133 int err = -ENOMEM;
38134
38135- seq = atomic_inc_return(&drbd_genl_seq);
38136+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38137 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38138 if (!msg)
38139 goto failed;
38140diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38141index d169b4a..481463f 100644
38142--- a/drivers/block/drbd/drbd_receiver.c
38143+++ b/drivers/block/drbd/drbd_receiver.c
38144@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38145 struct drbd_device *device = peer_device->device;
38146 int err;
38147
38148- atomic_set(&device->packet_seq, 0);
38149+ atomic_set_unchecked(&device->packet_seq, 0);
38150 device->peer_seq = 0;
38151
38152 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38153@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38154 do {
38155 next_epoch = NULL;
38156
38157- epoch_size = atomic_read(&epoch->epoch_size);
38158+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38159
38160 switch (ev & ~EV_CLEANUP) {
38161 case EV_PUT:
38162@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38163 rv = FE_DESTROYED;
38164 } else {
38165 epoch->flags = 0;
38166- atomic_set(&epoch->epoch_size, 0);
38167+ atomic_set_unchecked(&epoch->epoch_size, 0);
38168 /* atomic_set(&epoch->active, 0); is already zero */
38169 if (rv == FE_STILL_LIVE)
38170 rv = FE_RECYCLED;
38171@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38172 conn_wait_active_ee_empty(connection);
38173 drbd_flush(connection);
38174
38175- if (atomic_read(&connection->current_epoch->epoch_size)) {
38176+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38177 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38178 if (epoch)
38179 break;
38180@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38181 }
38182
38183 epoch->flags = 0;
38184- atomic_set(&epoch->epoch_size, 0);
38185+ atomic_set_unchecked(&epoch->epoch_size, 0);
38186 atomic_set(&epoch->active, 0);
38187
38188 spin_lock(&connection->epoch_lock);
38189- if (atomic_read(&connection->current_epoch->epoch_size)) {
38190+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38191 list_add(&epoch->list, &connection->current_epoch->list);
38192 connection->current_epoch = epoch;
38193 connection->epochs++;
38194@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38195 list_add_tail(&peer_req->w.list, &device->sync_ee);
38196 spin_unlock_irq(&device->resource->req_lock);
38197
38198- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38199+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38200 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38201 return 0;
38202
38203@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38204 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38205 }
38206
38207- atomic_add(pi->size >> 9, &device->rs_sect_in);
38208+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38209
38210 return err;
38211 }
38212@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38213
38214 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38215 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38216- atomic_inc(&connection->current_epoch->epoch_size);
38217+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38218 err2 = drbd_drain_block(peer_device, pi->size);
38219 if (!err)
38220 err = err2;
38221@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38222
38223 spin_lock(&connection->epoch_lock);
38224 peer_req->epoch = connection->current_epoch;
38225- atomic_inc(&peer_req->epoch->epoch_size);
38226+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38227 atomic_inc(&peer_req->epoch->active);
38228 spin_unlock(&connection->epoch_lock);
38229
38230@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38231
38232 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38233 (int)part_stat_read(&disk->part0, sectors[1]) -
38234- atomic_read(&device->rs_sect_ev);
38235+ atomic_read_unchecked(&device->rs_sect_ev);
38236
38237 if (atomic_read(&device->ap_actlog_cnt)
38238 || curr_events - device->rs_last_events > 64) {
38239@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38240 device->use_csums = true;
38241 } else if (pi->cmd == P_OV_REPLY) {
38242 /* track progress, we may need to throttle */
38243- atomic_add(size >> 9, &device->rs_sect_in);
38244+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38245 peer_req->w.cb = w_e_end_ov_reply;
38246 dec_rs_pending(device);
38247 /* drbd_rs_begin_io done when we sent this request,
38248@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38249 goto out_free_e;
38250
38251 submit_for_resync:
38252- atomic_add(size >> 9, &device->rs_sect_ev);
38253+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38254
38255 submit:
38256 update_receiver_timing_details(connection, drbd_submit_peer_request);
38257@@ -4564,7 +4564,7 @@ struct data_cmd {
38258 int expect_payload;
38259 size_t pkt_size;
38260 int (*fn)(struct drbd_connection *, struct packet_info *);
38261-};
38262+} __do_const;
38263
38264 static struct data_cmd drbd_cmd_handler[] = {
38265 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38266@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38267 if (!list_empty(&connection->current_epoch->list))
38268 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38269 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38270- atomic_set(&connection->current_epoch->epoch_size, 0);
38271+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38272 connection->send.seen_any_write_yet = false;
38273
38274 drbd_info(connection, "Connection closed\n");
38275@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38276 put_ldev(device);
38277 }
38278 dec_rs_pending(device);
38279- atomic_add(blksize >> 9, &device->rs_sect_in);
38280+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38281
38282 return 0;
38283 }
38284@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38285 struct asender_cmd {
38286 size_t pkt_size;
38287 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38288-};
38289+} __do_const;
38290
38291 static struct asender_cmd asender_tbl[] = {
38292 [P_PING] = { 0, got_Ping },
38293diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38294index d0fae55..4469096 100644
38295--- a/drivers/block/drbd/drbd_worker.c
38296+++ b/drivers/block/drbd/drbd_worker.c
38297@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38298 list_add_tail(&peer_req->w.list, &device->read_ee);
38299 spin_unlock_irq(&device->resource->req_lock);
38300
38301- atomic_add(size >> 9, &device->rs_sect_ev);
38302+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38303 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38304 return 0;
38305
38306@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38307 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38308 int number, mxb;
38309
38310- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38311+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38312 device->rs_in_flight -= sect_in;
38313
38314 rcu_read_lock();
38315@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38316 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38317 struct fifo_buffer *plan;
38318
38319- atomic_set(&device->rs_sect_in, 0);
38320- atomic_set(&device->rs_sect_ev, 0);
38321+ atomic_set_unchecked(&device->rs_sect_in, 0);
38322+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38323 device->rs_in_flight = 0;
38324 device->rs_last_events =
38325 (int)part_stat_read(&disk->part0, sectors[0]) +
38326diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38327index 6cb1beb..bf490f7 100644
38328--- a/drivers/block/loop.c
38329+++ b/drivers/block/loop.c
38330@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
38331
38332 file_start_write(file);
38333 set_fs(get_ds());
38334- bw = file->f_op->write(file, buf, len, &pos);
38335+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38336 set_fs(old_fs);
38337 file_end_write(file);
38338 if (likely(bw == len))
38339diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
38340index d826bf3..8eb406c 100644
38341--- a/drivers/block/nvme-core.c
38342+++ b/drivers/block/nvme-core.c
38343@@ -76,7 +76,6 @@ static LIST_HEAD(dev_list);
38344 static struct task_struct *nvme_thread;
38345 static struct workqueue_struct *nvme_workq;
38346 static wait_queue_head_t nvme_kthread_wait;
38347-static struct notifier_block nvme_nb;
38348
38349 static void nvme_reset_failed_dev(struct work_struct *ws);
38350 static int nvme_process_cq(struct nvme_queue *nvmeq);
38351@@ -2955,7 +2954,6 @@ static int __init nvme_init(void)
38352 static void __exit nvme_exit(void)
38353 {
38354 pci_unregister_driver(&nvme_driver);
38355- unregister_hotcpu_notifier(&nvme_nb);
38356 unregister_blkdev(nvme_major, "nvme");
38357 destroy_workqueue(nvme_workq);
38358 BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
38359diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38360index 09e628da..7607aaa 100644
38361--- a/drivers/block/pktcdvd.c
38362+++ b/drivers/block/pktcdvd.c
38363@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38364
38365 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38366 {
38367- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38368+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38369 }
38370
38371 /*
38372@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38373 return -EROFS;
38374 }
38375 pd->settings.fp = ti.fp;
38376- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38377+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38378
38379 if (ti.nwa_v) {
38380 pd->nwa = be32_to_cpu(ti.next_writable);
38381diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38382index 8a86b62..f54c87e 100644
38383--- a/drivers/block/rbd.c
38384+++ b/drivers/block/rbd.c
38385@@ -63,7 +63,7 @@
38386 * If the counter is already at its maximum value returns
38387 * -EINVAL without updating it.
38388 */
38389-static int atomic_inc_return_safe(atomic_t *v)
38390+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38391 {
38392 unsigned int counter;
38393
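/*
 * __intentional_overflow(-1) above annotates a function for the
 * size_overflow GCC plugin shipped with this patch: its increment may
 * legitimately overflow before being backed out, so the plugin must not
 * instrument it.  Assumed fallback definition when the plugin is
 * absent:
 */
#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)
#endif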
38394diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38395index e5565fb..71be10b4 100644
38396--- a/drivers/block/smart1,2.h
38397+++ b/drivers/block/smart1,2.h
38398@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38399 }
38400
38401 static struct access_method smart4_access = {
38402- smart4_submit_command,
38403- smart4_intr_mask,
38404- smart4_fifo_full,
38405- smart4_intr_pending,
38406- smart4_completed,
38407+ .submit_command = smart4_submit_command,
38408+ .set_intr_mask = smart4_intr_mask,
38409+ .fifo_full = smart4_fifo_full,
38410+ .intr_pending = smart4_intr_pending,
38411+ .command_completed = smart4_completed,
38412 };
38413
38414 /*
38415@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38416 }
38417
38418 static struct access_method smart2_access = {
38419- smart2_submit_command,
38420- smart2_intr_mask,
38421- smart2_fifo_full,
38422- smart2_intr_pending,
38423- smart2_completed,
38424+ .submit_command = smart2_submit_command,
38425+ .set_intr_mask = smart2_intr_mask,
38426+ .fifo_full = smart2_fifo_full,
38427+ .intr_pending = smart2_intr_pending,
38428+ .command_completed = smart2_completed,
38429 };
38430
38431 /*
38432@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38433 }
38434
38435 static struct access_method smart2e_access = {
38436- smart2e_submit_command,
38437- smart2e_intr_mask,
38438- smart2e_fifo_full,
38439- smart2e_intr_pending,
38440- smart2e_completed,
38441+ .submit_command = smart2e_submit_command,
38442+ .set_intr_mask = smart2e_intr_mask,
38443+ .fifo_full = smart2e_fifo_full,
38444+ .intr_pending = smart2e_intr_pending,
38445+ .command_completed = smart2e_completed,
38446 };
38447
38448 /*
38449@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38450 }
38451
38452 static struct access_method smart1_access = {
38453- smart1_submit_command,
38454- smart1_intr_mask,
38455- smart1_fifo_full,
38456- smart1_intr_pending,
38457- smart1_completed,
38458+ .submit_command = smart1_submit_command,
38459+ .set_intr_mask = smart1_intr_mask,
38460+ .fifo_full = smart1_fifo_full,
38461+ .intr_pending = smart1_intr_pending,
38462+ .command_completed = smart1_completed,
38463 };
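The smart1,2.h hunks above replace positional struct initializers with C99 designated initializers, so each handler is bound to a named field of struct access_method rather than to a position in the member list. A minimal userspace sketch of the same idiom (the struct shape and function names here are illustrative, not taken from the driver):

	#include <stdio.h>

	/* Stand-in for the driver's ops table. */
	struct access_method {
		void (*submit_command)(int cmd);
		int  (*fifo_full)(void);
	};

	static void demo_submit(int cmd) { printf("submit %d\n", cmd); }
	static int  demo_fifo_full(void) { return 0; }

	/* Designated initializers: reordering struct members or inserting
	 * a new one can no longer silently rebind a callback to the wrong
	 * slot, which is exactly the hazard positional initializers have. */
	static struct access_method demo_access = {
		.submit_command = demo_submit,
		.fifo_full      = demo_fifo_full,
	};

	int main(void)
	{
		if (!demo_access.fifo_full())
			demo_access.submit_command(42);
		return 0;
	}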
38464diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38465index 55c135b..9f8d60c 100644
38466--- a/drivers/bluetooth/btwilink.c
38467+++ b/drivers/bluetooth/btwilink.c
38468@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38469
38470 static int bt_ti_probe(struct platform_device *pdev)
38471 {
38472- static struct ti_st *hst;
38473+ struct ti_st *hst;
38474 struct hci_dev *hdev;
38475 int err;
38476
38477diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38478index 5d28a45..a538f90 100644
38479--- a/drivers/cdrom/cdrom.c
38480+++ b/drivers/cdrom/cdrom.c
38481@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38482 ENSURE(reset, CDC_RESET);
38483 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38484 cdi->mc_flags = 0;
38485- cdo->n_minors = 0;
38486 cdi->options = CDO_USE_FFLAGS;
38487
38488 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38489@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38490 else
38491 cdi->cdda_method = CDDA_OLD;
38492
38493- if (!cdo->generic_packet)
38494- cdo->generic_packet = cdrom_dummy_generic_packet;
38495+ if (!cdo->generic_packet) {
38496+ pax_open_kernel();
38497+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38498+ pax_close_kernel();
38499+ }
38500
38501 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38502 mutex_lock(&cdrom_mutex);
38503@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38504 if (cdi->exit)
38505 cdi->exit(cdi);
38506
38507- cdi->ops->n_minors--;
38508 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38509 }
38510
38511@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38512 */
38513 nr = nframes;
38514 do {
38515- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38516+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38517 if (cgc.buffer)
38518 break;
38519
38520@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38521 struct cdrom_device_info *cdi;
38522 int ret;
38523
38524- ret = scnprintf(info + *pos, max_size - *pos, header);
38525+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38526 if (!ret)
38527 return 1;
38528
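Besides the pax_open_kernel() write window (illustrated after the socfpga clock hunks below), the cdrom_print_info hunk above fixes a classic format-string bug: a non-literal string was passed as the format argument to scnprintf(). A small userspace sketch of why scnprintf(buf, n, header) is dangerous while scnprintf(buf, n, "%s", header) is not, using snprintf as the stand-in (the header contents are illustrative):

	#include <stdio.h>

	int main(void)
	{
		char info[64];
		/* Imagine this string arrived from a device or config. */
		const char *header = "drive speed:%n";   /* hostile contents */

		/* snprintf(info, sizeof(info), header);
		 *   ^ would interpret %n and friends -- memory corruption. */
		snprintf(info, sizeof(info), "%s", header);  /* copied verbatim */
		puts(info);
		return 0;
	}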
38529diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38530index 584bc31..e64a12c 100644
38531--- a/drivers/cdrom/gdrom.c
38532+++ b/drivers/cdrom/gdrom.c
38533@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38534 .audio_ioctl = gdrom_audio_ioctl,
38535 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38536 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38537- .n_minors = 1,
38538 };
38539
38540 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38541diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38542index efefd12..4f1d494 100644
38543--- a/drivers/char/Kconfig
38544+++ b/drivers/char/Kconfig
38545@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
38546
38547 config DEVKMEM
38548 bool "/dev/kmem virtual device support"
38549- default y
38550+ default n
38551+ depends on !GRKERNSEC_KMEM
38552 help
38553 Say Y here if you want to support the /dev/kmem device. The
38554 /dev/kmem device is rarely used, but can be used for certain
38555@@ -577,6 +578,7 @@ config DEVPORT
38556 bool
38557 depends on !M68K
38558 depends on ISA || PCI
38559+ depends on !GRKERNSEC_KMEM
38560 default y
38561
38562 source "drivers/s390/char/Kconfig"
38563diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38564index a48e05b..6bac831 100644
38565--- a/drivers/char/agp/compat_ioctl.c
38566+++ b/drivers/char/agp/compat_ioctl.c
38567@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38568 return -ENOMEM;
38569 }
38570
38571- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38572+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38573 sizeof(*usegment) * ureserve.seg_count)) {
38574 kfree(usegment);
38575 kfree(ksegment);
38576diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38577index 09f17eb..8531d2f 100644
38578--- a/drivers/char/agp/frontend.c
38579+++ b/drivers/char/agp/frontend.c
38580@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38581 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38582 return -EFAULT;
38583
38584- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38585+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38586 return -EFAULT;
38587
38588 client = agp_find_client_by_pid(reserve.pid);
38589@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38590 if (segment == NULL)
38591 return -ENOMEM;
38592
38593- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38594+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38595 sizeof(struct agp_segment) * reserve.seg_count)) {
38596 kfree(segment);
38597 return -EFAULT;
38598diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38599index 4f94375..413694e 100644
38600--- a/drivers/char/genrtc.c
38601+++ b/drivers/char/genrtc.c
38602@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38603 switch (cmd) {
38604
38605 case RTC_PLL_GET:
38606+ memset(&pll, 0, sizeof(pll));
38607 if (get_rtc_pll(&pll))
38608 return -EINVAL;
38609 else
38610diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38611index d5d4cd8..22d561d 100644
38612--- a/drivers/char/hpet.c
38613+++ b/drivers/char/hpet.c
38614@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38615 }
38616
38617 static int
38618-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38619+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38620 struct hpet_info *info)
38621 {
38622 struct hpet_timer __iomem *timer;
38623diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38624index 6b65fa4..8ebbc99 100644
38625--- a/drivers/char/ipmi/ipmi_msghandler.c
38626+++ b/drivers/char/ipmi/ipmi_msghandler.c
38627@@ -436,7 +436,7 @@ struct ipmi_smi {
38628 struct proc_dir_entry *proc_dir;
38629 char proc_dir_name[10];
38630
38631- atomic_t stats[IPMI_NUM_STATS];
38632+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38633
38634 /*
38635 * run_to_completion duplicate of smb_info, smi_info
38636@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38637 static DEFINE_MUTEX(smi_watchers_mutex);
38638
38639 #define ipmi_inc_stat(intf, stat) \
38640- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38641+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38642 #define ipmi_get_stat(intf, stat) \
38643- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38644+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38645
38646 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38647 "ACPI", "SMBIOS", "PCI",
38648@@ -2837,7 +2837,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38649 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38650 init_waitqueue_head(&intf->waitq);
38651 for (i = 0; i < IPMI_NUM_STATS; i++)
38652- atomic_set(&intf->stats[i], 0);
38653+ atomic_set_unchecked(&intf->stats[i], 0);
38654
38655 intf->proc_dir = NULL;
38656
38657diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38658index 967b73a..946e94c 100644
38659--- a/drivers/char/ipmi/ipmi_si_intf.c
38660+++ b/drivers/char/ipmi/ipmi_si_intf.c
38661@@ -284,7 +284,7 @@ struct smi_info {
38662 unsigned char slave_addr;
38663
38664 /* Counters and things for the proc filesystem. */
38665- atomic_t stats[SI_NUM_STATS];
38666+ atomic_unchecked_t stats[SI_NUM_STATS];
38667
38668 struct task_struct *thread;
38669
38670@@ -293,9 +293,9 @@ struct smi_info {
38671 };
38672
38673 #define smi_inc_stat(smi, stat) \
38674- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38675+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38676 #define smi_get_stat(smi, stat) \
38677- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38678+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38679
38680 #define SI_MAX_PARMS 4
38681
38682@@ -3412,7 +3412,7 @@ static int try_smi_init(struct smi_info *new_smi)
38683 atomic_set(&new_smi->req_events, 0);
38684 new_smi->run_to_completion = false;
38685 for (i = 0; i < SI_NUM_STATS; i++)
38686- atomic_set(&new_smi->stats[i], 0);
38687+ atomic_set_unchecked(&new_smi->stats[i], 0);
38688
38689 new_smi->interrupt_disabled = true;
38690 atomic_set(&new_smi->need_watch, 0);
38691diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38692index 4c58333..d5cca27 100644
38693--- a/drivers/char/mem.c
38694+++ b/drivers/char/mem.c
38695@@ -18,6 +18,7 @@
38696 #include <linux/raw.h>
38697 #include <linux/tty.h>
38698 #include <linux/capability.h>
38699+#include <linux/security.h>
38700 #include <linux/ptrace.h>
38701 #include <linux/device.h>
38702 #include <linux/highmem.h>
38703@@ -36,6 +37,10 @@
38704
38705 #define DEVPORT_MINOR 4
38706
38707+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38708+extern const struct file_operations grsec_fops;
38709+#endif
38710+
38711 static inline unsigned long size_inside_page(unsigned long start,
38712 unsigned long size)
38713 {
38714@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38715
38716 while (cursor < to) {
38717 if (!devmem_is_allowed(pfn)) {
38718+#ifdef CONFIG_GRKERNSEC_KMEM
38719+ gr_handle_mem_readwrite(from, to);
38720+#else
38721 printk(KERN_INFO
38722 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38723 current->comm, from, to);
38724+#endif
38725 return 0;
38726 }
38727 cursor += PAGE_SIZE;
38728@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38729 }
38730 return 1;
38731 }
38732+#elif defined(CONFIG_GRKERNSEC_KMEM)
38733+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38734+{
38735+ return 0;
38736+}
38737 #else
38738 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38739 {
38740@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38741 #endif
38742
38743 while (count > 0) {
38744- unsigned long remaining;
38745+ unsigned long remaining = 0;
38746+ char *temp;
38747
38748 sz = size_inside_page(p, count);
38749
38750@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38751 if (!ptr)
38752 return -EFAULT;
38753
38754- remaining = copy_to_user(buf, ptr, sz);
38755+#ifdef CONFIG_PAX_USERCOPY
38756+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38757+ if (!temp) {
38758+ unxlate_dev_mem_ptr(p, ptr);
38759+ return -ENOMEM;
38760+ }
38761+ remaining = probe_kernel_read(temp, ptr, sz);
38762+#else
38763+ temp = ptr;
38764+#endif
38765+
38766+ if (!remaining)
38767+ remaining = copy_to_user(buf, temp, sz);
38768+
38769+#ifdef CONFIG_PAX_USERCOPY
38770+ kfree(temp);
38771+#endif
38772+
38773 unxlate_dev_mem_ptr(p, ptr);
38774 if (remaining)
38775 return -EFAULT;
38776@@ -372,9 +404,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38777 size_t count, loff_t *ppos)
38778 {
38779 unsigned long p = *ppos;
38780- ssize_t low_count, read, sz;
38781+ ssize_t low_count, read, sz, err = 0;
38782 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
38783- int err = 0;
38784
38785 read = 0;
38786 if (p < (unsigned long) high_memory) {
38787@@ -396,6 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38788 }
38789 #endif
38790 while (low_count > 0) {
38791+ char *temp;
38792+
38793 sz = size_inside_page(p, low_count);
38794
38795 /*
38796@@ -405,7 +438,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38797 */
38798 kbuf = xlate_dev_kmem_ptr((void *)p);
38799
38800- if (copy_to_user(buf, kbuf, sz))
38801+#ifdef CONFIG_PAX_USERCOPY
38802+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38803+ if (!temp)
38804+ return -ENOMEM;
38805+ err = probe_kernel_read(temp, kbuf, sz);
38806+#else
38807+ temp = kbuf;
38808+#endif
38809+
38810+ if (!err)
38811+ err = copy_to_user(buf, temp, sz);
38812+
38813+#ifdef CONFIG_PAX_USERCOPY
38814+ kfree(temp);
38815+#endif
38816+
38817+ if (err)
38818 return -EFAULT;
38819 buf += sz;
38820 p += sz;
38821@@ -800,6 +849,9 @@ static const struct memdev {
38822 #ifdef CONFIG_PRINTK
38823 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
38824 #endif
38825+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38826+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
38827+#endif
38828 };
38829
38830 static int memory_open(struct inode *inode, struct file *filp)
38831@@ -871,7 +923,7 @@ static int __init chr_dev_init(void)
38832 continue;
38833
38834 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
38835- NULL, devlist[minor].name);
38836+ NULL, "%s", devlist[minor].name);
38837 }
38838
38839 return tty_init();
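The read_mem()/read_kmem() hunks above stage data in a kmalloc'd bounce buffer when CONFIG_PAX_USERCOPY is enabled: probe_kernel_read() fills a heap object of exactly sz bytes, and copy_to_user() then copies out of that object, whose size the USERCOPY checks can verify. A minimal userspace model of the bounce-buffer shape (copy_out() stands in for a hardened copy_to_user; all names are illustrative):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Models a hardened copy routine that knows the source object's size. */
	static int copy_out(char *dst, const char *src, size_t n, size_t obj_size)
	{
		if (n > obj_size)
			return -1;          /* what PAX_USERCOPY would refuse */
		memcpy(dst, src, n);
		return 0;
	}

	int main(void)
	{
		const char device_mem[] = "raw device bytes";
		size_t sz = sizeof(device_mem);
		char user_buf[64];

		char *temp = malloc(sz);    /* bounce buffer of known size */
		if (!temp)
			return 1;
		memcpy(temp, device_mem, sz);         /* ~ probe_kernel_read() */
		if (copy_out(user_buf, temp, sz, sz) == 0)
			puts(user_buf);
		free(temp);
		return 0;
	}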
38840diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
38841index 9df78e2..01ba9ae 100644
38842--- a/drivers/char/nvram.c
38843+++ b/drivers/char/nvram.c
38844@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
38845
38846 spin_unlock_irq(&rtc_lock);
38847
38848- if (copy_to_user(buf, contents, tmp - contents))
38849+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
38850 return -EFAULT;
38851
38852 *ppos = i;
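The nvram_read() hunk adds a defensive length check in front of copy_to_user(), refusing any copy whose computed span exceeds the fixed stack buffer. The same guard, sketched in userspace (buffer contents and the cursor value are illustrative):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char contents[16] = { 0 };
		char out[16];
		char *tmp = contents + 12;   /* cursor after filling the buffer */

		/* Belt-and-braces: even if the cursor math went wrong upstream,
		 * an oversized span is rejected before the copy happens. */
		if ((size_t)(tmp - contents) > sizeof(contents)) {
			fprintf(stderr, "refusing oversized copy\n");
			return 1;
		}
		memcpy(out, contents, (size_t)(tmp - contents));
		return 0;
	}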
38853diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
38854index 0ea9986..e7b07e4 100644
38855--- a/drivers/char/pcmcia/synclink_cs.c
38856+++ b/drivers/char/pcmcia/synclink_cs.c
38857@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38858
38859 if (debug_level >= DEBUG_LEVEL_INFO)
38860 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
38861- __FILE__, __LINE__, info->device_name, port->count);
38862+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
38863
38864 if (tty_port_close_start(port, tty, filp) == 0)
38865 goto cleanup;
38866@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38867 cleanup:
38868 if (debug_level >= DEBUG_LEVEL_INFO)
38869 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
38870- tty->driver->name, port->count);
38871+ tty->driver->name, atomic_read(&port->count));
38872 }
38873
38874 /* Wait until the transmitter is empty.
38875@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38876
38877 if (debug_level >= DEBUG_LEVEL_INFO)
38878 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
38879- __FILE__, __LINE__, tty->driver->name, port->count);
38880+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
38881
38882 /* If port is closing, signal caller to try again */
38883 if (port->flags & ASYNC_CLOSING){
38884@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38885 goto cleanup;
38886 }
38887 spin_lock(&port->lock);
38888- port->count++;
38889+ atomic_inc(&port->count);
38890 spin_unlock(&port->lock);
38891 spin_unlock_irqrestore(&info->netlock, flags);
38892
38893- if (port->count == 1) {
38894+ if (atomic_read(&port->count) == 1) {
38895 /* 1st open on this device, init hardware */
38896 retval = startup(info, tty);
38897 if (retval < 0)
38898@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38899 unsigned short new_crctype;
38900
38901 /* return error if TTY interface open */
38902- if (info->port.count)
38903+ if (atomic_read(&info->port.count))
38904 return -EBUSY;
38905
38906 switch (encoding)
38907@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
38908
38909 /* arbitrate between network and tty opens */
38910 spin_lock_irqsave(&info->netlock, flags);
38911- if (info->port.count != 0 || info->netcount != 0) {
38912+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38913 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
38914 spin_unlock_irqrestore(&info->netlock, flags);
38915 return -EBUSY;
38916@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38917 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
38918
38919 /* return error if TTY interface open */
38920- if (info->port.count)
38921+ if (atomic_read(&info->port.count))
38922 return -EBUSY;
38923
38924 if (cmd != SIOCWANDEV)
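The synclink_cs hunks convert the tty port reference count from a plain int to an atomic type so concurrent open/close paths cannot lose updates. The same before/after shape in C11 atomics, as a userspace sketch (names illustrative):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int port_count;   /* was: plain int port->count */

	static void port_open(void)  { atomic_fetch_add(&port_count, 1); }
	static void port_close(void) { atomic_fetch_sub(&port_count, 1); }

	int main(void)
	{
		port_open();
		if (atomic_load(&port_count) == 1)  /* ~ atomic_read(&port->count) */
			puts("first open on this device: init hardware");
		port_close();
		return 0;
	}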
38925diff --git a/drivers/char/random.c b/drivers/char/random.c
38926index 9cd6968..6416f00 100644
38927--- a/drivers/char/random.c
38928+++ b/drivers/char/random.c
38929@@ -289,9 +289,6 @@
38930 /*
38931 * To allow fractional bits to be tracked, the entropy_count field is
38932 * denominated in units of 1/8th bits.
38933- *
38934- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
38935- * credit_entropy_bits() needs to be 64 bits wide.
38936 */
38937 #define ENTROPY_SHIFT 3
38938 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
38939@@ -439,9 +436,9 @@ struct entropy_store {
38940 };
38941
38942 static void push_to_pool(struct work_struct *work);
38943-static __u32 input_pool_data[INPUT_POOL_WORDS];
38944-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
38945-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
38946+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
38947+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
38948+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
38949
38950 static struct entropy_store input_pool = {
38951 .poolinfo = &poolinfo_table[0],
38952@@ -635,7 +632,7 @@ retry:
38953 /* The +2 corresponds to the /4 in the denominator */
38954
38955 do {
38956- unsigned int anfrac = min(pnfrac, pool_size/2);
38957+ u64 anfrac = min(pnfrac, pool_size/2);
38958 unsigned int add =
38959 ((pool_size - entropy_count)*anfrac*3) >> s;
38960
38961@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
38962
38963 extract_buf(r, tmp);
38964 i = min_t(int, nbytes, EXTRACT_SIZE);
38965- if (copy_to_user(buf, tmp, i)) {
38966+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
38967 ret = -EFAULT;
38968 break;
38969 }
38970@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
38971 static int proc_do_uuid(struct ctl_table *table, int write,
38972 void __user *buffer, size_t *lenp, loff_t *ppos)
38973 {
38974- struct ctl_table fake_table;
38975+ ctl_table_no_const fake_table;
38976 unsigned char buf[64], tmp_uuid[16], *uuid;
38977
38978 uuid = table->data;
38979@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
38980 static int proc_do_entropy(struct ctl_table *table, int write,
38981 void __user *buffer, size_t *lenp, loff_t *ppos)
38982 {
38983- struct ctl_table fake_table;
38984+ ctl_table_no_const fake_table;
38985 int entropy_count;
38986
38987 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
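Two things happen in the random.c hunks above: the pool arrays gain __latent_entropy so the plugin seeds them, and anfrac is widened to u64 so the product (pool_size - entropy_count) * anfrac * 3 can never wrap, which is why the old "must <= 31" comment becomes obsolete and is dropped. The wraparound the widening prevents, shown with deliberately oversized, hypothetical pool numbers (the stock pools are small enough that the 32-bit product still fits):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical large pool: 65536 bits in 1/8-bit units. */
		uint32_t pool_size = 65536u << 3, entropy_count = 0;
		uint32_t anfrac32  = pool_size / 2;
		uint64_t anfrac64  = pool_size / 2;

		uint32_t wrapped = (pool_size - entropy_count) * anfrac32 * 3;
		uint64_t exact   = (uint64_t)(pool_size - entropy_count)
		                   * anfrac64 * 3;

		printf("32-bit product: %u (wrapped to zero)\n", wrapped);
		printf("64-bit product: %llu\n", (unsigned long long)exact);
		return 0;
	}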
38988diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
38989index e496dae..b793e7d 100644
38990--- a/drivers/char/sonypi.c
38991+++ b/drivers/char/sonypi.c
38992@@ -54,6 +54,7 @@
38993
38994 #include <asm/uaccess.h>
38995 #include <asm/io.h>
38996+#include <asm/local.h>
38997
38998 #include <linux/sonypi.h>
38999
39000@@ -490,7 +491,7 @@ static struct sonypi_device {
39001 spinlock_t fifo_lock;
39002 wait_queue_head_t fifo_proc_list;
39003 struct fasync_struct *fifo_async;
39004- int open_count;
39005+ local_t open_count;
39006 int model;
39007 struct input_dev *input_jog_dev;
39008 struct input_dev *input_key_dev;
39009@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39010 static int sonypi_misc_release(struct inode *inode, struct file *file)
39011 {
39012 mutex_lock(&sonypi_device.lock);
39013- sonypi_device.open_count--;
39014+ local_dec(&sonypi_device.open_count);
39015 mutex_unlock(&sonypi_device.lock);
39016 return 0;
39017 }
39018@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39019 {
39020 mutex_lock(&sonypi_device.lock);
39021 /* Flush input queue on first open */
39022- if (!sonypi_device.open_count)
39023+ if (!local_read(&sonypi_device.open_count))
39024 kfifo_reset(&sonypi_device.fifo);
39025- sonypi_device.open_count++;
39026+ local_inc(&sonypi_device.open_count);
39027 mutex_unlock(&sonypi_device.lock);
39028
39029 return 0;
39030diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39031index 565a947..dcdc06e 100644
39032--- a/drivers/char/tpm/tpm_acpi.c
39033+++ b/drivers/char/tpm/tpm_acpi.c
39034@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39035 virt = acpi_os_map_iomem(start, len);
39036 if (!virt) {
39037 kfree(log->bios_event_log);
39038+ log->bios_event_log = NULL;
39039 printk("%s: ERROR - Unable to map memory\n", __func__);
39040 return -EIO;
39041 }
39042
39043- memcpy_fromio(log->bios_event_log, virt, len);
39044+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39045
39046 acpi_os_unmap_iomem(virt, len);
39047 return 0;
39048diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39049index 3a56a13..f8cbd25 100644
39050--- a/drivers/char/tpm/tpm_eventlog.c
39051+++ b/drivers/char/tpm/tpm_eventlog.c
39052@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39053 event = addr;
39054
39055 if ((event->event_type == 0 && event->event_size == 0) ||
39056- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39057+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39058 return NULL;
39059
39060 return addr;
39061@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39062 return NULL;
39063
39064 if ((event->event_type == 0 && event->event_size == 0) ||
39065- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39066+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39067 return NULL;
39068
39069 (*pos)++;
39070@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39071 int i;
39072
39073 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39074- seq_putc(m, data[i]);
39075+ if (!seq_putc(m, data[i]))
39076+ return -EFAULT;
39077
39078 return 0;
39079 }
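The tpm_eventlog hunks flip an overflow-prone bounds check. With a huge attacker-supplied event_size, the sum addr + sizeof(struct tcpa_event) + event->event_size wraps around and compares as "in bounds"; rewriting it as event_size >= limit - addr - sizeof(...) keeps all arithmetic on the non-wrapping side. A self-contained illustration where uintptr_t arithmetic stands in for the kernel's pointer math (all values are contrived to show the wrap):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uintptr_t base  = UINTPTR_MAX - 0x1000;  /* event address      */
		uintptr_t limit = UINTPTR_MAX - 0x800;   /* end of mapped log  */
		size_t    hdr   = 32;                    /* header size        */
		size_t    size  = SIZE_MAX - 64;         /* bogus event_size   */

		int bad  = (base + hdr + size) >= limit; /* wraps: 0, accepts! */
		int good = size >= (limit - base - hdr); /* 1, rejects         */

		printf("overflow-prone check rejects: %d\n", bad);
		printf("rewritten check rejects:      %d\n", good);
		return 0;
	}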
39080diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39081index de03df9..0a309a9 100644
39082--- a/drivers/char/virtio_console.c
39083+++ b/drivers/char/virtio_console.c
39084@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39085 if (to_user) {
39086 ssize_t ret;
39087
39088- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39089+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39090 if (ret)
39091 return -EFAULT;
39092 } else {
39093@@ -788,7 +788,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39094 if (!port_has_data(port) && !port->host_connected)
39095 return 0;
39096
39097- return fill_readbuf(port, ubuf, count, true);
39098+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39099 }
39100
39101 static int wait_port_writable(struct port *port, bool nonblock)
39102diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39103index 4386697..754ceca 100644
39104--- a/drivers/clk/clk-composite.c
39105+++ b/drivers/clk/clk-composite.c
39106@@ -192,7 +192,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39107 struct clk *clk;
39108 struct clk_init_data init;
39109 struct clk_composite *composite;
39110- struct clk_ops *clk_composite_ops;
39111+ clk_ops_no_const *clk_composite_ops;
39112
39113 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39114 if (!composite) {
39115diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39116index dd3a78c..386d49c 100644
39117--- a/drivers/clk/socfpga/clk-gate.c
39118+++ b/drivers/clk/socfpga/clk-gate.c
39119@@ -22,6 +22,7 @@
39120 #include <linux/mfd/syscon.h>
39121 #include <linux/of.h>
39122 #include <linux/regmap.h>
39123+#include <asm/pgtable.h>
39124
39125 #include "clk.h"
39126
39127@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39128 return 0;
39129 }
39130
39131-static struct clk_ops gateclk_ops = {
39132+static clk_ops_no_const gateclk_ops __read_only = {
39133 .prepare = socfpga_clk_prepare,
39134 .recalc_rate = socfpga_clk_recalc_rate,
39135 .get_parent = socfpga_clk_get_parent,
39136@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39137 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39138 socfpga_clk->hw.bit_idx = clk_gate[1];
39139
39140- gateclk_ops.enable = clk_gate_ops.enable;
39141- gateclk_ops.disable = clk_gate_ops.disable;
39142+ pax_open_kernel();
39143+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39144+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39145+ pax_close_kernel();
39146 }
39147
39148 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
39149diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39150index de6da95..c98278b 100644
39151--- a/drivers/clk/socfpga/clk-pll.c
39152+++ b/drivers/clk/socfpga/clk-pll.c
39153@@ -21,6 +21,7 @@
39154 #include <linux/io.h>
39155 #include <linux/of.h>
39156 #include <linux/of_address.h>
39157+#include <asm/pgtable.h>
39158
39159 #include "clk.h"
39160
39161@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39162 CLK_MGR_PLL_CLK_SRC_MASK;
39163 }
39164
39165-static struct clk_ops clk_pll_ops = {
39166+static clk_ops_no_const clk_pll_ops __read_only = {
39167 .recalc_rate = clk_pll_recalc_rate,
39168 .get_parent = clk_pll_get_parent,
39169 };
39170@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39171 pll_clk->hw.hw.init = &init;
39172
39173 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39174- clk_pll_ops.enable = clk_gate_ops.enable;
39175- clk_pll_ops.disable = clk_gate_ops.disable;
39176+ pax_open_kernel();
39177+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39178+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39179+ pax_close_kernel();
39180
39181 clk = clk_register(NULL, &pll_clk->hw.hw);
39182 if (WARN_ON(IS_ERR(clk))) {
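The socfpga clk hunks (and the many cpufreq hunks that follow) show the constification pattern used throughout this patch: ops structures are placed in read-only memory (__read_only / __do_const), and the few legitimate late writes are funneled through pax_open_kernel()/pax_close_kernel(), with *(void **)& casts to defeat the compile-time const. A loose userspace analogy using mprotect() on a page holding one callback slot; this illustrates only the open/write/close shape, it is not kernel code:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/mman.h>

	typedef int (*prepare_fn)(void);

	static int clk_prepare_impl(void) { return 0; }

	int main(void)
	{
		long pg = sysconf(_SC_PAGESIZE);
		prepare_fn *slot = mmap(NULL, pg, PROT_READ | PROT_WRITE,
					MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (slot == MAP_FAILED)
			return 1;
		mprotect(slot, pg, PROT_READ);            /* "constify" the ops */

		if (*slot == NULL) {                      /* late one-time setup */
			mprotect(slot, pg, PROT_READ | PROT_WRITE); /* ~ pax_open_kernel()  */
			*slot = clk_prepare_impl;
			mprotect(slot, pg, PROT_READ);              /* ~ pax_close_kernel() */
		}
		return (*slot)();
	}

The point of the design is that the window of writability is a few instructions wide, instead of the ops table being writable for the whole uptime.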
39183diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39184index b0c18ed..1713a80 100644
39185--- a/drivers/cpufreq/acpi-cpufreq.c
39186+++ b/drivers/cpufreq/acpi-cpufreq.c
39187@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39188 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39189 per_cpu(acfreq_data, cpu) = data;
39190
39191- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39192- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39193+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39194+ pax_open_kernel();
39195+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39196+ pax_close_kernel();
39197+ }
39198
39199 result = acpi_processor_register_performance(data->acpi_data, cpu);
39200 if (result)
39201@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39202 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39203 break;
39204 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39205- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39206+ pax_open_kernel();
39207+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39208+ pax_close_kernel();
39209 break;
39210 default:
39211 break;
39212@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39213 if (!msrs)
39214 return;
39215
39216- acpi_cpufreq_driver.boost_supported = true;
39217- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39218+ pax_open_kernel();
39219+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39220+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39221+ pax_close_kernel();
39222
39223 cpu_notifier_register_begin();
39224
39225diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39226index fde97d6..3631eca 100644
39227--- a/drivers/cpufreq/cpufreq-dt.c
39228+++ b/drivers/cpufreq/cpufreq-dt.c
39229@@ -393,7 +393,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39230 if (!IS_ERR(cpu_reg))
39231 regulator_put(cpu_reg);
39232
39233- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39234+ pax_open_kernel();
39235+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39236+ pax_close_kernel();
39237
39238 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39239 if (ret)
39240diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39241index 7030c40..3a97de6 100644
39242--- a/drivers/cpufreq/cpufreq.c
39243+++ b/drivers/cpufreq/cpufreq.c
39244@@ -2135,7 +2135,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39245 }
39246
39247 mutex_lock(&cpufreq_governor_mutex);
39248- list_del(&governor->governor_list);
39249+ pax_list_del(&governor->governor_list);
39250 mutex_unlock(&cpufreq_governor_mutex);
39251 return;
39252 }
39253@@ -2351,7 +2351,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39254 return NOTIFY_OK;
39255 }
39256
39257-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39258+static struct notifier_block cpufreq_cpu_notifier = {
39259 .notifier_call = cpufreq_cpu_callback,
39260 };
39261
39262@@ -2391,13 +2391,17 @@ int cpufreq_boost_trigger_state(int state)
39263 return 0;
39264
39265 write_lock_irqsave(&cpufreq_driver_lock, flags);
39266- cpufreq_driver->boost_enabled = state;
39267+ pax_open_kernel();
39268+ *(bool *)&cpufreq_driver->boost_enabled = state;
39269+ pax_close_kernel();
39270 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39271
39272 ret = cpufreq_driver->set_boost(state);
39273 if (ret) {
39274 write_lock_irqsave(&cpufreq_driver_lock, flags);
39275- cpufreq_driver->boost_enabled = !state;
39276+ pax_open_kernel();
39277+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39278+ pax_close_kernel();
39279 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39280
39281 pr_err("%s: Cannot %s BOOST\n",
39282@@ -2454,8 +2458,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39283
39284 pr_debug("trying to register driver %s\n", driver_data->name);
39285
39286- if (driver_data->setpolicy)
39287- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39288+ if (driver_data->setpolicy) {
39289+ pax_open_kernel();
39290+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39291+ pax_close_kernel();
39292+ }
39293
39294 write_lock_irqsave(&cpufreq_driver_lock, flags);
39295 if (cpufreq_driver) {
39296@@ -2470,8 +2477,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39297 * Check if driver provides function to enable boost -
39298 * if not, use cpufreq_boost_set_sw as default
39299 */
39300- if (!cpufreq_driver->set_boost)
39301- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39302+ if (!cpufreq_driver->set_boost) {
39303+ pax_open_kernel();
39304+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39305+ pax_close_kernel();
39306+ }
39307
39308 ret = cpufreq_sysfs_create_file(&boost.attr);
39309 if (ret) {
39310diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39311index 1b44496..b80ff5e 100644
39312--- a/drivers/cpufreq/cpufreq_governor.c
39313+++ b/drivers/cpufreq/cpufreq_governor.c
39314@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39315 struct dbs_data *dbs_data;
39316 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39317 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39318- struct od_ops *od_ops = NULL;
39319+ const struct od_ops *od_ops = NULL;
39320 struct od_dbs_tuners *od_tuners = NULL;
39321 struct cs_dbs_tuners *cs_tuners = NULL;
39322 struct cpu_dbs_common_info *cpu_cdbs;
39323@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39324
39325 if ((cdata->governor == GOV_CONSERVATIVE) &&
39326 (!policy->governor->initialized)) {
39327- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39328+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39329
39330 cpufreq_register_notifier(cs_ops->notifier_block,
39331 CPUFREQ_TRANSITION_NOTIFIER);
39332@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39333
39334 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39335 (policy->governor->initialized == 1)) {
39336- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39337+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39338
39339 cpufreq_unregister_notifier(cs_ops->notifier_block,
39340 CPUFREQ_TRANSITION_NOTIFIER);
39341diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39342index cc401d1..8197340 100644
39343--- a/drivers/cpufreq/cpufreq_governor.h
39344+++ b/drivers/cpufreq/cpufreq_governor.h
39345@@ -212,7 +212,7 @@ struct common_dbs_data {
39346 void (*exit)(struct dbs_data *dbs_data);
39347
39348 /* Governor specific ops, see below */
39349- void *gov_ops;
39350+ const void *gov_ops;
39351 };
39352
39353 /* Governor Per policy data */
39354@@ -232,7 +232,7 @@ struct od_ops {
39355 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39356 unsigned int freq_next, unsigned int relation);
39357 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39358-};
39359+} __no_const;
39360
39361 struct cs_ops {
39362 struct notifier_block *notifier_block;
39363diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39364index ad3f38f..8f086cd 100644
39365--- a/drivers/cpufreq/cpufreq_ondemand.c
39366+++ b/drivers/cpufreq/cpufreq_ondemand.c
39367@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39368
39369 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39370
39371-static struct od_ops od_ops = {
39372+static struct od_ops od_ops __read_only = {
39373 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39374 .powersave_bias_target = generic_powersave_bias_target,
39375 .freq_increase = dbs_freq_increase,
39376@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39377 (struct cpufreq_policy *, unsigned int, unsigned int),
39378 unsigned int powersave_bias)
39379 {
39380- od_ops.powersave_bias_target = f;
39381+ pax_open_kernel();
39382+ *(void **)&od_ops.powersave_bias_target = f;
39383+ pax_close_kernel();
39384 od_set_powersave_bias(powersave_bias);
39385 }
39386 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39387
39388 void od_unregister_powersave_bias_handler(void)
39389 {
39390- od_ops.powersave_bias_target = generic_powersave_bias_target;
39391+ pax_open_kernel();
39392+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39393+ pax_close_kernel();
39394 od_set_powersave_bias(0);
39395 }
39396 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39397diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39398index 742eefb..e2fcfc8 100644
39399--- a/drivers/cpufreq/intel_pstate.c
39400+++ b/drivers/cpufreq/intel_pstate.c
39401@@ -133,10 +133,10 @@ struct pstate_funcs {
39402 struct cpu_defaults {
39403 struct pstate_adjust_policy pid_policy;
39404 struct pstate_funcs funcs;
39405-};
39406+} __do_const;
39407
39408 static struct pstate_adjust_policy pid_params;
39409-static struct pstate_funcs pstate_funcs;
39410+static struct pstate_funcs *pstate_funcs;
39411 static int hwp_active;
39412
39413 struct perf_limits {
39414@@ -653,18 +653,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39415
39416 cpu->pstate.current_pstate = pstate;
39417
39418- pstate_funcs.set(cpu, pstate);
39419+ pstate_funcs->set(cpu, pstate);
39420 }
39421
39422 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39423 {
39424- cpu->pstate.min_pstate = pstate_funcs.get_min();
39425- cpu->pstate.max_pstate = pstate_funcs.get_max();
39426- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39427- cpu->pstate.scaling = pstate_funcs.get_scaling();
39428+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39429+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39430+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39431+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39432
39433- if (pstate_funcs.get_vid)
39434- pstate_funcs.get_vid(cpu);
39435+ if (pstate_funcs->get_vid)
39436+ pstate_funcs->get_vid(cpu);
39437 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39438 }
39439
39440@@ -988,9 +988,9 @@ static int intel_pstate_msrs_not_valid(void)
39441 rdmsrl(MSR_IA32_APERF, aperf);
39442 rdmsrl(MSR_IA32_MPERF, mperf);
39443
39444- if (!pstate_funcs.get_max() ||
39445- !pstate_funcs.get_min() ||
39446- !pstate_funcs.get_turbo())
39447+ if (!pstate_funcs->get_max() ||
39448+ !pstate_funcs->get_min() ||
39449+ !pstate_funcs->get_turbo())
39450 return -ENODEV;
39451
39452 rdmsrl(MSR_IA32_APERF, tmp);
39453@@ -1004,7 +1004,7 @@ static int intel_pstate_msrs_not_valid(void)
39454 return 0;
39455 }
39456
39457-static void copy_pid_params(struct pstate_adjust_policy *policy)
39458+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39459 {
39460 pid_params.sample_rate_ms = policy->sample_rate_ms;
39461 pid_params.p_gain_pct = policy->p_gain_pct;
39462@@ -1016,12 +1016,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39463
39464 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39465 {
39466- pstate_funcs.get_max = funcs->get_max;
39467- pstate_funcs.get_min = funcs->get_min;
39468- pstate_funcs.get_turbo = funcs->get_turbo;
39469- pstate_funcs.get_scaling = funcs->get_scaling;
39470- pstate_funcs.set = funcs->set;
39471- pstate_funcs.get_vid = funcs->get_vid;
39472+ pstate_funcs = funcs;
39473 }
39474
39475 #if IS_ENABLED(CONFIG_ACPI)
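The intel_pstate hunks stop copying a callbacks struct field by field into a writable global and instead keep a single pointer to the per-CPU-model table, which can then stay const for its whole lifetime. The shape of the change, as a userspace sketch with illustrative types and names:

	#include <stdio.h>

	struct pstate_funcs {
		int (*get_min)(void);
		int (*get_max)(void);
	};

	static int core_get_min(void) { return 8; }
	static int core_get_max(void) { return 35; }

	static const struct pstate_funcs core_funcs = {
		.get_min = core_get_min,
		.get_max = core_get_max,
	};

	/* Was: a writable struct filled member by member. Now: one pointer. */
	static const struct pstate_funcs *pstate_funcs;

	static void copy_cpu_funcs(const struct pstate_funcs *funcs)
	{
		pstate_funcs = funcs;   /* one store, no writable callback slots */
	}

	int main(void)
	{
		copy_cpu_funcs(&core_funcs);
		printf("pstates %d..%d\n", pstate_funcs->get_min(),
		       pstate_funcs->get_max());
		return 0;
	}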
39476diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39477index 529cfd9..0e28fff 100644
39478--- a/drivers/cpufreq/p4-clockmod.c
39479+++ b/drivers/cpufreq/p4-clockmod.c
39480@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39481 case 0x0F: /* Core Duo */
39482 case 0x16: /* Celeron Core */
39483 case 0x1C: /* Atom */
39484- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39485+ pax_open_kernel();
39486+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39487+ pax_close_kernel();
39488 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39489 case 0x0D: /* Pentium M (Dothan) */
39490- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39491+ pax_open_kernel();
39492+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39493+ pax_close_kernel();
39494 /* fall through */
39495 case 0x09: /* Pentium M (Banias) */
39496 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39497@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39498
39499 /* on P-4s, the TSC runs with constant frequency independent whether
39500 * throttling is active or not. */
39501- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39502+ pax_open_kernel();
39503+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39504+ pax_close_kernel();
39505
39506 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39507 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39508diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39509index 9bb42ba..b01b4a2 100644
39510--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39511+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39512@@ -18,14 +18,12 @@
39513 #include <asm/head.h>
39514 #include <asm/timer.h>
39515
39516-static struct cpufreq_driver *cpufreq_us3_driver;
39517-
39518 struct us3_freq_percpu_info {
39519 struct cpufreq_frequency_table table[4];
39520 };
39521
39522 /* Indexed by cpu number. */
39523-static struct us3_freq_percpu_info *us3_freq_table;
39524+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39525
39526 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39527 * in the Safari config register.
39528@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39529
39530 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39531 {
39532- if (cpufreq_us3_driver)
39533- us3_freq_target(policy, 0);
39534+ us3_freq_target(policy, 0);
39535
39536 return 0;
39537 }
39538
39539+static int __init us3_freq_init(void);
39540+static void __exit us3_freq_exit(void);
39541+
39542+static struct cpufreq_driver cpufreq_us3_driver = {
39543+ .init = us3_freq_cpu_init,
39544+ .verify = cpufreq_generic_frequency_table_verify,
39545+ .target_index = us3_freq_target,
39546+ .get = us3_freq_get,
39547+ .exit = us3_freq_cpu_exit,
39548+ .name = "UltraSPARC-III",
39549+
39550+};
39551+
39552 static int __init us3_freq_init(void)
39553 {
39554 unsigned long manuf, impl, ver;
39555- int ret;
39556
39557 if (tlb_type != cheetah && tlb_type != cheetah_plus)
39558 return -ENODEV;
39559@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
39560 (impl == CHEETAH_IMPL ||
39561 impl == CHEETAH_PLUS_IMPL ||
39562 impl == JAGUAR_IMPL ||
39563- impl == PANTHER_IMPL)) {
39564- struct cpufreq_driver *driver;
39565-
39566- ret = -ENOMEM;
39567- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39568- if (!driver)
39569- goto err_out;
39570-
39571- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39572- GFP_KERNEL);
39573- if (!us3_freq_table)
39574- goto err_out;
39575-
39576- driver->init = us3_freq_cpu_init;
39577- driver->verify = cpufreq_generic_frequency_table_verify;
39578- driver->target_index = us3_freq_target;
39579- driver->get = us3_freq_get;
39580- driver->exit = us3_freq_cpu_exit;
39581- strcpy(driver->name, "UltraSPARC-III");
39582-
39583- cpufreq_us3_driver = driver;
39584- ret = cpufreq_register_driver(driver);
39585- if (ret)
39586- goto err_out;
39587-
39588- return 0;
39589-
39590-err_out:
39591- if (driver) {
39592- kfree(driver);
39593- cpufreq_us3_driver = NULL;
39594- }
39595- kfree(us3_freq_table);
39596- us3_freq_table = NULL;
39597- return ret;
39598- }
39599+ impl == PANTHER_IMPL))
39600+ return cpufreq_register_driver(&cpufreq_us3_driver);
39601
39602 return -ENODEV;
39603 }
39604
39605 static void __exit us3_freq_exit(void)
39606 {
39607- if (cpufreq_us3_driver) {
39608- cpufreq_unregister_driver(cpufreq_us3_driver);
39609- kfree(cpufreq_us3_driver);
39610- cpufreq_us3_driver = NULL;
39611- kfree(us3_freq_table);
39612- us3_freq_table = NULL;
39613- }
39614+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39615 }
39616
39617 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
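The sparc-us3 hunk replaces a kzalloc'd driver object, which can never be write-protected, with a statically allocated, designated-initializer driver and a fixed NR_CPUS-sized table for the per-CPU data; the -ENOMEM error paths disappear with the allocations. A minimal sketch of the shape, with illustrative names and a made-up NR_CPUS:

	#include <stdio.h>

	#define NR_CPUS 4

	struct freq_driver { const char *name; int (*init)(int cpu); };
	struct percpu_info { int table[4]; };

	static int us3_init(int cpu) { return cpu; }

	static struct freq_driver driver = {        /* static: constifiable */
		.name = "UltraSPARC-III",
		.init = us3_init,
	};
	static struct percpu_info freq_table[NR_CPUS]; /* no kzalloc, no ENOMEM */

	int main(void)
	{
		freq_table[0].table[0] = driver.init(0);
		printf("%s cpu0 -> %d\n", driver.name, freq_table[0].table[0]);
		return 0;
	}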
39618diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39619index 7d4a315..21bb886 100644
39620--- a/drivers/cpufreq/speedstep-centrino.c
39621+++ b/drivers/cpufreq/speedstep-centrino.c
39622@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39623 !cpu_has(cpu, X86_FEATURE_EST))
39624 return -ENODEV;
39625
39626- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39627- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39628+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39629+ pax_open_kernel();
39630+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39631+ pax_close_kernel();
39632+ }
39633
39634 if (policy->cpu != 0)
39635 return -ENODEV;
39636diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39637index 2697e87..c32476c 100644
39638--- a/drivers/cpuidle/driver.c
39639+++ b/drivers/cpuidle/driver.c
39640@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
39641
39642 static void poll_idle_init(struct cpuidle_driver *drv)
39643 {
39644- struct cpuidle_state *state = &drv->states[0];
39645+ cpuidle_state_no_const *state = &drv->states[0];
39646
39647 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39648 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39649diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39650index fb9f511..213e6cc 100644
39651--- a/drivers/cpuidle/governor.c
39652+++ b/drivers/cpuidle/governor.c
39653@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39654 mutex_lock(&cpuidle_lock);
39655 if (__cpuidle_find_governor(gov->name) == NULL) {
39656 ret = 0;
39657- list_add_tail(&gov->governor_list, &cpuidle_governors);
39658+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39659 if (!cpuidle_curr_governor ||
39660 cpuidle_curr_governor->rating < gov->rating)
39661 cpuidle_switch_governor(gov);
39662diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39663index 97c5903..023ad23 100644
39664--- a/drivers/cpuidle/sysfs.c
39665+++ b/drivers/cpuidle/sysfs.c
39666@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39667 NULL
39668 };
39669
39670-static struct attribute_group cpuidle_attr_group = {
39671+static attribute_group_no_const cpuidle_attr_group = {
39672 .attrs = cpuidle_default_attrs,
39673 .name = "cpuidle",
39674 };
39675diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39676index 8d2a772..33826c9 100644
39677--- a/drivers/crypto/hifn_795x.c
39678+++ b/drivers/crypto/hifn_795x.c
39679@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39680 MODULE_PARM_DESC(hifn_pll_ref,
39681 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39682
39683-static atomic_t hifn_dev_number;
39684+static atomic_unchecked_t hifn_dev_number;
39685
39686 #define ACRYPTO_OP_DECRYPT 0
39687 #define ACRYPTO_OP_ENCRYPT 1
39688@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39689 goto err_out_disable_pci_device;
39690
39691 snprintf(name, sizeof(name), "hifn%d",
39692- atomic_inc_return(&hifn_dev_number)-1);
39693+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39694
39695 err = pci_request_regions(pdev, name);
39696 if (err)
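The atomic_t to atomic_unchecked_t conversions seen here (hifn_dev_number) and in the edac/firewire hunks below mark counters that are mere IDs or statistics, where wraparound is harmless; under PaX's REFCOUNT hardening a plain atomic_t that overflows traps, so only true reference counts keep the checked type. A userspace illustration of the checked/unchecked split, assuming the GCC/Clang __builtin_add_overflow builtin (names illustrative):

	#include <stdio.h>
	#include <limits.h>

	/* Models a REFCOUNT-hardened atomic_inc: overflow is reported
	 * instead of wrapping. */
	static int checked_inc(int *v)
	{
		int next;
		if (__builtin_add_overflow(*v, 1, &next))
			return -1;
		*v = next;
		return 0;
	}

	/* Models atomic_inc_unchecked: wraparound is acceptable here. */
	static void unchecked_inc(unsigned int *v) { (*v)++; }

	int main(void)
	{
		int refcount = INT_MAX;
		unsigned int devnum = UINT_MAX;    /* e.g. a device number */

		if (checked_inc(&refcount) < 0)
			puts("refcount overflow caught, object stays alive");
		unchecked_inc(&devnum);            /* numbering may wrap */
		printf("devnum now %u\n", devnum);
		return 0;
	}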
39697diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39698index 30b538d8..1610d75 100644
39699--- a/drivers/devfreq/devfreq.c
39700+++ b/drivers/devfreq/devfreq.c
39701@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39702 goto err_out;
39703 }
39704
39705- list_add(&governor->node, &devfreq_governor_list);
39706+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39707
39708 list_for_each_entry(devfreq, &devfreq_list, node) {
39709 int ret = 0;
39710@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39711 }
39712 }
39713
39714- list_del(&governor->node);
39715+ pax_list_del((struct list_head *)&governor->node);
39716 err_out:
39717 mutex_unlock(&devfreq_list_lock);
39718
39719diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
39720index 3a2adb1..b3be9a3 100644
39721--- a/drivers/dma/sh/shdma-base.c
39722+++ b/drivers/dma/sh/shdma-base.c
39723@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
39724 schan->slave_id = -EINVAL;
39725 }
39726
39727- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
39728- sdev->desc_size, GFP_KERNEL);
39729+ schan->desc = kcalloc(sdev->desc_size,
39730+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
39731 if (!schan->desc) {
39732 ret = -ENOMEM;
39733 goto edescalloc;
39734diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39735index aec8a84..7b45a1f 100644
39736--- a/drivers/dma/sh/shdmac.c
39737+++ b/drivers/dma/sh/shdmac.c
39738@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39739 return ret;
39740 }
39741
39742-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39743+static struct notifier_block sh_dmae_nmi_notifier = {
39744 .notifier_call = sh_dmae_nmi_handler,
39745
39746 /* Run before NMI debug handler and KGDB */
39747diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
39748index 592af5f..bb1d583 100644
39749--- a/drivers/edac/edac_device.c
39750+++ b/drivers/edac/edac_device.c
39751@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
39752 */
39753 int edac_device_alloc_index(void)
39754 {
39755- static atomic_t device_indexes = ATOMIC_INIT(0);
39756+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
39757
39758- return atomic_inc_return(&device_indexes) - 1;
39759+ return atomic_inc_return_unchecked(&device_indexes) - 1;
39760 }
39761 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
39762
39763diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
39764index 670d282..6675f4d 100644
39765--- a/drivers/edac/edac_mc_sysfs.c
39766+++ b/drivers/edac/edac_mc_sysfs.c
39767@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
39768 struct dev_ch_attribute {
39769 struct device_attribute attr;
39770 int channel;
39771-};
39772+} __do_const;
39773
39774 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
39775 struct dev_ch_attribute dev_attr_legacy_##_name = \
39776@@ -1011,14 +1011,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
39777 }
39778
39779 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
39780+ pax_open_kernel();
39781 if (mci->get_sdram_scrub_rate) {
39782- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39783- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39784+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39785+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39786 }
39787 if (mci->set_sdram_scrub_rate) {
39788- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39789- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39790+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39791+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39792 }
39793+ pax_close_kernel();
39794 err = device_create_file(&mci->dev,
39795 &dev_attr_sdram_scrub_rate);
39796 if (err) {
39797diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
39798index 2cf44b4d..6dd2dc7 100644
39799--- a/drivers/edac/edac_pci.c
39800+++ b/drivers/edac/edac_pci.c
39801@@ -29,7 +29,7 @@
39802
39803 static DEFINE_MUTEX(edac_pci_ctls_mutex);
39804 static LIST_HEAD(edac_pci_list);
39805-static atomic_t pci_indexes = ATOMIC_INIT(0);
39806+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
39807
39808 /*
39809 * edac_pci_alloc_ctl_info
39810@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
39811 */
39812 int edac_pci_alloc_index(void)
39813 {
39814- return atomic_inc_return(&pci_indexes) - 1;
39815+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
39816 }
39817 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
39818
39819diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
39820index 24d877f..4e30133 100644
39821--- a/drivers/edac/edac_pci_sysfs.c
39822+++ b/drivers/edac/edac_pci_sysfs.c
39823@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
39824 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
39825 static int edac_pci_poll_msec = 1000; /* one second workq period */
39826
39827-static atomic_t pci_parity_count = ATOMIC_INIT(0);
39828-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
39829+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
39830+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
39831
39832 static struct kobject *edac_pci_top_main_kobj;
39833 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
39834@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
39835 void *value;
39836 ssize_t(*show) (void *, char *);
39837 ssize_t(*store) (void *, const char *, size_t);
39838-};
39839+} __do_const;
39840
39841 /* Set of show/store abstract level functions for PCI Parity object */
39842 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
39843@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39844 edac_printk(KERN_CRIT, EDAC_PCI,
39845 "Signaled System Error on %s\n",
39846 pci_name(dev));
39847- atomic_inc(&pci_nonparity_count);
39848+ atomic_inc_unchecked(&pci_nonparity_count);
39849 }
39850
39851 if (status & (PCI_STATUS_PARITY)) {
39852@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39853 "Master Data Parity Error on %s\n",
39854 pci_name(dev));
39855
39856- atomic_inc(&pci_parity_count);
39857+ atomic_inc_unchecked(&pci_parity_count);
39858 }
39859
39860 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39861@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39862 "Detected Parity Error on %s\n",
39863 pci_name(dev));
39864
39865- atomic_inc(&pci_parity_count);
39866+ atomic_inc_unchecked(&pci_parity_count);
39867 }
39868 }
39869
39870@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39871 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
39872 "Signaled System Error on %s\n",
39873 pci_name(dev));
39874- atomic_inc(&pci_nonparity_count);
39875+ atomic_inc_unchecked(&pci_nonparity_count);
39876 }
39877
39878 if (status & (PCI_STATUS_PARITY)) {
39879@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39880 "Master Data Parity Error on "
39881 "%s\n", pci_name(dev));
39882
39883- atomic_inc(&pci_parity_count);
39884+ atomic_inc_unchecked(&pci_parity_count);
39885 }
39886
39887 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39888@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39889 "Detected Parity Error on %s\n",
39890 pci_name(dev));
39891
39892- atomic_inc(&pci_parity_count);
39893+ atomic_inc_unchecked(&pci_parity_count);
39894 }
39895 }
39896 }
39897@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
39898 if (!check_pci_errors)
39899 return;
39900
39901- before_count = atomic_read(&pci_parity_count);
39902+ before_count = atomic_read_unchecked(&pci_parity_count);
39903
39904 /* scan all PCI devices looking for a Parity Error on devices and
39905 * bridges.
39906@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
39907 /* Only if operator has selected panic on PCI Error */
39908 if (edac_pci_get_panic_on_pe()) {
39909 /* If the count is different 'after' from 'before' */
39910- if (before_count != atomic_read(&pci_parity_count))
39911+ if (before_count != atomic_read_unchecked(&pci_parity_count))
39912 panic("EDAC: PCI Parity Error");
39913 }
39914 }
39915diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
39916index c2359a1..8bd119d 100644
39917--- a/drivers/edac/mce_amd.h
39918+++ b/drivers/edac/mce_amd.h
39919@@ -74,7 +74,7 @@ struct amd_decoder_ops {
39920 bool (*mc0_mce)(u16, u8);
39921 bool (*mc1_mce)(u16, u8);
39922 bool (*mc2_mce)(u16, u8);
39923-};
39924+} __no_const;
39925
39926 void amd_report_gart_errors(bool);
39927 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
39928diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
39929index 57ea7f4..af06b76 100644
39930--- a/drivers/firewire/core-card.c
39931+++ b/drivers/firewire/core-card.c
39932@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
39933 const struct fw_card_driver *driver,
39934 struct device *device)
39935 {
39936- static atomic_t index = ATOMIC_INIT(-1);
39937+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
39938
39939- card->index = atomic_inc_return(&index);
39940+ card->index = atomic_inc_return_unchecked(&index);
39941 card->driver = driver;
39942 card->device = device;
39943 card->current_tlabel = 0;
39944@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
39945
39946 void fw_core_remove_card(struct fw_card *card)
39947 {
39948- struct fw_card_driver dummy_driver = dummy_driver_template;
39949+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
39950
39951 card->driver->update_phy_reg(card, 4,
39952 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
39953diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
39954index f9e3aee..269dbdb 100644
39955--- a/drivers/firewire/core-device.c
39956+++ b/drivers/firewire/core-device.c
39957@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
39958 struct config_rom_attribute {
39959 struct device_attribute attr;
39960 u32 key;
39961-};
39962+} __do_const;
39963
39964 static ssize_t show_immediate(struct device *dev,
39965 struct device_attribute *dattr, char *buf)
39966diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
39967index eb6935c..3cc2bfa 100644
39968--- a/drivers/firewire/core-transaction.c
39969+++ b/drivers/firewire/core-transaction.c
39970@@ -38,6 +38,7 @@
39971 #include <linux/timer.h>
39972 #include <linux/types.h>
39973 #include <linux/workqueue.h>
39974+#include <linux/sched.h>
39975
39976 #include <asm/byteorder.h>
39977
39978diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
39979index e1480ff6..1a429bd 100644
39980--- a/drivers/firewire/core.h
39981+++ b/drivers/firewire/core.h
39982@@ -111,6 +111,7 @@ struct fw_card_driver {
39983
39984 int (*stop_iso)(struct fw_iso_context *ctx);
39985 };
39986+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
39987
39988 void fw_card_initialize(struct fw_card *card,
39989 const struct fw_card_driver *driver, struct device *device);
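
[annotation] fw_core_remove_card() needs a stack copy of an ops table that it can edit, but the constify plugin makes every struct fw_card_driver instance const; the __no_const typedef added above carves out the one writable variant. A stripped-down sketch of the pattern, assuming plain C with the plugin's effect written out by hand:

struct ops {
	void (*start)(void);
	void (*stop)(void);
};
/* what fw_card_driver_no_const provides: a variant the constify
 * plugin leaves mutable, for short-lived stack copies */
typedef struct ops ops_no_const;

static void noop(void) { }
static const struct ops driver_template = { noop, noop };

void remove_card(void)
{
	ops_no_const dummy = driver_template; /* mutable copy on the stack   */
	dummy.stop = noop;                    /* legal: the copy is not const */
	dummy.stop();
}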
39990diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
39991index aff9018..fc87ded 100644
39992--- a/drivers/firewire/ohci.c
39993+++ b/drivers/firewire/ohci.c
39994@@ -2054,10 +2054,12 @@ static void bus_reset_work(struct work_struct *work)
39995 be32_to_cpu(ohci->next_header));
39996 }
39997
39998+#ifndef CONFIG_GRKERNSEC
39999 if (param_remote_dma) {
40000 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40001 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40002 }
40003+#endif
40004
40005 spin_unlock_irq(&ohci->lock);
40006
40007@@ -2589,8 +2591,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40008 unsigned long flags;
40009 int n, ret = 0;
40010
40011+#ifndef CONFIG_GRKERNSEC
40012 if (param_remote_dma)
40013 return 0;
40014+#endif
40015
40016 /*
40017 * FIXME: Make sure this bitmask is cleared when we clear the busReset
40018diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40019index 94a58a0..f5eba42 100644
40020--- a/drivers/firmware/dmi-id.c
40021+++ b/drivers/firmware/dmi-id.c
40022@@ -16,7 +16,7 @@
40023 struct dmi_device_attribute{
40024 struct device_attribute dev_attr;
40025 int field;
40026-};
40027+} __do_const;
40028 #define to_dmi_dev_attr(_dev_attr) \
40029 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40030
40031diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40032index c5f7b4e..74bc7c9 100644
40033--- a/drivers/firmware/dmi_scan.c
40034+++ b/drivers/firmware/dmi_scan.c
40035@@ -900,7 +900,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40036 if (buf == NULL)
40037 return -1;
40038
40039- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40040+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40041
40042 dmi_unmap(buf);
40043 return 0;
40044diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40045index 4fd9961..52d60ce 100644
40046--- a/drivers/firmware/efi/cper.c
40047+++ b/drivers/firmware/efi/cper.c
40048@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40049 */
40050 u64 cper_next_record_id(void)
40051 {
40052- static atomic64_t seq;
40053+ static atomic64_unchecked_t seq;
40054
40055- if (!atomic64_read(&seq))
40056- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40057+ if (!atomic64_read_unchecked(&seq))
40058+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40059
40060- return atomic64_inc_return(&seq);
40061+ return atomic64_inc_return_unchecked(&seq);
40062 }
40063 EXPORT_SYMBOL_GPL(cper_next_record_id);
40064
40065diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40066index 9035c1b..aff45f8 100644
40067--- a/drivers/firmware/efi/efi.c
40068+++ b/drivers/firmware/efi/efi.c
40069@@ -151,14 +151,16 @@ static struct attribute_group efi_subsys_attr_group = {
40070 };
40071
40072 static struct efivars generic_efivars;
40073-static struct efivar_operations generic_ops;
40074+static efivar_operations_no_const generic_ops __read_only;
40075
40076 static int generic_ops_register(void)
40077 {
40078- generic_ops.get_variable = efi.get_variable;
40079- generic_ops.set_variable = efi.set_variable;
40080- generic_ops.get_next_variable = efi.get_next_variable;
40081- generic_ops.query_variable_store = efi_query_variable_store;
40082+ pax_open_kernel();
40083+ *(void **)&generic_ops.get_variable = efi.get_variable;
40084+ *(void **)&generic_ops.set_variable = efi.set_variable;
40085+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40086+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40087+ pax_close_kernel();
40088
40089 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40090 }
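
[annotation] Once generic_ops is marked __read_only it lives on write-protected pages, so the one-time setup stores above are bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift the protection. A userspace model of the same idea, assuming mprotect() as a stand-in for the kernel's page-permission toggling:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void open_kernel(void *p, size_t n)  { mprotect(p, n, PROT_READ | PROT_WRITE); }
static void close_kernel(void *p, size_t n) { mprotect(p, n, PROT_READ); }

int main(void)
{
	size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
	/* stands in for the __read_only generic_ops object */
	char *ops = mmap(NULL, pagesz, PROT_READ,
	                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ops == MAP_FAILED)
		return 1;

	open_kernel(ops, pagesz);       /* pax_open_kernel()  */
	strcpy(ops, "get_variable");    /* the one-time setup */
	close_kernel(ops, pagesz);      /* pax_close_kernel() */

	puts(ops);                      /* reads still work   */
	/* ops[0] = 'x'; would now fault, as a stray write should */
	return 0;
}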
40091diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40092index f256ecd..387dcb1 100644
40093--- a/drivers/firmware/efi/efivars.c
40094+++ b/drivers/firmware/efi/efivars.c
40095@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40096 static int
40097 create_efivars_bin_attributes(void)
40098 {
40099- struct bin_attribute *attr;
40100+ bin_attribute_no_const *attr;
40101 int error;
40102
40103 /* new_var */
40104diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40105index 2f569aa..c95f4fb 100644
40106--- a/drivers/firmware/google/memconsole.c
40107+++ b/drivers/firmware/google/memconsole.c
40108@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40109 if (!found_memconsole())
40110 return -ENODEV;
40111
40112- memconsole_bin_attr.size = memconsole_length;
40113+ pax_open_kernel();
40114+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40115+ pax_close_kernel();
40116+
40117 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40118 }
40119
40120diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40121index 3cfcfc6..09d6f117 100644
40122--- a/drivers/gpio/gpio-em.c
40123+++ b/drivers/gpio/gpio-em.c
40124@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40125 struct em_gio_priv *p;
40126 struct resource *io[2], *irq[2];
40127 struct gpio_chip *gpio_chip;
40128- struct irq_chip *irq_chip;
40129+ irq_chip_no_const *irq_chip;
40130 const char *name = dev_name(&pdev->dev);
40131 int ret;
40132
40133diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40134index 7818cd1..1be40e5 100644
40135--- a/drivers/gpio/gpio-ich.c
40136+++ b/drivers/gpio/gpio-ich.c
40137@@ -94,7 +94,7 @@ struct ichx_desc {
40138 * this option allows driver caching written output values
40139 */
40140 bool use_outlvl_cache;
40141-};
40142+} __do_const;
40143
40144 static struct {
40145 spinlock_t lock;
40146diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40147index f476ae2..05e1bdd 100644
40148--- a/drivers/gpio/gpio-omap.c
40149+++ b/drivers/gpio/gpio-omap.c
40150@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40151 const struct omap_gpio_platform_data *pdata;
40152 struct resource *res;
40153 struct gpio_bank *bank;
40154- struct irq_chip *irqc;
40155+ irq_chip_no_const *irqc;
40156 int ret;
40157
40158 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40159diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40160index 584484e..e26ebd6 100644
40161--- a/drivers/gpio/gpio-rcar.c
40162+++ b/drivers/gpio/gpio-rcar.c
40163@@ -366,7 +366,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40164 struct gpio_rcar_priv *p;
40165 struct resource *io, *irq;
40166 struct gpio_chip *gpio_chip;
40167- struct irq_chip *irq_chip;
40168+ irq_chip_no_const *irq_chip;
40169 struct device *dev = &pdev->dev;
40170 const char *name = dev_name(dev);
40171 int ret;
40172diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40173index c1caa45..f0f97d2 100644
40174--- a/drivers/gpio/gpio-vr41xx.c
40175+++ b/drivers/gpio/gpio-vr41xx.c
40176@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40177 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40178 maskl, pendl, maskh, pendh);
40179
40180- atomic_inc(&irq_err_count);
40181+ atomic_inc_unchecked(&irq_err_count);
40182
40183 return -EINVAL;
40184 }
40185diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40186index 568aa2b..d1204d8 100644
40187--- a/drivers/gpio/gpiolib.c
40188+++ b/drivers/gpio/gpiolib.c
40189@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40190 }
40191
40192 if (gpiochip->irqchip) {
40193- gpiochip->irqchip->irq_request_resources = NULL;
40194- gpiochip->irqchip->irq_release_resources = NULL;
40195+ pax_open_kernel();
40196+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40197+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40198+ pax_close_kernel();
40199 gpiochip->irqchip = NULL;
40200 }
40201 }
40202@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40203 gpiochip->irqchip = NULL;
40204 return -EINVAL;
40205 }
40206- irqchip->irq_request_resources = gpiochip_irq_reqres;
40207- irqchip->irq_release_resources = gpiochip_irq_relres;
40208+
40209+ pax_open_kernel();
40210+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40211+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40212+ pax_close_kernel();
40213
40214 /*
40215 * Prepare the mapping since the irqchip shall be orthogonal to
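
[annotation] The *(void **)& casts in the gpiolib hunks are the recurring grsecurity idiom for assigning to a function-pointer member after the constify plugin has qualified it const: the cast removes the qualifier for a single store, and the surrounding pax_open_kernel() pair makes the page writable while it happens. In isolation (userspace sketch, no page protection involved):

struct irq_chip_sketch {
	/* the constify plugin effectively adds const to members like this */
	void (* const irq_request_resources)(void);
};

static void reqres(void) { }

void wire_up(struct irq_chip_sketch *chip)
{
	/* cast away the qualifier for one store; ISO C calls this undefined
	 * if the object was defined const, which is why the kernel also has
	 * to reopen the page between pax_open_kernel()/pax_close_kernel() */
	*(void **)&chip->irq_request_resources = (void *)reqres;
}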
40216diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40217index 5213da4..7ef736e 100644
40218--- a/drivers/gpu/drm/drm_crtc.c
40219+++ b/drivers/gpu/drm/drm_crtc.c
40220@@ -3961,7 +3961,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40221 goto done;
40222 }
40223
40224- if (copy_to_user(&enum_ptr[copied].name,
40225+ if (copy_to_user(enum_ptr[copied].name,
40226 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40227 ret = -EFAULT;
40228 goto done;
40229diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40230index 4f41377..ee33f40 100644
40231--- a/drivers/gpu/drm/drm_drv.c
40232+++ b/drivers/gpu/drm/drm_drv.c
40233@@ -444,7 +444,7 @@ void drm_unplug_dev(struct drm_device *dev)
40234
40235 drm_device_set_unplugged(dev);
40236
40237- if (dev->open_count == 0) {
40238+ if (local_read(&dev->open_count) == 0) {
40239 drm_put_dev(dev);
40240 }
40241 mutex_unlock(&drm_global_mutex);
40242diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40243index 0b9514b..6acd174 100644
40244--- a/drivers/gpu/drm/drm_fops.c
40245+++ b/drivers/gpu/drm/drm_fops.c
40246@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40247 return PTR_ERR(minor);
40248
40249 dev = minor->dev;
40250- if (!dev->open_count++)
40251+ if (local_inc_return(&dev->open_count) == 1)
40252 need_setup = 1;
40253
40254 /* share address_space across all char-devs of a single device */
40255@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40256 return 0;
40257
40258 err_undo:
40259- dev->open_count--;
40260+ local_dec(&dev->open_count);
40261 drm_minor_release(minor);
40262 return retcode;
40263 }
40264@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40265
40266 mutex_lock(&drm_global_mutex);
40267
40268- DRM_DEBUG("open_count = %d\n", dev->open_count);
40269+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40270
40271 mutex_lock(&dev->struct_mutex);
40272 list_del(&file_priv->lhead);
40273@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40274 * Begin inline drm_release
40275 */
40276
40277- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40278+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40279 task_pid_nr(current),
40280 (long)old_encode_dev(file_priv->minor->kdev->devt),
40281- dev->open_count);
40282+ local_read(&dev->open_count));
40283
40284 /* Release any auth tokens that might point to this file_priv,
40285 (do that under the drm_global_mutex) */
40286@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
40287 * End inline drm_release
40288 */
40289
40290- if (!--dev->open_count) {
40291+ if (local_dec_and_test(&dev->open_count)) {
40292 retcode = drm_lastclose(dev);
40293 if (drm_device_is_unplugged(dev))
40294 drm_put_dev(dev);
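
[annotation] open_count becomes a local_t so PaX's overflow checking can ignore it; the subtle part is translating the old pre/post-increment tests. !dev->open_count++ is true exactly when the counter moves 0 to 1, and !--dev->open_count when it moves 1 to 0, which is why the replacements read local_inc_return(...) == 1 and local_dec_and_test(...). The same translation with plain integers:

static long open_count;

static int is_first_open(void)
{
	/* was: !dev->open_count++  (old value 0, i.e. new value 1) */
	return ++open_count == 1;
}

static int is_last_close(void)
{
	/* was: !--dev->open_count  (new value 0) */
	return --open_count == 0;
}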
40295diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40296index 3d2e91c..d31c4c9 100644
40297--- a/drivers/gpu/drm/drm_global.c
40298+++ b/drivers/gpu/drm/drm_global.c
40299@@ -36,7 +36,7 @@
40300 struct drm_global_item {
40301 struct mutex mutex;
40302 void *object;
40303- int refcount;
40304+ atomic_t refcount;
40305 };
40306
40307 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40308@@ -49,7 +49,7 @@ void drm_global_init(void)
40309 struct drm_global_item *item = &glob[i];
40310 mutex_init(&item->mutex);
40311 item->object = NULL;
40312- item->refcount = 0;
40313+ atomic_set(&item->refcount, 0);
40314 }
40315 }
40316
40317@@ -59,7 +59,7 @@ void drm_global_release(void)
40318 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40319 struct drm_global_item *item = &glob[i];
40320 BUG_ON(item->object != NULL);
40321- BUG_ON(item->refcount != 0);
40322+ BUG_ON(atomic_read(&item->refcount) != 0);
40323 }
40324 }
40325
40326@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40327 struct drm_global_item *item = &glob[ref->global_type];
40328
40329 mutex_lock(&item->mutex);
40330- if (item->refcount == 0) {
40331+ if (atomic_read(&item->refcount) == 0) {
40332 item->object = kzalloc(ref->size, GFP_KERNEL);
40333 if (unlikely(item->object == NULL)) {
40334 ret = -ENOMEM;
40335@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40336 goto out_err;
40337
40338 }
40339- ++item->refcount;
40340+ atomic_inc(&item->refcount);
40341 ref->object = item->object;
40342 mutex_unlock(&item->mutex);
40343 return 0;
40344@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
40345 struct drm_global_item *item = &glob[ref->global_type];
40346
40347 mutex_lock(&item->mutex);
40348- BUG_ON(item->refcount == 0);
40349+ BUG_ON(atomic_read(&item->refcount) == 0);
40350 BUG_ON(ref->object != item->object);
40351- if (--item->refcount == 0) {
40352+ if (atomic_dec_and_test(&item->refcount)) {
40353 ref->release(ref);
40354 item->object = NULL;
40355 }
40356diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
40357index 51efebd..2b70935 100644
40358--- a/drivers/gpu/drm/drm_info.c
40359+++ b/drivers/gpu/drm/drm_info.c
40360@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
40361 struct drm_local_map *map;
40362 struct drm_map_list *r_list;
40363
40364- /* Hardcoded from _DRM_FRAME_BUFFER,
40365- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
40366- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
40367- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
40368+ static const char * const types[] = {
40369+ [_DRM_FRAME_BUFFER] = "FB",
40370+ [_DRM_REGISTERS] = "REG",
40371+ [_DRM_SHM] = "SHM",
40372+ [_DRM_AGP] = "AGP",
40373+ [_DRM_SCATTER_GATHER] = "SG",
40374+ [_DRM_CONSISTENT] = "PCI"};
40375 const char *type;
40376 int i;
40377
40378@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
40379 map = r_list->map;
40380 if (!map)
40381 continue;
40382- if (map->type < 0 || map->type > 5)
40383+ if (map->type >= ARRAY_SIZE(types))
40384 type = "??";
40385 else
40386 type = types[map->type];
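
[annotation] The types[] rewrite replaces positional strings plus a hard-coded 0..5 range check with designated initializers and an ARRAY_SIZE() bound, so the table cannot drift out of sync with the _DRM_* constants, and one comparison against the (unsigned) array size covers both the negative and the too-large case. The same shape as a standalone sketch:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { FB, REG, SHM, AGP, SG, PCI };

static const char * const types[] = {
	[FB]  = "FB",  [REG] = "REG", [SHM] = "SHM",
	[AGP] = "AGP", [SG]  = "SG",  [PCI] = "PCI",
};

static const char *type_name(unsigned int t)
{
	/* one unsigned test replaces 'type < 0 || type > 5';
	 * the !types[t] guard also covers holes in the table */
	return (t >= ARRAY_SIZE(types) || !types[t]) ? "??" : types[t];
}

int main(void)
{
	puts(type_name(SG));   /* SG */
	puts(type_name(42u));  /* ?? */
	return 0;
}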
40387diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
40388index 2f4c4343..dd12cd2 100644
40389--- a/drivers/gpu/drm/drm_ioc32.c
40390+++ b/drivers/gpu/drm/drm_ioc32.c
40391@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
40392 request = compat_alloc_user_space(nbytes);
40393 if (!access_ok(VERIFY_WRITE, request, nbytes))
40394 return -EFAULT;
40395- list = (struct drm_buf_desc *) (request + 1);
40396+ list = (struct drm_buf_desc __user *) (request + 1);
40397
40398 if (__put_user(count, &request->count)
40399 || __put_user(list, &request->list))
40400@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
40401 request = compat_alloc_user_space(nbytes);
40402 if (!access_ok(VERIFY_WRITE, request, nbytes))
40403 return -EFAULT;
40404- list = (struct drm_buf_pub *) (request + 1);
40405+ list = (struct drm_buf_pub __user *) (request + 1);
40406
40407 if (__put_user(count, &request->count)
40408 || __put_user(list, &request->list))
40409@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
40410 return 0;
40411 }
40412
40413-drm_ioctl_compat_t *drm_compat_ioctls[] = {
40414+drm_ioctl_compat_t drm_compat_ioctls[] = {
40415 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
40416 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
40417 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
40418@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
40419 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40420 {
40421 unsigned int nr = DRM_IOCTL_NR(cmd);
40422- drm_ioctl_compat_t *fn;
40423 int ret;
40424
40425 /* Assume that ioctls without an explicit compat routine will just
40426@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40427 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
40428 return drm_ioctl(filp, cmd, arg);
40429
40430- fn = drm_compat_ioctls[nr];
40431-
40432- if (fn != NULL)
40433- ret = (*fn) (filp, cmd, arg);
40434+ if (drm_compat_ioctls[nr] != NULL)
40435+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
40436 else
40437 ret = drm_ioctl(filp, cmd, arg);
40438
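
[annotation] Dropping the '*' from drm_ioctl_compat_t *drm_compat_ioctls[] only compiles because grsecurity elsewhere appears to redefine drm_ioctl_compat_t itself as a const function-pointer type (an assumption here, inferred from these hunks); the net effect is a dispatch table whose slots are fixed at build time and can live in read-only data. The local fn temporary disappears for the same reason: with immutable slots there is nothing worth caching. A sketch of the end state under that assumed typedef:

/* assumption: grsecurity's typedef, roughly */
typedef int (* const compat_fn_t)(unsigned int cmd, unsigned long arg);

static int ver(unsigned int cmd, unsigned long arg)
{
	(void)cmd; (void)arg;
	return 0;
}

static compat_fn_t compat_ioctls[] = {
	ver,    /* every slot is set here and never retargeted */
};

int dispatch(unsigned int nr, unsigned int cmd, unsigned long arg)
{
	if (nr < sizeof(compat_ioctls) / sizeof(compat_ioctls[0])
	    && compat_ioctls[nr] != 0)
		return compat_ioctls[nr](cmd, arg);
	return -1;
}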
40439diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
40440index 00587a1..57a65ca 100644
40441--- a/drivers/gpu/drm/drm_ioctl.c
40442+++ b/drivers/gpu/drm/drm_ioctl.c
40443@@ -642,7 +642,7 @@ long drm_ioctl(struct file *filp,
40444 struct drm_file *file_priv = filp->private_data;
40445 struct drm_device *dev;
40446 const struct drm_ioctl_desc *ioctl = NULL;
40447- drm_ioctl_t *func;
40448+ drm_ioctl_no_const_t func;
40449 unsigned int nr = DRM_IOCTL_NR(cmd);
40450 int retcode = -EINVAL;
40451 char stack_kdata[128];
40452diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
40453index 93ec5dc..82acbaf 100644
40454--- a/drivers/gpu/drm/i810/i810_drv.h
40455+++ b/drivers/gpu/drm/i810/i810_drv.h
40456@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
40457 int page_flipping;
40458
40459 wait_queue_head_t irq_queue;
40460- atomic_t irq_received;
40461- atomic_t irq_emitted;
40462+ atomic_unchecked_t irq_received;
40463+ atomic_unchecked_t irq_emitted;
40464
40465 int front_offset;
40466 } drm_i810_private_t;
40467diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
40468index ecee3bc..ad5ae67 100644
40469--- a/drivers/gpu/drm/i915/i915_dma.c
40470+++ b/drivers/gpu/drm/i915/i915_dma.c
40471@@ -356,7 +356,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
40472 * locking inversion with the driver load path. And the access here is
40473 * completely racy anyway. So don't bother with locking for now.
40474 */
40475- return dev->open_count == 0;
40476+ return local_read(&dev->open_count) == 0;
40477 }
40478
40479 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
40480diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40481index 1173831..7dfb389 100644
40482--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40483+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40484@@ -863,12 +863,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
40485 static int
40486 validate_exec_list(struct drm_device *dev,
40487 struct drm_i915_gem_exec_object2 *exec,
40488- int count)
40489+ unsigned int count)
40490 {
40491 unsigned relocs_total = 0;
40492 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
40493 unsigned invalid_flags;
40494- int i;
40495+ unsigned int i;
40496
40497 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
40498 if (USES_FULL_PPGTT(dev))
40499diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
40500index 176de63..1ef9ac7 100644
40501--- a/drivers/gpu/drm/i915/i915_ioc32.c
40502+++ b/drivers/gpu/drm/i915/i915_ioc32.c
40503@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
40504 (unsigned long)request);
40505 }
40506
40507-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40508+static drm_ioctl_compat_t i915_compat_ioctls[] = {
40509 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
40510 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
40511 [DRM_I915_GETPARAM] = compat_i915_getparam,
40512@@ -201,18 +201,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40513 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40514 {
40515 unsigned int nr = DRM_IOCTL_NR(cmd);
40516- drm_ioctl_compat_t *fn = NULL;
40517 int ret;
40518
40519 if (nr < DRM_COMMAND_BASE)
40520 return drm_compat_ioctl(filp, cmd, arg);
40521
40522- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
40523- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40524-
40525- if (fn != NULL)
40526+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
40527+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40528 ret = (*fn) (filp, cmd, arg);
40529- else
40530+ } else
40531 ret = drm_ioctl(filp, cmd, arg);
40532
40533 return ret;
40534diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
40535index e7a16f1..e0d82e8 100644
40536--- a/drivers/gpu/drm/i915/intel_display.c
40537+++ b/drivers/gpu/drm/i915/intel_display.c
40538@@ -12935,13 +12935,13 @@ struct intel_quirk {
40539 int subsystem_vendor;
40540 int subsystem_device;
40541 void (*hook)(struct drm_device *dev);
40542-};
40543+} __do_const;
40544
40545 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
40546 struct intel_dmi_quirk {
40547 void (*hook)(struct drm_device *dev);
40548 const struct dmi_system_id (*dmi_id_list)[];
40549-};
40550+} __do_const;
40551
40552 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40553 {
40554@@ -12949,18 +12949,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40555 return 1;
40556 }
40557
40558-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40559+static const struct dmi_system_id intel_dmi_quirks_table[] = {
40560 {
40561- .dmi_id_list = &(const struct dmi_system_id[]) {
40562- {
40563- .callback = intel_dmi_reverse_brightness,
40564- .ident = "NCR Corporation",
40565- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40566- DMI_MATCH(DMI_PRODUCT_NAME, ""),
40567- },
40568- },
40569- { } /* terminating entry */
40570+ .callback = intel_dmi_reverse_brightness,
40571+ .ident = "NCR Corporation",
40572+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40573+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
40574 },
40575+ },
40576+ { } /* terminating entry */
40577+};
40578+
40579+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40580+ {
40581+ .dmi_id_list = &intel_dmi_quirks_table,
40582 .hook = quirk_invert_brightness,
40583 },
40584 };
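
[annotation] The intel_display rewrite hoists the anonymous compound-literal DMI array into the named static const intel_dmi_quirks_table; the apparent motivation is that a compound literal buried in an initializer cannot be placed in read-only data by the constify machinery, while a named object can (an inference from the shape of the change). In miniature, with trimmed-down types standing in for the real ones:

struct dmi_id { const char *ident; };

struct quirk {
	const struct dmi_id (*dmi_id_list)[]; /* pointer to const array */
};

/* before: the array existed only as &(const struct dmi_id[]){ ... }
 * inside the quirk initializer; after: a named read-only object */
static const struct dmi_id intel_dmi_quirks_table[] = {
	{ "NCR Corporation" },
	{ 0 } /* terminating entry */
};

static const struct quirk intel_dmi_quirks[] = {
	{ .dmi_id_list = &intel_dmi_quirks_table },
};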
40585diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
40586index b250130..98df2a4 100644
40587--- a/drivers/gpu/drm/imx/imx-drm-core.c
40588+++ b/drivers/gpu/drm/imx/imx-drm-core.c
40589@@ -356,7 +356,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
40590 if (imxdrm->pipes >= MAX_CRTC)
40591 return -EINVAL;
40592
40593- if (imxdrm->drm->open_count)
40594+ if (local_read(&imxdrm->drm->open_count))
40595 return -EBUSY;
40596
40597 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
40598diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
40599index b4a2014..219ab78 100644
40600--- a/drivers/gpu/drm/mga/mga_drv.h
40601+++ b/drivers/gpu/drm/mga/mga_drv.h
40602@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
40603 u32 clear_cmd;
40604 u32 maccess;
40605
40606- atomic_t vbl_received; /**< Number of vblanks received. */
40607+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
40608 wait_queue_head_t fence_queue;
40609- atomic_t last_fence_retired;
40610+ atomic_unchecked_t last_fence_retired;
40611 u32 next_fence_to_post;
40612
40613 unsigned int fb_cpp;
40614diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
40615index 729bfd5..ead8823 100644
40616--- a/drivers/gpu/drm/mga/mga_ioc32.c
40617+++ b/drivers/gpu/drm/mga/mga_ioc32.c
40618@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
40619 return 0;
40620 }
40621
40622-drm_ioctl_compat_t *mga_compat_ioctls[] = {
40623+drm_ioctl_compat_t mga_compat_ioctls[] = {
40624 [DRM_MGA_INIT] = compat_mga_init,
40625 [DRM_MGA_GETPARAM] = compat_mga_getparam,
40626 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
40627@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
40628 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40629 {
40630 unsigned int nr = DRM_IOCTL_NR(cmd);
40631- drm_ioctl_compat_t *fn = NULL;
40632 int ret;
40633
40634 if (nr < DRM_COMMAND_BASE)
40635 return drm_compat_ioctl(filp, cmd, arg);
40636
40637- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
40638- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40639-
40640- if (fn != NULL)
40641+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
40642+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40643 ret = (*fn) (filp, cmd, arg);
40644- else
40645+ } else
40646 ret = drm_ioctl(filp, cmd, arg);
40647
40648 return ret;
40649diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
40650index 1b071b8..de8601a 100644
40651--- a/drivers/gpu/drm/mga/mga_irq.c
40652+++ b/drivers/gpu/drm/mga/mga_irq.c
40653@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
40654 if (crtc != 0)
40655 return 0;
40656
40657- return atomic_read(&dev_priv->vbl_received);
40658+ return atomic_read_unchecked(&dev_priv->vbl_received);
40659 }
40660
40661
40662@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40663 /* VBLANK interrupt */
40664 if (status & MGA_VLINEPEN) {
40665 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
40666- atomic_inc(&dev_priv->vbl_received);
40667+ atomic_inc_unchecked(&dev_priv->vbl_received);
40668 drm_handle_vblank(dev, 0);
40669 handled = 1;
40670 }
40671@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40672 if ((prim_start & ~0x03) != (prim_end & ~0x03))
40673 MGA_WRITE(MGA_PRIMEND, prim_end);
40674
40675- atomic_inc(&dev_priv->last_fence_retired);
40676+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
40677 wake_up(&dev_priv->fence_queue);
40678 handled = 1;
40679 }
40680@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
40681 * using fences.
40682 */
40683 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
40684- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
40685+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
40686 - *sequence) <= (1 << 23)));
40687
40688 *sequence = cur_fence;
40689diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
40690index 7df6acc..84bbe52 100644
40691--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
40692+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
40693@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
40694 struct bit_table {
40695 const char id;
40696 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
40697-};
40698+} __no_const;
40699
40700 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
40701
40702diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
40703index 8ae36f2..1147a30 100644
40704--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
40705+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
40706@@ -121,7 +121,6 @@ struct nouveau_drm {
40707 struct drm_global_reference mem_global_ref;
40708 struct ttm_bo_global_ref bo_global_ref;
40709 struct ttm_bo_device bdev;
40710- atomic_t validate_sequence;
40711 int (*move)(struct nouveau_channel *,
40712 struct ttm_buffer_object *,
40713 struct ttm_mem_reg *, struct ttm_mem_reg *);
40714diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40715index 462679a..88e32a7 100644
40716--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40717+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40718@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
40719 unsigned long arg)
40720 {
40721 unsigned int nr = DRM_IOCTL_NR(cmd);
40722- drm_ioctl_compat_t *fn = NULL;
40723+ drm_ioctl_compat_t fn = NULL;
40724 int ret;
40725
40726 if (nr < DRM_COMMAND_BASE)
40727diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40728index 3d1cfcb..0542700 100644
40729--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
40730+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40731@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40732 }
40733
40734 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
40735- nouveau_vram_manager_init,
40736- nouveau_vram_manager_fini,
40737- nouveau_vram_manager_new,
40738- nouveau_vram_manager_del,
40739- nouveau_vram_manager_debug
40740+ .init = nouveau_vram_manager_init,
40741+ .takedown = nouveau_vram_manager_fini,
40742+ .get_node = nouveau_vram_manager_new,
40743+ .put_node = nouveau_vram_manager_del,
40744+ .debug = nouveau_vram_manager_debug
40745 };
40746
40747 static int
40748@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40749 }
40750
40751 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
40752- nouveau_gart_manager_init,
40753- nouveau_gart_manager_fini,
40754- nouveau_gart_manager_new,
40755- nouveau_gart_manager_del,
40756- nouveau_gart_manager_debug
40757+ .init = nouveau_gart_manager_init,
40758+ .takedown = nouveau_gart_manager_fini,
40759+ .get_node = nouveau_gart_manager_new,
40760+ .put_node = nouveau_gart_manager_del,
40761+ .debug = nouveau_gart_manager_debug
40762 };
40763
40764 /*XXX*/
40765@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40766 }
40767
40768 const struct ttm_mem_type_manager_func nv04_gart_manager = {
40769- nv04_gart_manager_init,
40770- nv04_gart_manager_fini,
40771- nv04_gart_manager_new,
40772- nv04_gart_manager_del,
40773- nv04_gart_manager_debug
40774+ .init = nv04_gart_manager_init,
40775+ .takedown = nv04_gart_manager_fini,
40776+ .get_node = nv04_gart_manager_new,
40777+ .put_node = nv04_gart_manager_del,
40778+ .debug = nv04_gart_manager_debug
40779 };
40780
40781 int
40782diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
40783index c7592ec..dd45ebc 100644
40784--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
40785+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
40786@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
40787 * locking inversion with the driver load path. And the access here is
40788 * completely racy anyway. So don't bother with locking for now.
40789 */
40790- return dev->open_count == 0;
40791+ return local_read(&dev->open_count) == 0;
40792 }
40793
40794 static const struct vga_switcheroo_client_ops
40795diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
40796index 9782364..89bd954 100644
40797--- a/drivers/gpu/drm/qxl/qxl_cmd.c
40798+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
40799@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
40800 int ret;
40801
40802 mutex_lock(&qdev->async_io_mutex);
40803- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40804+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40805 if (qdev->last_sent_io_cmd > irq_num) {
40806 if (intr)
40807 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40808- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40809+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40810 else
40811 ret = wait_event_timeout(qdev->io_cmd_event,
40812- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40813+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40814 /* 0 is timeout, just bail the "hw" has gone away */
40815 if (ret <= 0)
40816 goto out;
40817- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40818+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40819 }
40820 outb(val, addr);
40821 qdev->last_sent_io_cmd = irq_num + 1;
40822 if (intr)
40823 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40824- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40825+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40826 else
40827 ret = wait_event_timeout(qdev->io_cmd_event,
40828- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40829+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40830 out:
40831 if (ret > 0)
40832 ret = 0;
40833diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
40834index 6911b8c..89d6867 100644
40835--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
40836+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
40837@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
40838 struct drm_info_node *node = (struct drm_info_node *) m->private;
40839 struct qxl_device *qdev = node->minor->dev->dev_private;
40840
40841- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
40842- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
40843- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
40844- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
40845+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
40846+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
40847+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
40848+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
40849 seq_printf(m, "%d\n", qdev->irq_received_error);
40850 return 0;
40851 }
40852diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
40853index 7c6cafe..460f542 100644
40854--- a/drivers/gpu/drm/qxl/qxl_drv.h
40855+++ b/drivers/gpu/drm/qxl/qxl_drv.h
40856@@ -290,10 +290,10 @@ struct qxl_device {
40857 unsigned int last_sent_io_cmd;
40858
40859 /* interrupt handling */
40860- atomic_t irq_received;
40861- atomic_t irq_received_display;
40862- atomic_t irq_received_cursor;
40863- atomic_t irq_received_io_cmd;
40864+ atomic_unchecked_t irq_received;
40865+ atomic_unchecked_t irq_received_display;
40866+ atomic_unchecked_t irq_received_cursor;
40867+ atomic_unchecked_t irq_received_io_cmd;
40868 unsigned irq_received_error;
40869 wait_queue_head_t display_event;
40870 wait_queue_head_t cursor_event;
40871diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
40872index b110883..dd06418 100644
40873--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
40874+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
40875@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40876
40877 /* TODO copy slow path code from i915 */
40878 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
40879- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
40880+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
40881
40882 {
40883 struct qxl_drawable *draw = fb_cmd;
40884@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40885 struct drm_qxl_reloc reloc;
40886
40887 if (copy_from_user(&reloc,
40888- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
40889+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
40890 sizeof(reloc))) {
40891 ret = -EFAULT;
40892 goto out_free_bos;
40893@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
40894
40895 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
40896
40897- struct drm_qxl_command *commands =
40898- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
40899+ struct drm_qxl_command __user *commands =
40900+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
40901
40902- if (copy_from_user(&user_cmd, &commands[cmd_num],
40903+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
40904 sizeof(user_cmd)))
40905 return -EFAULT;
40906
40907diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
40908index 0bf1e20..42a7310 100644
40909--- a/drivers/gpu/drm/qxl/qxl_irq.c
40910+++ b/drivers/gpu/drm/qxl/qxl_irq.c
40911@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
40912 if (!pending)
40913 return IRQ_NONE;
40914
40915- atomic_inc(&qdev->irq_received);
40916+ atomic_inc_unchecked(&qdev->irq_received);
40917
40918 if (pending & QXL_INTERRUPT_DISPLAY) {
40919- atomic_inc(&qdev->irq_received_display);
40920+ atomic_inc_unchecked(&qdev->irq_received_display);
40921 wake_up_all(&qdev->display_event);
40922 qxl_queue_garbage_collect(qdev, false);
40923 }
40924 if (pending & QXL_INTERRUPT_CURSOR) {
40925- atomic_inc(&qdev->irq_received_cursor);
40926+ atomic_inc_unchecked(&qdev->irq_received_cursor);
40927 wake_up_all(&qdev->cursor_event);
40928 }
40929 if (pending & QXL_INTERRUPT_IO_CMD) {
40930- atomic_inc(&qdev->irq_received_io_cmd);
40931+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
40932 wake_up_all(&qdev->io_cmd_event);
40933 }
40934 if (pending & QXL_INTERRUPT_ERROR) {
40935@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
40936 init_waitqueue_head(&qdev->io_cmd_event);
40937 INIT_WORK(&qdev->client_monitors_config_work,
40938 qxl_client_monitors_config_work_func);
40939- atomic_set(&qdev->irq_received, 0);
40940- atomic_set(&qdev->irq_received_display, 0);
40941- atomic_set(&qdev->irq_received_cursor, 0);
40942- atomic_set(&qdev->irq_received_io_cmd, 0);
40943+ atomic_set_unchecked(&qdev->irq_received, 0);
40944+ atomic_set_unchecked(&qdev->irq_received_display, 0);
40945+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
40946+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
40947 qdev->irq_received_error = 0;
40948 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
40949 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
40950diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
40951index 0cbc4c9..0e46686 100644
40952--- a/drivers/gpu/drm/qxl/qxl_ttm.c
40953+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
40954@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
40955 }
40956 }
40957
40958-static struct vm_operations_struct qxl_ttm_vm_ops;
40959+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
40960 static const struct vm_operations_struct *ttm_vm_ops;
40961
40962 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40963@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
40964 return r;
40965 if (unlikely(ttm_vm_ops == NULL)) {
40966 ttm_vm_ops = vma->vm_ops;
40967+ pax_open_kernel();
40968 qxl_ttm_vm_ops = *ttm_vm_ops;
40969 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
40970+ pax_close_kernel();
40971 }
40972 vma->vm_ops = &qxl_ttm_vm_ops;
40973 return 0;
40974@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
40975 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
40976 {
40977 #if defined(CONFIG_DEBUG_FS)
40978- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
40979- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
40980- unsigned i;
40981+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
40982+ {
40983+ .name = "qxl_mem_mm",
40984+ .show = &qxl_mm_dump_table,
40985+ },
40986+ {
40987+ .name = "qxl_surf_mm",
40988+ .show = &qxl_mm_dump_table,
40989+ }
40990+ };
40991
40992- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
40993- if (i == 0)
40994- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
40995- else
40996- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
40997- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
40998- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
40999- qxl_mem_types_list[i].driver_features = 0;
41000- if (i == 0)
41001- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41002- else
41003- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41004+ pax_open_kernel();
41005+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41006+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41007+ pax_close_kernel();
41008
41009- }
41010- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41011+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41012 #else
41013 return 0;
41014 #endif
41015diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41016index 2c45ac9..5d740f8 100644
41017--- a/drivers/gpu/drm/r128/r128_cce.c
41018+++ b/drivers/gpu/drm/r128/r128_cce.c
41019@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41020
41021 /* GH: Simple idle check.
41022 */
41023- atomic_set(&dev_priv->idle_count, 0);
41024+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41025
41026 /* We don't support anything other than bus-mastering ring mode,
41027 * but the ring can be in either AGP or PCI space for the ring
41028diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41029index 723e5d6..102dbaf 100644
41030--- a/drivers/gpu/drm/r128/r128_drv.h
41031+++ b/drivers/gpu/drm/r128/r128_drv.h
41032@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41033 int is_pci;
41034 unsigned long cce_buffers_offset;
41035
41036- atomic_t idle_count;
41037+ atomic_unchecked_t idle_count;
41038
41039 int page_flipping;
41040 int current_page;
41041 u32 crtc_offset;
41042 u32 crtc_offset_cntl;
41043
41044- atomic_t vbl_received;
41045+ atomic_unchecked_t vbl_received;
41046
41047 u32 color_fmt;
41048 unsigned int front_offset;
41049diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41050index 663f38c..c689495 100644
41051--- a/drivers/gpu/drm/r128/r128_ioc32.c
41052+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41053@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41054 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41055 }
41056
41057-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41058+drm_ioctl_compat_t r128_compat_ioctls[] = {
41059 [DRM_R128_INIT] = compat_r128_init,
41060 [DRM_R128_DEPTH] = compat_r128_depth,
41061 [DRM_R128_STIPPLE] = compat_r128_stipple,
41062@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41063 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41064 {
41065 unsigned int nr = DRM_IOCTL_NR(cmd);
41066- drm_ioctl_compat_t *fn = NULL;
41067 int ret;
41068
41069 if (nr < DRM_COMMAND_BASE)
41070 return drm_compat_ioctl(filp, cmd, arg);
41071
41072- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41073- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41074-
41075- if (fn != NULL)
41076+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
41077+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41078 ret = (*fn) (filp, cmd, arg);
41079- else
41080+ } else
41081 ret = drm_ioctl(filp, cmd, arg);
41082
41083 return ret;
41084diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41085index c2ae496..30b5993 100644
41086--- a/drivers/gpu/drm/r128/r128_irq.c
41087+++ b/drivers/gpu/drm/r128/r128_irq.c
41088@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41089 if (crtc != 0)
41090 return 0;
41091
41092- return atomic_read(&dev_priv->vbl_received);
41093+ return atomic_read_unchecked(&dev_priv->vbl_received);
41094 }
41095
41096 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41097@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41098 /* VBLANK interrupt */
41099 if (status & R128_CRTC_VBLANK_INT) {
41100 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41101- atomic_inc(&dev_priv->vbl_received);
41102+ atomic_inc_unchecked(&dev_priv->vbl_received);
41103 drm_handle_vblank(dev, 0);
41104 return IRQ_HANDLED;
41105 }
41106diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41107index 8fd2d9f..18c9660 100644
41108--- a/drivers/gpu/drm/r128/r128_state.c
41109+++ b/drivers/gpu/drm/r128/r128_state.c
41110@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41111
41112 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41113 {
41114- if (atomic_read(&dev_priv->idle_count) == 0)
41115+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41116 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41117 else
41118- atomic_set(&dev_priv->idle_count, 0);
41119+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41120 }
41121
41122 #endif
41123diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41124index b928c17..e5d9400 100644
41125--- a/drivers/gpu/drm/radeon/mkregtable.c
41126+++ b/drivers/gpu/drm/radeon/mkregtable.c
41127@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41128 regex_t mask_rex;
41129 regmatch_t match[4];
41130 char buf[1024];
41131- size_t end;
41132+ long end;
41133 int len;
41134 int done = 0;
41135 int r;
41136 unsigned o;
41137 struct offset *offset;
41138 char last_reg_s[10];
41139- int last_reg;
41140+ unsigned long last_reg;
41141
41142 if (regcomp
41143 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41144diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41145index bd7519f..e1c2cd95 100644
41146--- a/drivers/gpu/drm/radeon/radeon_device.c
41147+++ b/drivers/gpu/drm/radeon/radeon_device.c
41148@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41149 * locking inversion with the driver load path. And the access here is
41150 * completely racy anyway. So don't bother with locking for now.
41151 */
41152- return dev->open_count == 0;
41153+ return local_read(&dev->open_count) == 0;
41154 }
41155
41156 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
41157diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41158index 46bd393..6ae4719 100644
41159--- a/drivers/gpu/drm/radeon/radeon_drv.h
41160+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41161@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41162
41163 /* SW interrupt */
41164 wait_queue_head_t swi_queue;
41165- atomic_t swi_emitted;
41166+ atomic_unchecked_t swi_emitted;
41167 int vblank_crtc;
41168 uint32_t irq_enable_reg;
41169 uint32_t r500_disp_irq_reg;
41170diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41171index 0b98ea1..0881827 100644
41172--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41173+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41174@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41175 request = compat_alloc_user_space(sizeof(*request));
41176 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41177 || __put_user(req32.param, &request->param)
41178- || __put_user((void __user *)(unsigned long)req32.value,
41179+ || __put_user((unsigned long)req32.value,
41180 &request->value))
41181 return -EFAULT;
41182
41183@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41184 #define compat_radeon_cp_setparam NULL
41185 #endif /* X86_64 || IA64 */
41186
41187-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41188+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41189 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41190 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41191 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41192@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41193 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41194 {
41195 unsigned int nr = DRM_IOCTL_NR(cmd);
41196- drm_ioctl_compat_t *fn = NULL;
41197 int ret;
41198
41199 if (nr < DRM_COMMAND_BASE)
41200 return drm_compat_ioctl(filp, cmd, arg);
41201
41202- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
41203- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41204-
41205- if (fn != NULL)
41206+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
41207+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41208 ret = (*fn) (filp, cmd, arg);
41209- else
41210+ } else
41211 ret = drm_ioctl(filp, cmd, arg);
41212
41213 return ret;
41214diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41215index 244b19b..c19226d 100644
41216--- a/drivers/gpu/drm/radeon/radeon_irq.c
41217+++ b/drivers/gpu/drm/radeon/radeon_irq.c
41218@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
41219 unsigned int ret;
41220 RING_LOCALS;
41221
41222- atomic_inc(&dev_priv->swi_emitted);
41223- ret = atomic_read(&dev_priv->swi_emitted);
41224+ atomic_inc_unchecked(&dev_priv->swi_emitted);
41225+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
41226
41227 BEGIN_RING(4);
41228 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
41229@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
41230 drm_radeon_private_t *dev_priv =
41231 (drm_radeon_private_t *) dev->dev_private;
41232
41233- atomic_set(&dev_priv->swi_emitted, 0);
41234+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
41235 init_waitqueue_head(&dev_priv->swi_queue);
41236
41237 dev->max_vblank_count = 0x001fffff;
41238diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
41239index 15aee72..cda326e 100644
41240--- a/drivers/gpu/drm/radeon/radeon_state.c
41241+++ b/drivers/gpu/drm/radeon/radeon_state.c
41242@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
41243 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
41244 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
41245
41246- if (copy_from_user(&depth_boxes, clear->depth_boxes,
41247+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
41248 sarea_priv->nbox * sizeof(depth_boxes[0])))
41249 return -EFAULT;
41250
41251@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
41252 {
41253 drm_radeon_private_t *dev_priv = dev->dev_private;
41254 drm_radeon_getparam_t *param = data;
41255- int value;
41256+ int value = 0;
41257
41258 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
41259
41260diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
41261index d02aa1d..ca19e2c 100644
41262--- a/drivers/gpu/drm/radeon/radeon_ttm.c
41263+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
41264@@ -959,7 +959,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
41265 man->size = size >> PAGE_SHIFT;
41266 }
41267
41268-static struct vm_operations_struct radeon_ttm_vm_ops;
41269+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
41270 static const struct vm_operations_struct *ttm_vm_ops = NULL;
41271
41272 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41273@@ -1000,8 +1000,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
41274 }
41275 if (unlikely(ttm_vm_ops == NULL)) {
41276 ttm_vm_ops = vma->vm_ops;
41277+ pax_open_kernel();
41278 radeon_ttm_vm_ops = *ttm_vm_ops;
41279 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
41280+ pax_close_kernel();
41281 }
41282 vma->vm_ops = &radeon_ttm_vm_ops;
41283 return 0;
41284diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41285index 978993f..e36e50e 100644
41286--- a/drivers/gpu/drm/tegra/dc.c
41287+++ b/drivers/gpu/drm/tegra/dc.c
41288@@ -1416,7 +1416,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41289 }
41290
41291 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41292- dc->debugfs_files[i].data = dc;
41293+ *(void **)&dc->debugfs_files[i].data = dc;
41294
41295 err = drm_debugfs_create_files(dc->debugfs_files,
41296 ARRAY_SIZE(debugfs_files),
41297diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
41298index 33f67fd..55ee9761 100644
41299--- a/drivers/gpu/drm/tegra/dsi.c
41300+++ b/drivers/gpu/drm/tegra/dsi.c
41301@@ -39,7 +39,7 @@ struct tegra_dsi {
41302 struct clk *clk_lp;
41303 struct clk *clk;
41304
41305- struct drm_info_list *debugfs_files;
41306+ drm_info_list_no_const *debugfs_files;
41307 struct drm_minor *minor;
41308 struct dentry *debugfs;
41309
41310diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41311index ffe2654..03c7b1c 100644
41312--- a/drivers/gpu/drm/tegra/hdmi.c
41313+++ b/drivers/gpu/drm/tegra/hdmi.c
41314@@ -60,7 +60,7 @@ struct tegra_hdmi {
41315 bool stereo;
41316 bool dvi;
41317
41318- struct drm_info_list *debugfs_files;
41319+ drm_info_list_no_const *debugfs_files;
41320 struct drm_minor *minor;
41321 struct dentry *debugfs;
41322 };
41323diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41324index aa0bd054..aea6a01 100644
41325--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41326+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41327@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41328 }
41329
41330 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41331- ttm_bo_man_init,
41332- ttm_bo_man_takedown,
41333- ttm_bo_man_get_node,
41334- ttm_bo_man_put_node,
41335- ttm_bo_man_debug
41336+ .init = ttm_bo_man_init,
41337+ .takedown = ttm_bo_man_takedown,
41338+ .get_node = ttm_bo_man_get_node,
41339+ .put_node = ttm_bo_man_put_node,
41340+ .debug = ttm_bo_man_debug
41341 };
41342 EXPORT_SYMBOL(ttm_bo_manager_func);
41343diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41344index a1803fb..c53f6b0 100644
41345--- a/drivers/gpu/drm/ttm/ttm_memory.c
41346+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41347@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41348 zone->glob = glob;
41349 glob->zone_kernel = zone;
41350 ret = kobject_init_and_add(
41351- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41352+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41353 if (unlikely(ret != 0)) {
41354 kobject_put(&zone->kobj);
41355 return ret;
41356@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41357 zone->glob = glob;
41358 glob->zone_dma32 = zone;
41359 ret = kobject_init_and_add(
41360- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41361+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41362 if (unlikely(ret != 0)) {
41363 kobject_put(&zone->kobj);
41364 return ret;
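
[annotation] The two kobject_init_and_add() changes above are format-string hardening: the trailing arguments are printf-style, so passing zone->name directly would let any '%' in the name be interpreted as a conversion specifier. Routing it through "%s" makes the name pure data. The bug class in miniature:

#include <stdio.h>

void label(const char *name)
{
	/* printf(name);  unsafe: '%' sequences in name are interpreted */
	printf("%s\n", name); /* safe: name can never act as a format   */
}

int main(void)
{
	label("zone %n kernel"); /* printed literally, no write-anywhere */
	return 0;
}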
41365diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41366index 025c429..314062f 100644
41367--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41368+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41369@@ -54,7 +54,7 @@
41370
41371 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41372 #define SMALL_ALLOCATION 16
41373-#define FREE_ALL_PAGES (~0U)
41374+#define FREE_ALL_PAGES (~0UL)
41375 /* times are in msecs */
41376 #define PAGE_FREE_INTERVAL 1000
41377
41378@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
41379 * @nr_free: number of pages to free, or FREE_ALL_PAGES to free every page in the pool
41380 * @use_static: Safe to use static buffer
41381 **/
41382-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
41383+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
41384 bool use_static)
41385 {
41386 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41387 unsigned long irq_flags;
41388 struct page *p;
41389 struct page **pages_to_free;
41390- unsigned freed_pages = 0,
41391- npages_to_free = nr_free;
41392+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41393
41394 if (NUM_PAGES_TO_ALLOC < nr_free)
41395 npages_to_free = NUM_PAGES_TO_ALLOC;
41396@@ -371,7 +370,8 @@ restart:
41397 __list_del(&p->lru, &pool->list);
41398
41399 ttm_pool_update_free_locked(pool, freed_pages);
41400- nr_free -= freed_pages;
41401+ if (likely(nr_free != FREE_ALL_PAGES))
41402+ nr_free -= freed_pages;
41403 }
41404
41405 spin_unlock_irqrestore(&pool->lock, irq_flags);
41406@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41407 unsigned i;
41408 unsigned pool_offset;
41409 struct ttm_page_pool *pool;
41410- int shrink_pages = sc->nr_to_scan;
41411+ unsigned long shrink_pages = sc->nr_to_scan;
41412 unsigned long freed = 0;
41413
41414 if (!mutex_trylock(&lock))
41415@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41416 pool_offset = ++start_pool % NUM_POOLS;
41417 /* select start pool in round robin fashion */
41418 for (i = 0; i < NUM_POOLS; ++i) {
41419- unsigned nr_free = shrink_pages;
41420+ unsigned long nr_free = shrink_pages;
41421 if (shrink_pages == 0)
41422 break;
41423 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
41424@@ -673,7 +673,7 @@ out:
41425 }
41426
41427 /* Put all pages in pages list to correct pool to wait for reuse */
41428-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
41429+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
41430 enum ttm_caching_state cstate)
41431 {
41432 unsigned long irq_flags;
41433@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
41434 struct list_head plist;
41435 struct page *p = NULL;
41436 gfp_t gfp_flags = GFP_USER;
41437- unsigned count;
41438+ unsigned long count;
41439 int r;
41440
41441 /* set zero flag for page allocation if required */
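
Two related changes run through this file: the page counters widen from unsigned to unsigned long so that FREE_ALL_PAGES (~0UL) matches the type of sc->nr_to_scan, and ttm_page_pool_free() now skips the nr_free -= freed_pages update when nr_free holds the sentinel, since decrementing ~0UL would silently turn "free everything" into a finite count. A compact model of the guard:

    #define FREE_ALL_PAGES (~0UL)   /* sentinel meaning "free every page" */

    unsigned long account_freed(unsigned long nr_free, unsigned long freed_pages)
    {
            /* The sentinel must not be decremented, or it stops meaning "all". */
            if (nr_free != FREE_ALL_PAGES)
                    nr_free -= freed_pages;
            return nr_free;
    }
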
41442diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41443index 01e1d27..aaa018a 100644
41444--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41445+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41446@@ -56,7 +56,7 @@
41447
41448 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41449 #define SMALL_ALLOCATION 4
41450-#define FREE_ALL_PAGES (~0U)
41451+#define FREE_ALL_PAGES (~0UL)
41452 /* times are in msecs */
41453 #define IS_UNDEFINED (0)
41454 #define IS_WC (1<<1)
41455@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
41456 * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool
41457 * @use_static: Safe to use static buffer
41458 **/
41459-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41460+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
41461 bool use_static)
41462 {
41463 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41464@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41465 struct dma_page *dma_p, *tmp;
41466 struct page **pages_to_free;
41467 struct list_head d_pages;
41468- unsigned freed_pages = 0,
41469- npages_to_free = nr_free;
41470+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41471
41472 if (NUM_PAGES_TO_ALLOC < nr_free)
41473 npages_to_free = NUM_PAGES_TO_ALLOC;
41474@@ -499,7 +498,8 @@ restart:
41475 /* remove range of pages from the pool */
41476 if (freed_pages) {
41477 ttm_pool_update_free_locked(pool, freed_pages);
41478- nr_free -= freed_pages;
41479+ if (likely(nr_free != FREE_ALL_PAGES))
41480+ nr_free -= freed_pages;
41481 }
41482
41483 spin_unlock_irqrestore(&pool->lock, irq_flags);
41484@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
41485 struct dma_page *d_page, *next;
41486 enum pool_type type;
41487 bool is_cached = false;
41488- unsigned count = 0, i, npages = 0;
41489+ unsigned long count = 0, i, npages = 0;
41490 unsigned long irq_flags;
41491
41492 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
41493@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41494 static unsigned start_pool;
41495 unsigned idx = 0;
41496 unsigned pool_offset;
41497- unsigned shrink_pages = sc->nr_to_scan;
41498+ unsigned long shrink_pages = sc->nr_to_scan;
41499 struct device_pools *p;
41500 unsigned long freed = 0;
41501
41502@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41503 goto out;
41504 pool_offset = ++start_pool % _manager->npools;
41505 list_for_each_entry(p, &_manager->pools, pools) {
41506- unsigned nr_free;
41507+ unsigned long nr_free;
41508
41509 if (!p->dev)
41510 continue;
41511@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41512 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
41513 freed += nr_free - shrink_pages;
41514
41515- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
41516+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
41517 p->pool->dev_name, p->pool->name, current->pid,
41518 nr_free, shrink_pages);
41519 }
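
The DMA pool sees the same widening, plus the matching pr_debug fix: once nr_free and shrink_pages are unsigned long, printing them with %d is undefined behaviour on 64-bit, so the specifiers move to %lu. For example:

    #include <stdio.h>

    int main(void)
    {
            unsigned long nr_free = ~0UL / 2;       /* far larger than INT_MAX on 64-bit */
            printf("Asked to shrink %lu\n", nr_free);  /* %d here would be wrong */
            return 0;
    }
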
41520diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
41521index 8cbcb45..a4d9cf7 100644
41522--- a/drivers/gpu/drm/udl/udl_fb.c
41523+++ b/drivers/gpu/drm/udl/udl_fb.c
41524@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
41525 fb_deferred_io_cleanup(info);
41526 kfree(info->fbdefio);
41527 info->fbdefio = NULL;
41528- info->fbops->fb_mmap = udl_fb_mmap;
41529 }
41530
41531 pr_warn("released /dev/fb%d user=%d count=%d\n",
41532diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
41533index ef8c500..01030c8 100644
41534--- a/drivers/gpu/drm/via/via_drv.h
41535+++ b/drivers/gpu/drm/via/via_drv.h
41536@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
41537 typedef uint32_t maskarray_t[5];
41538
41539 typedef struct drm_via_irq {
41540- atomic_t irq_received;
41541+ atomic_unchecked_t irq_received;
41542 uint32_t pending_mask;
41543 uint32_t enable_mask;
41544 wait_queue_head_t irq_queue;
41545@@ -77,7 +77,7 @@ typedef struct drm_via_private {
41546 struct timeval last_vblank;
41547 int last_vblank_valid;
41548 unsigned usec_per_vblank;
41549- atomic_t vbl_received;
41550+ atomic_unchecked_t vbl_received;
41551 drm_via_state_t hc_state;
41552 char pci_buf[VIA_PCI_BUF_SIZE];
41553 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
41554diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
41555index 1319433..a993b0c 100644
41556--- a/drivers/gpu/drm/via/via_irq.c
41557+++ b/drivers/gpu/drm/via/via_irq.c
41558@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
41559 if (crtc != 0)
41560 return 0;
41561
41562- return atomic_read(&dev_priv->vbl_received);
41563+ return atomic_read_unchecked(&dev_priv->vbl_received);
41564 }
41565
41566 irqreturn_t via_driver_irq_handler(int irq, void *arg)
41567@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41568
41569 status = VIA_READ(VIA_REG_INTERRUPT);
41570 if (status & VIA_IRQ_VBLANK_PENDING) {
41571- atomic_inc(&dev_priv->vbl_received);
41572- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
41573+ atomic_inc_unchecked(&dev_priv->vbl_received);
41574+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
41575 do_gettimeofday(&cur_vblank);
41576 if (dev_priv->last_vblank_valid) {
41577 dev_priv->usec_per_vblank =
41578@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41579 dev_priv->last_vblank = cur_vblank;
41580 dev_priv->last_vblank_valid = 1;
41581 }
41582- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
41583+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
41584 DRM_DEBUG("US per vblank is: %u\n",
41585 dev_priv->usec_per_vblank);
41586 }
41587@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41588
41589 for (i = 0; i < dev_priv->num_irqs; ++i) {
41590 if (status & cur_irq->pending_mask) {
41591- atomic_inc(&cur_irq->irq_received);
41592+ atomic_inc_unchecked(&cur_irq->irq_received);
41593 wake_up(&cur_irq->irq_queue);
41594 handled = 1;
41595 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
41596@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
41597 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41598 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
41599 masks[irq][4]));
41600- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
41601+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
41602 } else {
41603 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41604 (((cur_irq_sequence =
41605- atomic_read(&cur_irq->irq_received)) -
41606+ atomic_read_unchecked(&cur_irq->irq_received)) -
41607 *sequence) <= (1 << 23)));
41608 }
41609 *sequence = cur_irq_sequence;
41610@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
41611 }
41612
41613 for (i = 0; i < dev_priv->num_irqs; ++i) {
41614- atomic_set(&cur_irq->irq_received, 0);
41615+ atomic_set_unchecked(&cur_irq->irq_received, 0);
41616 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
41617 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
41618 init_waitqueue_head(&cur_irq->irq_queue);
41619@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
41620 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
41621 case VIA_IRQ_RELATIVE:
41622 irqwait->request.sequence +=
41623- atomic_read(&cur_irq->irq_received);
41624+ atomic_read_unchecked(&cur_irq->irq_received);
41625 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
41626 case VIA_IRQ_ABSOLUTE:
41627 break;
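
The via conversions are the PAX_REFCOUNT pattern seen throughout this patch: with that feature enabled, plain atomic_t arithmetic traps on overflow to stop reference-count wraps, so counters that are meant to wrap (vblank and IRQ statistics here) move to the atomic_unchecked_t API, which keeps the pre-instrumentation behaviour. An illustrative stand-in, with hypothetical names:

    #include <stdatomic.h>

    /* Rough model of atomic_unchecked_t: a counter exempted from
     * overflow trapping because wrapping is part of its contract. */
    typedef struct { atomic_ulong counter; } wrap_ok_t;

    static inline unsigned long wrap_ok_inc_return(wrap_ok_t *c)
    {
            return atomic_fetch_add(&c->counter, 1UL) + 1UL;    /* may wrap */
    }
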
41628diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41629index d26a6da..5fa41ed 100644
41630--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41631+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41632@@ -447,7 +447,7 @@ struct vmw_private {
41633 * Fencing and IRQs.
41634 */
41635
41636- atomic_t marker_seq;
41637+ atomic_unchecked_t marker_seq;
41638 wait_queue_head_t fence_queue;
41639 wait_queue_head_t fifo_queue;
41640 spinlock_t waiter_lock;
41641diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41642index 39f2b03..d1b0a64 100644
41643--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41644+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41645@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
41646 (unsigned int) min,
41647 (unsigned int) fifo->capabilities);
41648
41649- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41650+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41651 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
41652 vmw_marker_queue_init(&fifo->marker_queue);
41653 return vmw_fifo_send_fence(dev_priv, &dummy);
41654@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
41655 if (reserveable)
41656 iowrite32(bytes, fifo_mem +
41657 SVGA_FIFO_RESERVED);
41658- return fifo_mem + (next_cmd >> 2);
41659+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
41660 } else {
41661 need_bounce = true;
41662 }
41663@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41664
41665 fm = vmw_fifo_reserve(dev_priv, bytes);
41666 if (unlikely(fm == NULL)) {
41667- *seqno = atomic_read(&dev_priv->marker_seq);
41668+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41669 ret = -ENOMEM;
41670 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
41671 false, 3*HZ);
41672@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41673 }
41674
41675 do {
41676- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
41677+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
41678 } while (*seqno == 0);
41679
41680 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
41681diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41682index 170b61b..fec7348 100644
41683--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41684+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41685@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
41686 }
41687
41688 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
41689- vmw_gmrid_man_init,
41690- vmw_gmrid_man_takedown,
41691- vmw_gmrid_man_get_node,
41692- vmw_gmrid_man_put_node,
41693- vmw_gmrid_man_debug
41694+ .init = vmw_gmrid_man_init,
41695+ .takedown = vmw_gmrid_man_takedown,
41696+ .get_node = vmw_gmrid_man_get_node,
41697+ .put_node = vmw_gmrid_man_put_node,
41698+ .debug = vmw_gmrid_man_debug
41699 };
41700diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41701index 69c8ce2..cacb0ab 100644
41702--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41703+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41704@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
41705 int ret;
41706
41707 num_clips = arg->num_clips;
41708- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41709+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41710
41711 if (unlikely(num_clips == 0))
41712 return 0;
41713@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
41714 int ret;
41715
41716 num_clips = arg->num_clips;
41717- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41718+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41719
41720 if (unlikely(num_clips == 0))
41721 return 0;
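
The clips_ptr casts restore sparse's address-space annotation: arg->clips_ptr is a user-supplied pointer smuggled through an integer, and casting it to a plain kernel pointer hides that from static checking. With __user in the cast, tools such as sparse reject direct dereferences and force the value through copy_from_user(). The annotation is roughly:

    /* How the kernel defines it when sparse (__CHECKER__) runs;
     * otherwise it compiles away. */
    #ifdef __CHECKER__
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user
    #endif
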
41722diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41723index 9fe9827..0aa2fc0 100644
41724--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41725+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41726@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
41727 * emitted. Then the fence is stale and signaled.
41728 */
41729
41730- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
41731+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
41732 > VMW_FENCE_WRAP);
41733
41734 return ret;
41735@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
41736
41737 if (fifo_idle)
41738 down_read(&fifo_state->rwsem);
41739- signal_seq = atomic_read(&dev_priv->marker_seq);
41740+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
41741 ret = 0;
41742
41743 for (;;) {
41744diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41745index efd1ffd..0ae13ca 100644
41746--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41747+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41748@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
41749 while (!vmw_lag_lt(queue, us)) {
41750 spin_lock(&queue->lock);
41751 if (list_empty(&queue->head))
41752- seqno = atomic_read(&dev_priv->marker_seq);
41753+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41754 else {
41755 marker = list_first_entry(&queue->head,
41756 struct vmw_marker, head);
41757diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
41758index 37ac7b5..d52a5c9 100644
41759--- a/drivers/gpu/vga/vga_switcheroo.c
41760+++ b/drivers/gpu/vga/vga_switcheroo.c
41761@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
41762
41763 /* this version is for the case where the power switch is separate
41764 from the device being powered down. */
41765-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
41766+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
41767 {
41768 /* copy over all the bus versions */
41769 if (dev->bus && dev->bus->pm) {
41770@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
41771 return ret;
41772 }
41773
41774-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
41775+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
41776 {
41777 /* copy over all the bus versions */
41778 if (dev->bus && dev->bus->pm) {
41779diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
41780index 8b63879..a5a5e72 100644
41781--- a/drivers/hid/hid-core.c
41782+++ b/drivers/hid/hid-core.c
41783@@ -2508,7 +2508,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
41784
41785 int hid_add_device(struct hid_device *hdev)
41786 {
41787- static atomic_t id = ATOMIC_INIT(0);
41788+ static atomic_unchecked_t id = ATOMIC_INIT(0);
41789 int ret;
41790
41791 if (WARN_ON(hdev->status & HID_STAT_ADDED))
41792@@ -2551,7 +2551,7 @@ int hid_add_device(struct hid_device *hdev)
41793 /* XXX hack, any other cleaner solution after the driver core
41794 * is converted to allow more than 20 bytes as the device name? */
41795 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
41796- hdev->vendor, hdev->product, atomic_inc_return(&id));
41797+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
41798
41799 hid_debug_register(hdev, dev_name(&hdev->dev));
41800 ret = device_add(&hdev->dev);
41801diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
41802index 5bc6d80..e47b55a 100644
41803--- a/drivers/hid/hid-logitech-dj.c
41804+++ b/drivers/hid/hid-logitech-dj.c
41805@@ -853,6 +853,12 @@ static int logi_dj_dj_event(struct hid_device *hdev,
41806 * case we forward it to the correct hid device (via hid_input_report()
41807 * ) and return 1 so hid-core does not do anything else with it.
41808 */
41809+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41810+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
41811+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
41812+ __func__, dj_report->device_index);
41813+ return false;
41814+ }
41815
41816 if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41817 (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
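
The added block bounds-checks device_index, which arrives straight from the receiver's HID report and is later used to pick a paired-device slot; rejecting out-of-range values before any lookup prevents an out-of-bounds access. (The guard it adds mirrors the check already present in the context just below it.) The shape of the check, with index limits assumed from the driver:

    #include <stdbool.h>

    enum { DJ_DEVICE_INDEX_MIN = 1, DJ_DEVICE_INDEX_MAX = 6 }; /* assumed values */

    static bool dj_index_valid(unsigned int device_index)
    {
            return device_index >= DJ_DEVICE_INDEX_MIN &&
                   device_index <= DJ_DEVICE_INDEX_MAX;
    }
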
41818diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
41819index c13fb5b..55a3802 100644
41820--- a/drivers/hid/hid-wiimote-debug.c
41821+++ b/drivers/hid/hid-wiimote-debug.c
41822@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
41823 else if (size == 0)
41824 return -EIO;
41825
41826- if (copy_to_user(u, buf, size))
41827+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
41828 return -EFAULT;
41829
41830 *off += size;
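
The wiimote debugfs fix caps the copy at the size of the on-stack buffer: the preceding read fills buf with at most sizeof(buf) bytes, and without the clamp a larger size would copy adjacent kernel stack memory out to userspace. A minimal model of the bound:

    #include <string.h>

    /* copy at most what the buffer holds; -1 stands in for -EFAULT */
    static long bounded_copy(char *dst, const char buf[16], size_t size)
    {
            if (size > 16)
                    return -1;
            memcpy(dst, buf, size); /* stands in for copy_to_user() */
            return (long)size;
    }
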
41831diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
41832index 433f72a..2926005 100644
41833--- a/drivers/hv/channel.c
41834+++ b/drivers/hv/channel.c
41835@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
41836 unsigned long flags;
41837 int ret = 0;
41838
41839- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
41840- atomic_inc(&vmbus_connection.next_gpadl_handle);
41841+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
41842+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
41843
41844 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
41845 if (ret)
41846diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
41847index 3e4235c..877d0e5 100644
41848--- a/drivers/hv/hv.c
41849+++ b/drivers/hv/hv.c
41850@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
41851 u64 output_address = (output) ? virt_to_phys(output) : 0;
41852 u32 output_address_hi = output_address >> 32;
41853 u32 output_address_lo = output_address & 0xFFFFFFFF;
41854- void *hypercall_page = hv_context.hypercall_page;
41855+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
41856
41857 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
41858 "=a"(hv_status_lo) : "d" (control_hi),
41859@@ -156,7 +156,7 @@ int hv_init(void)
41860 /* See if the hypercall page is already set */
41861 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
41862
41863- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
41864+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
41865
41866 if (!virtaddr)
41867 goto cleanup;
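
Two hardening tweaks to the Hyper-V hypercall page: the mapping drops from PAGE_KERNEL_EXEC to PAGE_KERNEL_RX so the executable page is never left writable (W^X), and ktva_ktla() translates the kernel text alias used under KERNEXEC. A userspace analogue of the protection change, as a sketch:

    #include <stddef.h>
    #include <string.h>
    #include <sys/mman.h>

    /* W^X in userspace terms: write first, then flip to read+execute. */
    void *make_exec(const void *code, size_t len)
    {
            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return NULL;
            memcpy(p, code, len);
            if (mprotect(p, 4096, PROT_READ | PROT_EXEC) != 0) {
                    munmap(p, 4096);
                    return NULL;
            }
            return p;
    }
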
41868diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
41869index b958ded..b2452bb 100644
41870--- a/drivers/hv/hv_balloon.c
41871+++ b/drivers/hv/hv_balloon.c
41872@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
41873
41874 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
41875 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
41876-static atomic_t trans_id = ATOMIC_INIT(0);
41877+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
41878
41879 static int dm_ring_size = (5 * PAGE_SIZE);
41880
41881@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
41882 pr_info("Memory hot add failed\n");
41883
41884 dm->state = DM_INITIALIZED;
41885- resp.hdr.trans_id = atomic_inc_return(&trans_id);
41886+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41887 vmbus_sendpacket(dm->dev->channel, &resp,
41888 sizeof(struct dm_hot_add_response),
41889 (unsigned long)NULL,
41890@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
41891 memset(&status, 0, sizeof(struct dm_status));
41892 status.hdr.type = DM_STATUS_REPORT;
41893 status.hdr.size = sizeof(struct dm_status);
41894- status.hdr.trans_id = atomic_inc_return(&trans_id);
41895+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41896
41897 /*
41898 * The host expects the guest to report free memory.
41899@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
41900 * send the status. This can happen if we were interrupted
41901 * after we picked our transaction ID.
41902 */
41903- if (status.hdr.trans_id != atomic_read(&trans_id))
41904+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
41905 return;
41906
41907 /*
41908@@ -1133,7 +1133,7 @@ static void balloon_up(struct work_struct *dummy)
41909 */
41910
41911 do {
41912- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
41913+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41914 ret = vmbus_sendpacket(dm_device.dev->channel,
41915 bl_resp,
41916 bl_resp->hdr.size,
41917@@ -1179,7 +1179,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
41918
41919 memset(&resp, 0, sizeof(struct dm_unballoon_response));
41920 resp.hdr.type = DM_UNBALLOON_RESPONSE;
41921- resp.hdr.trans_id = atomic_inc_return(&trans_id);
41922+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41923 resp.hdr.size = sizeof(struct dm_unballoon_response);
41924
41925 vmbus_sendpacket(dm_device.dev->channel, &resp,
41926@@ -1243,7 +1243,7 @@ static void version_resp(struct hv_dynmem_device *dm,
41927 memset(&version_req, 0, sizeof(struct dm_version_request));
41928 version_req.hdr.type = DM_VERSION_REQUEST;
41929 version_req.hdr.size = sizeof(struct dm_version_request);
41930- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
41931+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41932 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
41933 version_req.is_last_attempt = 1;
41934
41935@@ -1413,7 +1413,7 @@ static int balloon_probe(struct hv_device *dev,
41936 memset(&version_req, 0, sizeof(struct dm_version_request));
41937 version_req.hdr.type = DM_VERSION_REQUEST;
41938 version_req.hdr.size = sizeof(struct dm_version_request);
41939- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
41940+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41941 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
41942 version_req.is_last_attempt = 0;
41943
41944@@ -1444,7 +1444,7 @@ static int balloon_probe(struct hv_device *dev,
41945 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
41946 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
41947 cap_msg.hdr.size = sizeof(struct dm_capabilities);
41948- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
41949+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41950
41951 cap_msg.caps.cap_bits.balloon = 1;
41952 cap_msg.caps.cap_bits.hot_add = 1;
41953diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
41954index c386d8d..d6004c4 100644
41955--- a/drivers/hv/hyperv_vmbus.h
41956+++ b/drivers/hv/hyperv_vmbus.h
41957@@ -611,7 +611,7 @@ enum vmbus_connect_state {
41958 struct vmbus_connection {
41959 enum vmbus_connect_state conn_state;
41960
41961- atomic_t next_gpadl_handle;
41962+ atomic_unchecked_t next_gpadl_handle;
41963
41964 /*
41965 * Represents channel interrupts. Each bit position represents a
41966diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
41967index 4d6b269..2e23b86 100644
41968--- a/drivers/hv/vmbus_drv.c
41969+++ b/drivers/hv/vmbus_drv.c
41970@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
41971 {
41972 int ret = 0;
41973
41974- static atomic_t device_num = ATOMIC_INIT(0);
41975+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
41976
41977 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
41978- atomic_inc_return(&device_num));
41979+ atomic_inc_return_unchecked(&device_num));
41980
41981 child_device_obj->device.bus = &hv_bus;
41982 child_device_obj->device.parent = &hv_acpi_dev->dev;
41983diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
41984index 579bdf9..75118b5 100644
41985--- a/drivers/hwmon/acpi_power_meter.c
41986+++ b/drivers/hwmon/acpi_power_meter.c
41987@@ -116,7 +116,7 @@ struct sensor_template {
41988 struct device_attribute *devattr,
41989 const char *buf, size_t count);
41990 int index;
41991-};
41992+} __do_const;
41993
41994 /* Averaging interval */
41995 static int update_avg_interval(struct acpi_power_meter_resource *resource)
41996@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
41997 struct sensor_template *attrs)
41998 {
41999 struct device *dev = &resource->acpi_dev->dev;
42000- struct sensor_device_attribute *sensors =
42001+ sensor_device_attribute_no_const *sensors =
42002 &resource->sensors[resource->num_sensors];
42003 int res = 0;
42004
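
The hwmon conversions that follow all come from the constify plugin: structures that mostly hold function pointers (attribute templates here) are made read-only wherever possible, __do_const forces that treatment on a struct the plugin would not pick automatically, and the *_no_const typedefs keep a writable alias for the few instances that must be built at runtime. Schematically (the macro bodies below are placeholders; under grsecurity they expand to plugin attributes):

    #define __do_const      /* plugin attribute: always treat this type as const */
    #define __no_const      /* plugin attribute: writable exception to the rule  */

    struct sensor_template {
            const char *label;
            int (*show)(void);
    } __do_const;

    typedef struct sensor_template __no_const sensor_template_no_const;
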
42005diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42006index 0af63da..05a183a 100644
42007--- a/drivers/hwmon/applesmc.c
42008+++ b/drivers/hwmon/applesmc.c
42009@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42010 {
42011 struct applesmc_node_group *grp;
42012 struct applesmc_dev_attr *node;
42013- struct attribute *attr;
42014+ attribute_no_const *attr;
42015 int ret, i;
42016
42017 for (grp = groups; grp->format; grp++) {
42018diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42019index cccef87..06ce8ec 100644
42020--- a/drivers/hwmon/asus_atk0110.c
42021+++ b/drivers/hwmon/asus_atk0110.c
42022@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42023 struct atk_sensor_data {
42024 struct list_head list;
42025 struct atk_data *data;
42026- struct device_attribute label_attr;
42027- struct device_attribute input_attr;
42028- struct device_attribute limit1_attr;
42029- struct device_attribute limit2_attr;
42030+ device_attribute_no_const label_attr;
42031+ device_attribute_no_const input_attr;
42032+ device_attribute_no_const limit1_attr;
42033+ device_attribute_no_const limit2_attr;
42034 char label_attr_name[ATTR_NAME_SIZE];
42035 char input_attr_name[ATTR_NAME_SIZE];
42036 char limit1_attr_name[ATTR_NAME_SIZE];
42037@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42038 static struct device_attribute atk_name_attr =
42039 __ATTR(name, 0444, atk_name_show, NULL);
42040
42041-static void atk_init_attribute(struct device_attribute *attr, char *name,
42042+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42043 sysfs_show_func show)
42044 {
42045 sysfs_attr_init(&attr->attr);
42046diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42047index 5b7fec8..05c957a 100644
42048--- a/drivers/hwmon/coretemp.c
42049+++ b/drivers/hwmon/coretemp.c
42050@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42051 return NOTIFY_OK;
42052 }
42053
42054-static struct notifier_block coretemp_cpu_notifier __refdata = {
42055+static struct notifier_block coretemp_cpu_notifier = {
42056 .notifier_call = coretemp_cpu_callback,
42057 };
42058
42059diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42060index 7a8a6fb..015c1fd 100644
42061--- a/drivers/hwmon/ibmaem.c
42062+++ b/drivers/hwmon/ibmaem.c
42063@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42064 struct aem_rw_sensor_template *rw)
42065 {
42066 struct device *dev = &data->pdev->dev;
42067- struct sensor_device_attribute *sensors = data->sensors;
42068+ sensor_device_attribute_no_const *sensors = data->sensors;
42069 int err;
42070
42071 /* Set up read-only sensors */
42072diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42073index 17ae2eb..21b71dd 100644
42074--- a/drivers/hwmon/iio_hwmon.c
42075+++ b/drivers/hwmon/iio_hwmon.c
42076@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42077 {
42078 struct device *dev = &pdev->dev;
42079 struct iio_hwmon_state *st;
42080- struct sensor_device_attribute *a;
42081+ sensor_device_attribute_no_const *a;
42082 int ret, i;
42083 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42084 enum iio_chan_type type;
42085diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42086index f3830db..9f4d6d5 100644
42087--- a/drivers/hwmon/nct6683.c
42088+++ b/drivers/hwmon/nct6683.c
42089@@ -397,11 +397,11 @@ static struct attribute_group *
42090 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42091 int repeat)
42092 {
42093- struct sensor_device_attribute_2 *a2;
42094- struct sensor_device_attribute *a;
42095+ sensor_device_attribute_2_no_const *a2;
42096+ sensor_device_attribute_no_const *a;
42097 struct sensor_device_template **t;
42098 struct sensor_device_attr_u *su;
42099- struct attribute_group *group;
42100+ attribute_group_no_const *group;
42101 struct attribute **attrs;
42102 int i, j, count;
42103
42104diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42105index 1be4117..88ae1e1 100644
42106--- a/drivers/hwmon/nct6775.c
42107+++ b/drivers/hwmon/nct6775.c
42108@@ -952,10 +952,10 @@ static struct attribute_group *
42109 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42110 int repeat)
42111 {
42112- struct attribute_group *group;
42113+ attribute_group_no_const *group;
42114 struct sensor_device_attr_u *su;
42115- struct sensor_device_attribute *a;
42116- struct sensor_device_attribute_2 *a2;
42117+ sensor_device_attribute_no_const *a;
42118+ sensor_device_attribute_2_no_const *a2;
42119 struct attribute **attrs;
42120 struct sensor_device_template **t;
42121 int i, count;
42122diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42123index f2e47c7..45d7941 100644
42124--- a/drivers/hwmon/pmbus/pmbus_core.c
42125+++ b/drivers/hwmon/pmbus/pmbus_core.c
42126@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42127 return 0;
42128 }
42129
42130-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42131+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42132 const char *name,
42133 umode_t mode,
42134 ssize_t (*show)(struct device *dev,
42135@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42136 dev_attr->store = store;
42137 }
42138
42139-static void pmbus_attr_init(struct sensor_device_attribute *a,
42140+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42141 const char *name,
42142 umode_t mode,
42143 ssize_t (*show)(struct device *dev,
42144@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42145 u16 reg, u8 mask)
42146 {
42147 struct pmbus_boolean *boolean;
42148- struct sensor_device_attribute *a;
42149+ sensor_device_attribute_no_const *a;
42150
42151 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42152 if (!boolean)
42153@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42154 bool update, bool readonly)
42155 {
42156 struct pmbus_sensor *sensor;
42157- struct device_attribute *a;
42158+ device_attribute_no_const *a;
42159
42160 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42161 if (!sensor)
42162@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42163 const char *lstring, int index)
42164 {
42165 struct pmbus_label *label;
42166- struct device_attribute *a;
42167+ device_attribute_no_const *a;
42168
42169 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42170 if (!label)
42171diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42172index d4f0935..7420593 100644
42173--- a/drivers/hwmon/sht15.c
42174+++ b/drivers/hwmon/sht15.c
42175@@ -169,7 +169,7 @@ struct sht15_data {
42176 int supply_uv;
42177 bool supply_uv_valid;
42178 struct work_struct update_supply_work;
42179- atomic_t interrupt_handled;
42180+ atomic_unchecked_t interrupt_handled;
42181 };
42182
42183 /**
42184@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42185 ret = gpio_direction_input(data->pdata->gpio_data);
42186 if (ret)
42187 return ret;
42188- atomic_set(&data->interrupt_handled, 0);
42189+ atomic_set_unchecked(&data->interrupt_handled, 0);
42190
42191 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42192 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42193 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42194 /* Only relevant if the interrupt hasn't occurred. */
42195- if (!atomic_read(&data->interrupt_handled))
42196+ if (!atomic_read_unchecked(&data->interrupt_handled))
42197 schedule_work(&data->read_work);
42198 }
42199 ret = wait_event_timeout(data->wait_queue,
42200@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42201
42202 /* First disable the interrupt */
42203 disable_irq_nosync(irq);
42204- atomic_inc(&data->interrupt_handled);
42205+ atomic_inc_unchecked(&data->interrupt_handled);
42206 /* Then schedule a reading work struct */
42207 if (data->state != SHT15_READING_NOTHING)
42208 schedule_work(&data->read_work);
42209@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42210 * If not, then start the interrupt again - care here as could
42211 * have gone low in meantime so verify it hasn't!
42212 */
42213- atomic_set(&data->interrupt_handled, 0);
42214+ atomic_set_unchecked(&data->interrupt_handled, 0);
42215 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42216 /* If still not occurred or another handler was scheduled */
42217 if (gpio_get_value(data->pdata->gpio_data)
42218- || atomic_read(&data->interrupt_handled))
42219+ || atomic_read_unchecked(&data->interrupt_handled))
42220 return;
42221 }
42222
42223diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42224index ac91c07..8e69663 100644
42225--- a/drivers/hwmon/via-cputemp.c
42226+++ b/drivers/hwmon/via-cputemp.c
42227@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42228 return NOTIFY_OK;
42229 }
42230
42231-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42232+static struct notifier_block via_cputemp_cpu_notifier = {
42233 .notifier_call = via_cputemp_cpu_callback,
42234 };
42235
42236diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42237index 65e3240..e6c511d 100644
42238--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42239+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42240@@ -39,7 +39,7 @@
42241 extern struct i2c_adapter amd756_smbus;
42242
42243 static struct i2c_adapter *s4882_adapter;
42244-static struct i2c_algorithm *s4882_algo;
42245+static i2c_algorithm_no_const *s4882_algo;
42246
42247 /* Wrapper access functions for multiplexed SMBus */
42248 static DEFINE_MUTEX(amd756_lock);
42249diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42250index b19a310..d6eece0 100644
42251--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42252+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42253@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42254 /* usb layer */
42255
42256 /* Send command to device, and get response. */
42257-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42258+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42259 {
42260 int ret = 0;
42261 int actual;
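
__intentional_overflow(-1) is an annotation consumed by grsecurity's size_overflow gcc plugin: instrumented arithmetic normally traps when it wraps, and the attribute whitelists functions (here the USB transfer return path) whose computations may legitimately overflow. When the plugin is not built in, the macro is empty; a sketch of that shape:

    /* no-op fallback, as in kernels built without the plugin */
    #ifndef __intentional_overflow
    # define __intentional_overflow(...)
    #endif

    static int __intentional_overflow(-1) usb_transfer_result(int ret)
    {
            return ret;     /* body elided; the annotation is the point */
    }
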
42262diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42263index 88eda09..cf40434 100644
42264--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42265+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42266@@ -37,7 +37,7 @@
42267 extern struct i2c_adapter *nforce2_smbus;
42268
42269 static struct i2c_adapter *s4985_adapter;
42270-static struct i2c_algorithm *s4985_algo;
42271+static i2c_algorithm_no_const *s4985_algo;
42272
42273 /* Wrapper access functions for multiplexed SMBus */
42274 static DEFINE_MUTEX(nforce2_lock);
42275diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42276index 71c7a39..71dd3e0 100644
42277--- a/drivers/i2c/i2c-dev.c
42278+++ b/drivers/i2c/i2c-dev.c
42279@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42280 break;
42281 }
42282
42283- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
42284+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
42285 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
42286 if (IS_ERR(rdwr_pa[i].buf)) {
42287 res = PTR_ERR(rdwr_pa[i].buf);
42288diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
42289index 0b510ba..4fbb5085 100644
42290--- a/drivers/ide/ide-cd.c
42291+++ b/drivers/ide/ide-cd.c
42292@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
42293 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
42294 if ((unsigned long)buf & alignment
42295 || blk_rq_bytes(rq) & q->dma_pad_mask
42296- || object_is_on_stack(buf))
42297+ || object_starts_on_stack(buf))
42298 drive->dma = 0;
42299 }
42300 }
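
The ide-cd change renames the predicate rather than the logic: object_is_on_stack() becomes grsecurity's object_starts_on_stack(), making explicit that only the buffer's start address is tested against the task stack before DMA is disabled for it. In outline, under assumed caller-supplied bounds:

    #include <stdbool.h>
    #include <stdint.h>

    static bool object_starts_on_stack(const void *obj,
                                       uintptr_t stack_base, uintptr_t stack_top)
    {
            uintptr_t addr = (uintptr_t)obj;
            return addr >= stack_base && addr < stack_top;
    }
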
42301diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
42302index af3e76d..96dfe5e 100644
42303--- a/drivers/iio/industrialio-core.c
42304+++ b/drivers/iio/industrialio-core.c
42305@@ -555,7 +555,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
42306 }
42307
42308 static
42309-int __iio_device_attr_init(struct device_attribute *dev_attr,
42310+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
42311 const char *postfix,
42312 struct iio_chan_spec const *chan,
42313 ssize_t (*readfunc)(struct device *dev,
42314diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
42315index e28a494..f7c2671 100644
42316--- a/drivers/infiniband/core/cm.c
42317+++ b/drivers/infiniband/core/cm.c
42318@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
42319
42320 struct cm_counter_group {
42321 struct kobject obj;
42322- atomic_long_t counter[CM_ATTR_COUNT];
42323+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
42324 };
42325
42326 struct cm_counter_attribute {
42327@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
42328 struct ib_mad_send_buf *msg = NULL;
42329 int ret;
42330
42331- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42332+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42333 counter[CM_REQ_COUNTER]);
42334
42335 /* Quick state check to discard duplicate REQs. */
42336@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
42337 if (!cm_id_priv)
42338 return;
42339
42340- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42341+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42342 counter[CM_REP_COUNTER]);
42343 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
42344 if (ret)
42345@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
42346 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
42347 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
42348 spin_unlock_irq(&cm_id_priv->lock);
42349- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42350+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42351 counter[CM_RTU_COUNTER]);
42352 goto out;
42353 }
42354@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
42355 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
42356 dreq_msg->local_comm_id);
42357 if (!cm_id_priv) {
42358- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42359+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42360 counter[CM_DREQ_COUNTER]);
42361 cm_issue_drep(work->port, work->mad_recv_wc);
42362 return -EINVAL;
42363@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
42364 case IB_CM_MRA_REP_RCVD:
42365 break;
42366 case IB_CM_TIMEWAIT:
42367- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42368+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42369 counter[CM_DREQ_COUNTER]);
42370 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42371 goto unlock;
42372@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
42373 cm_free_msg(msg);
42374 goto deref;
42375 case IB_CM_DREQ_RCVD:
42376- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42377+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42378 counter[CM_DREQ_COUNTER]);
42379 goto unlock;
42380 default:
42381@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
42382 ib_modify_mad(cm_id_priv->av.port->mad_agent,
42383 cm_id_priv->msg, timeout)) {
42384 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
42385- atomic_long_inc(&work->port->
42386+ atomic_long_inc_unchecked(&work->port->
42387 counter_group[CM_RECV_DUPLICATES].
42388 counter[CM_MRA_COUNTER]);
42389 goto out;
42390@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
42391 break;
42392 case IB_CM_MRA_REQ_RCVD:
42393 case IB_CM_MRA_REP_RCVD:
42394- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42395+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42396 counter[CM_MRA_COUNTER]);
42397 /* fall through */
42398 default:
42399@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
42400 case IB_CM_LAP_IDLE:
42401 break;
42402 case IB_CM_MRA_LAP_SENT:
42403- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42404+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42405 counter[CM_LAP_COUNTER]);
42406 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42407 goto unlock;
42408@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
42409 cm_free_msg(msg);
42410 goto deref;
42411 case IB_CM_LAP_RCVD:
42412- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42413+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42414 counter[CM_LAP_COUNTER]);
42415 goto unlock;
42416 default:
42417@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
42418 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
42419 if (cur_cm_id_priv) {
42420 spin_unlock_irq(&cm.lock);
42421- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42422+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42423 counter[CM_SIDR_REQ_COUNTER]);
42424 goto out; /* Duplicate message. */
42425 }
42426@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
42427 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
42428 msg->retries = 1;
42429
42430- atomic_long_add(1 + msg->retries,
42431+ atomic_long_add_unchecked(1 + msg->retries,
42432 &port->counter_group[CM_XMIT].counter[attr_index]);
42433 if (msg->retries)
42434- atomic_long_add(msg->retries,
42435+ atomic_long_add_unchecked(msg->retries,
42436 &port->counter_group[CM_XMIT_RETRIES].
42437 counter[attr_index]);
42438
42439@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
42440 }
42441
42442 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
42443- atomic_long_inc(&port->counter_group[CM_RECV].
42444+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
42445 counter[attr_id - CM_ATTR_ID_OFFSET]);
42446
42447 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
42448@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
42449 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
42450
42451 return sprintf(buf, "%ld\n",
42452- atomic_long_read(&group->counter[cm_attr->index]));
42453+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
42454 }
42455
42456 static const struct sysfs_ops cm_counter_ops = {
42457diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
42458index 9f5ad7c..588cd84 100644
42459--- a/drivers/infiniband/core/fmr_pool.c
42460+++ b/drivers/infiniband/core/fmr_pool.c
42461@@ -98,8 +98,8 @@ struct ib_fmr_pool {
42462
42463 struct task_struct *thread;
42464
42465- atomic_t req_ser;
42466- atomic_t flush_ser;
42467+ atomic_unchecked_t req_ser;
42468+ atomic_unchecked_t flush_ser;
42469
42470 wait_queue_head_t force_wait;
42471 };
42472@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42473 struct ib_fmr_pool *pool = pool_ptr;
42474
42475 do {
42476- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
42477+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
42478 ib_fmr_batch_release(pool);
42479
42480- atomic_inc(&pool->flush_ser);
42481+ atomic_inc_unchecked(&pool->flush_ser);
42482 wake_up_interruptible(&pool->force_wait);
42483
42484 if (pool->flush_function)
42485@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42486 }
42487
42488 set_current_state(TASK_INTERRUPTIBLE);
42489- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
42490+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
42491 !kthread_should_stop())
42492 schedule();
42493 __set_current_state(TASK_RUNNING);
42494@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
42495 pool->dirty_watermark = params->dirty_watermark;
42496 pool->dirty_len = 0;
42497 spin_lock_init(&pool->pool_lock);
42498- atomic_set(&pool->req_ser, 0);
42499- atomic_set(&pool->flush_ser, 0);
42500+ atomic_set_unchecked(&pool->req_ser, 0);
42501+ atomic_set_unchecked(&pool->flush_ser, 0);
42502 init_waitqueue_head(&pool->force_wait);
42503
42504 pool->thread = kthread_run(ib_fmr_cleanup_thread,
42505@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
42506 }
42507 spin_unlock_irq(&pool->pool_lock);
42508
42509- serial = atomic_inc_return(&pool->req_ser);
42510+ serial = atomic_inc_return_unchecked(&pool->req_ser);
42511 wake_up_process(pool->thread);
42512
42513 if (wait_event_interruptible(pool->force_wait,
42514- atomic_read(&pool->flush_ser) - serial >= 0))
42515+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
42516 return -EINTR;
42517
42518 return 0;
42519@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
42520 } else {
42521 list_add_tail(&fmr->list, &pool->dirty_list);
42522 if (++pool->dirty_len >= pool->dirty_watermark) {
42523- atomic_inc(&pool->req_ser);
42524+ atomic_inc_unchecked(&pool->req_ser);
42525 wake_up_process(pool->thread);
42526 }
42527 }
42528diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
42529index cb43c22..2e12dd7 100644
42530--- a/drivers/infiniband/hw/cxgb4/mem.c
42531+++ b/drivers/infiniband/hw/cxgb4/mem.c
42532@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42533 int err;
42534 struct fw_ri_tpte tpt;
42535 u32 stag_idx;
42536- static atomic_t key;
42537+ static atomic_unchecked_t key;
42538
42539 if (c4iw_fatal_error(rdev))
42540 return -EIO;
42541@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42542 if (rdev->stats.stag.cur > rdev->stats.stag.max)
42543 rdev->stats.stag.max = rdev->stats.stag.cur;
42544 mutex_unlock(&rdev->stats.lock);
42545- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
42546+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
42547 }
42548 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
42549 __func__, stag_state, type, pdid, stag_idx);
42550diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
42551index 79b3dbc..96e5fcc 100644
42552--- a/drivers/infiniband/hw/ipath/ipath_rc.c
42553+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
42554@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42555 struct ib_atomic_eth *ateth;
42556 struct ipath_ack_entry *e;
42557 u64 vaddr;
42558- atomic64_t *maddr;
42559+ atomic64_unchecked_t *maddr;
42560 u64 sdata;
42561 u32 rkey;
42562 u8 next;
42563@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42564 IB_ACCESS_REMOTE_ATOMIC)))
42565 goto nack_acc_unlck;
42566 /* Perform atomic OP and save result. */
42567- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42568+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42569 sdata = be64_to_cpu(ateth->swap_data);
42570 e = &qp->s_ack_queue[qp->r_head_ack_queue];
42571 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
42572- (u64) atomic64_add_return(sdata, maddr) - sdata :
42573+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42574 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42575 be64_to_cpu(ateth->compare_data),
42576 sdata);
42577diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
42578index 1f95bba..9530f87 100644
42579--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
42580+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
42581@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
42582 unsigned long flags;
42583 struct ib_wc wc;
42584 u64 sdata;
42585- atomic64_t *maddr;
42586+ atomic64_unchecked_t *maddr;
42587 enum ib_wc_status send_status;
42588
42589 /*
42590@@ -382,11 +382,11 @@ again:
42591 IB_ACCESS_REMOTE_ATOMIC)))
42592 goto acc_err;
42593 /* Perform atomic OP and save result. */
42594- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42595+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42596 sdata = wqe->wr.wr.atomic.compare_add;
42597 *(u64 *) sqp->s_sge.sge.vaddr =
42598 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
42599- (u64) atomic64_add_return(sdata, maddr) - sdata :
42600+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42601 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42602 sdata, wqe->wr.wr.atomic.swap);
42603 goto send_comp;
42604diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
42605index 82a7dd8..8fb6ba6 100644
42606--- a/drivers/infiniband/hw/mlx4/mad.c
42607+++ b/drivers/infiniband/hw/mlx4/mad.c
42608@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
42609
42610 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
42611 {
42612- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
42613+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
42614 cpu_to_be64(0xff00000000000000LL);
42615 }
42616
42617diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
42618index ed327e6..ca1739e0 100644
42619--- a/drivers/infiniband/hw/mlx4/mcg.c
42620+++ b/drivers/infiniband/hw/mlx4/mcg.c
42621@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
42622 {
42623 char name[20];
42624
42625- atomic_set(&ctx->tid, 0);
42626+ atomic_set_unchecked(&ctx->tid, 0);
42627 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
42628 ctx->mcg_wq = create_singlethread_workqueue(name);
42629 if (!ctx->mcg_wq)
42630diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42631index 6eb743f..a7b0f6d 100644
42632--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
42633+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42634@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
42635 struct list_head mcg_mgid0_list;
42636 struct workqueue_struct *mcg_wq;
42637 struct mlx4_ib_demux_pv_ctx **tun;
42638- atomic_t tid;
42639+ atomic_unchecked_t tid;
42640 int flushing; /* flushing the work queue */
42641 };
42642
42643diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
42644index 9d3e5c1..6f166df 100644
42645--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
42646+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
42647@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
42648 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
42649 }
42650
42651-int mthca_QUERY_FW(struct mthca_dev *dev)
42652+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
42653 {
42654 struct mthca_mailbox *mailbox;
42655 u32 *outbox;
42656@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42657 CMD_TIME_CLASS_B);
42658 }
42659
42660-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42661+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42662 int num_mtt)
42663 {
42664 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
42665@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
42666 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
42667 }
42668
42669-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42670+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42671 int eq_num)
42672 {
42673 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
42674@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
42675 CMD_TIME_CLASS_B);
42676 }
42677
42678-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42679+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42680 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
42681 void *in_mad, void *response_mad)
42682 {
42683diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
42684index ded76c1..0cf0a08 100644
42685--- a/drivers/infiniband/hw/mthca/mthca_main.c
42686+++ b/drivers/infiniband/hw/mthca/mthca_main.c
42687@@ -692,7 +692,7 @@ err_close:
42688 return err;
42689 }
42690
42691-static int mthca_setup_hca(struct mthca_dev *dev)
42692+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
42693 {
42694 int err;
42695
42696diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
42697index ed9a989..6aa5dc2 100644
42698--- a/drivers/infiniband/hw/mthca/mthca_mr.c
42699+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
42700@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
42701 * through the bitmaps)
42702 */
42703
42704-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42705+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42706 {
42707 int o;
42708 int m;
42709@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
42710 return key;
42711 }
42712
42713-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42714+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42715 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
42716 {
42717 struct mthca_mailbox *mailbox;
42718@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
42719 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
42720 }
42721
42722-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42723+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42724 u64 *buffer_list, int buffer_size_shift,
42725 int list_len, u64 iova, u64 total_size,
42726 u32 access, struct mthca_mr *mr)
42727diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
42728index 415f8e1..e34214e 100644
42729--- a/drivers/infiniband/hw/mthca/mthca_provider.c
42730+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
42731@@ -764,7 +764,7 @@ unlock:
42732 return 0;
42733 }
42734
42735-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42736+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42737 {
42738 struct mthca_dev *dev = to_mdev(ibcq->device);
42739 struct mthca_cq *cq = to_mcq(ibcq);
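The __intentional_overflow(-1) annotations added throughout the mthca command path are markers for the size_overflow gcc plugin: they exempt a function from the plugin's integer-overflow instrumentation where wrapping arithmetic is expected, with -1 denoting the return value. When the plugin is not in use the attribute has to compile away; a sketch of that fallback, assuming the usual compiler.h pattern (the SIZE_OVERFLOW_PLUGIN guard name here is illustrative, not quoted from the patch):

    /* Sketch only: assumed fallback definition when the size_overflow
     * plugin is disabled, so annotated prototypes still compile. */
    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif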
42740diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
42741index 3b2a6dc..bce26ff 100644
42742--- a/drivers/infiniband/hw/nes/nes.c
42743+++ b/drivers/infiniband/hw/nes/nes.c
42744@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
42745 LIST_HEAD(nes_adapter_list);
42746 static LIST_HEAD(nes_dev_list);
42747
42748-atomic_t qps_destroyed;
42749+atomic_unchecked_t qps_destroyed;
42750
42751 static unsigned int ee_flsh_adapter;
42752 static unsigned int sysfs_nonidx_addr;
42753@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
42754 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
42755 struct nes_adapter *nesadapter = nesdev->nesadapter;
42756
42757- atomic_inc(&qps_destroyed);
42758+ atomic_inc_unchecked(&qps_destroyed);
42759
42760 /* Free the control structures */
42761
42762diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
42763index bd9d132..70d84f4 100644
42764--- a/drivers/infiniband/hw/nes/nes.h
42765+++ b/drivers/infiniband/hw/nes/nes.h
42766@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
42767 extern unsigned int wqm_quanta;
42768 extern struct list_head nes_adapter_list;
42769
42770-extern atomic_t cm_connects;
42771-extern atomic_t cm_accepts;
42772-extern atomic_t cm_disconnects;
42773-extern atomic_t cm_closes;
42774-extern atomic_t cm_connecteds;
42775-extern atomic_t cm_connect_reqs;
42776-extern atomic_t cm_rejects;
42777-extern atomic_t mod_qp_timouts;
42778-extern atomic_t qps_created;
42779-extern atomic_t qps_destroyed;
42780-extern atomic_t sw_qps_destroyed;
42781+extern atomic_unchecked_t cm_connects;
42782+extern atomic_unchecked_t cm_accepts;
42783+extern atomic_unchecked_t cm_disconnects;
42784+extern atomic_unchecked_t cm_closes;
42785+extern atomic_unchecked_t cm_connecteds;
42786+extern atomic_unchecked_t cm_connect_reqs;
42787+extern atomic_unchecked_t cm_rejects;
42788+extern atomic_unchecked_t mod_qp_timouts;
42789+extern atomic_unchecked_t qps_created;
42790+extern atomic_unchecked_t qps_destroyed;
42791+extern atomic_unchecked_t sw_qps_destroyed;
42792 extern u32 mh_detected;
42793 extern u32 mh_pauses_sent;
42794 extern u32 cm_packets_sent;
42795@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
42796 extern u32 cm_packets_received;
42797 extern u32 cm_packets_dropped;
42798 extern u32 cm_packets_retrans;
42799-extern atomic_t cm_listens_created;
42800-extern atomic_t cm_listens_destroyed;
42801+extern atomic_unchecked_t cm_listens_created;
42802+extern atomic_unchecked_t cm_listens_destroyed;
42803 extern u32 cm_backlog_drops;
42804-extern atomic_t cm_loopbacks;
42805-extern atomic_t cm_nodes_created;
42806-extern atomic_t cm_nodes_destroyed;
42807-extern atomic_t cm_accel_dropped_pkts;
42808-extern atomic_t cm_resets_recvd;
42809-extern atomic_t pau_qps_created;
42810-extern atomic_t pau_qps_destroyed;
42811+extern atomic_unchecked_t cm_loopbacks;
42812+extern atomic_unchecked_t cm_nodes_created;
42813+extern atomic_unchecked_t cm_nodes_destroyed;
42814+extern atomic_unchecked_t cm_accel_dropped_pkts;
42815+extern atomic_unchecked_t cm_resets_recvd;
42816+extern atomic_unchecked_t pau_qps_created;
42817+extern atomic_unchecked_t pau_qps_destroyed;
42818
42819 extern u32 int_mod_timer_init;
42820 extern u32 int_mod_cq_depth_256;
42821diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
42822index 6f09a72..cf4399d 100644
42823--- a/drivers/infiniband/hw/nes/nes_cm.c
42824+++ b/drivers/infiniband/hw/nes/nes_cm.c
42825@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
42826 u32 cm_packets_retrans;
42827 u32 cm_packets_created;
42828 u32 cm_packets_received;
42829-atomic_t cm_listens_created;
42830-atomic_t cm_listens_destroyed;
42831+atomic_unchecked_t cm_listens_created;
42832+atomic_unchecked_t cm_listens_destroyed;
42833 u32 cm_backlog_drops;
42834-atomic_t cm_loopbacks;
42835-atomic_t cm_nodes_created;
42836-atomic_t cm_nodes_destroyed;
42837-atomic_t cm_accel_dropped_pkts;
42838-atomic_t cm_resets_recvd;
42839+atomic_unchecked_t cm_loopbacks;
42840+atomic_unchecked_t cm_nodes_created;
42841+atomic_unchecked_t cm_nodes_destroyed;
42842+atomic_unchecked_t cm_accel_dropped_pkts;
42843+atomic_unchecked_t cm_resets_recvd;
42844
42845 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
42846 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
42847@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
42848 /* instance of function pointers for client API */
42849 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
42850 static struct nes_cm_ops nes_cm_api = {
42851- mini_cm_accelerated,
42852- mini_cm_listen,
42853- mini_cm_del_listen,
42854- mini_cm_connect,
42855- mini_cm_close,
42856- mini_cm_accept,
42857- mini_cm_reject,
42858- mini_cm_recv_pkt,
42859- mini_cm_dealloc_core,
42860- mini_cm_get,
42861- mini_cm_set
42862+ .accelerated = mini_cm_accelerated,
42863+ .listen = mini_cm_listen,
42864+ .stop_listener = mini_cm_del_listen,
42865+ .connect = mini_cm_connect,
42866+ .close = mini_cm_close,
42867+ .accept = mini_cm_accept,
42868+ .reject = mini_cm_reject,
42869+ .recv_pkt = mini_cm_recv_pkt,
42870+ .destroy_cm_core = mini_cm_dealloc_core,
42871+ .get = mini_cm_get,
42872+ .set = mini_cm_set
42873 };
42874
42875 static struct nes_cm_core *g_cm_core;
42876
42877-atomic_t cm_connects;
42878-atomic_t cm_accepts;
42879-atomic_t cm_disconnects;
42880-atomic_t cm_closes;
42881-atomic_t cm_connecteds;
42882-atomic_t cm_connect_reqs;
42883-atomic_t cm_rejects;
42884+atomic_unchecked_t cm_connects;
42885+atomic_unchecked_t cm_accepts;
42886+atomic_unchecked_t cm_disconnects;
42887+atomic_unchecked_t cm_closes;
42888+atomic_unchecked_t cm_connecteds;
42889+atomic_unchecked_t cm_connect_reqs;
42890+atomic_unchecked_t cm_rejects;
42891
42892 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
42893 {
42894@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
42895 kfree(listener);
42896 listener = NULL;
42897 ret = 0;
42898- atomic_inc(&cm_listens_destroyed);
42899+ atomic_inc_unchecked(&cm_listens_destroyed);
42900 } else {
42901 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
42902 }
42903@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
42904 cm_node->rem_mac);
42905
42906 add_hte_node(cm_core, cm_node);
42907- atomic_inc(&cm_nodes_created);
42908+ atomic_inc_unchecked(&cm_nodes_created);
42909
42910 return cm_node;
42911 }
42912@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
42913 }
42914
42915 atomic_dec(&cm_core->node_cnt);
42916- atomic_inc(&cm_nodes_destroyed);
42917+ atomic_inc_unchecked(&cm_nodes_destroyed);
42918 nesqp = cm_node->nesqp;
42919 if (nesqp) {
42920 nesqp->cm_node = NULL;
42921@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
42922
42923 static void drop_packet(struct sk_buff *skb)
42924 {
42925- atomic_inc(&cm_accel_dropped_pkts);
42926+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
42927 dev_kfree_skb_any(skb);
42928 }
42929
42930@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
42931 {
42932
42933 int reset = 0; /* whether to send reset in case of err.. */
42934- atomic_inc(&cm_resets_recvd);
42935+ atomic_inc_unchecked(&cm_resets_recvd);
42936 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
42937 " refcnt=%d\n", cm_node, cm_node->state,
42938 atomic_read(&cm_node->ref_count));
42939@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
42940 rem_ref_cm_node(cm_node->cm_core, cm_node);
42941 return NULL;
42942 }
42943- atomic_inc(&cm_loopbacks);
42944+ atomic_inc_unchecked(&cm_loopbacks);
42945 loopbackremotenode->loopbackpartner = cm_node;
42946 loopbackremotenode->tcp_cntxt.rcv_wscale =
42947 NES_CM_DEFAULT_RCV_WND_SCALE;
42948@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
42949 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
42950 else {
42951 rem_ref_cm_node(cm_core, cm_node);
42952- atomic_inc(&cm_accel_dropped_pkts);
42953+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
42954 dev_kfree_skb_any(skb);
42955 }
42956 break;
42957@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42958
42959 if ((cm_id) && (cm_id->event_handler)) {
42960 if (issue_disconn) {
42961- atomic_inc(&cm_disconnects);
42962+ atomic_inc_unchecked(&cm_disconnects);
42963 cm_event.event = IW_CM_EVENT_DISCONNECT;
42964 cm_event.status = disconn_status;
42965 cm_event.local_addr = cm_id->local_addr;
42966@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42967 }
42968
42969 if (issue_close) {
42970- atomic_inc(&cm_closes);
42971+ atomic_inc_unchecked(&cm_closes);
42972 nes_disconnect(nesqp, 1);
42973
42974 cm_id->provider_data = nesqp;
42975@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
42976
42977 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
42978 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
42979- atomic_inc(&cm_accepts);
42980+ atomic_inc_unchecked(&cm_accepts);
42981
42982 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
42983 netdev_refcnt_read(nesvnic->netdev));
42984@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
42985 struct nes_cm_core *cm_core;
42986 u8 *start_buff;
42987
42988- atomic_inc(&cm_rejects);
42989+ atomic_inc_unchecked(&cm_rejects);
42990 cm_node = (struct nes_cm_node *)cm_id->provider_data;
42991 loopback = cm_node->loopbackpartner;
42992 cm_core = cm_node->cm_core;
42993@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
42994 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
42995 ntohs(laddr->sin_port));
42996
42997- atomic_inc(&cm_connects);
42998+ atomic_inc_unchecked(&cm_connects);
42999 nesqp->active_conn = 1;
43000
43001 /* cache the cm_id in the qp */
43002@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43003 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43004 return err;
43005 }
43006- atomic_inc(&cm_listens_created);
43007+ atomic_inc_unchecked(&cm_listens_created);
43008 }
43009
43010 cm_id->add_ref(cm_id);
43011@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43012
43013 if (nesqp->destroyed)
43014 return;
43015- atomic_inc(&cm_connecteds);
43016+ atomic_inc_unchecked(&cm_connecteds);
43017 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43018 " local port 0x%04X. jiffies = %lu.\n",
43019 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43020@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43021
43022 cm_id->add_ref(cm_id);
43023 ret = cm_id->event_handler(cm_id, &cm_event);
43024- atomic_inc(&cm_closes);
43025+ atomic_inc_unchecked(&cm_closes);
43026 cm_event.event = IW_CM_EVENT_CLOSE;
43027 cm_event.status = 0;
43028 cm_event.provider_data = cm_id->provider_data;
43029@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43030 return;
43031 cm_id = cm_node->cm_id;
43032
43033- atomic_inc(&cm_connect_reqs);
43034+ atomic_inc_unchecked(&cm_connect_reqs);
43035 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43036 cm_node, cm_id, jiffies);
43037
43038@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43039 return;
43040 cm_id = cm_node->cm_id;
43041
43042- atomic_inc(&cm_connect_reqs);
43043+ atomic_inc_unchecked(&cm_connect_reqs);
43044 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43045 cm_node, cm_id, jiffies);
43046
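Rewriting nes_cm_api from positional to designated initializers (the gigaset and isdn ops tables later in this patch get the same treatment) is a prerequisite for structure layout randomization: once a plugin may reorder an ops struct's members, only field-name initialization still binds each handler to the right slot, and it is self-documenting besides. A minimal standalone illustration:

    #include <stdio.h>

    struct ops {
        int (*open)(void);
        int (*close)(void);
    };

    static int my_open(void)  { return 1; }
    static int my_close(void) { return 2; }

    /* Positional form silently misbinds if the struct is ever reordered: */
    static struct ops positional = { my_open, my_close };

    /* Designated form stays correct under any member order: */
    static struct ops designated = { .open = my_open, .close = my_close };

    int main(void)
    {
        printf("%d %d\n", positional.open(), designated.open());
        return 0;
    }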
43047diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43048index 4166452..fc952c3 100644
43049--- a/drivers/infiniband/hw/nes/nes_mgt.c
43050+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43051@@ -40,8 +40,8 @@
43052 #include "nes.h"
43053 #include "nes_mgt.h"
43054
43055-atomic_t pau_qps_created;
43056-atomic_t pau_qps_destroyed;
43057+atomic_unchecked_t pau_qps_created;
43058+atomic_unchecked_t pau_qps_destroyed;
43059
43060 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43061 {
43062@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43063 {
43064 struct sk_buff *skb;
43065 unsigned long flags;
43066- atomic_inc(&pau_qps_destroyed);
43067+ atomic_inc_unchecked(&pau_qps_destroyed);
43068
43069 /* Free packets that have not yet been forwarded */
43070 /* Lock is acquired by skb_dequeue when removing the skb */
43071@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43072 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43073 skb_queue_head_init(&nesqp->pau_list);
43074 spin_lock_init(&nesqp->pau_lock);
43075- atomic_inc(&pau_qps_created);
43076+ atomic_inc_unchecked(&pau_qps_created);
43077 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43078 }
43079
43080diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43081index 49eb511..a774366 100644
43082--- a/drivers/infiniband/hw/nes/nes_nic.c
43083+++ b/drivers/infiniband/hw/nes/nes_nic.c
43084@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43085 target_stat_values[++index] = mh_detected;
43086 target_stat_values[++index] = mh_pauses_sent;
43087 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43088- target_stat_values[++index] = atomic_read(&cm_connects);
43089- target_stat_values[++index] = atomic_read(&cm_accepts);
43090- target_stat_values[++index] = atomic_read(&cm_disconnects);
43091- target_stat_values[++index] = atomic_read(&cm_connecteds);
43092- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43093- target_stat_values[++index] = atomic_read(&cm_rejects);
43094- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43095- target_stat_values[++index] = atomic_read(&qps_created);
43096- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43097- target_stat_values[++index] = atomic_read(&qps_destroyed);
43098- target_stat_values[++index] = atomic_read(&cm_closes);
43099+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43100+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43101+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43102+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43103+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43104+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43105+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43106+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43107+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43108+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43109+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43110 target_stat_values[++index] = cm_packets_sent;
43111 target_stat_values[++index] = cm_packets_bounced;
43112 target_stat_values[++index] = cm_packets_created;
43113 target_stat_values[++index] = cm_packets_received;
43114 target_stat_values[++index] = cm_packets_dropped;
43115 target_stat_values[++index] = cm_packets_retrans;
43116- target_stat_values[++index] = atomic_read(&cm_listens_created);
43117- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43118+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43119+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43120 target_stat_values[++index] = cm_backlog_drops;
43121- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43122- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43123- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43124- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43125- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43126+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43127+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43128+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43129+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43130+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43131 target_stat_values[++index] = nesadapter->free_4kpbl;
43132 target_stat_values[++index] = nesadapter->free_256pbl;
43133 target_stat_values[++index] = int_mod_timer_init;
43134 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43135 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43136 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43137- target_stat_values[++index] = atomic_read(&pau_qps_created);
43138- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43139+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43140+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43141 }
43142
43143 /**
43144diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43145index c0d0296..3185f57 100644
43146--- a/drivers/infiniband/hw/nes/nes_verbs.c
43147+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43148@@ -46,9 +46,9 @@
43149
43150 #include <rdma/ib_umem.h>
43151
43152-atomic_t mod_qp_timouts;
43153-atomic_t qps_created;
43154-atomic_t sw_qps_destroyed;
43155+atomic_unchecked_t mod_qp_timouts;
43156+atomic_unchecked_t qps_created;
43157+atomic_unchecked_t sw_qps_destroyed;
43158
43159 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43160
43161@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43162 if (init_attr->create_flags)
43163 return ERR_PTR(-EINVAL);
43164
43165- atomic_inc(&qps_created);
43166+ atomic_inc_unchecked(&qps_created);
43167 switch (init_attr->qp_type) {
43168 case IB_QPT_RC:
43169 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43170@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43171 struct iw_cm_event cm_event;
43172 int ret = 0;
43173
43174- atomic_inc(&sw_qps_destroyed);
43175+ atomic_inc_unchecked(&sw_qps_destroyed);
43176 nesqp->destroyed = 1;
43177
43178 /* Blow away the connection if it exists. */
43179diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43180index c00ae09..04e91be 100644
43181--- a/drivers/infiniband/hw/qib/qib.h
43182+++ b/drivers/infiniband/hw/qib/qib.h
43183@@ -52,6 +52,7 @@
43184 #include <linux/kref.h>
43185 #include <linux/sched.h>
43186 #include <linux/kthread.h>
43187+#include <linux/slab.h>
43188
43189 #include "qib_common.h"
43190 #include "qib_verbs.h"
43191diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43192index cdc7df4..a2fdfdb 100644
43193--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43194+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43195@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
43196 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
43197 }
43198
43199-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
43200+static struct rtnl_link_ops ipoib_link_ops = {
43201 .kind = "ipoib",
43202 .maxtype = IFLA_IPOIB_MAX,
43203 .policy = ipoib_policy,
43204diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43205index e853a21..56fc5a8 100644
43206--- a/drivers/input/gameport/gameport.c
43207+++ b/drivers/input/gameport/gameport.c
43208@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43209 */
43210 static void gameport_init_port(struct gameport *gameport)
43211 {
43212- static atomic_t gameport_no = ATOMIC_INIT(-1);
43213+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
43214
43215 __module_get(THIS_MODULE);
43216
43217 mutex_init(&gameport->drv_mutex);
43218 device_initialize(&gameport->dev);
43219 dev_set_name(&gameport->dev, "gameport%lu",
43220- (unsigned long)atomic_inc_return(&gameport_no));
43221+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
43222 gameport->dev.bus = &gameport_bus;
43223 gameport->dev.release = gameport_release_port;
43224 if (gameport->parent)
43225diff --git a/drivers/input/input.c b/drivers/input/input.c
43226index 213e3a1..4fea837 100644
43227--- a/drivers/input/input.c
43228+++ b/drivers/input/input.c
43229@@ -1775,7 +1775,7 @@ EXPORT_SYMBOL_GPL(input_class);
43230 */
43231 struct input_dev *input_allocate_device(void)
43232 {
43233- static atomic_t input_no = ATOMIC_INIT(-1);
43234+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
43235 struct input_dev *dev;
43236
43237 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43238@@ -1790,7 +1790,7 @@ struct input_dev *input_allocate_device(void)
43239 INIT_LIST_HEAD(&dev->node);
43240
43241 dev_set_name(&dev->dev, "input%lu",
43242- (unsigned long)atomic_inc_return(&input_no));
43243+ (unsigned long)atomic_inc_return_unchecked(&input_no));
43244
43245 __module_get(THIS_MODULE);
43246 }
43247diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43248index 4a95b22..874c182 100644
43249--- a/drivers/input/joystick/sidewinder.c
43250+++ b/drivers/input/joystick/sidewinder.c
43251@@ -30,6 +30,7 @@
43252 #include <linux/kernel.h>
43253 #include <linux/module.h>
43254 #include <linux/slab.h>
43255+#include <linux/sched.h>
43256 #include <linux/input.h>
43257 #include <linux/gameport.h>
43258 #include <linux/jiffies.h>
43259diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43260index 3aa2f3f..53c00ea 100644
43261--- a/drivers/input/joystick/xpad.c
43262+++ b/drivers/input/joystick/xpad.c
43263@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43264
43265 static int xpad_led_probe(struct usb_xpad *xpad)
43266 {
43267- static atomic_t led_seq = ATOMIC_INIT(-1);
43268+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
43269 unsigned long led_no;
43270 struct xpad_led *led;
43271 struct led_classdev *led_cdev;
43272@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43273 if (!led)
43274 return -ENOMEM;
43275
43276- led_no = atomic_inc_return(&led_seq);
43277+ led_no = atomic_inc_return_unchecked(&led_seq);
43278
43279 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
43280 led->xpad = xpad;
43281diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
43282index ac1fa5f..5f7502c 100644
43283--- a/drivers/input/misc/ims-pcu.c
43284+++ b/drivers/input/misc/ims-pcu.c
43285@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
43286
43287 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43288 {
43289- static atomic_t device_no = ATOMIC_INIT(-1);
43290+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
43291
43292 const struct ims_pcu_device_info *info;
43293 int error;
43294@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43295 }
43296
43297 /* Device appears to be operable, complete initialization */
43298- pcu->device_no = atomic_inc_return(&device_no);
43299+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
43300
43301 /*
43302 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
43303diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
43304index f4cf664..3204fda 100644
43305--- a/drivers/input/mouse/psmouse.h
43306+++ b/drivers/input/mouse/psmouse.h
43307@@ -117,7 +117,7 @@ struct psmouse_attribute {
43308 ssize_t (*set)(struct psmouse *psmouse, void *data,
43309 const char *buf, size_t count);
43310 bool protect;
43311-};
43312+} __do_const;
43313 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
43314
43315 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
43316diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
43317index b604564..3f14ae4 100644
43318--- a/drivers/input/mousedev.c
43319+++ b/drivers/input/mousedev.c
43320@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
43321
43322 spin_unlock_irq(&client->packet_lock);
43323
43324- if (copy_to_user(buffer, data, count))
43325+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
43326 return -EFAULT;
43327
43328 return count;
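The mousedev change hardens a user copy: data is a fixed on-stack packet buffer, and while count should already be bounded by the packet length, clamping it against sizeof(data) makes an overread of kernel stack impossible even if that invariant ever breaks. The pattern, sketched as a userspace stand-in with memcpy in place of copy_to_user:

    #include <string.h>
    #include <errno.h>

    /* Stand-in for the guarded copy-out: refuse any length that
     * exceeds the source buffer before touching memory. */
    static long copy_packet(char *dst, size_t count)
    {
        char data[8] = "packet";      /* fixed-size source, as on the stack */

        if (count > sizeof(data))     /* the added guard */
            return -EFAULT;
        memcpy(dst, data, count);     /* copy_to_user in the kernel */
        return (long)count;
    }

    int main(void)
    {
        char out[8];
        return copy_packet(out, 6) == 6 ? 0 : 1;
    }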
43329diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
43330index a05a517..323a2fd 100644
43331--- a/drivers/input/serio/serio.c
43332+++ b/drivers/input/serio/serio.c
43333@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
43334 */
43335 static void serio_init_port(struct serio *serio)
43336 {
43337- static atomic_t serio_no = ATOMIC_INIT(-1);
43338+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
43339
43340 __module_get(THIS_MODULE);
43341
43342@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
43343 mutex_init(&serio->drv_mutex);
43344 device_initialize(&serio->dev);
43345 dev_set_name(&serio->dev, "serio%lu",
43346- (unsigned long)atomic_inc_return(&serio_no));
43347+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
43348 serio->dev.bus = &serio_bus;
43349 serio->dev.release = serio_release_port;
43350 serio->dev.groups = serio_device_attr_groups;
43351diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
43352index 71ef5d6..93380a9 100644
43353--- a/drivers/input/serio/serio_raw.c
43354+++ b/drivers/input/serio/serio_raw.c
43355@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
43356
43357 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43358 {
43359- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
43360+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
43361 struct serio_raw *serio_raw;
43362 int err;
43363
43364@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43365 }
43366
43367 snprintf(serio_raw->name, sizeof(serio_raw->name),
43368- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
43369+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
43370 kref_init(&serio_raw->kref);
43371 INIT_LIST_HEAD(&serio_raw->client_list);
43372 init_waitqueue_head(&serio_raw->wait);
43373diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
43374index 9802485..2e9941d 100644
43375--- a/drivers/iommu/amd_iommu.c
43376+++ b/drivers/iommu/amd_iommu.c
43377@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
43378
43379 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
43380 {
43381+ phys_addr_t physaddr;
43382 WARN_ON(address & 0x7ULL);
43383
43384 memset(cmd, 0, sizeof(*cmd));
43385- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
43386- cmd->data[1] = upper_32_bits(__pa(address));
43387+
43388+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
43389+ if (object_starts_on_stack((void *)address)) {
43390+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
43391+ physaddr = __pa((u64)adjbuf);
43392+ } else
43393+#endif
43394+ physaddr = __pa(address);
43395+
43396+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
43397+ cmd->data[1] = upper_32_bits(physaddr);
43398 cmd->data[2] = 1;
43399 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
43400 }
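build_completion_wait() takes the address of an on-stack object and feeds it to __pa(). Under GRKERNSEC_KSTACKOVERFLOW the task stack lives outside the linear mapping, where __pa() is not valid, so the patch rebases stack addresses into the task's lowmem shadow first; current->stack and current->lowmem_stack are the two aliases of the same region in that configuration. The arithmetic is plain offset rebasing; a toy demonstration with made-up base addresses (the constants are assumptions, only the subtraction/addition mirrors the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* two hypothetical mappings of the same stack region */
        uintptr_t vmap_base   = 0x7f0000000000u;
        uintptr_t lowmem_base = 0x440000000000u;

        uintptr_t obj = vmap_base + 0x1f0;              /* object on the stack   */
        uintptr_t adj = obj - vmap_base + lowmem_base;  /* same offset, other alias */

        printf("%#llx\n", (unsigned long long)adj);
        return 0;
    }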
43401diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
43402index 6cd47b7..264d14a 100644
43403--- a/drivers/iommu/arm-smmu.c
43404+++ b/drivers/iommu/arm-smmu.c
43405@@ -968,7 +968,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
43406 cfg->irptndx = cfg->cbndx;
43407 }
43408
43409- ACCESS_ONCE(smmu_domain->smmu) = smmu;
43410+ ACCESS_ONCE_RW(smmu_domain->smmu) = smmu;
43411 arm_smmu_init_context_bank(smmu_domain);
43412 spin_unlock_irqrestore(&smmu_domain->lock, flags);
43413
43414diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
43415index f7718d7..3ef740b 100644
43416--- a/drivers/iommu/iommu.c
43417+++ b/drivers/iommu/iommu.c
43418@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
43419 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
43420 {
43421 int err;
43422- struct notifier_block *nb;
43423+ notifier_block_no_const *nb;
43424 struct iommu_callback_data cb = {
43425 .ops = ops,
43426 };
43427diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
43428index 89c4846..1de796f 100644
43429--- a/drivers/iommu/irq_remapping.c
43430+++ b/drivers/iommu/irq_remapping.c
43431@@ -353,7 +353,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
43432 void panic_if_irq_remap(const char *msg)
43433 {
43434 if (irq_remapping_enabled)
43435- panic(msg);
43436+ panic("%s", msg);
43437 }
43438
43439 static void ir_ack_apic_edge(struct irq_data *data)
43440@@ -374,10 +374,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
43441
43442 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
43443 {
43444- chip->irq_print_chip = ir_print_prefix;
43445- chip->irq_ack = ir_ack_apic_edge;
43446- chip->irq_eoi = ir_ack_apic_level;
43447- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43448+ pax_open_kernel();
43449+ *(void **)&chip->irq_print_chip = ir_print_prefix;
43450+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
43451+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
43452+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43453+ pax_close_kernel();
43454 }
43455
43456 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
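Two distinct hardenings appear in irq_remapping.c. panic(msg) becomes panic("%s", msg) so a message that happens to contain conversion specifiers can never be parsed as a format string, and the irq_chip method pointers are written through pax_open_kernel()/pax_close_kernel() because KERNEXEC constification leaves such ops structures read-only at runtime. The format-string rule is easy to show in isolation:

    #include <stdio.h>

    int main(void)
    {
        const char *msg = "loaded 100%s";  /* a '%' that means nothing */

        /* printf(msg); would interpret %s and read a garbage argument */
        printf("%s\n", msg);  /* always safe: msg is data, not a format */
        return 0;
    }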
43457diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
43458index d617ee5..df8be8b 100644
43459--- a/drivers/irqchip/irq-gic.c
43460+++ b/drivers/irqchip/irq-gic.c
43461@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
43462 * Supported arch specific GIC irq extension.
43463 * Default make them NULL.
43464 */
43465-struct irq_chip gic_arch_extn = {
43466+irq_chip_no_const gic_arch_extn = {
43467 .irq_eoi = NULL,
43468 .irq_mask = NULL,
43469 .irq_unmask = NULL,
43470@@ -311,7 +311,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
43471 chained_irq_exit(chip, desc);
43472 }
43473
43474-static struct irq_chip gic_chip = {
43475+static irq_chip_no_const gic_chip __read_only = {
43476 .name = "GIC",
43477 .irq_mask = gic_mask_irq,
43478 .irq_unmask = gic_unmask_irq,
43479diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
43480index 078cac5..fb0f846 100644
43481--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
43482+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
43483@@ -353,7 +353,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
43484 struct intc_irqpin_iomem *i;
43485 struct resource *io[INTC_IRQPIN_REG_NR];
43486 struct resource *irq;
43487- struct irq_chip *irq_chip;
43488+ irq_chip_no_const *irq_chip;
43489 void (*enable_fn)(struct irq_data *d);
43490 void (*disable_fn)(struct irq_data *d);
43491 const char *name = dev_name(dev);
43492diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
43493index 384e6ed..7a771b2 100644
43494--- a/drivers/irqchip/irq-renesas-irqc.c
43495+++ b/drivers/irqchip/irq-renesas-irqc.c
43496@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
43497 struct irqc_priv *p;
43498 struct resource *io;
43499 struct resource *irq;
43500- struct irq_chip *irq_chip;
43501+ irq_chip_no_const *irq_chip;
43502 const char *name = dev_name(&pdev->dev);
43503 int ret;
43504 int k;
43505diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
43506index 6a2df32..dc962f1 100644
43507--- a/drivers/isdn/capi/capi.c
43508+++ b/drivers/isdn/capi/capi.c
43509@@ -81,8 +81,8 @@ struct capiminor {
43510
43511 struct capi20_appl *ap;
43512 u32 ncci;
43513- atomic_t datahandle;
43514- atomic_t msgid;
43515+ atomic_unchecked_t datahandle;
43516+ atomic_unchecked_t msgid;
43517
43518 struct tty_port port;
43519 int ttyinstop;
43520@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
43521 capimsg_setu16(s, 2, mp->ap->applid);
43522 capimsg_setu8 (s, 4, CAPI_DATA_B3);
43523 capimsg_setu8 (s, 5, CAPI_RESP);
43524- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
43525+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
43526 capimsg_setu32(s, 8, mp->ncci);
43527 capimsg_setu16(s, 12, datahandle);
43528 }
43529@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
43530 mp->outbytes -= len;
43531 spin_unlock_bh(&mp->outlock);
43532
43533- datahandle = atomic_inc_return(&mp->datahandle);
43534+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
43535 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
43536 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43537 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43538 capimsg_setu16(skb->data, 2, mp->ap->applid);
43539 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
43540 capimsg_setu8 (skb->data, 5, CAPI_REQ);
43541- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
43542+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
43543 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
43544 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
43545 capimsg_setu16(skb->data, 16, len); /* Data length */
43546diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
43547index aecec6d..11e13c5 100644
43548--- a/drivers/isdn/gigaset/bas-gigaset.c
43549+++ b/drivers/isdn/gigaset/bas-gigaset.c
43550@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
43551
43552
43553 static const struct gigaset_ops gigops = {
43554- gigaset_write_cmd,
43555- gigaset_write_room,
43556- gigaset_chars_in_buffer,
43557- gigaset_brkchars,
43558- gigaset_init_bchannel,
43559- gigaset_close_bchannel,
43560- gigaset_initbcshw,
43561- gigaset_freebcshw,
43562- gigaset_reinitbcshw,
43563- gigaset_initcshw,
43564- gigaset_freecshw,
43565- gigaset_set_modem_ctrl,
43566- gigaset_baud_rate,
43567- gigaset_set_line_ctrl,
43568- gigaset_isoc_send_skb,
43569- gigaset_isoc_input,
43570+ .write_cmd = gigaset_write_cmd,
43571+ .write_room = gigaset_write_room,
43572+ .chars_in_buffer = gigaset_chars_in_buffer,
43573+ .brkchars = gigaset_brkchars,
43574+ .init_bchannel = gigaset_init_bchannel,
43575+ .close_bchannel = gigaset_close_bchannel,
43576+ .initbcshw = gigaset_initbcshw,
43577+ .freebcshw = gigaset_freebcshw,
43578+ .reinitbcshw = gigaset_reinitbcshw,
43579+ .initcshw = gigaset_initcshw,
43580+ .freecshw = gigaset_freecshw,
43581+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43582+ .baud_rate = gigaset_baud_rate,
43583+ .set_line_ctrl = gigaset_set_line_ctrl,
43584+ .send_skb = gigaset_isoc_send_skb,
43585+ .handle_input = gigaset_isoc_input,
43586 };
43587
43588 /* bas_gigaset_init
43589diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
43590index 600c79b..3752bab 100644
43591--- a/drivers/isdn/gigaset/interface.c
43592+++ b/drivers/isdn/gigaset/interface.c
43593@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
43594 }
43595 tty->driver_data = cs;
43596
43597- ++cs->port.count;
43598+ atomic_inc(&cs->port.count);
43599
43600- if (cs->port.count == 1) {
43601+ if (atomic_read(&cs->port.count) == 1) {
43602 tty_port_tty_set(&cs->port, tty);
43603 cs->port.low_latency = 1;
43604 }
43605@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
43606
43607 if (!cs->connected)
43608 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
43609- else if (!cs->port.count)
43610+ else if (!atomic_read(&cs->port.count))
43611 dev_warn(cs->dev, "%s: device not opened\n", __func__);
43612- else if (!--cs->port.count)
43613+ else if (!atomic_dec_return(&cs->port.count))
43614 tty_port_tty_set(&cs->port, NULL);
43615
43616 mutex_unlock(&cs->mutex);
43617diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
43618index 8c91fd5..14f13ce 100644
43619--- a/drivers/isdn/gigaset/ser-gigaset.c
43620+++ b/drivers/isdn/gigaset/ser-gigaset.c
43621@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
43622 }
43623
43624 static const struct gigaset_ops ops = {
43625- gigaset_write_cmd,
43626- gigaset_write_room,
43627- gigaset_chars_in_buffer,
43628- gigaset_brkchars,
43629- gigaset_init_bchannel,
43630- gigaset_close_bchannel,
43631- gigaset_initbcshw,
43632- gigaset_freebcshw,
43633- gigaset_reinitbcshw,
43634- gigaset_initcshw,
43635- gigaset_freecshw,
43636- gigaset_set_modem_ctrl,
43637- gigaset_baud_rate,
43638- gigaset_set_line_ctrl,
43639- gigaset_m10x_send_skb, /* asyncdata.c */
43640- gigaset_m10x_input, /* asyncdata.c */
43641+ .write_cmd = gigaset_write_cmd,
43642+ .write_room = gigaset_write_room,
43643+ .chars_in_buffer = gigaset_chars_in_buffer,
43644+ .brkchars = gigaset_brkchars,
43645+ .init_bchannel = gigaset_init_bchannel,
43646+ .close_bchannel = gigaset_close_bchannel,
43647+ .initbcshw = gigaset_initbcshw,
43648+ .freebcshw = gigaset_freebcshw,
43649+ .reinitbcshw = gigaset_reinitbcshw,
43650+ .initcshw = gigaset_initcshw,
43651+ .freecshw = gigaset_freecshw,
43652+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43653+ .baud_rate = gigaset_baud_rate,
43654+ .set_line_ctrl = gigaset_set_line_ctrl,
43655+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
43656+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
43657 };
43658
43659
43660diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
43661index 5f306e2..5342f88 100644
43662--- a/drivers/isdn/gigaset/usb-gigaset.c
43663+++ b/drivers/isdn/gigaset/usb-gigaset.c
43664@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
43665 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
43666 memcpy(cs->hw.usb->bchars, buf, 6);
43667 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
43668- 0, 0, &buf, 6, 2000);
43669+ 0, 0, buf, 6, 2000);
43670 }
43671
43672 static void gigaset_freebcshw(struct bc_state *bcs)
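The hunk above fixes a genuine bug rather than adding hardening: buf is an array parameter, which decays to a plain pointer inside the function, so &buf is the address of that pointer variable and usb_control_msg() was being handed 6 bytes of local stack instead of the break characters. A small demonstration of why &buf differs for parameters even though it coincides with buf for true arrays:

    #include <stdio.h>

    static void f(const unsigned char buf[6])
    {
        /* inside f, buf is really 'const unsigned char *buf' */
        printf("data: %p\n", (const void *)buf);   /* the caller's bytes */
        printf("&buf: %p\n", (const void *)&buf);  /* the pointer's own storage:
                                                      the wrong 6 bytes to send */
    }

    int main(void)
    {
        unsigned char b[6] = {0};
        f(b);
        return 0;
    }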
43673@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
43674 }
43675
43676 static const struct gigaset_ops ops = {
43677- gigaset_write_cmd,
43678- gigaset_write_room,
43679- gigaset_chars_in_buffer,
43680- gigaset_brkchars,
43681- gigaset_init_bchannel,
43682- gigaset_close_bchannel,
43683- gigaset_initbcshw,
43684- gigaset_freebcshw,
43685- gigaset_reinitbcshw,
43686- gigaset_initcshw,
43687- gigaset_freecshw,
43688- gigaset_set_modem_ctrl,
43689- gigaset_baud_rate,
43690- gigaset_set_line_ctrl,
43691- gigaset_m10x_send_skb,
43692- gigaset_m10x_input,
43693+ .write_cmd = gigaset_write_cmd,
43694+ .write_room = gigaset_write_room,
43695+ .chars_in_buffer = gigaset_chars_in_buffer,
43696+ .brkchars = gigaset_brkchars,
43697+ .init_bchannel = gigaset_init_bchannel,
43698+ .close_bchannel = gigaset_close_bchannel,
43699+ .initbcshw = gigaset_initbcshw,
43700+ .freebcshw = gigaset_freebcshw,
43701+ .reinitbcshw = gigaset_reinitbcshw,
43702+ .initcshw = gigaset_initcshw,
43703+ .freecshw = gigaset_freecshw,
43704+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43705+ .baud_rate = gigaset_baud_rate,
43706+ .set_line_ctrl = gigaset_set_line_ctrl,
43707+ .send_skb = gigaset_m10x_send_skb,
43708+ .handle_input = gigaset_m10x_input,
43709 };
43710
43711 /*
43712diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
43713index 4d9b195..455075c 100644
43714--- a/drivers/isdn/hardware/avm/b1.c
43715+++ b/drivers/isdn/hardware/avm/b1.c
43716@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
43717 }
43718 if (left) {
43719 if (t4file->user) {
43720- if (copy_from_user(buf, dp, left))
43721+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43722 return -EFAULT;
43723 } else {
43724 memcpy(buf, dp, left);
43725@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
43726 }
43727 if (left) {
43728 if (config->user) {
43729- if (copy_from_user(buf, dp, left))
43730+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43731 return -EFAULT;
43732 } else {
43733 memcpy(buf, dp, left);
43734diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
43735index 9b856e1..fa03c92 100644
43736--- a/drivers/isdn/i4l/isdn_common.c
43737+++ b/drivers/isdn/i4l/isdn_common.c
43738@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
43739 } else
43740 return -EINVAL;
43741 case IIOCDBGVAR:
43742+ if (!capable(CAP_SYS_RAWIO))
43743+ return -EPERM;
43744 if (arg) {
43745 if (copy_to_user(argp, &dev, sizeof(ulong)))
43746 return -EFAULT;
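IIOCDBGVAR copies the kernel address of the global dev structure out to user space, a KASLR-relevant infoleak; the patch gates it behind CAP_SYS_RAWIO so only already-privileged callers can read it. The check-before-disclose ioctl shape, as a standalone sketch with a boolean standing in for capable():

    #include <errno.h>
    #include <stdbool.h>

    static bool has_cap_sys_rawio;  /* stand-in for capable(CAP_SYS_RAWIO) */

    /* Sketch: verify privilege before any address leaves the kernel. */
    static long debug_ioctl(unsigned long *out, unsigned long kernel_addr)
    {
        if (!has_cap_sys_rawio)
            return -EPERM;          /* unprivileged: reveal nothing */
        *out = kernel_addr;         /* copy_to_user in the real code */
        return 0;
    }

    int main(void)
    {
        unsigned long v;
        return debug_ioctl(&v, 0) == -EPERM ? 0 : 1;
    }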
43747diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
43748index 91d5730..336523e 100644
43749--- a/drivers/isdn/i4l/isdn_concap.c
43750+++ b/drivers/isdn/i4l/isdn_concap.c
43751@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
43752 }
43753
43754 struct concap_device_ops isdn_concap_reliable_dl_dops = {
43755- &isdn_concap_dl_data_req,
43756- &isdn_concap_dl_connect_req,
43757- &isdn_concap_dl_disconn_req
43758+ .data_req = &isdn_concap_dl_data_req,
43759+ .connect_req = &isdn_concap_dl_connect_req,
43760+ .disconn_req = &isdn_concap_dl_disconn_req
43761 };
43762
43763 /* The following should better go into a dedicated source file such that
43764diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
43765index bc91261..2ef7e36 100644
43766--- a/drivers/isdn/i4l/isdn_tty.c
43767+++ b/drivers/isdn/i4l/isdn_tty.c
43768@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
43769
43770 #ifdef ISDN_DEBUG_MODEM_OPEN
43771 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
43772- port->count);
43773+ atomic_read(&port->count));
43774 #endif
43775- port->count++;
43776+ atomic_inc(&port->count);
43777 port->tty = tty;
43778 /*
43779 * Start up serial port
43780@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43781 #endif
43782 return;
43783 }
43784- if ((tty->count == 1) && (port->count != 1)) {
43785+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
43786 /*
43787 * Uh, oh. tty->count is 1, which means that the tty
43788 * structure will be freed. Info->count should always
43789@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43790 * serial port won't be shutdown.
43791 */
43792 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
43793- "info->count is %d\n", port->count);
43794- port->count = 1;
43795+ "info->count is %d\n", atomic_read(&port->count));
43796+ atomic_set(&port->count, 1);
43797 }
43798- if (--port->count < 0) {
43799+ if (atomic_dec_return(&port->count) < 0) {
43800 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
43801- info->line, port->count);
43802- port->count = 0;
43803+ info->line, atomic_read(&port->count));
43804+ atomic_set(&port->count, 0);
43805 }
43806- if (port->count) {
43807+ if (atomic_read(&port->count)) {
43808 #ifdef ISDN_DEBUG_MODEM_OPEN
43809 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
43810 #endif
43811@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
43812 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
43813 return;
43814 isdn_tty_shutdown(info);
43815- port->count = 0;
43816+ atomic_set(&port->count, 0);
43817 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43818 port->tty = NULL;
43819 wake_up_interruptible(&port->open_wait);
43820@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
43821 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
43822 modem_info *info = &dev->mdm.info[i];
43823
43824- if (info->port.count == 0)
43825+ if (atomic_read(&info->port.count) == 0)
43826 continue;
43827 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
43828 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
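isdn_tty's open count mirrors the tty_port.count conversion made across this patch: turning the plain int into an atomic_t makes the increments and decrements in open, close and hangup safe against concurrent callers without widening any lock. The useful idiom is that the kernel's atomic_dec_return() hands back the new value, so last-close detection is one operation; an equivalent in C11 (where fetch_sub returns the old value instead, hence the comparison against 1):

    #include <stdatomic.h>

    static atomic_int port_count;

    static void port_open(void)
    {
        atomic_fetch_add(&port_count, 1);
    }

    static int port_close(void)
    {
        /* old value 1 means the count just reached 0: we closed last */
        if (atomic_fetch_sub(&port_count, 1) == 1)
            return 1;   /* tear down the port */
        return 0;
    }

    int main(void)
    {
        port_open();
        return port_close() == 1 ? 0 : 1;
    }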
43829diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
43830index e2d4e58..40cd045 100644
43831--- a/drivers/isdn/i4l/isdn_x25iface.c
43832+++ b/drivers/isdn/i4l/isdn_x25iface.c
43833@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
43834
43835
43836 static struct concap_proto_ops ix25_pops = {
43837- &isdn_x25iface_proto_new,
43838- &isdn_x25iface_proto_del,
43839- &isdn_x25iface_proto_restart,
43840- &isdn_x25iface_proto_close,
43841- &isdn_x25iface_xmit,
43842- &isdn_x25iface_receive,
43843- &isdn_x25iface_connect_ind,
43844- &isdn_x25iface_disconn_ind
43845+ .proto_new = &isdn_x25iface_proto_new,
43846+ .proto_del = &isdn_x25iface_proto_del,
43847+ .restart = &isdn_x25iface_proto_restart,
43848+ .close = &isdn_x25iface_proto_close,
43849+ .encap_and_xmit = &isdn_x25iface_xmit,
43850+ .data_ind = &isdn_x25iface_receive,
43851+ .connect_ind = &isdn_x25iface_connect_ind,
43852+ .disconn_ind = &isdn_x25iface_disconn_ind
43853 };
43854
43855 /* error message helper function */
43856diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
43857index 6a7447c..cae33fe 100644
43858--- a/drivers/isdn/icn/icn.c
43859+++ b/drivers/isdn/icn/icn.c
43860@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
43861 if (count > len)
43862 count = len;
43863 if (user) {
43864- if (copy_from_user(msg, buf, count))
43865+ if (count > sizeof msg || copy_from_user(msg, buf, count))
43866 return -EFAULT;
43867 } else
43868 memcpy(msg, buf, count);
43869diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
43870index 87f7dff..7300125 100644
43871--- a/drivers/isdn/mISDN/dsp_cmx.c
43872+++ b/drivers/isdn/mISDN/dsp_cmx.c
43873@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
43874 static u16 dsp_count; /* last sample count */
43875 static int dsp_count_valid; /* if we have last sample count */
43876
43877-void
43878+void __intentional_overflow(-1)
43879 dsp_cmx_send(void *arg)
43880 {
43881 struct dsp_conf *conf;
43882diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
43883index 0f9ed1e..2715d6f 100644
43884--- a/drivers/leds/leds-clevo-mail.c
43885+++ b/drivers/leds/leds-clevo-mail.c
43886@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
43887 * detected as working, but in reality it is not) as low as
43888 * possible.
43889 */
43890-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
43891+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
43892 {
43893 .callback = clevo_mail_led_dmi_callback,
43894 .ident = "Clevo D410J",
43895diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
43896index 046cb70..6b20d39 100644
43897--- a/drivers/leds/leds-ss4200.c
43898+++ b/drivers/leds/leds-ss4200.c
43899@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
43900 * detected as working, but in reality it is not) as low as
43901 * possible.
43902 */
43903-static struct dmi_system_id nas_led_whitelist[] __initdata = {
43904+static struct dmi_system_id nas_led_whitelist[] __initconst = {
43905 {
43906 .callback = ss4200_led_dmi_callback,
43907 .ident = "Intel SS4200-E",
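Moving the DMI whitelist tables from __initdata to __initconst is a constification fix: the tables are never written, and const-qualified objects belong in a read-only init section, avoiding section-mismatch complaints from the toolchain. A simplified sketch of what the two annotations reduce to; these expansions are assumed for illustration, the kernel's real macros carry more plumbing:

    /* Assumed simplified expansions: */
    #define __initdata  __attribute__((section(".init.data")))
    #define __initconst __attribute__((section(".init.rodata")))

    /* const data goes to the read-only init section */
    static const int dmi_like_table[] __initconst = { 1, 2, 3 };

    int main(void)
    {
        return dmi_like_table[0] - 1;
    }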
43908diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
43909index 6590558..a74c5dd 100644
43910--- a/drivers/lguest/core.c
43911+++ b/drivers/lguest/core.c
43912@@ -96,9 +96,17 @@ static __init int map_switcher(void)
43913 * The end address needs +1 because __get_vm_area allocates an
43914 * extra guard page, so we need space for that.
43915 */
43916+
43917+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
43918+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
43919+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
43920+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
43921+#else
43922 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
43923 VM_ALLOC, switcher_addr, switcher_addr
43924 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
43925+#endif
43926+
43927 if (!switcher_vma) {
43928 err = -ENOMEM;
43929 printk("lguest: could not map switcher pages high\n");
43930@@ -121,7 +129,7 @@ static __init int map_switcher(void)
43931 * Now the Switcher is mapped at the right address, we can't fail!
43932 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
43933 */
43934- memcpy(switcher_vma->addr, start_switcher_text,
43935+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
43936 end_switcher_text - start_switcher_text);
43937
43938 printk(KERN_INFO "lguest: mapped switcher at %p\n",
43939diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
43940index e8b55c3..3514c37 100644
43941--- a/drivers/lguest/page_tables.c
43942+++ b/drivers/lguest/page_tables.c
43943@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
43944 /*:*/
43945
43946 #ifdef CONFIG_X86_PAE
43947-static void release_pmd(pmd_t *spmd)
43948+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
43949 {
43950 /* If the entry's not present, there's nothing to release. */
43951 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
43952diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
43953index 922a1ac..9dd0c2a 100644
43954--- a/drivers/lguest/x86/core.c
43955+++ b/drivers/lguest/x86/core.c
43956@@ -59,7 +59,7 @@ static struct {
43957 /* Offset from where switcher.S was compiled to where we've copied it */
43958 static unsigned long switcher_offset(void)
43959 {
43960- return switcher_addr - (unsigned long)start_switcher_text;
43961+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
43962 }
43963
43964 /* This cpu's struct lguest_pages (after the Switcher text page) */
43965@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
43966 * These copies are pretty cheap, so we do them unconditionally: */
43967 /* Save the current Host top-level page directory.
43968 */
43969+
43970+#ifdef CONFIG_PAX_PER_CPU_PGD
43971+ pages->state.host_cr3 = read_cr3();
43972+#else
43973 pages->state.host_cr3 = __pa(current->mm->pgd);
43974+#endif
43975+
43976 /*
43977 * Set up the Guest's page tables to see this CPU's pages (and no
43978 * other CPU's pages).
43979@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
43980 * compiled-in switcher code and the high-mapped copy we just made.
43981 */
43982 for (i = 0; i < IDT_ENTRIES; i++)
43983- default_idt_entries[i] += switcher_offset();
43984+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
43985
43986 /*
43987 * Set up the Switcher's per-cpu areas.
43988@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
43989 * it will be undisturbed when we switch. To change %cs and jump we
43990 * need this structure to feed to Intel's "lcall" instruction.
43991 */
43992- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
43993+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
43994 lguest_entry.segment = LGUEST_CS;
43995
43996 /*
43997diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
43998index 40634b0..4f5855e 100644
43999--- a/drivers/lguest/x86/switcher_32.S
44000+++ b/drivers/lguest/x86/switcher_32.S
44001@@ -87,6 +87,7 @@
44002 #include <asm/page.h>
44003 #include <asm/segment.h>
44004 #include <asm/lguest.h>
44005+#include <asm/processor-flags.h>
44006
44007 // We mark the start of the code to copy
44008 // It's placed in .text tho it's never run here
44009@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
44010 // Changes type when we load it: damn Intel!
44011 // For after we switch over our page tables
44012 // That entry will be read-only: we'd crash.
44013+
44014+#ifdef CONFIG_PAX_KERNEXEC
44015+ mov %cr0, %edx
44016+ xor $X86_CR0_WP, %edx
44017+ mov %edx, %cr0
44018+#endif
44019+
44020 movl $(GDT_ENTRY_TSS*8), %edx
44021 ltr %dx
44022
44023@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
44024 // Let's clear it again for our return.
44025 // The GDT descriptor of the Host
44026 // Points to the table after two "size" bytes
44027- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
44028+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
44029 // Clear "used" from type field (byte 5, bit 2)
44030- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
44031+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
44032+
44033+#ifdef CONFIG_PAX_KERNEXEC
44034+ mov %cr0, %eax
44035+ xor $X86_CR0_WP, %eax
44036+ mov %eax, %cr0
44037+#endif
44038
44039 // Once our page table's switched, the Guest is live!
44040 // The Host fades as we run this final step.
44041@@ -295,13 +309,12 @@ deliver_to_host:
44042 // I consulted gcc, and it gave
44043 // These instructions, which I gladly credit:
44044 leal (%edx,%ebx,8), %eax
44045- movzwl (%eax),%edx
44046- movl 4(%eax), %eax
44047- xorw %ax, %ax
44048- orl %eax, %edx
44049+ movl 4(%eax), %edx
44050+ movw (%eax), %dx
44051 // Now the address of the handler's in %edx
44052 // We call it now: its "iret" drops us home.
44053- jmp *%edx
44054+ ljmp $__KERNEL_CS, $1f
44055+1: jmp *%edx
44056
44057 // Every interrupt can come to us here
44058 // But we must truly tell each apart.
44059diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
44060index a08e3ee..df8ade2 100644
44061--- a/drivers/md/bcache/closure.h
44062+++ b/drivers/md/bcache/closure.h
44063@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
44064 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
44065 struct workqueue_struct *wq)
44066 {
44067- BUG_ON(object_is_on_stack(cl));
44068+ BUG_ON(object_starts_on_stack(cl));
44069 closure_set_ip(cl);
44070 cl->fn = fn;
44071 cl->wq = wq;
44072diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
44073index 1695ee5..89f18ab 100644
44074--- a/drivers/md/bitmap.c
44075+++ b/drivers/md/bitmap.c
44076@@ -1784,7 +1784,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
44077 chunk_kb ? "KB" : "B");
44078 if (bitmap->storage.file) {
44079 seq_printf(seq, ", file: ");
44080- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
44081+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
44082 }
44083
44084 seq_printf(seq, "\n");
44085diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
44086index 73f791b..8c5d3ac 100644
44087--- a/drivers/md/dm-ioctl.c
44088+++ b/drivers/md/dm-ioctl.c
44089@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
44090 cmd == DM_LIST_VERSIONS_CMD)
44091 return 0;
44092
44093- if ((cmd == DM_DEV_CREATE_CMD)) {
44094+ if (cmd == DM_DEV_CREATE_CMD) {
44095 if (!*param->name) {
44096 DMWARN("name not supplied when creating device");
44097 return -EINVAL;
44098diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
44099index 7dfdb5c..4caada6 100644
44100--- a/drivers/md/dm-raid1.c
44101+++ b/drivers/md/dm-raid1.c
44102@@ -40,7 +40,7 @@ enum dm_raid1_error {
44103
44104 struct mirror {
44105 struct mirror_set *ms;
44106- atomic_t error_count;
44107+ atomic_unchecked_t error_count;
44108 unsigned long error_type;
44109 struct dm_dev *dev;
44110 sector_t offset;
44111@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
44112 struct mirror *m;
44113
44114 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
44115- if (!atomic_read(&m->error_count))
44116+ if (!atomic_read_unchecked(&m->error_count))
44117 return m;
44118
44119 return NULL;
44120@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
44121 * simple way to tell if a device has encountered
44122 * errors.
44123 */
44124- atomic_inc(&m->error_count);
44125+ atomic_inc_unchecked(&m->error_count);
44126
44127 if (test_and_set_bit(error_type, &m->error_type))
44128 return;
44129@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
44130 struct mirror *m = get_default_mirror(ms);
44131
44132 do {
44133- if (likely(!atomic_read(&m->error_count)))
44134+ if (likely(!atomic_read_unchecked(&m->error_count)))
44135 return m;
44136
44137 if (m-- == ms->mirror)
44138@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
44139 {
44140 struct mirror *default_mirror = get_default_mirror(m->ms);
44141
44142- return !atomic_read(&default_mirror->error_count);
44143+ return !atomic_read_unchecked(&default_mirror->error_count);
44144 }
44145
44146 static int mirror_available(struct mirror_set *ms, struct bio *bio)
44147@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
44148 */
44149 if (likely(region_in_sync(ms, region, 1)))
44150 m = choose_mirror(ms, bio->bi_iter.bi_sector);
44151- else if (m && atomic_read(&m->error_count))
44152+ else if (m && atomic_read_unchecked(&m->error_count))
44153 m = NULL;
44154
44155 if (likely(m))
44156@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
44157 }
44158
44159 ms->mirror[mirror].ms = ms;
44160- atomic_set(&(ms->mirror[mirror].error_count), 0);
44161+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
44162 ms->mirror[mirror].error_type = 0;
44163 ms->mirror[mirror].offset = offset;
44164
44165@@ -1342,7 +1342,7 @@ static void mirror_resume(struct dm_target *ti)
44166 */
44167 static char device_status_char(struct mirror *m)
44168 {
44169- if (!atomic_read(&(m->error_count)))
44170+ if (!atomic_read_unchecked(&(m->error_count)))
44171 return 'A';
44172
44173 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
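The dm-raid1 hunks are the first of many conversions in this section from atomic_t to atomic_unchecked_t. Under PAX_REFCOUNT the regular atomic_* operations trap on signed overflow to catch reference-count bugs; pure statistics such as error_count are allowed to wrap, so they move to the unchecked variants. A minimal userspace sketch of the distinction (not the real x86 implementation, which performs the checked add with overflow-detecting asm):

typedef struct { int counter; } atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* plain wrapping add: no overflow trap, unlike the checked atomic_inc() */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}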
44174diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
44175index f478a4c..4b8e5ef 100644
44176--- a/drivers/md/dm-stats.c
44177+++ b/drivers/md/dm-stats.c
44178@@ -382,7 +382,7 @@ do_sync_free:
44179 synchronize_rcu_expedited();
44180 dm_stat_free(&s->rcu_head);
44181 } else {
44182- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
44183+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
44184 call_rcu(&s->rcu_head, dm_stat_free);
44185 }
44186 return 0;
44187@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
44188 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
44189 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
44190 ));
44191- ACCESS_ONCE(last->last_sector) = end_sector;
44192- ACCESS_ONCE(last->last_rw) = bi_rw;
44193+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
44194+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
44195 }
44196
44197 rcu_read_lock();
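The ACCESS_ONCE to ACCESS_ONCE_RW changes exist because this patch const-qualifies ACCESS_ONCE so it can only be used for reads; sites that store through it, like the dm-stats fields above, must use the RW form. A sketch of the assumed pair of definitions:

#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))	/* read-only view */
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))		/* writable view */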
44198diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
44199index f8b37d4..5c5cafd 100644
44200--- a/drivers/md/dm-stripe.c
44201+++ b/drivers/md/dm-stripe.c
44202@@ -21,7 +21,7 @@ struct stripe {
44203 struct dm_dev *dev;
44204 sector_t physical_start;
44205
44206- atomic_t error_count;
44207+ atomic_unchecked_t error_count;
44208 };
44209
44210 struct stripe_c {
44211@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
44212 kfree(sc);
44213 return r;
44214 }
44215- atomic_set(&(sc->stripe[i].error_count), 0);
44216+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
44217 }
44218
44219 ti->private = sc;
44220@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
44221 DMEMIT("%d ", sc->stripes);
44222 for (i = 0; i < sc->stripes; i++) {
44223 DMEMIT("%s ", sc->stripe[i].dev->name);
44224- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
44225+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
44226 'D' : 'A';
44227 }
44228 buffer[i] = '\0';
44229@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
44230 */
44231 for (i = 0; i < sc->stripes; i++)
44232 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
44233- atomic_inc(&(sc->stripe[i].error_count));
44234- if (atomic_read(&(sc->stripe[i].error_count)) <
44235+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
44236+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
44237 DM_IO_ERROR_THRESHOLD)
44238 schedule_work(&sc->trigger_event);
44239 }
44240diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
44241index 3afae9e..4e1c954 100644
44242--- a/drivers/md/dm-table.c
44243+++ b/drivers/md/dm-table.c
44244@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
44245 if (!dev_size)
44246 return 0;
44247
44248- if ((start >= dev_size) || (start + len > dev_size)) {
44249+ if ((start >= dev_size) || (len > dev_size - start)) {
44250 DMWARN("%s: %s too small for target: "
44251 "start=%llu, len=%llu, dev_size=%llu",
44252 dm_device_name(ti->table->md), bdevname(bdev, b),
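The dm-table rewrite closes an integer-overflow hole: with sector_t arithmetic, start + len can wrap around and slip past the bound, whereas len > dev_size - start cannot wrap because the first clause has already established start < dev_size. A self-contained demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dev_size = 1000, start = 10, len = UINT64_MAX - 5;

	/* old form: start + len wraps to a small value and the check passes */
	printf("old: %s\n", (start >= dev_size) || (start + len > dev_size)
	       ? "rejected" : "accepted (bug)");
	/* new form: start < dev_size here, so dev_size - start cannot wrap */
	printf("new: %s\n", (start >= dev_size) || (len > dev_size - start)
	       ? "rejected" : "accepted");
	return 0;
}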
44253diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
44254index 43adbb8..7b34305 100644
44255--- a/drivers/md/dm-thin-metadata.c
44256+++ b/drivers/md/dm-thin-metadata.c
44257@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44258 {
44259 pmd->info.tm = pmd->tm;
44260 pmd->info.levels = 2;
44261- pmd->info.value_type.context = pmd->data_sm;
44262+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44263 pmd->info.value_type.size = sizeof(__le64);
44264 pmd->info.value_type.inc = data_block_inc;
44265 pmd->info.value_type.dec = data_block_dec;
44266@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44267
44268 pmd->bl_info.tm = pmd->tm;
44269 pmd->bl_info.levels = 1;
44270- pmd->bl_info.value_type.context = pmd->data_sm;
44271+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44272 pmd->bl_info.value_type.size = sizeof(__le64);
44273 pmd->bl_info.value_type.inc = data_block_inc;
44274 pmd->bl_info.value_type.dec = data_block_dec;
44275diff --git a/drivers/md/dm.c b/drivers/md/dm.c
44276index 2caf5b3..104f98f 100644
44277--- a/drivers/md/dm.c
44278+++ b/drivers/md/dm.c
44279@@ -185,9 +185,9 @@ struct mapped_device {
44280 /*
44281 * Event handling.
44282 */
44283- atomic_t event_nr;
44284+ atomic_unchecked_t event_nr;
44285 wait_queue_head_t eventq;
44286- atomic_t uevent_seq;
44287+ atomic_unchecked_t uevent_seq;
44288 struct list_head uevent_list;
44289 spinlock_t uevent_lock; /* Protect access to uevent_list */
44290
44291@@ -2070,8 +2070,8 @@ static struct mapped_device *alloc_dev(int minor)
44292 spin_lock_init(&md->deferred_lock);
44293 atomic_set(&md->holders, 1);
44294 atomic_set(&md->open_count, 0);
44295- atomic_set(&md->event_nr, 0);
44296- atomic_set(&md->uevent_seq, 0);
44297+ atomic_set_unchecked(&md->event_nr, 0);
44298+ atomic_set_unchecked(&md->uevent_seq, 0);
44299 INIT_LIST_HEAD(&md->uevent_list);
44300 INIT_LIST_HEAD(&md->table_devices);
44301 spin_lock_init(&md->uevent_lock);
44302@@ -2227,7 +2227,7 @@ static void event_callback(void *context)
44303
44304 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
44305
44306- atomic_inc(&md->event_nr);
44307+ atomic_inc_unchecked(&md->event_nr);
44308 wake_up(&md->eventq);
44309 }
44310
44311@@ -3041,18 +3041,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
44312
44313 uint32_t dm_next_uevent_seq(struct mapped_device *md)
44314 {
44315- return atomic_add_return(1, &md->uevent_seq);
44316+ return atomic_add_return_unchecked(1, &md->uevent_seq);
44317 }
44318
44319 uint32_t dm_get_event_nr(struct mapped_device *md)
44320 {
44321- return atomic_read(&md->event_nr);
44322+ return atomic_read_unchecked(&md->event_nr);
44323 }
44324
44325 int dm_wait_event(struct mapped_device *md, int event_nr)
44326 {
44327 return wait_event_interruptible(md->eventq,
44328- (event_nr != atomic_read(&md->event_nr)));
44329+ (event_nr != atomic_read_unchecked(&md->event_nr)));
44330 }
44331
44332 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
44333diff --git a/drivers/md/md.c b/drivers/md/md.c
44334index 709755f..5bc3fa4 100644
44335--- a/drivers/md/md.c
44336+++ b/drivers/md/md.c
44337@@ -190,10 +190,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
44338 * start build, activate spare
44339 */
44340 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
44341-static atomic_t md_event_count;
44342+static atomic_unchecked_t md_event_count;
44343 void md_new_event(struct mddev *mddev)
44344 {
44345- atomic_inc(&md_event_count);
44346+ atomic_inc_unchecked(&md_event_count);
44347 wake_up(&md_event_waiters);
44348 }
44349 EXPORT_SYMBOL_GPL(md_new_event);
44350@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
44351 */
44352 static void md_new_event_inintr(struct mddev *mddev)
44353 {
44354- atomic_inc(&md_event_count);
44355+ atomic_inc_unchecked(&md_event_count);
44356 wake_up(&md_event_waiters);
44357 }
44358
44359@@ -1422,7 +1422,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
44360 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
44361 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
44362 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
44363- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44364+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44365
44366 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
44367 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
44368@@ -1673,7 +1673,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
44369 else
44370 sb->resync_offset = cpu_to_le64(0);
44371
44372- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
44373+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
44374
44375 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
44376 sb->size = cpu_to_le64(mddev->dev_sectors);
44377@@ -2543,7 +2543,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
44378 static ssize_t
44379 errors_show(struct md_rdev *rdev, char *page)
44380 {
44381- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
44382+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
44383 }
44384
44385 static ssize_t
44386@@ -2552,7 +2552,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
44387 char *e;
44388 unsigned long n = simple_strtoul(buf, &e, 10);
44389 if (*buf && (*e == 0 || *e == '\n')) {
44390- atomic_set(&rdev->corrected_errors, n);
44391+ atomic_set_unchecked(&rdev->corrected_errors, n);
44392 return len;
44393 }
44394 return -EINVAL;
44395@@ -2997,8 +2997,8 @@ int md_rdev_init(struct md_rdev *rdev)
44396 rdev->sb_loaded = 0;
44397 rdev->bb_page = NULL;
44398 atomic_set(&rdev->nr_pending, 0);
44399- atomic_set(&rdev->read_errors, 0);
44400- atomic_set(&rdev->corrected_errors, 0);
44401+ atomic_set_unchecked(&rdev->read_errors, 0);
44402+ atomic_set_unchecked(&rdev->corrected_errors, 0);
44403
44404 INIT_LIST_HEAD(&rdev->same_set);
44405 init_waitqueue_head(&rdev->blocked_wait);
44406@@ -6865,7 +6865,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
44407
44408 spin_unlock(&pers_lock);
44409 seq_printf(seq, "\n");
44410- seq->poll_event = atomic_read(&md_event_count);
44411+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44412 return 0;
44413 }
44414 if (v == (void*)2) {
44415@@ -6968,7 +6968,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
44416 return error;
44417
44418 seq = file->private_data;
44419- seq->poll_event = atomic_read(&md_event_count);
44420+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44421 return error;
44422 }
44423
44424@@ -6985,7 +6985,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
44425 /* always allow read */
44426 mask = POLLIN | POLLRDNORM;
44427
44428- if (seq->poll_event != atomic_read(&md_event_count))
44429+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
44430 mask |= POLLERR | POLLPRI;
44431 return mask;
44432 }
44433@@ -7032,7 +7032,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
44434 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
44435 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
44436 (int)part_stat_read(&disk->part0, sectors[1]) -
44437- atomic_read(&disk->sync_io);
44438+ atomic_read_unchecked(&disk->sync_io);
44439 /* sync IO will cause sync_io to increase before the disk_stats
44440 * as sync_io is counted when a request starts, and
44441 * disk_stats is counted when it completes.
44442diff --git a/drivers/md/md.h b/drivers/md/md.h
44443index 03cec5b..0a658c1 100644
44444--- a/drivers/md/md.h
44445+++ b/drivers/md/md.h
44446@@ -94,13 +94,13 @@ struct md_rdev {
44447 * only maintained for arrays that
44448 * support hot removal
44449 */
44450- atomic_t read_errors; /* number of consecutive read errors that
44451+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
44452 * we have tried to ignore.
44453 */
44454 struct timespec last_read_error; /* monotonic time since our
44455 * last read error
44456 */
44457- atomic_t corrected_errors; /* number of corrected read errors,
44458+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
44459 * for reporting to userspace and storing
44460 * in superblock.
44461 */
44462@@ -448,7 +448,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
44463
44464 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
44465 {
44466- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44467+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44468 }
44469
44470 struct md_personality
44471diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
44472index e8a9042..35bd145 100644
44473--- a/drivers/md/persistent-data/dm-space-map-metadata.c
44474+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
44475@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
44476 * Flick into a mode where all blocks get allocated in the new area.
44477 */
44478 smm->begin = old_len;
44479- memcpy(sm, &bootstrap_ops, sizeof(*sm));
44480+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
44481
44482 /*
44483 * Extend.
44484@@ -714,7 +714,7 @@ out:
44485 /*
44486 * Switch back to normal behaviour.
44487 */
44488- memcpy(sm, &ops, sizeof(*sm));
44489+ memcpy((void *)sm, &ops, sizeof(*sm));
44490 return r;
44491 }
44492
44493diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
44494index 3e6d115..ffecdeb 100644
44495--- a/drivers/md/persistent-data/dm-space-map.h
44496+++ b/drivers/md/persistent-data/dm-space-map.h
44497@@ -71,6 +71,7 @@ struct dm_space_map {
44498 dm_sm_threshold_fn fn,
44499 void *context);
44500 };
44501+typedef struct dm_space_map __no_const dm_space_map_no_const;
44502
44503 /*----------------------------------------------------------------*/
44504
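The typedef above, and the (void *) casts in sm_metadata_extend() a few hunks back, are both fallout from the constify gcc plugin: structures consisting only of function pointers are treated as const and placed in read-only memory. dm_space_map is deliberately rewritten at runtime (the bootstrap_ops/ops switch), so it has to opt out. Assumed mechanics, with the attribute spelled as this patch is believed to define it elsewhere:

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))	/* plugin-provided attribute */
#else
#define __no_const
#endif

/* an ops-only struct the plugin would otherwise force into .rodata */
typedef struct dm_space_map __no_const dm_space_map_no_const;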
44505diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
44506index 2f2f38f..f6a8ebe 100644
44507--- a/drivers/md/raid1.c
44508+++ b/drivers/md/raid1.c
44509@@ -1932,7 +1932,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
44510 if (r1_sync_page_io(rdev, sect, s,
44511 bio->bi_io_vec[idx].bv_page,
44512 READ) != 0)
44513- atomic_add(s, &rdev->corrected_errors);
44514+ atomic_add_unchecked(s, &rdev->corrected_errors);
44515 }
44516 sectors -= s;
44517 sect += s;
44518@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
44519 !test_bit(Faulty, &rdev->flags)) {
44520 if (r1_sync_page_io(rdev, sect, s,
44521 conf->tmppage, READ)) {
44522- atomic_add(s, &rdev->corrected_errors);
44523+ atomic_add_unchecked(s, &rdev->corrected_errors);
44524 printk(KERN_INFO
44525 "md/raid1:%s: read error corrected "
44526 "(%d sectors at %llu on %s)\n",
44527diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
44528index 32e282f..5cec803 100644
44529--- a/drivers/md/raid10.c
44530+++ b/drivers/md/raid10.c
44531@@ -1944,7 +1944,7 @@ static void end_sync_read(struct bio *bio, int error)
44532 /* The write handler will notice the lack of
44533 * R10BIO_Uptodate and record any errors etc
44534 */
44535- atomic_add(r10_bio->sectors,
44536+ atomic_add_unchecked(r10_bio->sectors,
44537 &conf->mirrors[d].rdev->corrected_errors);
44538
44539 /* for reconstruct, we always reschedule after a read.
44540@@ -2301,7 +2301,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44541 {
44542 struct timespec cur_time_mon;
44543 unsigned long hours_since_last;
44544- unsigned int read_errors = atomic_read(&rdev->read_errors);
44545+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
44546
44547 ktime_get_ts(&cur_time_mon);
44548
44549@@ -2323,9 +2323,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44550 * overflowing the shift of read_errors by hours_since_last.
44551 */
44552 if (hours_since_last >= 8 * sizeof(read_errors))
44553- atomic_set(&rdev->read_errors, 0);
44554+ atomic_set_unchecked(&rdev->read_errors, 0);
44555 else
44556- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
44557+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
44558 }
44559
44560 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
44561@@ -2379,8 +2379,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44562 return;
44563
44564 check_decay_read_errors(mddev, rdev);
44565- atomic_inc(&rdev->read_errors);
44566- if (atomic_read(&rdev->read_errors) > max_read_errors) {
44567+ atomic_inc_unchecked(&rdev->read_errors);
44568+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
44569 char b[BDEVNAME_SIZE];
44570 bdevname(rdev->bdev, b);
44571
44572@@ -2388,7 +2388,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44573 "md/raid10:%s: %s: Raid device exceeded "
44574 "read_error threshold [cur %d:max %d]\n",
44575 mdname(mddev), b,
44576- atomic_read(&rdev->read_errors), max_read_errors);
44577+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
44578 printk(KERN_NOTICE
44579 "md/raid10:%s: %s: Failing raid device\n",
44580 mdname(mddev), b);
44581@@ -2543,7 +2543,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44582 sect +
44583 choose_data_offset(r10_bio, rdev)),
44584 bdevname(rdev->bdev, b));
44585- atomic_add(s, &rdev->corrected_errors);
44586+ atomic_add_unchecked(s, &rdev->corrected_errors);
44587 }
44588
44589 rdev_dec_pending(rdev, mddev);
44590diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
44591index 8577cc7..e80e05d 100644
44592--- a/drivers/md/raid5.c
44593+++ b/drivers/md/raid5.c
44594@@ -1730,6 +1730,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
44595 return 1;
44596 }
44597
44598+#ifdef CONFIG_GRKERNSEC_HIDESYM
44599+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
44600+#endif
44601+
44602 static int grow_stripes(struct r5conf *conf, int num)
44603 {
44604 struct kmem_cache *sc;
44605@@ -1741,7 +1745,11 @@ static int grow_stripes(struct r5conf *conf, int num)
44606 "raid%d-%s", conf->level, mdname(conf->mddev));
44607 else
44608 sprintf(conf->cache_name[0],
44609+#ifdef CONFIG_GRKERNSEC_HIDESYM
44610+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
44611+#else
44612 "raid%d-%p", conf->level, conf->mddev);
44613+#endif
44614 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
44615
44616 conf->active_name = 0;
44617@@ -2017,21 +2025,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
44618 mdname(conf->mddev), STRIPE_SECTORS,
44619 (unsigned long long)s,
44620 bdevname(rdev->bdev, b));
44621- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
44622+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
44623 clear_bit(R5_ReadError, &sh->dev[i].flags);
44624 clear_bit(R5_ReWrite, &sh->dev[i].flags);
44625 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
44626 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
44627
44628- if (atomic_read(&rdev->read_errors))
44629- atomic_set(&rdev->read_errors, 0);
44630+ if (atomic_read_unchecked(&rdev->read_errors))
44631+ atomic_set_unchecked(&rdev->read_errors, 0);
44632 } else {
44633 const char *bdn = bdevname(rdev->bdev, b);
44634 int retry = 0;
44635 int set_bad = 0;
44636
44637 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
44638- atomic_inc(&rdev->read_errors);
44639+ atomic_inc_unchecked(&rdev->read_errors);
44640 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
44641 printk_ratelimited(
44642 KERN_WARNING
44643@@ -2059,7 +2067,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
44644 mdname(conf->mddev),
44645 (unsigned long long)s,
44646 bdn);
44647- } else if (atomic_read(&rdev->read_errors)
44648+ } else if (atomic_read_unchecked(&rdev->read_errors)
44649 > conf->max_nr_stripes)
44650 printk(KERN_WARNING
44651 "md/raid:%s: Too many read errors, failing device %s.\n",
44652diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
44653index 983db75..ef9248c 100644
44654--- a/drivers/media/dvb-core/dvbdev.c
44655+++ b/drivers/media/dvb-core/dvbdev.c
44656@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
44657 const struct dvb_device *template, void *priv, int type)
44658 {
44659 struct dvb_device *dvbdev;
44660- struct file_operations *dvbdevfops;
44661+ file_operations_no_const *dvbdevfops;
44662 struct device *clsdev;
44663 int minor;
44664 int id;
44665diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
44666index 6ad22b6..6e90e2a 100644
44667--- a/drivers/media/dvb-frontends/af9033.h
44668+++ b/drivers/media/dvb-frontends/af9033.h
44669@@ -96,6 +96,6 @@ struct af9033_ops {
44670 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
44671 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
44672 int onoff);
44673-};
44674+} __no_const;
44675
44676 #endif /* AF9033_H */
44677diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
44678index 9b6c3bb..baeb5c7 100644
44679--- a/drivers/media/dvb-frontends/dib3000.h
44680+++ b/drivers/media/dvb-frontends/dib3000.h
44681@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
44682 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
44683 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
44684 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
44685-};
44686+} __no_const;
44687
44688 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
44689 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
44690diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
44691index 1fea0e9..321ce8f 100644
44692--- a/drivers/media/dvb-frontends/dib7000p.h
44693+++ b/drivers/media/dvb-frontends/dib7000p.h
44694@@ -64,7 +64,7 @@ struct dib7000p_ops {
44695 int (*get_adc_power)(struct dvb_frontend *fe);
44696 int (*slave_reset)(struct dvb_frontend *fe);
44697 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
44698-};
44699+} __no_const;
44700
44701 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
44702 void *dib7000p_attach(struct dib7000p_ops *ops);
44703diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
44704index 84cc103..5780c54 100644
44705--- a/drivers/media/dvb-frontends/dib8000.h
44706+++ b/drivers/media/dvb-frontends/dib8000.h
44707@@ -61,7 +61,7 @@ struct dib8000_ops {
44708 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
44709 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
44710 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
44711-};
44712+} __no_const;
44713
44714 #if IS_ENABLED(CONFIG_DVB_DIB8000)
44715 void *dib8000_attach(struct dib8000_ops *ops);
44716diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
44717index 860c98fc..497fa25 100644
44718--- a/drivers/media/pci/cx88/cx88-video.c
44719+++ b/drivers/media/pci/cx88/cx88-video.c
44720@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
44721
44722 /* ------------------------------------------------------------------ */
44723
44724-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44725-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44726-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44727+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44728+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44729+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44730
44731 module_param_array(video_nr, int, NULL, 0444);
44732 module_param_array(vbi_nr, int, NULL, 0444);
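The cx88 change (and the wl128x radio_nr change later in this section) fixes a type mismatch rather than anything security-specific: the arrays were declared unsigned int but registered with module_param_array(..., int, ...), so the generated sysfs accessors parsed and printed them through the wrong type. Declaring them int makes the registration honest; a reduced sketch of the pattern:

#define UNSET (-1)	/* assumed sentinel, matching the driver's usage */

static int video_nr[8] = { [0 ... 7] = UNSET };
module_param_array(video_nr, int, NULL, 0444);	/* type now matches */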
44733diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
44734index 802642d..5534900 100644
44735--- a/drivers/media/pci/ivtv/ivtv-driver.c
44736+++ b/drivers/media/pci/ivtv/ivtv-driver.c
44737@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
44738 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
44739
44740 /* ivtv instance counter */
44741-static atomic_t ivtv_instance = ATOMIC_INIT(0);
44742+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
44743
44744 /* Parameter declarations */
44745 static int cardtype[IVTV_MAX_CARDS];
44746diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
44747index 8cbe6b4..ea3601c 100644
44748--- a/drivers/media/pci/solo6x10/solo6x10-core.c
44749+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
44750@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
44751
44752 static int solo_sysfs_init(struct solo_dev *solo_dev)
44753 {
44754- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
44755+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
44756 struct device *dev = &solo_dev->dev;
44757 const char *driver;
44758 int i;
44759diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
44760index c7141f2..5301fec 100644
44761--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
44762+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
44763@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
44764
44765 int solo_g723_init(struct solo_dev *solo_dev)
44766 {
44767- static struct snd_device_ops ops = { NULL };
44768+ static struct snd_device_ops ops = { };
44769 struct snd_card *card;
44770 struct snd_kcontrol_new kctl;
44771 char name[32];
44772diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44773index 8c84846..27b4f83 100644
44774--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
44775+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44776@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
44777
44778 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
44779 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
44780- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
44781+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
44782 if (p2m_id < 0)
44783 p2m_id = -p2m_id;
44784 }
44785diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
44786index bd8edfa..e82ed85 100644
44787--- a/drivers/media/pci/solo6x10/solo6x10.h
44788+++ b/drivers/media/pci/solo6x10/solo6x10.h
44789@@ -220,7 +220,7 @@ struct solo_dev {
44790
44791 /* P2M DMA Engine */
44792 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
44793- atomic_t p2m_count;
44794+ atomic_unchecked_t p2m_count;
44795 int p2m_jiffies;
44796 unsigned int p2m_timeouts;
44797
44798diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
44799index c135165..dc69499 100644
44800--- a/drivers/media/pci/tw68/tw68-core.c
44801+++ b/drivers/media/pci/tw68/tw68-core.c
44802@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
44803 module_param_array(card, int, NULL, 0444);
44804 MODULE_PARM_DESC(card, "card type");
44805
44806-static atomic_t tw68_instance = ATOMIC_INIT(0);
44807+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
44808
44809 /* ------------------------------------------------------------------ */
44810
44811diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
44812index ba2d8f9..1566684 100644
44813--- a/drivers/media/platform/omap/omap_vout.c
44814+++ b/drivers/media/platform/omap/omap_vout.c
44815@@ -63,7 +63,6 @@ enum omap_vout_channels {
44816 OMAP_VIDEO2,
44817 };
44818
44819-static struct videobuf_queue_ops video_vbq_ops;
44820 /* Variables configurable through module params*/
44821 static u32 video1_numbuffers = 3;
44822 static u32 video2_numbuffers = 3;
44823@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
44824 {
44825 struct videobuf_queue *q;
44826 struct omap_vout_device *vout = NULL;
44827+ static struct videobuf_queue_ops video_vbq_ops = {
44828+ .buf_setup = omap_vout_buffer_setup,
44829+ .buf_prepare = omap_vout_buffer_prepare,
44830+ .buf_release = omap_vout_buffer_release,
44831+ .buf_queue = omap_vout_buffer_queue,
44832+ };
44833
44834 vout = video_drvdata(file);
44835 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
44836@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
44837 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
44838
44839 q = &vout->vbq;
44840- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
44841- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
44842- video_vbq_ops.buf_release = omap_vout_buffer_release;
44843- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
44844 spin_lock_init(&vout->vbq_lock);
44845
44846 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
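The omap_vout change removes a writable file-scope ops table that was re-populated on every open(). As a function-local static with designated initializers it is set up once, at compile time, and under the constify plugin it can live in read-only memory; the pattern in isolation:

/* one-time, compile-time initialization; with constify this table
 * can sit in read-only memory instead of writable .data */
static struct videobuf_queue_ops video_vbq_ops = {
	.buf_setup   = omap_vout_buffer_setup,
	.buf_prepare = omap_vout_buffer_prepare,
	.buf_release = omap_vout_buffer_release,
	.buf_queue   = omap_vout_buffer_queue,
};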
44847diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
44848index fb2acc5..a2fcbdc4 100644
44849--- a/drivers/media/platform/s5p-tv/mixer.h
44850+++ b/drivers/media/platform/s5p-tv/mixer.h
44851@@ -156,7 +156,7 @@ struct mxr_layer {
44852 /** layer index (unique identifier) */
44853 int idx;
44854 /** callbacks for layer methods */
44855- struct mxr_layer_ops ops;
44856+ struct mxr_layer_ops *ops;
44857 /** format array */
44858 const struct mxr_format **fmt_array;
44859 /** size of format array */
44860diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44861index 74344c7..a39e70e 100644
44862--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44863+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44864@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
44865 {
44866 struct mxr_layer *layer;
44867 int ret;
44868- struct mxr_layer_ops ops = {
44869+ static struct mxr_layer_ops ops = {
44870 .release = mxr_graph_layer_release,
44871 .buffer_set = mxr_graph_buffer_set,
44872 .stream_set = mxr_graph_stream_set,
44873diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
44874index b713403..53cb5ad 100644
44875--- a/drivers/media/platform/s5p-tv/mixer_reg.c
44876+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
44877@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
44878 layer->update_buf = next;
44879 }
44880
44881- layer->ops.buffer_set(layer, layer->update_buf);
44882+ layer->ops->buffer_set(layer, layer->update_buf);
44883
44884 if (done && done != layer->shadow_buf)
44885 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
44886diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
44887index b4d2696..91df48e 100644
44888--- a/drivers/media/platform/s5p-tv/mixer_video.c
44889+++ b/drivers/media/platform/s5p-tv/mixer_video.c
44890@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
44891 layer->geo.src.height = layer->geo.src.full_height;
44892
44893 mxr_geometry_dump(mdev, &layer->geo);
44894- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44895+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44896 mxr_geometry_dump(mdev, &layer->geo);
44897 }
44898
44899@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
44900 layer->geo.dst.full_width = mbus_fmt.width;
44901 layer->geo.dst.full_height = mbus_fmt.height;
44902 layer->geo.dst.field = mbus_fmt.field;
44903- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44904+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44905
44906 mxr_geometry_dump(mdev, &layer->geo);
44907 }
44908@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
44909 /* set source size to highest accepted value */
44910 geo->src.full_width = max(geo->dst.full_width, pix->width);
44911 geo->src.full_height = max(geo->dst.full_height, pix->height);
44912- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44913+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44914 mxr_geometry_dump(mdev, &layer->geo);
44915 /* set cropping to total visible screen */
44916 geo->src.width = pix->width;
44917@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
44918 geo->src.x_offset = 0;
44919 geo->src.y_offset = 0;
44920 /* assure consistency of geometry */
44921- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44922+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44923 mxr_geometry_dump(mdev, &layer->geo);
44924 /* set full size to lowest possible value */
44925 geo->src.full_width = 0;
44926 geo->src.full_height = 0;
44927- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44928+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44929 mxr_geometry_dump(mdev, &layer->geo);
44930
44931 /* returning results */
44932@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
44933 target->width = s->r.width;
44934 target->height = s->r.height;
44935
44936- layer->ops.fix_geometry(layer, stage, s->flags);
44937+ layer->ops->fix_geometry(layer, stage, s->flags);
44938
44939 /* retrieve update selection rectangle */
44940 res.left = target->x_offset;
44941@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
44942 mxr_output_get(mdev);
44943
44944 mxr_layer_update_output(layer);
44945- layer->ops.format_set(layer);
44946+ layer->ops->format_set(layer);
44947 /* enabling layer in hardware */
44948 spin_lock_irqsave(&layer->enq_slock, flags);
44949 layer->state = MXR_LAYER_STREAMING;
44950 spin_unlock_irqrestore(&layer->enq_slock, flags);
44951
44952- layer->ops.stream_set(layer, MXR_ENABLE);
44953+ layer->ops->stream_set(layer, MXR_ENABLE);
44954 mxr_streamer_get(mdev);
44955
44956 return 0;
44957@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
44958 spin_unlock_irqrestore(&layer->enq_slock, flags);
44959
44960 /* disabling layer in hardware */
44961- layer->ops.stream_set(layer, MXR_DISABLE);
44962+ layer->ops->stream_set(layer, MXR_DISABLE);
44963 /* remove one streamer */
44964 mxr_streamer_put(mdev);
44965 /* allow changes in output configuration */
44966@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
44967
44968 void mxr_layer_release(struct mxr_layer *layer)
44969 {
44970- if (layer->ops.release)
44971- layer->ops.release(layer);
44972+ if (layer->ops->release)
44973+ layer->ops->release(layer);
44974 }
44975
44976 void mxr_base_layer_release(struct mxr_layer *layer)
44977@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
44978
44979 layer->mdev = mdev;
44980 layer->idx = idx;
44981- layer->ops = *ops;
44982+ layer->ops = ops;
44983
44984 spin_lock_init(&layer->enq_slock);
44985 INIT_LIST_HEAD(&layer->enq_list);
44986diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
44987index c9388c4..ce71ece 100644
44988--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
44989+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
44990@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
44991 {
44992 struct mxr_layer *layer;
44993 int ret;
44994- struct mxr_layer_ops ops = {
44995+ static struct mxr_layer_ops ops = {
44996 .release = mxr_vp_layer_release,
44997 .buffer_set = mxr_vp_buffer_set,
44998 .stream_set = mxr_vp_stream_set,
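All of the s5p-tv hunks implement one change: mxr_layer used to embed a by-value copy of mxr_layer_ops, leaving a function-pointer table in writable heap memory for every layer. Storing a pointer to a single static table (which the graph and vp layer hunks accordingly declare static) removes the writable copies, at the cost of one extra indirection per call:

struct mxr_layer {
	/* was: struct mxr_layer_ops ops; (a writable copy per layer) */
	struct mxr_layer_ops *ops;
	/* ... */
};

/* call sites switch from layer->ops.fn() to: */
layer->ops->stream_set(layer, MXR_ENABLE);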
44999diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
45000index 82affae..42833ec 100644
45001--- a/drivers/media/radio/radio-cadet.c
45002+++ b/drivers/media/radio/radio-cadet.c
45003@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45004 unsigned char readbuf[RDS_BUFFER];
45005 int i = 0;
45006
45007+ if (count > RDS_BUFFER)
45008+ return -EINVAL;
45009 mutex_lock(&dev->lock);
45010 if (dev->rdsstat == 0)
45011 cadet_start_rds(dev);
45012@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45013 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
45014 mutex_unlock(&dev->lock);
45015
45016- if (i && copy_to_user(data, readbuf, i))
45017- return -EFAULT;
45018+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
45019+ i = -EFAULT;
45020+
45021 return i;
45022 }
45023
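Without the new bound, a read() with a large count lets the fill loop run i past the end of readbuf, a fixed RDS_BUFFER-byte stack array, before anything reaches userspace. The added checks reject oversized requests up front and re-verify i before copy_to_user(). The shape of the fix as a standalone function (userspace stand-in, with memcpy in place of copy_to_user):

#include <errno.h>
#include <string.h>
#include <sys/types.h>

#define RDS_BUFFER 256

static ssize_t bounded_read(char *dst, size_t count,
			    const char *ring, size_t avail)
{
	char buf[RDS_BUFFER];
	size_t n;

	if (count > sizeof(buf))	/* the previously missing bound */
		return -EINVAL;
	n = count < avail ? count : avail;
	memcpy(buf, ring, n);		/* fill from the RDS ring */
	memcpy(dst, buf, n);		/* stands in for copy_to_user() */
	return n;
}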
45024diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
45025index 5236035..c622c74 100644
45026--- a/drivers/media/radio/radio-maxiradio.c
45027+++ b/drivers/media/radio/radio-maxiradio.c
45028@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
45029 /* TEA5757 pin mappings */
45030 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
45031
45032-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
45033+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
45034
45035 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
45036 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
45037diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
45038index 050b3bb..79f62b9 100644
45039--- a/drivers/media/radio/radio-shark.c
45040+++ b/drivers/media/radio/radio-shark.c
45041@@ -79,7 +79,7 @@ struct shark_device {
45042 u32 last_val;
45043 };
45044
45045-static atomic_t shark_instance = ATOMIC_INIT(0);
45046+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45047
45048 static void shark_write_val(struct snd_tea575x *tea, u32 val)
45049 {
45050diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
45051index 8654e0d..0608a64 100644
45052--- a/drivers/media/radio/radio-shark2.c
45053+++ b/drivers/media/radio/radio-shark2.c
45054@@ -74,7 +74,7 @@ struct shark_device {
45055 u8 *transfer_buffer;
45056 };
45057
45058-static atomic_t shark_instance = ATOMIC_INIT(0);
45059+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45060
45061 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
45062 {
45063diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
45064index dccf586..d5db411 100644
45065--- a/drivers/media/radio/radio-si476x.c
45066+++ b/drivers/media/radio/radio-si476x.c
45067@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
45068 struct si476x_radio *radio;
45069 struct v4l2_ctrl *ctrl;
45070
45071- static atomic_t instance = ATOMIC_INIT(0);
45072+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
45073
45074 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
45075 if (!radio)
45076diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
45077index 704397f..4d05977 100644
45078--- a/drivers/media/radio/wl128x/fmdrv_common.c
45079+++ b/drivers/media/radio/wl128x/fmdrv_common.c
45080@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
45081 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
45082
45083 /* Radio Nr */
45084-static u32 radio_nr = -1;
45085+static int radio_nr = -1;
45086 module_param(radio_nr, int, 0444);
45087 MODULE_PARM_DESC(radio_nr, "Radio Nr");
45088
45089diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45090index 9fd1527..8927230 100644
45091--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
45092+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45093@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
45094
45095 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
45096 {
45097- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
45098- char result[64];
45099- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
45100- sizeof(result), 0);
45101+ char *buf;
45102+ char *result;
45103+ int retval;
45104+
45105+ buf = kmalloc(2, GFP_KERNEL);
45106+ if (buf == NULL)
45107+ return -ENOMEM;
45108+ result = kmalloc(64, GFP_KERNEL);
45109+ if (result == NULL) {
45110+ kfree(buf);
45111+ return -ENOMEM;
45112+ }
45113+
45114+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
45115+ buf[1] = enable ? 1 : 0;
45116+
45117+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
45118+
45119+ kfree(buf);
45120+ kfree(result);
45121+ return retval;
45122 }
45123
45124 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
45125 {
45126- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
45127- char state[3];
45128- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
45129+ char *buf;
45130+ char *state;
45131+ int retval;
45132+
45133+ buf = kmalloc(2, GFP_KERNEL);
45134+ if (buf == NULL)
45135+ return -ENOMEM;
45136+ state = kmalloc(3, GFP_KERNEL);
45137+ if (state == NULL) {
45138+ kfree(buf);
45139+ return -ENOMEM;
45140+ }
45141+
45142+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
45143+ buf[1] = enable ? 0 : 1;
45144+
45145+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
45146+
45147+ kfree(buf);
45148+ kfree(state);
45149+ return retval;
45150 }
45151
45152 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45153 {
45154- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
45155- char state[3];
45156+ char *query;
45157+ char *state;
45158 int ret;
45159+ query = kmalloc(1, GFP_KERNEL);
45160+ if (query == NULL)
45161+ return -ENOMEM;
45162+ state = kmalloc(3, GFP_KERNEL);
45163+ if (state == NULL) {
45164+ kfree(query);
45165+ return -ENOMEM;
45166+ }
45167+
45168+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
45169
45170 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
45171
45172- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
45173- sizeof(state), 0);
45174+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
45175 if (ret < 0) {
45176 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
45177 "state info\n");
45178@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45179
45180 /* Copy this pointer as we are gonna need it in the release phase */
45181 cinergyt2_usb_device = adap->dev;
45182-
45183+ kfree(query);
45184+ kfree(state);
45185 return 0;
45186 }
45187
45188@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
45189 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45190 {
45191 struct cinergyt2_state *st = d->priv;
45192- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
45193+ u8 *key, *cmd;
45194 int i;
45195
45196+ cmd = kmalloc(1, GFP_KERNEL);
45197+ if (cmd == NULL)
45198+ return -ENOMEM;
45199+ key = kzalloc(5, GFP_KERNEL);
45200+ if (key == NULL) {
45201+ kfree(cmd);
45202+ return -ENOMEM;
45203+ }
45204+
45205+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
45206+
45207 *state = REMOTE_NO_KEY_PRESSED;
45208
45209- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
45210+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
45211 if (key[4] == 0xff) {
45212 /* key repeat */
45213 st->rc_counter++;
45214@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45215 *event = d->last_event;
45216 deb_rc("repeat key, event %x\n",
45217 *event);
45218- return 0;
45219+ goto out;
45220 }
45221 }
45222 deb_rc("repeated key (non repeatable)\n");
45223 }
45224- return 0;
45225+ goto out;
45226 }
45227
45228 /* hack to pass checksum on the custom field */
45229@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45230
45231 deb_rc("key: %*ph\n", 5, key);
45232 }
45233+out:
45234+ kfree(cmd);
45235+ kfree(key);
45236 return 0;
45237 }
45238
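Every cinergyT2 hunk above (and the cinergyT2-fe and dvb-usb-firmware hunks below) applies the same transformation: dvb_usb_generic_rw() hands these buffers to usb_bulk_msg(), which may DMA into them, and DMA to on-stack storage is unsafe. The buffers therefore move to kmalloc'd memory, with every exit path freeing them. The recurring shape, as a hedged sketch (helper name hypothetical):

static int send_cmd(struct dvb_usb_device *d, u8 cmd_byte)	/* hypothetical */
{
	u8 *buf = kmalloc(1, GFP_KERNEL);	/* heap: safe DMA target */
	u8 *reply = kmalloc(64, GFP_KERNEL);
	int ret;

	if (buf == NULL || reply == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	buf[0] = cmd_byte;
	ret = dvb_usb_generic_rw(d, buf, 1, reply, 64, 0);
out:
	kfree(buf);	/* kfree(NULL) is a no-op, so this covers all paths */
	kfree(reply);
	return ret;
}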
45239diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45240index c890fe4..f9b2ae6 100644
45241--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45242+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45243@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
45244 fe_status_t *status)
45245 {
45246 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45247- struct dvbt_get_status_msg result;
45248- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45249+ struct dvbt_get_status_msg *result;
45250+ u8 *cmd;
45251 int ret;
45252
45253- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
45254- sizeof(result), 0);
45255+ cmd = kmalloc(1, GFP_KERNEL);
45256+ if (cmd == NULL)
45257+ return -ENOMEM;
45258+ result = kmalloc(sizeof(*result), GFP_KERNEL);
45259+ if (result == NULL) {
45260+ kfree(cmd);
45261+ return -ENOMEM;
45262+ }
45263+
45264+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45265+
45266+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
45267+ sizeof(*result), 0);
45268 if (ret < 0)
45269- return ret;
45270+ goto out;
45271
45272 *status = 0;
45273
45274- if (0xffff - le16_to_cpu(result.gain) > 30)
45275+ if (0xffff - le16_to_cpu(result->gain) > 30)
45276 *status |= FE_HAS_SIGNAL;
45277- if (result.lock_bits & (1 << 6))
45278+ if (result->lock_bits & (1 << 6))
45279 *status |= FE_HAS_LOCK;
45280- if (result.lock_bits & (1 << 5))
45281+ if (result->lock_bits & (1 << 5))
45282 *status |= FE_HAS_SYNC;
45283- if (result.lock_bits & (1 << 4))
45284+ if (result->lock_bits & (1 << 4))
45285 *status |= FE_HAS_CARRIER;
45286- if (result.lock_bits & (1 << 1))
45287+ if (result->lock_bits & (1 << 1))
45288 *status |= FE_HAS_VITERBI;
45289
45290 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
45291 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
45292 *status &= ~FE_HAS_LOCK;
45293
45294- return 0;
45295+out:
45296+ kfree(cmd);
45297+ kfree(result);
45298+ return ret;
45299 }
45300
45301 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
45302 {
45303 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45304- struct dvbt_get_status_msg status;
45305- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45306+ struct dvbt_get_status_msg *status;
45307+ char *cmd;
45308 int ret;
45309
45310- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45311- sizeof(status), 0);
45312+ cmd = kmalloc(1, GFP_KERNEL);
45313+ if (cmd == NULL)
45314+ return -ENOMEM;
45315+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45316+ if (status == NULL) {
45317+ kfree(cmd);
45318+ return -ENOMEM;
45319+ }
45320+
45321+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45322+
45323+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45324+ sizeof(*status), 0);
45325 if (ret < 0)
45326- return ret;
45327+ goto out;
45328
45329- *ber = le32_to_cpu(status.viterbi_error_rate);
45330+ *ber = le32_to_cpu(status->viterbi_error_rate);
45331+out:
45332+ kfree(cmd);
45333+ kfree(status);
45334 return 0;
45335 }
45336
45337 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
45338 {
45339 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45340- struct dvbt_get_status_msg status;
45341- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45342+ struct dvbt_get_status_msg *status;
45343+ u8 *cmd;
45344 int ret;
45345
45346- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
45347- sizeof(status), 0);
45348+ cmd = kmalloc(1, GFP_KERNEL);
45349+ if (cmd == NULL)
45350+ return -ENOMEM;
45351+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45352+ if (status == NULL) {
45353+ kfree(cmd);
45354+ return -ENOMEM;
45355+ }
45356+
45357+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45358+
45359+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
45360+ sizeof(*status), 0);
45361 if (ret < 0) {
45362 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
45363 ret);
45364- return ret;
45365+ goto out;
45366 }
45367- *unc = le32_to_cpu(status.uncorrected_block_count);
45368- return 0;
45369+ *unc = le32_to_cpu(status->uncorrected_block_count);
45370+
45371+out:
45372+ kfree(cmd);
45373+ kfree(status);
45374+ return ret;
45375 }
45376
45377 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
45378 u16 *strength)
45379 {
45380 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45381- struct dvbt_get_status_msg status;
45382- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45383+ struct dvbt_get_status_msg *status;
45384+ char *cmd;
45385 int ret;
45386
45387- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45388- sizeof(status), 0);
45389+ cmd = kmalloc(1, GFP_KERNEL);
45390+ if (cmd == NULL)
45391+ return -ENOMEM;
45392+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45393+ if (status == NULL) {
45394+ kfree(cmd);
45395+ return -ENOMEM;
45396+ }
45397+
45398+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45399+
45400+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45401+ sizeof(*status), 0);
45402 if (ret < 0) {
45403 err("cinergyt2_fe_read_signal_strength() Failed!"
45404 " (Error=%d)\n", ret);
45405- return ret;
45406+ goto out;
45407 }
45408- *strength = (0xffff - le16_to_cpu(status.gain));
45409+ *strength = (0xffff - le16_to_cpu(status->gain));
45410+
45411+out:
45412+ kfree(cmd);
45413+ kfree(status);
45414 return 0;
45415 }
45416
45417 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
45418 {
45419 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45420- struct dvbt_get_status_msg status;
45421- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45422+ struct dvbt_get_status_msg *status;
45423+ char *cmd;
45424 int ret;
45425
45426- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45427- sizeof(status), 0);
45428+ cmd = kmalloc(1, GFP_KERNEL);
45429+ if (cmd == NULL)
45430+ return -ENOMEM;
45431+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45432+ if (status == NULL) {
45433+ kfree(cmd);
45434+ return -ENOMEM;
45435+ }
45436+
45437+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45438+
45439+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45440+ sizeof(*status), 0);
45441 if (ret < 0) {
45442 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
45443- return ret;
45444+ goto out;
45445 }
45446- *snr = (status.snr << 8) | status.snr;
45447- return 0;
45448+ *snr = (status->snr << 8) | status->snr;
45449+
45450+out:
45451+ kfree(cmd);
45452+ kfree(status);
45453+ return ret;
45454 }
45455
45456 static int cinergyt2_fe_init(struct dvb_frontend *fe)
45457@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
45458 {
45459 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
45460 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45461- struct dvbt_set_parameters_msg param;
45462- char result[2];
45463+ struct dvbt_set_parameters_msg *param;
45464+ char *result;
45465 int err;
45466
45467- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45468- param.tps = cpu_to_le16(compute_tps(fep));
45469- param.freq = cpu_to_le32(fep->frequency / 1000);
45470- param.flags = 0;
45471+ result = kmalloc(2, GFP_KERNEL);
45472+ if (result == NULL)
45473+ return -ENOMEM;
45474+ param = kmalloc(sizeof(*param), GFP_KERNEL);
45475+ if (param == NULL) {
45476+ kfree(result);
45477+ return -ENOMEM;
45478+ }
45479+
45480+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45481+ param->tps = cpu_to_le16(compute_tps(fep));
45482+ param->freq = cpu_to_le32(fep->frequency / 1000);
45483+ param->flags = 0;
45484
45485 switch (fep->bandwidth_hz) {
45486 default:
45487 case 8000000:
45488- param.bandwidth = 8;
45489+ param->bandwidth = 8;
45490 break;
45491 case 7000000:
45492- param.bandwidth = 7;
45493+ param->bandwidth = 7;
45494 break;
45495 case 6000000:
45496- param.bandwidth = 6;
45497+ param->bandwidth = 6;
45498 break;
45499 }
45500
45501 err = dvb_usb_generic_rw(state->d,
45502- (char *)&param, sizeof(param),
45503- result, sizeof(result), 0);
45504+ (char *)param, sizeof(*param),
45505+ result, 2, 0);
45506 if (err < 0)
45507 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
45508
45509- return (err < 0) ? err : 0;
45510+ kfree(result);
45511+ kfree(param);
45512+ return err;
45513 }
45514
45515 static void cinergyt2_fe_release(struct dvb_frontend *fe)
45516diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45517index 733a7ff..f8b52e3 100644
45518--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45519+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45520@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
45521
45522 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
45523 {
45524- struct hexline hx;
45525- u8 reset;
45526+ struct hexline *hx;
45527+ u8 *reset;
45528 int ret,pos=0;
45529
45530+ reset = kmalloc(1, GFP_KERNEL);
45531+ if (reset == NULL)
45532+ return -ENOMEM;
45533+
45534+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
45535+ if (hx == NULL) {
45536+ kfree(reset);
45537+ return -ENOMEM;
45538+ }
45539+
45540 /* stop the CPU */
45541- reset = 1;
45542- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
45543+ reset[0] = 1;
45544+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
45545 err("could not stop the USB controller CPU.");
45546
45547- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
45548- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
45549- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
45550+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
45551+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
45552+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
45553
45554- if (ret != hx.len) {
45555+ if (ret != hx->len) {
45556 err("error while transferring firmware "
45557 "(transferred size: %d, block size: %d)",
45558- ret,hx.len);
45559+ ret,hx->len);
45560 ret = -EINVAL;
45561 break;
45562 }
45563 }
45564 if (ret < 0) {
45565 err("firmware download failed at %d with %d",pos,ret);
45566+ kfree(reset);
45567+ kfree(hx);
45568 return ret;
45569 }
45570
45571 if (ret == 0) {
45572 /* restart the CPU */
45573- reset = 0;
45574- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
45575+ reset[0] = 0;
45576+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
45577 err("could not restart the USB controller CPU.");
45578 ret = -EINVAL;
45579 }
45580 } else
45581 ret = -EIO;
45582
45583+ kfree(reset);
45584+ kfree(hx);
45585+
45586 return ret;
45587 }
45588 EXPORT_SYMBOL(usb_cypress_load_firmware);
45589diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
45590index 1a3df10..57997a5 100644
45591--- a/drivers/media/usb/dvb-usb/dw2102.c
45592+++ b/drivers/media/usb/dvb-usb/dw2102.c
45593@@ -118,7 +118,7 @@ struct su3000_state {
45594
45595 struct s6x0_state {
45596 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
45597-};
45598+} __no_const;
45599
45600 /* debug */
45601 static int dvb_usb_dw2102_debug;
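The __no_const added to struct s6x0_state is an annotation for the constify gcc plugin that this patch ships: structures consisting only of function pointers are normally made const automatically, but s6x0_state stores old_set_voltage at runtime in order to chain to it, so the type has to opt out. Roughly how the patch defines the markers elsewhere (a sketch of the compiler.h side, not this file):

	#ifdef CONSTIFY_PLUGIN
	#define __no_const __attribute__((no_const))	/* never constify this type */
	#define __do_const __attribute__((do_const))	/* constify it even so */
	#else
	#define __no_const
	#define __do_const
	#endif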
45602diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
45603index 5801ae7..83f71fa 100644
45604--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
45605+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
45606@@ -87,8 +87,11 @@ struct technisat_usb2_state {
45607 static int technisat_usb2_i2c_access(struct usb_device *udev,
45608 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
45609 {
45610- u8 b[64];
45611- int ret, actual_length;
45612+ u8 *b = kmalloc(64, GFP_KERNEL);
45613+ int ret, actual_length, error = 0;
45614+
45615+ if (b == NULL)
45616+ return -ENOMEM;
45617
45618 deb_i2c("i2c-access: %02x, tx: ", device_addr);
45619 debug_dump(tx, txlen, deb_i2c);
45620@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45621
45622 if (ret < 0) {
45623 err("i2c-error: out failed %02x = %d", device_addr, ret);
45624- return -ENODEV;
45625+ error = -ENODEV;
45626+ goto out;
45627 }
45628
45629 ret = usb_bulk_msg(udev,
45630@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45631 b, 64, &actual_length, 1000);
45632 if (ret < 0) {
45633 err("i2c-error: in failed %02x = %d", device_addr, ret);
45634- return -ENODEV;
45635+ error = -ENODEV;
45636+ goto out;
45637 }
45638
45639 if (b[0] != I2C_STATUS_OK) {
45640@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45641 /* handle tuner-i2c-nak */
45642 if (!(b[0] == I2C_STATUS_NAK &&
45643 device_addr == 0x60
45644- /* && device_is_technisat_usb2 */))
45645- return -ENODEV;
45646+ /* && device_is_technisat_usb2 */)) {
45647+ error = -ENODEV;
45648+ goto out;
45649+ }
45650 }
45651
45652 deb_i2c("status: %d, ", b[0]);
45653@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45654
45655 deb_i2c("\n");
45656
45657- return 0;
45658+out:
45659+ kfree(b);
45660+ return error;
45661 }
45662
45663 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
45664@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45665 {
45666 int ret;
45667
45668- u8 led[8] = {
45669- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45670- 0
45671- };
45672+ u8 *led = kzalloc(8, GFP_KERNEL);
45673+
45674+ if (led == NULL)
45675+ return -ENOMEM;
45676
45677 if (disable_led_control && state != TECH_LED_OFF)
45678 return 0;
45679
45680+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
45681+
45682 switch (state) {
45683 case TECH_LED_ON:
45684 led[1] = 0x82;
45685@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45686 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45687 USB_TYPE_VENDOR | USB_DIR_OUT,
45688 0, 0,
45689- led, sizeof(led), 500);
45690+ led, 8, 500);
45691
45692 mutex_unlock(&d->i2c_mutex);
45693+
45694+ kfree(led);
45695+
45696 return ret;
45697 }
45698
45699 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
45700 {
45701 int ret;
45702- u8 b = 0;
45703+ u8 *b = kzalloc(1, GFP_KERNEL);
45704+
45705+ if (b == NULL)
45706+ return -ENOMEM;
45707
45708 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
45709 return -EAGAIN;
45710@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
45711 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
45712 USB_TYPE_VENDOR | USB_DIR_OUT,
45713 (red << 8) | green, 0,
45714- &b, 1, 500);
45715+ b, 1, 500);
45716
45717 mutex_unlock(&d->i2c_mutex);
45718
45719+ kfree(b);
45720+
45721 return ret;
45722 }
45723
45724@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45725 struct dvb_usb_device_description **desc, int *cold)
45726 {
45727 int ret;
45728- u8 version[3];
45729+ u8 *version = kmalloc(3, GFP_KERNEL);
45730
45731 /* first select the interface */
45732 if (usb_set_interface(udev, 0, 1) != 0)
45733@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45734
45735 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
45736
45737+ if (version == NULL)
45738+ return 0;
45739+
45740 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
45741 GET_VERSION_INFO_VENDOR_REQUEST,
45742 USB_TYPE_VENDOR | USB_DIR_IN,
45743 0, 0,
45744- version, sizeof(version), 500);
45745+ version, 3, 500);
45746
45747 if (ret < 0)
45748 *cold = 1;
45749@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45750 *cold = 0;
45751 }
45752
45753+ kfree(version);
45754+
45755 return 0;
45756 }
45757
45758@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
45759
45760 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45761 {
45762- u8 buf[62], *b;
45763+ u8 *buf, *b;
45764 int ret;
45765 struct ir_raw_event ev;
45766
45767+ buf = kmalloc(62, GFP_KERNEL);
45768+
45769+ if (buf == NULL)
45770+ return -ENOMEM;
45771+
45772 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
45773 buf[1] = 0x08;
45774 buf[2] = 0x8f;
45775@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45776 GET_IR_DATA_VENDOR_REQUEST,
45777 USB_TYPE_VENDOR | USB_DIR_IN,
45778 0x8080, 0,
45779- buf, sizeof(buf), 500);
45780+ buf, 62, 500);
45781
45782 unlock:
45783 mutex_unlock(&d->i2c_mutex);
45784
45785- if (ret < 0)
45786+ if (ret < 0) {
45787+ kfree(buf);
45788 return ret;
45789+ }
45790
45791- if (ret == 1)
45792+ if (ret == 1) {
45793+ kfree(buf);
45794 return 0; /* no key pressed */
45795+ }
45796
45797 /* decoding */
45798 b = buf+1;
45799@@ -656,6 +689,8 @@ unlock:
45800
45801 ir_raw_event_handle(d->rc_dev);
45802
45803+ kfree(buf);
45804+
45805 return 1;
45806 }
45807
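One caveat with the stack-to-heap conversions in this file: every early return now has to free the allocation, and two paths above miss it. technisat_usb2_set_led returns 0 on the disable_led_control check after led is already allocated, and technisat_usb2_set_led_timer returns -EAGAIN without freeing b. A leak-free sketch of the timer helper (the usb_control_msg arguments that fall outside the visible hunk context are assumed):

	static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
	{
		int ret;
		u8 *b = kzalloc(1, GFP_KERNEL);

		if (b == NULL)
			return -ENOMEM;

		if (mutex_lock_interruptible(&d->i2c_mutex) < 0) {
			kfree(b);	/* do not leak on the -EAGAIN path */
			return -EAGAIN;
		}

		ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
			SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
			USB_TYPE_VENDOR | USB_DIR_OUT,
			(red << 8) | green, 0,
			b, 1, 500);

		mutex_unlock(&d->i2c_mutex);
		kfree(b);
		return ret;
	}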
45808diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45809index af63543..0436f20 100644
45810--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45811+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45812@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
45813 * by passing a very big num_planes value */
45814 uplane = compat_alloc_user_space(num_planes *
45815 sizeof(struct v4l2_plane));
45816- kp->m.planes = (__force struct v4l2_plane *)uplane;
45817+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
45818
45819 while (--num_planes >= 0) {
45820 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
45821@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
45822 if (num_planes == 0)
45823 return 0;
45824
45825- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
45826+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
45827 if (get_user(p, &up->m.planes))
45828 return -EFAULT;
45829 uplane32 = compat_ptr(p);
45830@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
45831 get_user(kp->flags, &up->flags) ||
45832 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
45833 return -EFAULT;
45834- kp->base = (__force void *)compat_ptr(tmp);
45835+ kp->base = (__force_kernel void *)compat_ptr(tmp);
45836 return 0;
45837 }
45838
45839@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
45840 n * sizeof(struct v4l2_ext_control32)))
45841 return -EFAULT;
45842 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
45843- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
45844+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
45845 while (--n >= 0) {
45846 u32 id;
45847
45848@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
45849 {
45850 struct v4l2_ext_control32 __user *ucontrols;
45851 struct v4l2_ext_control __user *kcontrols =
45852- (__force struct v4l2_ext_control __user *)kp->controls;
45853+ (struct v4l2_ext_control __force_user *)kp->controls;
45854 int n = kp->count;
45855 compat_caddr_t p;
45856
45857@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
45858 get_user(tmp, &up->edid) ||
45859 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
45860 return -EFAULT;
45861- kp->edid = (__force u8 *)compat_ptr(tmp);
45862+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
45863 return 0;
45864 }
45865
45866diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
45867index 015f92a..59e311e 100644
45868--- a/drivers/media/v4l2-core/v4l2-device.c
45869+++ b/drivers/media/v4l2-core/v4l2-device.c
45870@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
45871 EXPORT_SYMBOL_GPL(v4l2_device_put);
45872
45873 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
45874- atomic_t *instance)
45875+ atomic_unchecked_t *instance)
45876 {
45877- int num = atomic_inc_return(instance) - 1;
45878+ int num = atomic_inc_return_unchecked(instance) - 1;
45879 int len = strlen(basename);
45880
45881 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
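The atomic_unchecked_t conversion here comes from PAX_REFCOUNT: with it enabled, atomic_inc() and friends trap on overflow to block refcount-wraparound exploits. An instance number (or any pure statistic) is not a reference count and may wrap harmlessly, so it moves to the _unchecked variants, which skip the trap. Side by side, as a sketch:

	static atomic_t refs = ATOMIC_INIT(0);			/* overflow traps */
	static atomic_unchecked_t events = ATOMIC_INIT(0);	/* plain wrapping counter */

	static void demo(void)
	{
		atomic_inc(&refs);		/* protected: a real reference count */
		atomic_inc_unchecked(&events);	/* statistic: wraparound is harmless */
	}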
45882diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
45883index faac2f4..e39dcd9 100644
45884--- a/drivers/media/v4l2-core/v4l2-ioctl.c
45885+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
45886@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
45887 struct file *file, void *fh, void *p);
45888 } u;
45889 void (*debug)(const void *arg, bool write_only);
45890-};
45891+} __do_const;
45892+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
45893
45894 /* This control needs a priority check */
45895 #define INFO_FL_PRIO (1 << 0)
45896@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
45897 struct video_device *vfd = video_devdata(file);
45898 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
45899 bool write_only = false;
45900- struct v4l2_ioctl_info default_info;
45901+ v4l2_ioctl_info_no_const default_info;
45902 const struct v4l2_ioctl_info *info;
45903 void *fh = file->private_data;
45904 struct v4l2_fh *vfh = NULL;
45905@@ -2422,7 +2423,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45906 ret = -EINVAL;
45907 break;
45908 }
45909- *user_ptr = (void __user *)buf->m.planes;
45910+ *user_ptr = (void __force_user *)buf->m.planes;
45911 *kernel_ptr = (void **)&buf->m.planes;
45912 *array_size = sizeof(struct v4l2_plane) * buf->length;
45913 ret = 1;
45914@@ -2439,7 +2440,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45915 ret = -EINVAL;
45916 break;
45917 }
45918- *user_ptr = (void __user *)edid->edid;
45919+ *user_ptr = (void __force_user *)edid->edid;
45920 *kernel_ptr = (void **)&edid->edid;
45921 *array_size = edid->blocks * 128;
45922 ret = 1;
45923@@ -2457,7 +2458,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45924 ret = -EINVAL;
45925 break;
45926 }
45927- *user_ptr = (void __user *)ctrls->controls;
45928+ *user_ptr = (void __force_user *)ctrls->controls;
45929 *kernel_ptr = (void **)&ctrls->controls;
45930 *array_size = sizeof(struct v4l2_ext_control)
45931 * ctrls->count;
45932@@ -2558,7 +2559,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
45933 }
45934
45935 if (has_array_args) {
45936- *kernel_ptr = (void __force *)user_ptr;
45937+ *kernel_ptr = (void __force_kernel *)user_ptr;
45938 if (copy_to_user(user_ptr, mbuf, array_size))
45939 err = -EFAULT;
45940 goto out_array_args;
45941diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
45942index 24696f5..3637780 100644
45943--- a/drivers/memory/omap-gpmc.c
45944+++ b/drivers/memory/omap-gpmc.c
45945@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
45946 };
45947
45948 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
45949-static struct irq_chip gpmc_irq_chip;
45950 static int gpmc_irq_start;
45951
45952 static struct resource gpmc_mem_root;
45953@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
45954
45955 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
45956
45957+static struct irq_chip gpmc_irq_chip = {
45958+ .name = "gpmc",
45959+ .irq_startup = gpmc_irq_noop_ret,
45960+ .irq_enable = gpmc_irq_enable,
45961+ .irq_disable = gpmc_irq_disable,
45962+ .irq_shutdown = gpmc_irq_noop,
45963+ .irq_ack = gpmc_irq_noop,
45964+ .irq_mask = gpmc_irq_noop,
45965+ .irq_unmask = gpmc_irq_noop,
45966+};
45967+
45968 static int gpmc_setup_irq(void)
45969 {
45970 int i;
45971@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
45972 return gpmc_irq_start;
45973 }
45974
45975- gpmc_irq_chip.name = "gpmc";
45976- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
45977- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
45978- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
45979- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
45980- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
45981- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
45982- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
45983-
45984 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
45985 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
45986
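The gpmc_irq_chip rework shows a recurring shape: a structure filled in field by field at runtime must stay writable, while one fully initialized at its definition can be constified and placed in read-only data. The same transformation in miniature (hypothetical names):

	static void demo_noop(struct irq_data *d) { }

	/* before: assigned at runtime, so the object must remain writable */
	static struct irq_chip chip_rt;
	static void init_rt(void) { chip_rt.irq_ack = demo_noop; }

	/* after: built at compile time, eligible for read-only placement */
	static struct irq_chip chip_ro = {
		.name    = "demo",
		.irq_ack = demo_noop,
	};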
45987diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
45988index 187f836..679544b 100644
45989--- a/drivers/message/fusion/mptbase.c
45990+++ b/drivers/message/fusion/mptbase.c
45991@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
45992 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
45993 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
45994
45995+#ifdef CONFIG_GRKERNSEC_HIDESYM
45996+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
45997+#else
45998 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
45999 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
46000+#endif
46001+
46002 /*
46003 * Rounding UP to nearest 4-kB boundary here...
46004 */
46005@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46006 ioc->facts.GlobalCredits);
46007
46008 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
46009+#ifdef CONFIG_GRKERNSEC_HIDESYM
46010+ NULL, NULL);
46011+#else
46012 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
46013+#endif
46014 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
46015 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
46016 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
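CONFIG_GRKERNSEC_HIDESYM targets kernel-pointer disclosure: raw %p values in /proc hand an attacker object addresses and undermine layout randomization, so these hunks print NULL instead when the option is on. Mainline later attacked the same problem with the %pK specifier (and, later still, unconditional pointer hashing); a hedged illustration of %pK, which is not what this patch uses:

	/* %pK prints zeroes for unprivileged readers (see kptr_restrict)
	 * instead of the raw kernel address: */
	seq_printf(m, "  RequestFrames @ %pK\n", ioc->req_frames);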
46017diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
46018index 5bdaae1..eced16f 100644
46019--- a/drivers/message/fusion/mptsas.c
46020+++ b/drivers/message/fusion/mptsas.c
46021@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
46022 return 0;
46023 }
46024
46025+static inline void
46026+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46027+{
46028+ if (phy_info->port_details) {
46029+ phy_info->port_details->rphy = rphy;
46030+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46031+ ioc->name, rphy));
46032+ }
46033+
46034+ if (rphy) {
46035+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46036+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46037+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46038+ ioc->name, rphy, rphy->dev.release));
46039+ }
46040+}
46041+
46042 /* no mutex */
46043 static void
46044 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
46045@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
46046 return NULL;
46047 }
46048
46049-static inline void
46050-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46051-{
46052- if (phy_info->port_details) {
46053- phy_info->port_details->rphy = rphy;
46054- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46055- ioc->name, rphy));
46056- }
46057-
46058- if (rphy) {
46059- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46060- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46061- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46062- ioc->name, rphy, rphy->dev.release));
46063- }
46064-}
46065-
46066 static inline struct sas_port *
46067 mptsas_get_port(struct mptsas_phyinfo *phy_info)
46068 {
46069diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
46070index b7d87cd..3fb36da 100644
46071--- a/drivers/message/i2o/i2o_proc.c
46072+++ b/drivers/message/i2o/i2o_proc.c
46073@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
46074 "Array Controller Device"
46075 };
46076
46077-static char *chtostr(char *tmp, u8 *chars, int n)
46078-{
46079- tmp[0] = 0;
46080- return strncat(tmp, (char *)chars, n);
46081-}
46082-
46083 static int i2o_report_query_status(struct seq_file *seq, int block_status,
46084 char *group)
46085 {
46086@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
46087 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
46088 {
46089 struct i2o_controller *c = (struct i2o_controller *)seq->private;
46090- static u32 work32[5];
46091- static u8 *work8 = (u8 *) work32;
46092- static u16 *work16 = (u16 *) work32;
46093+ u32 work32[5];
46094+ u8 *work8 = (u8 *) work32;
46095+ u16 *work16 = (u16 *) work32;
46096 int token;
46097 u32 hwcap;
46098
46099@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46100 } *result;
46101
46102 i2o_exec_execute_ddm_table ddm_table;
46103- char tmp[28 + 1];
46104
46105 result = kmalloc(sizeof(*result), GFP_KERNEL);
46106 if (!result)
46107@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46108
46109 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
46110 seq_printf(seq, "%-#8x", ddm_table.module_id);
46111- seq_printf(seq, "%-29s",
46112- chtostr(tmp, ddm_table.module_name_version, 28));
46113+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
46114 seq_printf(seq, "%9d ", ddm_table.data_size);
46115 seq_printf(seq, "%8d", ddm_table.code_size);
46116
46117@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46118
46119 i2o_driver_result_table *result;
46120 i2o_driver_store_table *dst;
46121- char tmp[28 + 1];
46122
46123 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
46124 if (result == NULL)
46125@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46126
46127 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
46128 seq_printf(seq, "%-#8x", dst->module_id);
46129- seq_printf(seq, "%-29s",
46130- chtostr(tmp, dst->module_name_version, 28));
46131- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
46132+ seq_printf(seq, "%-.28s", dst->module_name_version);
46133+ seq_printf(seq, "%-.8s", dst->date);
46134 seq_printf(seq, "%8d ", dst->module_size);
46135 seq_printf(seq, "%8d ", dst->mpb_size);
46136 seq_printf(seq, "0x%04x", dst->module_flags);
46137@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
46138 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46139 {
46140 struct i2o_device *d = (struct i2o_device *)seq->private;
46141- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46142+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46143 // == (allow) 512d bytes (max)
46144- static u16 *work16 = (u16 *) work32;
46145+ u16 *work16 = (u16 *) work32;
46146 int token;
46147- char tmp[16 + 1];
46148
46149 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
46150
46151@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46152 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
46153 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
46154 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
46155- seq_printf(seq, "Vendor info : %s\n",
46156- chtostr(tmp, (u8 *) (work32 + 2), 16));
46157- seq_printf(seq, "Product info : %s\n",
46158- chtostr(tmp, (u8 *) (work32 + 6), 16));
46159- seq_printf(seq, "Description : %s\n",
46160- chtostr(tmp, (u8 *) (work32 + 10), 16));
46161- seq_printf(seq, "Product rev. : %s\n",
46162- chtostr(tmp, (u8 *) (work32 + 14), 8));
46163+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
46164+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
46165+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
46166+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
46167
46168 seq_printf(seq, "Serial number : ");
46169 print_serial_number(seq, (u8 *) (work32 + 16),
46170@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46171 u8 pad[256]; // allow up to 256 byte (max) serial number
46172 } result;
46173
46174- char tmp[24 + 1];
46175-
46176 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
46177
46178 if (token < 0) {
46179@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46180 }
46181
46182 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
46183- seq_printf(seq, "Module name : %s\n",
46184- chtostr(tmp, result.module_name, 24));
46185- seq_printf(seq, "Module revision : %s\n",
46186- chtostr(tmp, result.module_rev, 8));
46187+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
46188+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
46189
46190 seq_printf(seq, "Serial number : ");
46191 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
46192@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46193 u8 instance_number[4];
46194 } result;
46195
46196- char tmp[64 + 1];
46197-
46198 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
46199
46200 if (token < 0) {
46201@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46202 return 0;
46203 }
46204
46205- seq_printf(seq, "Device name : %s\n",
46206- chtostr(tmp, result.device_name, 64));
46207- seq_printf(seq, "Service name : %s\n",
46208- chtostr(tmp, result.service_name, 64));
46209- seq_printf(seq, "Physical name : %s\n",
46210- chtostr(tmp, result.physical_location, 64));
46211- seq_printf(seq, "Instance number : %s\n",
46212- chtostr(tmp, result.instance_number, 4));
46213+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
46214+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
46215+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
46216+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
46217
46218 return 0;
46219 }
46220@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46221 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
46222 {
46223 struct i2o_device *d = (struct i2o_device *)seq->private;
46224- static u32 work32[12];
46225- static u16 *work16 = (u16 *) work32;
46226- static u8 *work8 = (u8 *) work32;
46227+ u32 work32[12];
46228+ u16 *work16 = (u16 *) work32;
46229+ u8 *work8 = (u8 *) work32;
46230 int token;
46231
46232 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
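The chtostr() removal rests on a printf idiom worth knowing: the helper existed only to bound-copy a fixed-width, possibly non-NUL-terminated field into a temporary before printing it. A precision such as "%.28s" does the same job in place, reading at most 28 bytes and still stopping early at a NUL, so the temporaries (and the racy static work buffers) disappear with it:

	#include <stdio.h>

	int main(void)
	{
		char raw[4] = { 'A', 'B', 'C', 'D' };	/* no terminating NUL */

		printf("%.4s\n", raw);	/* safe: reads exactly 4 bytes, prints ABCD */
		printf("%.4s\n", "AB");	/* stops at the NUL: prints AB */
		return 0;
	}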
46233diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
46234index 92752fb..a7494f6 100644
46235--- a/drivers/message/i2o/iop.c
46236+++ b/drivers/message/i2o/iop.c
46237@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
46238
46239 spin_lock_irqsave(&c->context_list_lock, flags);
46240
46241- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
46242- atomic_inc(&c->context_list_counter);
46243+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
46244+ atomic_inc_unchecked(&c->context_list_counter);
46245
46246- entry->context = atomic_read(&c->context_list_counter);
46247+ entry->context = atomic_read_unchecked(&c->context_list_counter);
46248
46249 list_add(&entry->list, &c->context_list);
46250
46251@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
46252
46253 #if BITS_PER_LONG == 64
46254 spin_lock_init(&c->context_list_lock);
46255- atomic_set(&c->context_list_counter, 0);
46256+ atomic_set_unchecked(&c->context_list_counter, 0);
46257 INIT_LIST_HEAD(&c->context_list);
46258 #endif
46259
46260diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
46261index 9a8e185..27ff17d 100644
46262--- a/drivers/mfd/ab8500-debugfs.c
46263+++ b/drivers/mfd/ab8500-debugfs.c
46264@@ -100,7 +100,7 @@ static int irq_last;
46265 static u32 *irq_count;
46266 static int num_irqs;
46267
46268-static struct device_attribute **dev_attr;
46269+static device_attribute_no_const **dev_attr;
46270 static char **event_name;
46271
46272 static u8 avg_sample = SAMPLE_16;
46273diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
46274index c880c89..45a7c68 100644
46275--- a/drivers/mfd/max8925-i2c.c
46276+++ b/drivers/mfd/max8925-i2c.c
46277@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
46278 const struct i2c_device_id *id)
46279 {
46280 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
46281- static struct max8925_chip *chip;
46282+ struct max8925_chip *chip;
46283 struct device_node *node = client->dev.of_node;
46284
46285 if (node && !pdata) {
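The dropped static on chip is a genuine bug fix riding along with the hardening: a static local in a probe routine is one shared slot for every device instance, so a second probe reuses (and can race against) the first device's pointer. The tps65910.c and cfi_cmdset_0020.c hunks nearby make the same correction. In miniature:

	static int demo_probe(struct i2c_client *client)
	{
		static struct max8925_chip *chip;	/* BUG: shared across all probes */

		/* Two devices probing concurrently race on the single slot above;
		 * dropping 'static' gives each call its own pointer. */
		chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
		return chip ? 0 : -ENOMEM;
	}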
46286diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
46287index 7612d89..70549c2 100644
46288--- a/drivers/mfd/tps65910.c
46289+++ b/drivers/mfd/tps65910.c
46290@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
46291 struct tps65910_platform_data *pdata)
46292 {
46293 int ret = 0;
46294- static struct regmap_irq_chip *tps6591x_irqs_chip;
46295+ struct regmap_irq_chip *tps6591x_irqs_chip;
46296
46297 if (!irq) {
46298 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
46299diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
46300index 1b772ef..01e77d33 100644
46301--- a/drivers/mfd/twl4030-irq.c
46302+++ b/drivers/mfd/twl4030-irq.c
46303@@ -34,6 +34,7 @@
46304 #include <linux/of.h>
46305 #include <linux/irqdomain.h>
46306 #include <linux/i2c/twl.h>
46307+#include <asm/pgtable.h>
46308
46309 #include "twl-core.h"
46310
46311@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
46312 * Install an irq handler for each of the SIH modules;
46313 * clone dummy irq_chip since PIH can't *do* anything
46314 */
46315- twl4030_irq_chip = dummy_irq_chip;
46316- twl4030_irq_chip.name = "twl4030";
46317+ pax_open_kernel();
46318+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
46319+ *(const char **)&twl4030_irq_chip.name = "twl4030";
46320
46321- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46322+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46323+ pax_close_kernel();
46324
46325 for (i = irq_base; i < irq_end; i++) {
46326 irq_set_chip_and_handler(i, &twl4030_irq_chip,
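pax_open_kernel()/pax_close_kernel() bracket deliberate writes to data that KERNEXEC and the constify plugin keep read-only, and the *(const char **)& style casts strip the const those features added. On x86 the pair essentially toggles CR0.WP with preemption disabled; an approximate sketch (the real implementation is per-architecture and adds barriers):

	static inline unsigned long pax_open_kernel(void)
	{
		preempt_disable();
		write_cr0(read_cr0() & ~X86_CR0_WP);	/* allow writes to RO pages */
		return 0;
	}

	static inline unsigned long pax_close_kernel(void)
	{
		write_cr0(read_cr0() | X86_CR0_WP);	/* restore write protection */
		preempt_enable();
		return 0;
	}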
46327diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
46328index 464419b..64bae8d 100644
46329--- a/drivers/misc/c2port/core.c
46330+++ b/drivers/misc/c2port/core.c
46331@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
46332 goto error_idr_alloc;
46333 c2dev->id = ret;
46334
46335- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46336+ pax_open_kernel();
46337+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46338+ pax_close_kernel();
46339
46340 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
46341 "c2port%d", c2dev->id);
46342diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
46343index 8385177..2f54635 100644
46344--- a/drivers/misc/eeprom/sunxi_sid.c
46345+++ b/drivers/misc/eeprom/sunxi_sid.c
46346@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
46347
46348 platform_set_drvdata(pdev, sid_data);
46349
46350- sid_bin_attr.size = sid_data->keysize;
46351+ pax_open_kernel();
46352+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
46353+ pax_close_kernel();
46354 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
46355 return -ENODEV;
46356
46357diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
46358index 36f5d52..32311c3 100644
46359--- a/drivers/misc/kgdbts.c
46360+++ b/drivers/misc/kgdbts.c
46361@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
46362 char before[BREAK_INSTR_SIZE];
46363 char after[BREAK_INSTR_SIZE];
46364
46365- probe_kernel_read(before, (char *)kgdbts_break_test,
46366+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
46367 BREAK_INSTR_SIZE);
46368 init_simple_test();
46369 ts.tst = plant_and_detach_test;
46370@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
46371 /* Activate test with initial breakpoint */
46372 if (!is_early)
46373 kgdb_breakpoint();
46374- probe_kernel_read(after, (char *)kgdbts_break_test,
46375+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
46376 BREAK_INSTR_SIZE);
46377 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
46378 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
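ktla_ktva() exists because i386 KERNEXEC maps kernel text at a different virtual address than the linear data mapping: instruction bytes fetched through a data pointer, as probe_kernel_read() does here, must be reached through the translated address (ktva_ktla() is the inverse). Where no such split exists the macros are the identity. Roughly, for 32-bit x86 as this patch defines it:

	#ifdef CONFIG_PAX_KERNEXEC
	#define ktla_ktva(addr)	((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
	#define ktva_ktla(addr)	((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
	#else
	#define ktla_ktva(addr)	(addr)
	#define ktva_ktla(addr)	(addr)
	#endif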
46379diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
46380index 3ef4627..8d00486 100644
46381--- a/drivers/misc/lis3lv02d/lis3lv02d.c
46382+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
46383@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
46384 * the lid is closed. This leads to interrupts as soon as a little move
46385 * is done.
46386 */
46387- atomic_inc(&lis3->count);
46388+ atomic_inc_unchecked(&lis3->count);
46389
46390 wake_up_interruptible(&lis3->misc_wait);
46391 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
46392@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
46393 if (lis3->pm_dev)
46394 pm_runtime_get_sync(lis3->pm_dev);
46395
46396- atomic_set(&lis3->count, 0);
46397+ atomic_set_unchecked(&lis3->count, 0);
46398 return 0;
46399 }
46400
46401@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
46402 add_wait_queue(&lis3->misc_wait, &wait);
46403 while (true) {
46404 set_current_state(TASK_INTERRUPTIBLE);
46405- data = atomic_xchg(&lis3->count, 0);
46406+ data = atomic_xchg_unchecked(&lis3->count, 0);
46407 if (data)
46408 break;
46409
46410@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
46411 struct lis3lv02d, miscdev);
46412
46413 poll_wait(file, &lis3->misc_wait, wait);
46414- if (atomic_read(&lis3->count))
46415+ if (atomic_read_unchecked(&lis3->count))
46416 return POLLIN | POLLRDNORM;
46417 return 0;
46418 }
46419diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
46420index c439c82..1f20f57 100644
46421--- a/drivers/misc/lis3lv02d/lis3lv02d.h
46422+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
46423@@ -297,7 +297,7 @@ struct lis3lv02d {
46424 struct input_polled_dev *idev; /* input device */
46425 struct platform_device *pdev; /* platform device */
46426 struct regulator_bulk_data regulators[2];
46427- atomic_t count; /* interrupt count after last read */
46428+ atomic_unchecked_t count; /* interrupt count after last read */
46429 union axis_conversion ac; /* hw -> logical axis */
46430 int mapped_btns[3];
46431
46432diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
46433index 2f30bad..c4c13d0 100644
46434--- a/drivers/misc/sgi-gru/gruhandles.c
46435+++ b/drivers/misc/sgi-gru/gruhandles.c
46436@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
46437 unsigned long nsec;
46438
46439 nsec = CLKS2NSEC(clks);
46440- atomic_long_inc(&mcs_op_statistics[op].count);
46441- atomic_long_add(nsec, &mcs_op_statistics[op].total);
46442+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
46443+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
46444 if (mcs_op_statistics[op].max < nsec)
46445 mcs_op_statistics[op].max = nsec;
46446 }
46447diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
46448index 4f76359..cdfcb2e 100644
46449--- a/drivers/misc/sgi-gru/gruprocfs.c
46450+++ b/drivers/misc/sgi-gru/gruprocfs.c
46451@@ -32,9 +32,9 @@
46452
46453 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
46454
46455-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
46456+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
46457 {
46458- unsigned long val = atomic_long_read(v);
46459+ unsigned long val = atomic_long_read_unchecked(v);
46460
46461 seq_printf(s, "%16lu %s\n", val, id);
46462 }
46463@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
46464
46465 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
46466 for (op = 0; op < mcsop_last; op++) {
46467- count = atomic_long_read(&mcs_op_statistics[op].count);
46468- total = atomic_long_read(&mcs_op_statistics[op].total);
46469+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
46470+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
46471 max = mcs_op_statistics[op].max;
46472 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
46473 count ? total / count : 0, max);
46474diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
46475index 5c3ce24..4915ccb 100644
46476--- a/drivers/misc/sgi-gru/grutables.h
46477+++ b/drivers/misc/sgi-gru/grutables.h
46478@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
46479 * GRU statistics.
46480 */
46481 struct gru_stats_s {
46482- atomic_long_t vdata_alloc;
46483- atomic_long_t vdata_free;
46484- atomic_long_t gts_alloc;
46485- atomic_long_t gts_free;
46486- atomic_long_t gms_alloc;
46487- atomic_long_t gms_free;
46488- atomic_long_t gts_double_allocate;
46489- atomic_long_t assign_context;
46490- atomic_long_t assign_context_failed;
46491- atomic_long_t free_context;
46492- atomic_long_t load_user_context;
46493- atomic_long_t load_kernel_context;
46494- atomic_long_t lock_kernel_context;
46495- atomic_long_t unlock_kernel_context;
46496- atomic_long_t steal_user_context;
46497- atomic_long_t steal_kernel_context;
46498- atomic_long_t steal_context_failed;
46499- atomic_long_t nopfn;
46500- atomic_long_t asid_new;
46501- atomic_long_t asid_next;
46502- atomic_long_t asid_wrap;
46503- atomic_long_t asid_reuse;
46504- atomic_long_t intr;
46505- atomic_long_t intr_cbr;
46506- atomic_long_t intr_tfh;
46507- atomic_long_t intr_spurious;
46508- atomic_long_t intr_mm_lock_failed;
46509- atomic_long_t call_os;
46510- atomic_long_t call_os_wait_queue;
46511- atomic_long_t user_flush_tlb;
46512- atomic_long_t user_unload_context;
46513- atomic_long_t user_exception;
46514- atomic_long_t set_context_option;
46515- atomic_long_t check_context_retarget_intr;
46516- atomic_long_t check_context_unload;
46517- atomic_long_t tlb_dropin;
46518- atomic_long_t tlb_preload_page;
46519- atomic_long_t tlb_dropin_fail_no_asid;
46520- atomic_long_t tlb_dropin_fail_upm;
46521- atomic_long_t tlb_dropin_fail_invalid;
46522- atomic_long_t tlb_dropin_fail_range_active;
46523- atomic_long_t tlb_dropin_fail_idle;
46524- atomic_long_t tlb_dropin_fail_fmm;
46525- atomic_long_t tlb_dropin_fail_no_exception;
46526- atomic_long_t tfh_stale_on_fault;
46527- atomic_long_t mmu_invalidate_range;
46528- atomic_long_t mmu_invalidate_page;
46529- atomic_long_t flush_tlb;
46530- atomic_long_t flush_tlb_gru;
46531- atomic_long_t flush_tlb_gru_tgh;
46532- atomic_long_t flush_tlb_gru_zero_asid;
46533+ atomic_long_unchecked_t vdata_alloc;
46534+ atomic_long_unchecked_t vdata_free;
46535+ atomic_long_unchecked_t gts_alloc;
46536+ atomic_long_unchecked_t gts_free;
46537+ atomic_long_unchecked_t gms_alloc;
46538+ atomic_long_unchecked_t gms_free;
46539+ atomic_long_unchecked_t gts_double_allocate;
46540+ atomic_long_unchecked_t assign_context;
46541+ atomic_long_unchecked_t assign_context_failed;
46542+ atomic_long_unchecked_t free_context;
46543+ atomic_long_unchecked_t load_user_context;
46544+ atomic_long_unchecked_t load_kernel_context;
46545+ atomic_long_unchecked_t lock_kernel_context;
46546+ atomic_long_unchecked_t unlock_kernel_context;
46547+ atomic_long_unchecked_t steal_user_context;
46548+ atomic_long_unchecked_t steal_kernel_context;
46549+ atomic_long_unchecked_t steal_context_failed;
46550+ atomic_long_unchecked_t nopfn;
46551+ atomic_long_unchecked_t asid_new;
46552+ atomic_long_unchecked_t asid_next;
46553+ atomic_long_unchecked_t asid_wrap;
46554+ atomic_long_unchecked_t asid_reuse;
46555+ atomic_long_unchecked_t intr;
46556+ atomic_long_unchecked_t intr_cbr;
46557+ atomic_long_unchecked_t intr_tfh;
46558+ atomic_long_unchecked_t intr_spurious;
46559+ atomic_long_unchecked_t intr_mm_lock_failed;
46560+ atomic_long_unchecked_t call_os;
46561+ atomic_long_unchecked_t call_os_wait_queue;
46562+ atomic_long_unchecked_t user_flush_tlb;
46563+ atomic_long_unchecked_t user_unload_context;
46564+ atomic_long_unchecked_t user_exception;
46565+ atomic_long_unchecked_t set_context_option;
46566+ atomic_long_unchecked_t check_context_retarget_intr;
46567+ atomic_long_unchecked_t check_context_unload;
46568+ atomic_long_unchecked_t tlb_dropin;
46569+ atomic_long_unchecked_t tlb_preload_page;
46570+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
46571+ atomic_long_unchecked_t tlb_dropin_fail_upm;
46572+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
46573+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
46574+ atomic_long_unchecked_t tlb_dropin_fail_idle;
46575+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
46576+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
46577+ atomic_long_unchecked_t tfh_stale_on_fault;
46578+ atomic_long_unchecked_t mmu_invalidate_range;
46579+ atomic_long_unchecked_t mmu_invalidate_page;
46580+ atomic_long_unchecked_t flush_tlb;
46581+ atomic_long_unchecked_t flush_tlb_gru;
46582+ atomic_long_unchecked_t flush_tlb_gru_tgh;
46583+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
46584
46585- atomic_long_t copy_gpa;
46586- atomic_long_t read_gpa;
46587+ atomic_long_unchecked_t copy_gpa;
46588+ atomic_long_unchecked_t read_gpa;
46589
46590- atomic_long_t mesq_receive;
46591- atomic_long_t mesq_receive_none;
46592- atomic_long_t mesq_send;
46593- atomic_long_t mesq_send_failed;
46594- atomic_long_t mesq_noop;
46595- atomic_long_t mesq_send_unexpected_error;
46596- atomic_long_t mesq_send_lb_overflow;
46597- atomic_long_t mesq_send_qlimit_reached;
46598- atomic_long_t mesq_send_amo_nacked;
46599- atomic_long_t mesq_send_put_nacked;
46600- atomic_long_t mesq_page_overflow;
46601- atomic_long_t mesq_qf_locked;
46602- atomic_long_t mesq_qf_noop_not_full;
46603- atomic_long_t mesq_qf_switch_head_failed;
46604- atomic_long_t mesq_qf_unexpected_error;
46605- atomic_long_t mesq_noop_unexpected_error;
46606- atomic_long_t mesq_noop_lb_overflow;
46607- atomic_long_t mesq_noop_qlimit_reached;
46608- atomic_long_t mesq_noop_amo_nacked;
46609- atomic_long_t mesq_noop_put_nacked;
46610- atomic_long_t mesq_noop_page_overflow;
46611+ atomic_long_unchecked_t mesq_receive;
46612+ atomic_long_unchecked_t mesq_receive_none;
46613+ atomic_long_unchecked_t mesq_send;
46614+ atomic_long_unchecked_t mesq_send_failed;
46615+ atomic_long_unchecked_t mesq_noop;
46616+ atomic_long_unchecked_t mesq_send_unexpected_error;
46617+ atomic_long_unchecked_t mesq_send_lb_overflow;
46618+ atomic_long_unchecked_t mesq_send_qlimit_reached;
46619+ atomic_long_unchecked_t mesq_send_amo_nacked;
46620+ atomic_long_unchecked_t mesq_send_put_nacked;
46621+ atomic_long_unchecked_t mesq_page_overflow;
46622+ atomic_long_unchecked_t mesq_qf_locked;
46623+ atomic_long_unchecked_t mesq_qf_noop_not_full;
46624+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
46625+ atomic_long_unchecked_t mesq_qf_unexpected_error;
46626+ atomic_long_unchecked_t mesq_noop_unexpected_error;
46627+ atomic_long_unchecked_t mesq_noop_lb_overflow;
46628+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
46629+ atomic_long_unchecked_t mesq_noop_amo_nacked;
46630+ atomic_long_unchecked_t mesq_noop_put_nacked;
46631+ atomic_long_unchecked_t mesq_noop_page_overflow;
46632
46633 };
46634
46635@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
46636 tghop_invalidate, mcsop_last};
46637
46638 struct mcs_op_statistic {
46639- atomic_long_t count;
46640- atomic_long_t total;
46641+ atomic_long_unchecked_t count;
46642+ atomic_long_unchecked_t total;
46643 unsigned long max;
46644 };
46645
46646@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
46647
46648 #define STAT(id) do { \
46649 if (gru_options & OPT_STATS) \
46650- atomic_long_inc(&gru_stats.id); \
46651+ atomic_long_inc_unchecked(&gru_stats.id); \
46652 } while (0)
46653
46654 #ifdef CONFIG_SGI_GRU_DEBUG
46655diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
46656index c862cd4..0d176fe 100644
46657--- a/drivers/misc/sgi-xp/xp.h
46658+++ b/drivers/misc/sgi-xp/xp.h
46659@@ -288,7 +288,7 @@ struct xpc_interface {
46660 xpc_notify_func, void *);
46661 void (*received) (short, int, void *);
46662 enum xp_retval (*partid_to_nasids) (short, void *);
46663-};
46664+} __no_const;
46665
46666 extern struct xpc_interface xpc_interface;
46667
46668diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
46669index 01be66d..e3a0c7e 100644
46670--- a/drivers/misc/sgi-xp/xp_main.c
46671+++ b/drivers/misc/sgi-xp/xp_main.c
46672@@ -78,13 +78,13 @@ xpc_notloaded(void)
46673 }
46674
46675 struct xpc_interface xpc_interface = {
46676- (void (*)(int))xpc_notloaded,
46677- (void (*)(int))xpc_notloaded,
46678- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46679- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46680+ .connect = (void (*)(int))xpc_notloaded,
46681+ .disconnect = (void (*)(int))xpc_notloaded,
46682+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46683+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46684 void *))xpc_notloaded,
46685- (void (*)(short, int, void *))xpc_notloaded,
46686- (enum xp_retval(*)(short, void *))xpc_notloaded
46687+ .received = (void (*)(short, int, void *))xpc_notloaded,
46688+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
46689 };
46690 EXPORT_SYMBOL_GPL(xpc_interface);
46691
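Rewriting xpc_interface with designated initializers is worth imitating wherever a callback struct is built: positional initializers misbind silently if fields are ever reordered, while .field = binds by name and documents which cast belongs to which hook. Contrast, with hypothetical names:

	struct demo_ops {
		void (*open)(int id);
		void (*close)(int id);
	};

	static void demo_open(int id)  { }
	static void demo_close(int id) { }

	/* positional: breaks silently if open and close ever swap places */
	static struct demo_ops a = { demo_open, demo_close };

	/* designated: binds by name, robust and self-documenting */
	static struct demo_ops b = { .open = demo_open, .close = demo_close };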
46692diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
46693index b94d5f7..7f494c5 100644
46694--- a/drivers/misc/sgi-xp/xpc.h
46695+++ b/drivers/misc/sgi-xp/xpc.h
46696@@ -835,6 +835,7 @@ struct xpc_arch_operations {
46697 void (*received_payload) (struct xpc_channel *, void *);
46698 void (*notify_senders_of_disconnect) (struct xpc_channel *);
46699 };
46700+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
46701
46702 /* struct xpc_partition act_state values (for XPC HB) */
46703
46704@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
46705 /* found in xpc_main.c */
46706 extern struct device *xpc_part;
46707 extern struct device *xpc_chan;
46708-extern struct xpc_arch_operations xpc_arch_ops;
46709+extern xpc_arch_operations_no_const xpc_arch_ops;
46710 extern int xpc_disengage_timelimit;
46711 extern int xpc_disengage_timedout;
46712 extern int xpc_activate_IRQ_rcvd;
46713diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
46714index 82dc574..8539ab2 100644
46715--- a/drivers/misc/sgi-xp/xpc_main.c
46716+++ b/drivers/misc/sgi-xp/xpc_main.c
46717@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
46718 .notifier_call = xpc_system_die,
46719 };
46720
46721-struct xpc_arch_operations xpc_arch_ops;
46722+xpc_arch_operations_no_const xpc_arch_ops;
46723
46724 /*
46725 * Timer function to enforce the timelimit on the partition disengage.
46726@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
46727
46728 if (((die_args->trapnr == X86_TRAP_MF) ||
46729 (die_args->trapnr == X86_TRAP_XF)) &&
46730- !user_mode_vm(die_args->regs))
46731+ !user_mode(die_args->regs))
46732 xpc_die_deactivate();
46733
46734 break;
46735diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
46736index 4409d79..d7766d0 100644
46737--- a/drivers/mmc/card/block.c
46738+++ b/drivers/mmc/card/block.c
46739@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
46740 if (idata->ic.postsleep_min_us)
46741 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
46742
46743- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
46744+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
46745 err = -EFAULT;
46746 goto cmd_rel_host;
46747 }
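The mmc_blk_ioctl_cmd change is about C types rather than behavior: response is an array, so ic_ptr->response decays to a pointer to its first element, while &ic_ptr->response is a pointer to the whole array. Both hold the same address but carry different types, which matters to the type-sensitive size_overflow checking this patch adds around copy_to_user(). In isolation:

	__u32 resp[4];
	__u32 *elem       = resp;	/* pointer to the first element */
	__u32 (*whole)[4] = &resp;	/* pointer to the whole array: same
					 * address, different type */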
46748diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
46749index 0d0f7a2..45b8d60 100644
46750--- a/drivers/mmc/host/dw_mmc.h
46751+++ b/drivers/mmc/host/dw_mmc.h
46752@@ -276,5 +276,5 @@ struct dw_mci_drv_data {
46753 int (*parse_dt)(struct dw_mci *host);
46754 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
46755 struct dw_mci_tuning_data *tuning_data);
46756-};
46757+} __do_const;
46758 #endif /* _DW_MMC_H_ */
46759diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
46760index 8232e9a..7776006 100644
46761--- a/drivers/mmc/host/mmci.c
46762+++ b/drivers/mmc/host/mmci.c
46763@@ -1635,7 +1635,9 @@ static int mmci_probe(struct amba_device *dev,
46764 mmc->caps |= MMC_CAP_CMD23;
46765
46766 if (variant->busy_detect) {
46767- mmci_ops.card_busy = mmci_card_busy;
46768+ pax_open_kernel();
46769+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
46770+ pax_close_kernel();
46771 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
46772 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
46773 mmc->max_busy_timeout = 0;
46774diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
46775index 7c71dcd..74cb746 100644
46776--- a/drivers/mmc/host/omap_hsmmc.c
46777+++ b/drivers/mmc/host/omap_hsmmc.c
46778@@ -2120,7 +2120,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
46779
46780 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
46781 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
46782- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46783+ pax_open_kernel();
46784+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46785+ pax_close_kernel();
46786 }
46787
46788 pm_runtime_enable(host->dev);
46789diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
46790index af1f7c0..00d368a 100644
46791--- a/drivers/mmc/host/sdhci-esdhc-imx.c
46792+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
46793@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
46794 host->mmc->caps |= MMC_CAP_1_8V_DDR;
46795 }
46796
46797- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
46798- sdhci_esdhc_ops.platform_execute_tuning =
46799+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
46800+ pax_open_kernel();
46801+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
46802 esdhc_executing_tuning;
46803+ pax_close_kernel();
46804+ }
46805
46806 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
46807 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
46808diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
46809index c45b893..fba0144 100644
46810--- a/drivers/mmc/host/sdhci-s3c.c
46811+++ b/drivers/mmc/host/sdhci-s3c.c
46812@@ -590,9 +590,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
46813 * we can use overriding functions instead of default.
46814 */
46815 if (sc->no_divider) {
46816- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46817- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46818- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46819+ pax_open_kernel();
46820+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46821+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46822+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46823+ pax_close_kernel();
46824 }
46825
46826 /* It supports additional host capabilities if needed */
46827diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
46828index 423666b..81ff5eb 100644
46829--- a/drivers/mtd/chips/cfi_cmdset_0020.c
46830+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
46831@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
46832 size_t totlen = 0, thislen;
46833 int ret = 0;
46834 size_t buflen = 0;
46835- static char *buffer;
46836+ char *buffer;
46837
46838 if (!ECCBUF_SIZE) {
46839 /* We should fall back to a general writev implementation.
46840diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
46841index b3b7ca1..5dd4634 100644
46842--- a/drivers/mtd/nand/denali.c
46843+++ b/drivers/mtd/nand/denali.c
46844@@ -24,6 +24,7 @@
46845 #include <linux/slab.h>
46846 #include <linux/mtd/mtd.h>
46847 #include <linux/module.h>
46848+#include <linux/slab.h>
46849
46850 #include "denali.h"
46851
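A small wart in the denali.c hunk above: the visible context already includes <linux/slab.h> three lines earlier, so the added include is redundant. It is harmless only because kernel headers carry include guards, which make double inclusion a no-op:

	/* shape of a typical guard; linux/slab.h uses _LINUX_SLAB_H */
	#ifndef _LINUX_SLAB_H
	#define _LINUX_SLAB_H
	/* ... declarations ... */
	#endif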
46852diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46853index 4f3851a..f477a23 100644
46854--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46855+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46856@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
46857
46858 /* first try to map the upper buffer directly */
46859 if (virt_addr_valid(this->upper_buf) &&
46860- !object_is_on_stack(this->upper_buf)) {
46861+ !object_starts_on_stack(this->upper_buf)) {
46862 sg_init_one(sgl, this->upper_buf, this->upper_len);
46863 ret = dma_map_sg(this->dev, sgl, 1, dr);
46864 if (ret == 0)
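object_is_on_stack() only asks whether a pointer lands somewhere within the current stack; for deciding whether a buffer may be handed to dma_map_sg(), the relevant question is whether the object begins there. A sketch of the replacement helper, assuming the definition this patch adds alongside the sched.h stack helpers:

	/* Approximate definition: does the object start within the current
	 * task's stack? */
	static inline int object_starts_on_stack(const void *obj)
	{
		const void *stack = task_stack_page(current);

		return (obj >= stack) && (obj < (stack + THREAD_SIZE));
	}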
46865diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
46866index 51b9d6a..52af9a7 100644
46867--- a/drivers/mtd/nftlmount.c
46868+++ b/drivers/mtd/nftlmount.c
46869@@ -24,6 +24,7 @@
46870 #include <asm/errno.h>
46871 #include <linux/delay.h>
46872 #include <linux/slab.h>
46873+#include <linux/sched.h>
46874 #include <linux/mtd/mtd.h>
46875 #include <linux/mtd/nand.h>
46876 #include <linux/mtd/nftl.h>
46877diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
46878index c23184a..4115c41 100644
46879--- a/drivers/mtd/sm_ftl.c
46880+++ b/drivers/mtd/sm_ftl.c
46881@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
46882 #define SM_CIS_VENDOR_OFFSET 0x59
46883 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
46884 {
46885- struct attribute_group *attr_group;
46886+ attribute_group_no_const *attr_group;
46887 struct attribute **attributes;
46888 struct sm_sysfs_attribute *vendor_attribute;
46889 char *vendor;
46890diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
46891index 7b11243..b3278a3 100644
46892--- a/drivers/net/bonding/bond_netlink.c
46893+++ b/drivers/net/bonding/bond_netlink.c
46894@@ -585,7 +585,7 @@ nla_put_failure:
46895 return -EMSGSIZE;
46896 }
46897
46898-struct rtnl_link_ops bond_link_ops __read_mostly = {
46899+struct rtnl_link_ops bond_link_ops = {
46900 .kind = "bond",
46901 .priv_size = sizeof(struct bonding),
46902 .setup = bond_setup,
46903diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
46904index b3b922a..80bba38 100644
46905--- a/drivers/net/caif/caif_hsi.c
46906+++ b/drivers/net/caif/caif_hsi.c
46907@@ -1444,7 +1444,7 @@ err:
46908 return -ENODEV;
46909 }
46910
46911-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
46912+static struct rtnl_link_ops caif_hsi_link_ops = {
46913 .kind = "cfhsi",
46914 .priv_size = sizeof(struct cfhsi),
46915 .setup = cfhsi_setup,
46916diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
46917index 98d73aa..63ef9da 100644
46918--- a/drivers/net/can/Kconfig
46919+++ b/drivers/net/can/Kconfig
46920@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
46921
46922 config CAN_FLEXCAN
46923 tristate "Support for Freescale FLEXCAN based chips"
46924- depends on ARM || PPC
46925+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
46926 ---help---
46927 Say Y here if you want to support for Freescale FlexCAN.
46928
46929diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
46930index 847c1f8..69a0df3 100644
46931--- a/drivers/net/can/dev.c
46932+++ b/drivers/net/can/dev.c
46933@@ -950,7 +950,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
46934 return -EOPNOTSUPP;
46935 }
46936
46937-static struct rtnl_link_ops can_link_ops __read_mostly = {
46938+static struct rtnl_link_ops can_link_ops = {
46939 .kind = "can",
46940 .maxtype = IFLA_CAN_MAX,
46941 .policy = can_policy,
46942diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
46943index 674f367..ec3a31f 100644
46944--- a/drivers/net/can/vcan.c
46945+++ b/drivers/net/can/vcan.c
46946@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
46947 dev->destructor = free_netdev;
46948 }
46949
46950-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
46951+static struct rtnl_link_ops vcan_link_ops = {
46952 .kind = "vcan",
46953 .setup = vcan_setup,
46954 };
46955diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
46956index 49adbf1..fff7ff8 100644
46957--- a/drivers/net/dummy.c
46958+++ b/drivers/net/dummy.c
46959@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
46960 return 0;
46961 }
46962
46963-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
46964+static struct rtnl_link_ops dummy_link_ops = {
46965 .kind = DRV_NAME,
46966 .setup = dummy_setup,
46967 .validate = dummy_validate,
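The __read_mostly removals on rtnl_link_ops across these net drivers resolve a section conflict rather than changing policy: the constify plugin now emits such objects into genuinely read-only memory, and an object cannot sit both there and in .data..read_mostly, so the weaker placement attribute goes. In miniature:

	/* __read_mostly merely groups hot read-side data in .data..read_mostly: */
	static struct rtnl_link_ops ops_hot __read_mostly = { .kind = "demo" };

	/* a const (or plugin-constified) object lands in a read-only section
	 * instead; asking for both placements at once cannot link: */
	static const struct rtnl_link_ops ops_ro = { .kind = "demo" };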
46968diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
46969index 0443654..4f0aa18 100644
46970--- a/drivers/net/ethernet/8390/ax88796.c
46971+++ b/drivers/net/ethernet/8390/ax88796.c
46972@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
46973 if (ax->plat->reg_offsets)
46974 ei_local->reg_offset = ax->plat->reg_offsets;
46975 else {
46976+ resource_size_t _mem_size = mem_size;
46977+ do_div(_mem_size, 0x18);
46978 ei_local->reg_offset = ax->reg_offsets;
46979 for (ret = 0; ret < 0x18; ret++)
46980- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
46981+ ax->reg_offsets[ret] = _mem_size * ret;
46982 }
46983
46984 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
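The ax88796 hunk dodges a 32-bit link failure: resource_size_t is 64 bits wide when CONFIG_PHYS_ADDR_T_64BIT is set, and a plain / on a 64-bit value makes gcc emit a call to __udivdi3, which the kernel does not provide. do_div() divides in place by a 32-bit divisor; hoisting it out of the loop also performs one division instead of 24. Usage:

	#include <asm/div64.h>

	u64 size = 123456789ULL;
	u32 rem;

	rem = do_div(size, 24);	/* size becomes the quotient; do_div returns
				 * the remainder (divisor must fit in 32 bits) */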
46985diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
46986index 760c72c..a99728c 100644
46987--- a/drivers/net/ethernet/altera/altera_tse_main.c
46988+++ b/drivers/net/ethernet/altera/altera_tse_main.c
46989@@ -1217,7 +1217,7 @@ static int tse_shutdown(struct net_device *dev)
46990 return 0;
46991 }
46992
46993-static struct net_device_ops altera_tse_netdev_ops = {
46994+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
46995 .ndo_open = tse_open,
46996 .ndo_stop = tse_shutdown,
46997 .ndo_start_xmit = tse_start_xmit,
46998@@ -1454,11 +1454,13 @@ static int altera_tse_probe(struct platform_device *pdev)
46999 ndev->netdev_ops = &altera_tse_netdev_ops;
47000 altera_tse_set_ethtool_ops(ndev);
47001
47002+ pax_open_kernel();
47003 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
47004
47005 if (priv->hash_filter)
47006 altera_tse_netdev_ops.ndo_set_rx_mode =
47007 tse_set_rx_mode_hashfilter;
47008+ pax_close_kernel();
47009
47010 /* Scatter/gather IO is not supported,
47011 * so it is turned off
47012diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47013index 29a0927..5a348e24 100644
47014--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47015+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47016@@ -1122,14 +1122,14 @@ do { \
47017 * operations, everything works on mask values.
47018 */
47019 #define XMDIO_READ(_pdata, _mmd, _reg) \
47020- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
47021+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
47022 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
47023
47024 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
47025 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
47026
47027 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
47028- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
47029+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
47030 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
47031
47032 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
47033diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47034index 8a50b01..39c1ad0 100644
47035--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47036+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47037@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
47038
47039 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
47040
47041- pdata->hw_if.config_dcb_tc(pdata);
47042+ pdata->hw_if->config_dcb_tc(pdata);
47043
47044 return 0;
47045 }
47046@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
47047
47048 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
47049
47050- pdata->hw_if.config_dcb_pfc(pdata);
47051+ pdata->hw_if->config_dcb_pfc(pdata);
47052
47053 return 0;
47054 }
47055diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47056index a50891f..b26fe24 100644
47057--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47058+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47059@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
47060
47061 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47062 {
47063- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47064+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47065 struct xgbe_channel *channel;
47066 struct xgbe_ring *ring;
47067 struct xgbe_ring_data *rdata;
47068@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47069
47070 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
47071 {
47072- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47073+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47074 struct xgbe_channel *channel;
47075 struct xgbe_ring *ring;
47076 struct xgbe_ring_desc *rdesc;
47077@@ -624,7 +624,7 @@ err_out:
47078 static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47079 {
47080 struct xgbe_prv_data *pdata = channel->pdata;
47081- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47082+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47083 struct xgbe_ring *ring = channel->rx_ring;
47084 struct xgbe_ring_data *rdata;
47085 int i;
47086@@ -650,17 +650,12 @@ static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47087 DBGPR("<--xgbe_realloc_rx_buffer\n");
47088 }
47089
47090-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
47091-{
47092- DBGPR("-->xgbe_init_function_ptrs_desc\n");
47093-
47094- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
47095- desc_if->free_ring_resources = xgbe_free_ring_resources;
47096- desc_if->map_tx_skb = xgbe_map_tx_skb;
47097- desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
47098- desc_if->unmap_rdata = xgbe_unmap_rdata;
47099- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
47100- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
47101-
47102- DBGPR("<--xgbe_init_function_ptrs_desc\n");
47103-}
47104+const struct xgbe_desc_if default_xgbe_desc_if = {
47105+ .alloc_ring_resources = xgbe_alloc_ring_resources,
47106+ .free_ring_resources = xgbe_free_ring_resources,
47107+ .map_tx_skb = xgbe_map_tx_skb,
47108+ .realloc_rx_buffer = xgbe_realloc_rx_buffer,
47109+ .unmap_rdata = xgbe_unmap_rdata,
47110+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
47111+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
47112+};
47113diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47114index 4c66cd1..1a20aab 100644
47115--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47116+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47117@@ -2703,7 +2703,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
47118
47119 static int xgbe_init(struct xgbe_prv_data *pdata)
47120 {
47121- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47122+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47123 int ret;
47124
47125 DBGPR("-->xgbe_init\n");
47126@@ -2767,108 +2767,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
47127 return 0;
47128 }
47129
47130-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
47131-{
47132- DBGPR("-->xgbe_init_function_ptrs\n");
47133-
47134- hw_if->tx_complete = xgbe_tx_complete;
47135-
47136- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
47137- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
47138- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
47139- hw_if->set_mac_address = xgbe_set_mac_address;
47140-
47141- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
47142- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
47143-
47144- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
47145- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
47146- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
47147- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
47148- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
47149-
47150- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
47151- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
47152-
47153- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
47154- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
47155- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
47156-
47157- hw_if->enable_tx = xgbe_enable_tx;
47158- hw_if->disable_tx = xgbe_disable_tx;
47159- hw_if->enable_rx = xgbe_enable_rx;
47160- hw_if->disable_rx = xgbe_disable_rx;
47161-
47162- hw_if->powerup_tx = xgbe_powerup_tx;
47163- hw_if->powerdown_tx = xgbe_powerdown_tx;
47164- hw_if->powerup_rx = xgbe_powerup_rx;
47165- hw_if->powerdown_rx = xgbe_powerdown_rx;
47166-
47167- hw_if->dev_xmit = xgbe_dev_xmit;
47168- hw_if->dev_read = xgbe_dev_read;
47169- hw_if->enable_int = xgbe_enable_int;
47170- hw_if->disable_int = xgbe_disable_int;
47171- hw_if->init = xgbe_init;
47172- hw_if->exit = xgbe_exit;
47173+const struct xgbe_hw_if default_xgbe_hw_if = {
47174+ .tx_complete = xgbe_tx_complete,
47175+
47176+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
47177+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
47178+ .add_mac_addresses = xgbe_add_mac_addresses,
47179+ .set_mac_address = xgbe_set_mac_address,
47180+
47181+ .enable_rx_csum = xgbe_enable_rx_csum,
47182+ .disable_rx_csum = xgbe_disable_rx_csum,
47183+
47184+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
47185+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
47186+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
47187+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
47188+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
47189+
47190+ .read_mmd_regs = xgbe_read_mmd_regs,
47191+ .write_mmd_regs = xgbe_write_mmd_regs,
47192+
47193+ .set_gmii_speed = xgbe_set_gmii_speed,
47194+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
47195+ .set_xgmii_speed = xgbe_set_xgmii_speed,
47196+
47197+ .enable_tx = xgbe_enable_tx,
47198+ .disable_tx = xgbe_disable_tx,
47199+ .enable_rx = xgbe_enable_rx,
47200+ .disable_rx = xgbe_disable_rx,
47201+
47202+ .powerup_tx = xgbe_powerup_tx,
47203+ .powerdown_tx = xgbe_powerdown_tx,
47204+ .powerup_rx = xgbe_powerup_rx,
47205+ .powerdown_rx = xgbe_powerdown_rx,
47206+
47207+ .dev_xmit = xgbe_dev_xmit,
47208+ .dev_read = xgbe_dev_read,
47209+ .enable_int = xgbe_enable_int,
47210+ .disable_int = xgbe_disable_int,
47211+ .init = xgbe_init,
47212+ .exit = xgbe_exit,
47213
47214 /* Descriptor related Sequences have to be initialized here */
47215- hw_if->tx_desc_init = xgbe_tx_desc_init;
47216- hw_if->rx_desc_init = xgbe_rx_desc_init;
47217- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
47218- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
47219- hw_if->is_last_desc = xgbe_is_last_desc;
47220- hw_if->is_context_desc = xgbe_is_context_desc;
47221- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
47222+ .tx_desc_init = xgbe_tx_desc_init,
47223+ .rx_desc_init = xgbe_rx_desc_init,
47224+ .tx_desc_reset = xgbe_tx_desc_reset,
47225+ .rx_desc_reset = xgbe_rx_desc_reset,
47226+ .is_last_desc = xgbe_is_last_desc,
47227+ .is_context_desc = xgbe_is_context_desc,
47228+ .tx_start_xmit = xgbe_tx_start_xmit,
47229
47230 /* For FLOW ctrl */
47231- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
47232- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
47233+ .config_tx_flow_control = xgbe_config_tx_flow_control,
47234+ .config_rx_flow_control = xgbe_config_rx_flow_control,
47235
47236 /* For RX coalescing */
47237- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
47238- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
47239- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
47240- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
47241+ .config_rx_coalesce = xgbe_config_rx_coalesce,
47242+ .config_tx_coalesce = xgbe_config_tx_coalesce,
47243+ .usec_to_riwt = xgbe_usec_to_riwt,
47244+ .riwt_to_usec = xgbe_riwt_to_usec,
47245
47246 /* For RX and TX threshold config */
47247- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
47248- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
47249+ .config_rx_threshold = xgbe_config_rx_threshold,
47250+ .config_tx_threshold = xgbe_config_tx_threshold,
47251
47252 /* For RX and TX Store and Forward Mode config */
47253- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
47254- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
47255+ .config_rsf_mode = xgbe_config_rsf_mode,
47256+ .config_tsf_mode = xgbe_config_tsf_mode,
47257
47258 /* For TX DMA Operating on Second Frame config */
47259- hw_if->config_osp_mode = xgbe_config_osp_mode;
47260+ .config_osp_mode = xgbe_config_osp_mode,
47261
47262 /* For RX and TX PBL config */
47263- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
47264- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
47265- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
47266- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
47267- hw_if->config_pblx8 = xgbe_config_pblx8;
47268+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
47269+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
47270+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
47271+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
47272+ .config_pblx8 = xgbe_config_pblx8,
47273
47274 /* For MMC statistics support */
47275- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
47276- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
47277- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
47278+ .tx_mmc_int = xgbe_tx_mmc_int,
47279+ .rx_mmc_int = xgbe_rx_mmc_int,
47280+ .read_mmc_stats = xgbe_read_mmc_stats,
47281
47282 /* For PTP config */
47283- hw_if->config_tstamp = xgbe_config_tstamp;
47284- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
47285- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
47286- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
47287- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
47288+ .config_tstamp = xgbe_config_tstamp,
47289+ .update_tstamp_addend = xgbe_update_tstamp_addend,
47290+ .set_tstamp_time = xgbe_set_tstamp_time,
47291+ .get_tstamp_time = xgbe_get_tstamp_time,
47292+ .get_tx_tstamp = xgbe_get_tx_tstamp,
47293
47294 /* For Data Center Bridging config */
47295- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
47296- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
47297+ .config_dcb_tc = xgbe_config_dcb_tc,
47298+ .config_dcb_pfc = xgbe_config_dcb_pfc,
47299
47300 /* For Receive Side Scaling */
47301- hw_if->enable_rss = xgbe_enable_rss;
47302- hw_if->disable_rss = xgbe_disable_rss;
47303- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
47304- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
47305-
47306- DBGPR("<--xgbe_init_function_ptrs\n");
47307-}
47308+ .enable_rss = xgbe_enable_rss,
47309+ .disable_rss = xgbe_disable_rss,
47310+ .set_rss_hash_key = xgbe_set_rss_hash_key,
47311+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
47312+};
47313diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47314index e5ffb2c..e56d30b 100644
47315--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47316+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47317@@ -239,7 +239,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
47318 * support, tell it now
47319 */
47320 if (ring->tx.xmit_more)
47321- pdata->hw_if.tx_start_xmit(channel, ring);
47322+ pdata->hw_if->tx_start_xmit(channel, ring);
47323
47324 return NETDEV_TX_BUSY;
47325 }
47326@@ -267,7 +267,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
47327
47328 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47329 {
47330- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47331+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47332 struct xgbe_channel *channel;
47333 enum xgbe_int int_id;
47334 unsigned int i;
47335@@ -289,7 +289,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47336
47337 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47338 {
47339- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47340+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47341 struct xgbe_channel *channel;
47342 enum xgbe_int int_id;
47343 unsigned int i;
47344@@ -312,7 +312,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47345 static irqreturn_t xgbe_isr(int irq, void *data)
47346 {
47347 struct xgbe_prv_data *pdata = data;
47348- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47349+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47350 struct xgbe_channel *channel;
47351 unsigned int dma_isr, dma_ch_isr;
47352 unsigned int mac_isr, mac_tssr;
47353@@ -611,7 +611,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
47354
47355 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47356 {
47357- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47358+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47359
47360 DBGPR("-->xgbe_init_tx_coalesce\n");
47361
47362@@ -625,7 +625,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47363
47364 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47365 {
47366- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47367+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47368
47369 DBGPR("-->xgbe_init_rx_coalesce\n");
47370
47371@@ -639,7 +639,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47372
47373 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47374 {
47375- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47376+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47377 struct xgbe_channel *channel;
47378 struct xgbe_ring *ring;
47379 struct xgbe_ring_data *rdata;
47380@@ -664,7 +664,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47381
47382 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47383 {
47384- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47385+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47386 struct xgbe_channel *channel;
47387 struct xgbe_ring *ring;
47388 struct xgbe_ring_data *rdata;
47389@@ -690,7 +690,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47390 static void xgbe_adjust_link(struct net_device *netdev)
47391 {
47392 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47393- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47394+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47395 struct phy_device *phydev = pdata->phydev;
47396 int new_state = 0;
47397
47398@@ -798,7 +798,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
47399 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47400 {
47401 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47402- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47403+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47404 unsigned long flags;
47405
47406 DBGPR("-->xgbe_powerdown\n");
47407@@ -836,7 +836,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47408 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47409 {
47410 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47411- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47412+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47413 unsigned long flags;
47414
47415 DBGPR("-->xgbe_powerup\n");
47416@@ -873,7 +873,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47417
47418 static int xgbe_start(struct xgbe_prv_data *pdata)
47419 {
47420- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47421+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47422 struct net_device *netdev = pdata->netdev;
47423
47424 DBGPR("-->xgbe_start\n");
47425@@ -899,7 +899,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
47426
47427 static void xgbe_stop(struct xgbe_prv_data *pdata)
47428 {
47429- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47430+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47431 struct xgbe_channel *channel;
47432 struct net_device *netdev = pdata->netdev;
47433 struct netdev_queue *txq;
47434@@ -932,7 +932,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
47435 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
47436 {
47437 struct xgbe_channel *channel;
47438- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47439+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47440 unsigned int i;
47441
47442 DBGPR("-->xgbe_restart_dev\n");
47443@@ -1135,7 +1135,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
47444 return -ERANGE;
47445 }
47446
47447- pdata->hw_if.config_tstamp(pdata, mac_tscr);
47448+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
47449
47450 memcpy(&pdata->tstamp_config, &config, sizeof(config));
47451
47452@@ -1284,8 +1284,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
47453 static int xgbe_open(struct net_device *netdev)
47454 {
47455 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47456- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47457- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47458+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47459+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47460 struct xgbe_channel *channel = NULL;
47461 unsigned int i = 0;
47462 int ret;
47463@@ -1400,8 +1400,8 @@ err_phy_init:
47464 static int xgbe_close(struct net_device *netdev)
47465 {
47466 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47467- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47468- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47469+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47470+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47471 struct xgbe_channel *channel;
47472 unsigned int i;
47473
47474@@ -1442,8 +1442,8 @@ static int xgbe_close(struct net_device *netdev)
47475 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
47476 {
47477 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47478- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47479- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47480+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47481+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47482 struct xgbe_channel *channel;
47483 struct xgbe_ring *ring;
47484 struct xgbe_packet_data *packet;
47485@@ -1518,7 +1518,7 @@ tx_netdev_return:
47486 static void xgbe_set_rx_mode(struct net_device *netdev)
47487 {
47488 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47489- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47490+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47491 unsigned int pr_mode, am_mode;
47492
47493 DBGPR("-->xgbe_set_rx_mode\n");
47494@@ -1537,7 +1537,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
47495 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
47496 {
47497 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47498- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47499+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47500 struct sockaddr *saddr = addr;
47501
47502 DBGPR("-->xgbe_set_mac_address\n");
47503@@ -1604,7 +1604,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
47504
47505 DBGPR("-->%s\n", __func__);
47506
47507- pdata->hw_if.read_mmc_stats(pdata);
47508+ pdata->hw_if->read_mmc_stats(pdata);
47509
47510 s->rx_packets = pstats->rxframecount_gb;
47511 s->rx_bytes = pstats->rxoctetcount_gb;
47512@@ -1631,7 +1631,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
47513 u16 vid)
47514 {
47515 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47516- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47517+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47518
47519 DBGPR("-->%s\n", __func__);
47520
47521@@ -1647,7 +1647,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
47522 u16 vid)
47523 {
47524 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47525- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47526+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47527
47528 DBGPR("-->%s\n", __func__);
47529
47530@@ -1713,7 +1713,7 @@ static int xgbe_set_features(struct net_device *netdev,
47531 netdev_features_t features)
47532 {
47533 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47534- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47535+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47536 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
47537 int ret = 0;
47538
47539@@ -1778,7 +1778,7 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
47540 static void xgbe_rx_refresh(struct xgbe_channel *channel)
47541 {
47542 struct xgbe_prv_data *pdata = channel->pdata;
47543- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47544+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47545 struct xgbe_ring *ring = channel->rx_ring;
47546 struct xgbe_ring_data *rdata;
47547
47548@@ -1819,8 +1819,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
47549 static int xgbe_tx_poll(struct xgbe_channel *channel)
47550 {
47551 struct xgbe_prv_data *pdata = channel->pdata;
47552- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47553- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47554+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47555+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47556 struct xgbe_ring *ring = channel->tx_ring;
47557 struct xgbe_ring_data *rdata;
47558 struct xgbe_ring_desc *rdesc;
47559@@ -1891,7 +1891,7 @@ unlock:
47560 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
47561 {
47562 struct xgbe_prv_data *pdata = channel->pdata;
47563- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47564+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47565 struct xgbe_ring *ring = channel->rx_ring;
47566 struct xgbe_ring_data *rdata;
47567 struct xgbe_packet_data *packet;
47568diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47569index ebf4893..28108c7 100644
47570--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47571+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47572@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
47573
47574 DBGPR("-->%s\n", __func__);
47575
47576- pdata->hw_if.read_mmc_stats(pdata);
47577+ pdata->hw_if->read_mmc_stats(pdata);
47578 for (i = 0; i < XGBE_STATS_COUNT; i++) {
47579 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
47580 *data++ = *(u64 *)stat;
47581@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
47582 struct ethtool_coalesce *ec)
47583 {
47584 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47585- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47586+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47587 unsigned int riwt;
47588
47589 DBGPR("-->xgbe_get_coalesce\n");
47590@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
47591 struct ethtool_coalesce *ec)
47592 {
47593 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47594- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47595+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47596 unsigned int rx_frames, rx_riwt, rx_usecs;
47597 unsigned int tx_frames, tx_usecs;
47598
47599diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47600index dbd3850..4e31b38 100644
47601--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47602+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47603@@ -155,12 +155,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
47604 DBGPR("<--xgbe_default_config\n");
47605 }
47606
47607-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
47608-{
47609- xgbe_init_function_ptrs_dev(&pdata->hw_if);
47610- xgbe_init_function_ptrs_desc(&pdata->desc_if);
47611-}
47612-
47613 static int xgbe_probe(struct platform_device *pdev)
47614 {
47615 struct xgbe_prv_data *pdata;
47616@@ -281,9 +275,8 @@ static int xgbe_probe(struct platform_device *pdev)
47617 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
47618
47619 /* Set all the function pointers */
47620- xgbe_init_all_fptrs(pdata);
47621- hw_if = &pdata->hw_if;
47622- desc_if = &pdata->desc_if;
47623+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
47624+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
47625
47626 /* Issue software reset to device */
47627 hw_if->exit(pdata);
47628diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47629index 363b210..b241389 100644
47630--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47631+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47632@@ -126,7 +126,7 @@
47633 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
47634 {
47635 struct xgbe_prv_data *pdata = mii->priv;
47636- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47637+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47638 int mmd_data;
47639
47640 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
47641@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
47642 u16 mmd_val)
47643 {
47644 struct xgbe_prv_data *pdata = mii->priv;
47645- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47646+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47647 int mmd_data = mmd_val;
47648
47649 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
47650diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47651index a1bf9d1c..84adcab 100644
47652--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47653+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47654@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
47655 tstamp_cc);
47656 u64 nsec;
47657
47658- nsec = pdata->hw_if.get_tstamp_time(pdata);
47659+ nsec = pdata->hw_if->get_tstamp_time(pdata);
47660
47661 return nsec;
47662 }
47663@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
47664
47665 spin_lock_irqsave(&pdata->tstamp_lock, flags);
47666
47667- pdata->hw_if.update_tstamp_addend(pdata, addend);
47668+ pdata->hw_if->update_tstamp_addend(pdata, addend);
47669
47670 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
47671
47672diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
47673index f9ec762..988c969 100644
47674--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
47675+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
47676@@ -668,8 +668,8 @@ struct xgbe_prv_data {
47677 int dev_irq;
47678 unsigned int per_channel_irq;
47679
47680- struct xgbe_hw_if hw_if;
47681- struct xgbe_desc_if desc_if;
47682+ const struct xgbe_hw_if *hw_if;
47683+ const struct xgbe_desc_if *desc_if;
47684
47685 /* AXI DMA settings */
47686 unsigned int axdomain;
47687@@ -787,6 +787,9 @@ struct xgbe_prv_data {
47688 #endif
47689 };
47690
47691+extern const struct xgbe_hw_if default_xgbe_hw_if;
47692+extern const struct xgbe_desc_if default_xgbe_desc_if;
47693+
47694 /* Function prototypes*/
47695
47696 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
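Taken together, the xgbe hunks implement one refactor: the per-device, probe-time-filled hw_if/desc_if function-pointer tables become single const file-scope objects (default_xgbe_hw_if, default_xgbe_desc_if), struct xgbe_prv_data holds const pointers to them, and every &pdata->hw_if user becomes pdata->hw_if. That removes a large writable block of function pointers from each device instance; note the now-unused xgbe_init_function_ptrs_dev() prototype is left behind in the header. The shape of the refactor, reduced to a sketch with invented names:

struct demo_hw_if {
        int  (*init)(void *pdata);
        void (*exit)(void *pdata);
};

static int  demo_init(void *pdata) { return 0; }
static void demo_exit(void *pdata) { }

/* One compile-time-initialized instance, placeable in .rodata ... */
static const struct demo_hw_if default_demo_hw_if = {
        .init = demo_init,
        .exit = demo_exit,
};

struct demo_prv_data {
        /* was: struct demo_hw_if hw_if;   (writable copy per device) */
        const struct demo_hw_if *hw_if;  /* now: pointer to const table */
};

/* ... and probe simply aims the pointer instead of copying callbacks. */
static void demo_probe(struct demo_prv_data *pdata)
{
        pdata->hw_if = &default_demo_hw_if;
        pdata->hw_if->init(pdata);
}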
47697diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47698index adcacda..fa6e0ae 100644
47699--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47700+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47701@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
47702 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
47703 {
47704 /* RX_MODE controlling object */
47705- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
47706+ bnx2x_init_rx_mode_obj(bp);
47707
47708 /* multicast configuration controlling object */
47709 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
47710diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47711index 07cdf9b..b08ecc7 100644
47712--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47713+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47714@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
47715 return rc;
47716 }
47717
47718-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47719- struct bnx2x_rx_mode_obj *o)
47720+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
47721 {
47722 if (CHIP_IS_E1x(bp)) {
47723- o->wait_comp = bnx2x_empty_rx_mode_wait;
47724- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
47725+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
47726+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
47727 } else {
47728- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
47729- o->config_rx_mode = bnx2x_set_rx_mode_e2;
47730+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
47731+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
47732 }
47733 }
47734
47735diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47736index 86baecb..ff3bb46 100644
47737--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47738+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47739@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
47740
47741 /********************* RX MODE ****************/
47742
47743-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47744- struct bnx2x_rx_mode_obj *o);
47745+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
47746
47747 /**
47748 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
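The bnx2x change drops the output-pointer parameter of bnx2x_init_rx_mode_obj() and stores the callbacks through bp->rx_mode_obj directly, plausibly so the writes happen on the embedding object rather than behind an aliasing pointer that the constify analysis would have to track. The selection logic is unchanged; in miniature, with stand-in types:

struct demo_rx_mode_obj {
        int (*wait_comp)(void);
        int (*config_rx_mode)(void);
};

struct demo_bp {
        struct demo_rx_mode_obj rx_mode_obj;
        int is_e1x;
};

static int wait_e1x(void) { return 0; }
static int cfg_e1x(void)  { return 0; }
static int wait_e2(void)  { return 0; }
static int cfg_e2(void)   { return 0; }

/* was: init(bp, struct demo_rx_mode_obj *o) storing through "o" */
static void demo_init_rx_mode_obj(struct demo_bp *bp)
{
        if (bp->is_e1x) {
                bp->rx_mode_obj.wait_comp      = wait_e1x;
                bp->rx_mode_obj.config_rx_mode = cfg_e1x;
        } else {
                bp->rx_mode_obj.wait_comp      = wait_e2;
                bp->rx_mode_obj.config_rx_mode = cfg_e2;
        }
}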
47749diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
47750index 31c9f82..e65e986 100644
47751--- a/drivers/net/ethernet/broadcom/tg3.h
47752+++ b/drivers/net/ethernet/broadcom/tg3.h
47753@@ -150,6 +150,7 @@
47754 #define CHIPREV_ID_5750_A0 0x4000
47755 #define CHIPREV_ID_5750_A1 0x4001
47756 #define CHIPREV_ID_5750_A3 0x4003
47757+#define CHIPREV_ID_5750_C1 0x4201
47758 #define CHIPREV_ID_5750_C2 0x4202
47759 #define CHIPREV_ID_5752_A0_HW 0x5000
47760 #define CHIPREV_ID_5752_A0 0x6000
47761diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
47762index 903466e..b285864 100644
47763--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
47764+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
47765@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
47766 }
47767
47768 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
47769- bna_cb_ioceth_enable,
47770- bna_cb_ioceth_disable,
47771- bna_cb_ioceth_hbfail,
47772- bna_cb_ioceth_reset
47773+ .enable_cbfn = bna_cb_ioceth_enable,
47774+ .disable_cbfn = bna_cb_ioceth_disable,
47775+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
47776+ .reset_cbfn = bna_cb_ioceth_reset
47777 };
47778
47779 static void bna_attr_init(struct bna_ioceth *ioceth)
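The bna hunk converts positional initializers to designated ones. A positional initializer silently mis-wires every callback if a field of struct bfa_ioc_cbfn is ever added or reordered; the designated form binds by name and is immune, and it also keeps the code correct under structure-layout randomization. In miniature, with a stand-in struct:

struct demo_cbfn {
        void (*enable_cbfn)(void *arg);
        void (*disable_cbfn)(void *arg);
};

static void on_enable(void *arg)  { }
static void on_disable(void *arg) { }

/* was: { on_enable, on_disable } -- breaks silently on field reorder */
static struct demo_cbfn demo_ioc_cbfn = {
        .enable_cbfn  = on_enable,
        .disable_cbfn = on_disable,
};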
47780diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47781index 8cffcdf..aadf043 100644
47782--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47783+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47784@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
47785 */
47786 struct l2t_skb_cb {
47787 arp_failure_handler_func arp_failure_handler;
47788-};
47789+} __no_const;
47790
47791 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
47792
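__no_const is grsecurity's opt-out from the constify plugin: a structure consisting only of function pointers would otherwise be forced const everywhere, but l2t_skb_cb is overlaid on skb->cb[] and rewritten for every packet, so it has to stay writable. Sketch; the fallback #define only makes the fragment self-contained:

#ifndef __no_const
# define __no_const     /* expands to a plugin-visible attribute under grsecurity */
#endif

typedef void (*arp_failure_handler_func)(void *dev, void *skb);

/* All-function-pointer structs are constified by default; this one is
 * scribbled into each skb's control buffer at runtime, so it opts out. */
struct demo_l2t_skb_cb {
        arp_failure_handler_func arp_failure_handler;
} __no_const;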
47793diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47794index ccf3436..b720d77 100644
47795--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47796+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47797@@ -2277,7 +2277,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
47798
47799 int i;
47800 struct adapter *ap = netdev2adap(dev);
47801- static const unsigned int *reg_ranges;
47802+ const unsigned int *reg_ranges;
47803 int arr_size = 0, buf_size = 0;
47804
47805 if (is_t4(ap->params.chip)) {
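Dropping "static" in get_regs() matters more than it looks: the pointer itself was a single function-lifetime cell shared by all callers yet re-pointed on every call, i.e. a writable global alias to the register tables and a latent race under concurrent ethtool reads. As an automatic local, nothing writable outlives the call. Reduced sketch (register values invented):

static const unsigned int t4_ranges[] = { 0x1008, 0x1108 };
static const unsigned int t5_ranges[] = { 0x1008, 0x10c0 };

static void demo_get_regs(int chip_is_t4)
{
        /* was: static const unsigned int *reg_ranges; -- one shared,
         * writable slot, re-pointed by every caller in parallel */
        const unsigned int *reg_ranges;

        reg_ranges = chip_is_t4 ? t4_ranges : t5_ranges;
        (void)reg_ranges;       /* would be copied out to ethtool here */
}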
47806diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
47807index badff18..e15c4ec 100644
47808--- a/drivers/net/ethernet/dec/tulip/de4x5.c
47809+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
47810@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
47811 for (i=0; i<ETH_ALEN; i++) {
47812 tmp.addr[i] = dev->dev_addr[i];
47813 }
47814- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
47815+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
47816 break;
47817
47818 case DE4X5_SET_HWADDR: /* Set the hardware address */
47819@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
47820 spin_lock_irqsave(&lp->lock, flags);
47821 memcpy(&statbuf, &lp->pktStats, ioc->len);
47822 spin_unlock_irqrestore(&lp->lock, flags);
47823- if (copy_to_user(ioc->data, &statbuf, ioc->len))
47824+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
47825 return -EFAULT;
47826 break;
47827 }
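Both de4x5 hunks add the same missing guard: ioc->len is fully user-controlled, and the unguarded copy_to_user() could read past the end of a fixed-size kernel buffer (tmp.addr above, statbuf below), leaking adjacent kernel stack memory. The generic pattern as a kernel-style helper; the function name is invented:

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Cap a user-supplied length against the real kernel buffer size
 * before letting it drive copy_to_user(). */
static int demo_copy_capped(void __user *dst, const void *src,
                            size_t user_len, size_t buf_len)
{
        if (user_len > buf_len)         /* oversized len == infoleak */
                return -EFAULT;
        return copy_to_user(dst, src, user_len) ? -EFAULT : 0;
}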
47828diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
47829index d48806b..41cd80f 100644
47830--- a/drivers/net/ethernet/emulex/benet/be_main.c
47831+++ b/drivers/net/ethernet/emulex/benet/be_main.c
47832@@ -537,7 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
47833
47834 if (wrapped)
47835 newacc += 65536;
47836- ACCESS_ONCE(*acc) = newacc;
47837+ ACCESS_ONCE_RW(*acc) = newacc;
47838 }
47839
47840 static void populate_erx_stats(struct be_adapter *adapter,
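ACCESS_ONCE_RW is a grsecurity split of the old ACCESS_ONCE(): the plain form gains a const qualifier so stray writes through it stop compiling, and intentional write sites such as this statistics update are spelled _RW. The same conversion recurs below in i40e, ixgbe, mlx4 and sfc. Assuming definitions along these lines (a sketch, not the verbatim macros):

#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

static unsigned long demo_acc;

static void demo_store(unsigned long newacc)
{
        /* ACCESS_ONCE(demo_acc) = newacc; -- would now fail to compile:
         * the lvalue is const-qualified */
        ACCESS_ONCE_RW(demo_acc) = newacc;      /* explicit, audited write */
}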
47841diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
47842index 6d0c5d5..55be363 100644
47843--- a/drivers/net/ethernet/faraday/ftgmac100.c
47844+++ b/drivers/net/ethernet/faraday/ftgmac100.c
47845@@ -30,6 +30,8 @@
47846 #include <linux/netdevice.h>
47847 #include <linux/phy.h>
47848 #include <linux/platform_device.h>
47849+#include <linux/interrupt.h>
47850+#include <linux/irqreturn.h>
47851 #include <net/ip.h>
47852
47853 #include "ftgmac100.h"
47854diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
47855index dce5f7b..2433466 100644
47856--- a/drivers/net/ethernet/faraday/ftmac100.c
47857+++ b/drivers/net/ethernet/faraday/ftmac100.c
47858@@ -31,6 +31,8 @@
47859 #include <linux/module.h>
47860 #include <linux/netdevice.h>
47861 #include <linux/platform_device.h>
47862+#include <linux/interrupt.h>
47863+#include <linux/irqreturn.h>
47864
47865 #include "ftmac100.h"
47866
47867diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47868index 6d1ec92..4d5d97d 100644
47869--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47870+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47871@@ -407,7 +407,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
47872 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
47873
47874 /* Update the base adjustement value. */
47875- ACCESS_ONCE(pf->ptp_base_adj) = incval;
47876+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
47877 smp_mb(); /* Force the above update. */
47878 }
47879
47880diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47881index 5fd4b52..87aa34b 100644
47882--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47883+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47884@@ -794,7 +794,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
47885 }
47886
47887 /* update the base incval used to calculate frequency adjustment */
47888- ACCESS_ONCE(adapter->base_incval) = incval;
47889+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
47890 smp_mb();
47891
47892 /* need lock to prevent incorrect read while modifying cyclecounter */
47893diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47894index e3357bf..d4d5348 100644
47895--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47896+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47897@@ -466,8 +466,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
47898 wmb();
47899
47900 /* we want to dirty this cache line once */
47901- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
47902- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
47903+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
47904+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
47905
47906 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
47907
47908diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
47909index 2bbd01f..e8baa64 100644
47910--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
47911+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
47912@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
47913 struct __vxge_hw_fifo *fifo;
47914 struct vxge_hw_fifo_config *config;
47915 u32 txdl_size, txdl_per_memblock;
47916- struct vxge_hw_mempool_cbs fifo_mp_callback;
47917+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
47918+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
47919+ };
47920+
47921 struct __vxge_hw_virtualpath *vpath;
47922
47923 if ((vp == NULL) || (attr == NULL)) {
47924@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
47925 goto exit;
47926 }
47927
47928- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
47929-
47930 fifo->mempool =
47931 __vxge_hw_mempool_create(vpath->hldev,
47932 fifo->config->memblock_size,
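The vxge hunk moves a callback table off the stack: fifo_mp_callback used to be a per-call automatic struct whose function pointer was stored at runtime; as a static object with a designated initializer it is built at compile time and, under the constify plugin, can live in read-only memory. In miniature, with invented names:

struct demo_mempool_cbs {
        void (*item_func_alloc)(void *item);
};

static void demo_item_alloc(void *item) { }

static void demo_fifo_create(void)
{
        /* was: struct demo_mempool_cbs cbs;           (stack, writable)
         *      cbs.item_func_alloc = demo_item_alloc; (runtime store)  */
        static struct demo_mempool_cbs cbs = {
                .item_func_alloc = demo_item_alloc,
        };

        (void)cbs;      /* handed to the mempool constructor in the driver */
}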
47933diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47934index 2bb48d5..d1a865d 100644
47935--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47936+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47937@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
47938 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
47939 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
47940 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
47941- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
47942+ pax_open_kernel();
47943+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
47944+ pax_close_kernel();
47945 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
47946 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
47947 max_tx_rings = QLCNIC_MAX_TX_RINGS;
47948diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47949index be7d7a6..a8983f8 100644
47950--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47951+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47952@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
47953 case QLCNIC_NON_PRIV_FUNC:
47954 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
47955 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
47956- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
47957+ pax_open_kernel();
47958+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
47959+ pax_close_kernel();
47960 break;
47961 case QLCNIC_PRIV_FUNC:
47962 ahw->op_mode = QLCNIC_PRIV_FUNC;
47963 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
47964- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
47965+ pax_open_kernel();
47966+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
47967+ pax_close_kernel();
47968 break;
47969 case QLCNIC_MGMT_FUNC:
47970 ahw->op_mode = QLCNIC_MGMT_FUNC;
47971 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
47972- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
47973+ pax_open_kernel();
47974+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
47975+ pax_close_kernel();
47976 break;
47977 default:
47978 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
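The qlcnic hunks show the patch's second write idiom for constified ops tables: the plugin has made init_driver const-qualified, so a direct member store no longer compiles, and the single store is instead punched through a *(void **)& cast inside a pax_open_kernel() window. Sketch with invented type names, assuming the grsecurity helpers:

struct demo_nic_template {
        int (*init_driver)(void *adapter);
};

static void demo_set_init_driver(struct demo_nic_template *ops,
                                 int (*fn)(void *))
{
        /* ops points into write-protected, constified memory: cast the
         * const away and bracket the one store with the pax window. */
        pax_open_kernel();
        *(void **)&ops->init_driver = fn;
        pax_close_kernel();
}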
47979diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
47980index c9f57fb..208bdc1 100644
47981--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
47982+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
47983@@ -1285,7 +1285,7 @@ flash_temp:
47984 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
47985 {
47986 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
47987- static const struct qlcnic_dump_operations *fw_dump_ops;
47988+ const struct qlcnic_dump_operations *fw_dump_ops;
47989 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
47990 u32 entry_offset, dump, no_entries, buf_offset = 0;
47991 int i, k, ops_cnt, ops_index, dump_size = 0;
47992diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
47993index 14a1c5c..38a141d 100644
47994--- a/drivers/net/ethernet/realtek/r8169.c
47995+++ b/drivers/net/ethernet/realtek/r8169.c
47996@@ -788,22 +788,22 @@ struct rtl8169_private {
47997 struct mdio_ops {
47998 void (*write)(struct rtl8169_private *, int, int);
47999 int (*read)(struct rtl8169_private *, int);
48000- } mdio_ops;
48001+ } __no_const mdio_ops;
48002
48003 struct pll_power_ops {
48004 void (*down)(struct rtl8169_private *);
48005 void (*up)(struct rtl8169_private *);
48006- } pll_power_ops;
48007+ } __no_const pll_power_ops;
48008
48009 struct jumbo_ops {
48010 void (*enable)(struct rtl8169_private *);
48011 void (*disable)(struct rtl8169_private *);
48012- } jumbo_ops;
48013+ } __no_const jumbo_ops;
48014
48015 struct csi_ops {
48016 void (*write)(struct rtl8169_private *, int, int);
48017 u32 (*read)(struct rtl8169_private *, int);
48018- } csi_ops;
48019+ } __no_const csi_ops;
48020
48021 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
48022 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
48023diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
48024index 6b861e3..204ac86 100644
48025--- a/drivers/net/ethernet/sfc/ptp.c
48026+++ b/drivers/net/ethernet/sfc/ptp.c
48027@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
48028 ptp->start.dma_addr);
48029
48030 /* Clear flag that signals MC ready */
48031- ACCESS_ONCE(*start) = 0;
48032+ ACCESS_ONCE_RW(*start) = 0;
48033 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
48034 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
48035 EFX_BUG_ON_PARANOID(rc);
48036diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48037index 08c483b..2c4a553 100644
48038--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48039+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48040@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
48041
48042 writel(value, ioaddr + MMC_CNTRL);
48043
48044- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48045- MMC_CNTRL, value);
48046+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48047+// MMC_CNTRL, value);
48048 }
48049
48050 /* To mask all all interrupts.*/
48051diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
48052index 384ca4f..dd7d4f9 100644
48053--- a/drivers/net/hyperv/hyperv_net.h
48054+++ b/drivers/net/hyperv/hyperv_net.h
48055@@ -171,7 +171,7 @@ struct rndis_device {
48056 enum rndis_device_state state;
48057 bool link_state;
48058 bool link_change;
48059- atomic_t new_req_id;
48060+ atomic_unchecked_t new_req_id;
48061
48062 spinlock_t request_lock;
48063 struct list_head req_list;
48064diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
48065index ec0c40a..c9e42eb 100644
48066--- a/drivers/net/hyperv/rndis_filter.c
48067+++ b/drivers/net/hyperv/rndis_filter.c
48068@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
48069 * template
48070 */
48071 set = &rndis_msg->msg.set_req;
48072- set->req_id = atomic_inc_return(&dev->new_req_id);
48073+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48074
48075 /* Add to the request list */
48076 spin_lock_irqsave(&dev->request_lock, flags);
48077@@ -912,7 +912,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
48078
48079 /* Setup the rndis set */
48080 halt = &request->request_msg.msg.halt_req;
48081- halt->req_id = atomic_inc_return(&dev->new_req_id);
48082+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48083
48084 /* Ignore return since this msg is optional. */
48085 rndis_filter_send_request(dev, request);
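atomic_unchecked_t is the other recurring conversion here (sierra_net below gets the same one): under PaX's REFCOUNT hardening a plain atomic_t traps on overflow, since it usually guards object lifetimes; counters that merely hand out request IDs may wrap harmlessly, so they move to the unchecked API to avoid false positives. Sketch, assuming the grsecurity types:

#include <linux/atomic.h>

/* A wrap-tolerant ID source; overflow here is harmless by design. */
static atomic_unchecked_t demo_req_id = ATOMIC_INIT(0);

static u32 demo_next_req_id(void)
{
        return atomic_inc_return_unchecked(&demo_req_id);
}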
48086diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
48087index 34f846b..4a0d5b1 100644
48088--- a/drivers/net/ifb.c
48089+++ b/drivers/net/ifb.c
48090@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
48091 return 0;
48092 }
48093
48094-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
48095+static struct rtnl_link_ops ifb_link_ops = {
48096 .kind = "ifb",
48097 .priv_size = sizeof(struct ifb_private),
48098 .setup = ifb_setup,
48099diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
48100index 612e073..a9f5eda 100644
48101--- a/drivers/net/macvlan.c
48102+++ b/drivers/net/macvlan.c
48103@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
48104 free_nskb:
48105 kfree_skb(nskb);
48106 err:
48107- atomic_long_inc(&skb->dev->rx_dropped);
48108+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
48109 }
48110
48111 static void macvlan_flush_sources(struct macvlan_port *port,
48112@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
48113 int macvlan_link_register(struct rtnl_link_ops *ops)
48114 {
48115 /* common fields */
48116- ops->priv_size = sizeof(struct macvlan_dev);
48117- ops->validate = macvlan_validate;
48118- ops->maxtype = IFLA_MACVLAN_MAX;
48119- ops->policy = macvlan_policy;
48120- ops->changelink = macvlan_changelink;
48121- ops->get_size = macvlan_get_size;
48122- ops->fill_info = macvlan_fill_info;
48123+ pax_open_kernel();
48124+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
48125+ *(void **)&ops->validate = macvlan_validate;
48126+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
48127+ *(const void **)&ops->policy = macvlan_policy;
48128+ *(void **)&ops->changelink = macvlan_changelink;
48129+ *(void **)&ops->get_size = macvlan_get_size;
48130+ *(void **)&ops->fill_info = macvlan_fill_info;
48131+ pax_close_kernel();
48132
48133 return rtnl_link_register(ops);
48134 };
48135@@ -1545,7 +1547,7 @@ static int macvlan_device_event(struct notifier_block *unused,
48136 return NOTIFY_DONE;
48137 }
48138
48139-static struct notifier_block macvlan_notifier_block __read_mostly = {
48140+static struct notifier_block macvlan_notifier_block = {
48141 .notifier_call = macvlan_device_event,
48142 };
48143
48144diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
48145index 919f4fc..012f6dd 100644
48146--- a/drivers/net/macvtap.c
48147+++ b/drivers/net/macvtap.c
48148@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
48149 dev->tx_queue_len = TUN_READQ_SIZE;
48150 }
48151
48152-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
48153+static struct rtnl_link_ops macvtap_link_ops = {
48154 .kind = "macvtap",
48155 .setup = macvtap_setup,
48156 .newlink = macvtap_newlink,
48157@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
48158 } /* else everything is zero */
48159 }
48160
48161+/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
48162+#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
48163+
48164 /* Get packet from user space buffer */
48165 static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
48166 struct iov_iter *from, int noblock)
48167 {
48168- int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
48169+ int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
48170 struct sk_buff *skb;
48171 struct macvlan_dev *vlan;
48172 unsigned long total_len = iov_iter_count(from);
48173@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
48174 linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
48175 }
48176
48177- skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
48178+ skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
48179 linear, noblock, &err);
48180 if (!skb)
48181 goto err;
48182@@ -1030,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
48183
48184 ret = 0;
48185 u = q->flags;
48186- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48187+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48188 put_user(u, &ifr->ifr_flags))
48189 ret = -EFAULT;
48190 macvtap_put_vlan(vlan);
48191@@ -1214,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
48192 return NOTIFY_DONE;
48193 }
48194
48195-static struct notifier_block macvtap_notifier_block __read_mostly = {
48196+static struct notifier_block macvtap_notifier_block = {
48197 .notifier_call = macvtap_device_event,
48198 };
48199
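The MACVTAP_RESERVE hunks are a functional fix riding along in this patch: reserving only NET_IP_ALIGN left the Ethernet header misaligned with respect to HH_DATA_MOD, which the neighbour layer's cached-header copy assumes. HH_DATA_OFF(ETH_HLEN) computes the headroom that makes the 14-byte header end exactly on a 16-byte boundary. The arithmetic, checked against the stock definitions from <linux/netdevice.h> and <linux/if_ether.h>:

#define HH_DATA_MOD     16
#define HH_DATA_OFF(__len) \
        (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define ETH_HLEN        14

/* 14 - 1 = 13; 13 & 15 = 13; 16 - (13 + 1) = 2 bytes of reserve, so
 * data starts at offset 2 and the header ends at 2 + 14 = 16: aligned. */
_Static_assert(HH_DATA_OFF(ETH_HLEN) == 2,
               "reserve must leave the Ethernet header HH_DATA_MOD-aligned");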
48200diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
48201index 34924df..a747360 100644
48202--- a/drivers/net/nlmon.c
48203+++ b/drivers/net/nlmon.c
48204@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
48205 return 0;
48206 }
48207
48208-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
48209+static struct rtnl_link_ops nlmon_link_ops = {
48210 .kind = "nlmon",
48211 .priv_size = sizeof(struct nlmon),
48212 .setup = nlmon_setup,
48213diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
48214index 3fc91e8..6c36337 100644
48215--- a/drivers/net/phy/phy_device.c
48216+++ b/drivers/net/phy/phy_device.c
48217@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
48218 * zero on success.
48219 *
48220 */
48221-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48222+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
48223 struct phy_c45_device_ids *c45_ids) {
48224 int phy_reg;
48225 int i, reg_addr;
48226@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48227 * its return value is in turn returned.
48228 *
48229 */
48230-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48231+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
48232 bool is_c45, struct phy_c45_device_ids *c45_ids)
48233 {
48234 int phy_reg;
48235@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48236 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
48237 {
48238 struct phy_c45_device_ids c45_ids = {0};
48239- u32 phy_id = 0;
48240+ int phy_id = 0;
48241 int r;
48242
48243 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
48244diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
48245index af034db..1611c0b2 100644
48246--- a/drivers/net/ppp/ppp_generic.c
48247+++ b/drivers/net/ppp/ppp_generic.c
48248@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48249 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
48250 struct ppp_stats stats;
48251 struct ppp_comp_stats cstats;
48252- char *vers;
48253
48254 switch (cmd) {
48255 case SIOCGPPPSTATS:
48256@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48257 break;
48258
48259 case SIOCGPPPVER:
48260- vers = PPP_VERSION;
48261- if (copy_to_user(addr, vers, strlen(vers) + 1))
48262+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
48263 break;
48264 err = 0;
48265 break;
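The SIOCGPPPVER change trades strlen(vers) + 1 on a string literal for sizeof(PPP_VERSION): applied to a literal, sizeof already counts the terminating NUL and is a compile-time constant, so both the vers temporary and the runtime strlen() disappear. A userspace check:

#include <assert.h>
#include <string.h>

#define PPP_VERSION "2.4.2"     /* the driver's actual version string */

int main(void)
{
        /* sizeof counts the NUL; strlen does not, hence the "+ 1". */
        assert(sizeof(PPP_VERSION) == strlen(PPP_VERSION) + 1);
        return 0;
}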
48266diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
48267index 079f7ad..b2a2bfa7 100644
48268--- a/drivers/net/slip/slhc.c
48269+++ b/drivers/net/slip/slhc.c
48270@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
48271 register struct tcphdr *thp;
48272 register struct iphdr *ip;
48273 register struct cstate *cs;
48274- int len, hdrlen;
48275+ long len, hdrlen;
48276 unsigned char *cp = icp;
48277
48278 /* We've got a compressed packet; read the change byte */
48279diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
48280index f7ff493..153e0198 100644
48281--- a/drivers/net/team/team.c
48282+++ b/drivers/net/team/team.c
48283@@ -2105,7 +2105,7 @@ static unsigned int team_get_num_rx_queues(void)
48284 return TEAM_DEFAULT_NUM_RX_QUEUES;
48285 }
48286
48287-static struct rtnl_link_ops team_link_ops __read_mostly = {
48288+static struct rtnl_link_ops team_link_ops = {
48289 .kind = DRV_NAME,
48290 .priv_size = sizeof(struct team),
48291 .setup = team_setup,
48292@@ -2895,7 +2895,7 @@ static int team_device_event(struct notifier_block *unused,
48293 return NOTIFY_DONE;
48294 }
48295
48296-static struct notifier_block team_notifier_block __read_mostly = {
48297+static struct notifier_block team_notifier_block = {
48298 .notifier_call = team_device_event,
48299 };
48300
48301diff --git a/drivers/net/tun.c b/drivers/net/tun.c
48302index 10f9e40..3515e7e 100644
48303--- a/drivers/net/tun.c
48304+++ b/drivers/net/tun.c
48305@@ -1425,7 +1425,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
48306 return -EINVAL;
48307 }
48308
48309-static struct rtnl_link_ops tun_link_ops __read_mostly = {
48310+static struct rtnl_link_ops tun_link_ops = {
48311 .kind = DRV_NAME,
48312 .priv_size = sizeof(struct tun_struct),
48313 .setup = tun_setup,
48314@@ -1827,7 +1827,7 @@ unlock:
48315 }
48316
48317 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48318- unsigned long arg, int ifreq_len)
48319+ unsigned long arg, size_t ifreq_len)
48320 {
48321 struct tun_file *tfile = file->private_data;
48322 struct tun_struct *tun;
48323@@ -1841,6 +1841,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48324 int le;
48325 int ret;
48326
48327+ if (ifreq_len > sizeof ifr)
48328+ return -EFAULT;
48329+
48330 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
48331 if (copy_from_user(&ifr, argp, ifreq_len))
48332 return -EFAULT;
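The tun hunk widens ifreq_len to size_t and rejects any length larger than the on-stack struct ifreq before copy_from_user() runs. In mainline the callers only pass sizeof(struct ifreq) or the smaller compat size, so this reads as defensive hardening of the stack buffer rather than a fix for a known bad caller. The guard in isolation:

#include <linux/errno.h>
#include <linux/if.h>
#include <linux/uaccess.h>

static long demo_chr_ioctl(void __user *argp, size_t ifreq_len)
{
        struct ifreq ifr;

        if (ifreq_len > sizeof ifr)     /* never overrun the stack buffer */
                return -EFAULT;
        if (copy_from_user(&ifr, argp, ifreq_len))
                return -EFAULT;
        /* ... dispatch on cmd as __tun_chr_ioctl() does ... */
        return 0;
}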
48333diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
48334index 9c5aa92..8cd0405 100644
48335--- a/drivers/net/usb/hso.c
48336+++ b/drivers/net/usb/hso.c
48337@@ -71,7 +71,7 @@
48338 #include <asm/byteorder.h>
48339 #include <linux/serial_core.h>
48340 #include <linux/serial.h>
48341-
48342+#include <asm/local.h>
48343
48344 #define MOD_AUTHOR "Option Wireless"
48345 #define MOD_DESCRIPTION "USB High Speed Option driver"
48346@@ -1178,7 +1178,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
48347 struct urb *urb;
48348
48349 urb = serial->rx_urb[0];
48350- if (serial->port.count > 0) {
48351+ if (atomic_read(&serial->port.count) > 0) {
48352 count = put_rxbuf_data(urb, serial);
48353 if (count == -1)
48354 return;
48355@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
48356 DUMP1(urb->transfer_buffer, urb->actual_length);
48357
48358 /* Anyone listening? */
48359- if (serial->port.count == 0)
48360+ if (atomic_read(&serial->port.count) == 0)
48361 return;
48362
48363 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
48364@@ -1278,8 +1278,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48365 tty_port_tty_set(&serial->port, tty);
48366
48367 /* check for port already opened, if not set the termios */
48368- serial->port.count++;
48369- if (serial->port.count == 1) {
48370+ if (atomic_inc_return(&serial->port.count) == 1) {
48371 serial->rx_state = RX_IDLE;
48372 /* Force default termio settings */
48373 _hso_serial_set_termios(tty, NULL);
48374@@ -1289,7 +1288,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48375 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
48376 if (result) {
48377 hso_stop_serial_device(serial->parent);
48378- serial->port.count--;
48379+ atomic_dec(&serial->port.count);
48380 kref_put(&serial->parent->ref, hso_serial_ref_free);
48381 }
48382 } else {
48383@@ -1326,10 +1325,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
48384
48385 /* reset the rts and dtr */
48386 /* do the actual close */
48387- serial->port.count--;
48388+ atomic_dec(&serial->port.count);
48389
48390- if (serial->port.count <= 0) {
48391- serial->port.count = 0;
48392+ if (atomic_read(&serial->port.count) <= 0) {
48393+ atomic_set(&serial->port.count, 0);
48394 tty_port_tty_set(&serial->port, NULL);
48395 if (!usb_gone)
48396 hso_stop_serial_device(serial->parent);
48397@@ -1404,7 +1403,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
48398
48399 /* the actual setup */
48400 spin_lock_irqsave(&serial->serial_lock, flags);
48401- if (serial->port.count)
48402+ if (atomic_read(&serial->port.count))
48403 _hso_serial_set_termios(tty, old);
48404 else
48405 tty->termios = *old;
48406@@ -1873,7 +1872,7 @@ static void intr_callback(struct urb *urb)
48407 D1("Pending read interrupt on port %d\n", i);
48408 spin_lock(&serial->serial_lock);
48409 if (serial->rx_state == RX_IDLE &&
48410- serial->port.count > 0) {
48411+ atomic_read(&serial->port.count) > 0) {
48412 /* Setup and send a ctrl req read on
48413 * port i */
48414 if (!serial->rx_urb_filled[0]) {
48415@@ -3046,7 +3045,7 @@ static int hso_resume(struct usb_interface *iface)
48416 /* Start all serial ports */
48417 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
48418 if (serial_table[i] && (serial_table[i]->interface == iface)) {
48419- if (dev2ser(serial_table[i])->port.count) {
48420+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
48421 result =
48422 hso_start_serial_device(serial_table[i], GFP_NOIO);
48423 hso_kick_transmit(dev2ser(serial_table[i]));
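
The hso.c conversion exists because "serial->port.count++" followed by "if (serial->port.count == 1)" is two separate operations: two openers racing can both miss (or both take) the first-open path. atomic_inc_return() collapses increment and test into one indivisible step. A C11-atomics analogue, names illustrative:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count = 0;

static void serial_open(void)
{
        /* atomic_fetch_add returns the old value; +1 mirrors the
         * kernel's atomic_inc_return() result */
        if (atomic_fetch_add(&port_count, 1) + 1 == 1)
                puts("first opener: initialise termios");
        else
                puts("already open: skip init");
}

static void serial_close(void)
{
        if (atomic_fetch_sub(&port_count, 1) - 1 <= 0) {
                atomic_store(&port_count, 0);
                puts("last closer: stop device");
        }
}

int main(void)
{
        serial_open();
        serial_open();
        serial_close();
        serial_close();
        return 0;
}
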
48424diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
48425index bf405f1..fd847ee 100644
48426--- a/drivers/net/usb/r8152.c
48427+++ b/drivers/net/usb/r8152.c
48428@@ -571,7 +571,7 @@ struct r8152 {
48429 void (*unload)(struct r8152 *);
48430 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
48431 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
48432- } rtl_ops;
48433+ } __no_const rtl_ops;
48434
48435 int intr_interval;
48436 u32 saved_wolopts;
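
rtl_ops is filled in at probe time depending on the detected chip, so under grsecurity's constify plugin, which moves function-pointer-only structs into read-only memory, it must be tagged __no_const to stay writable. A sketch of the two cases; struct chip_ops and the functions below are illustrative:

struct chip_ops {
        int (*open)(void);
        int (*close)(void);
};

static int generic_open(void)  { return 0; }
static int generic_close(void) { return 0; }

/* fully known at build time: safe for the plugin to constify
 * and place in .rodata */
static const struct chip_ops fixed_ops = {
        .open  = generic_open,
        .close = generic_close,
};

/* chosen per chip variant at runtime: must stay writable,
 * hence the __no_const opt-out in the patch */
static struct chip_ops runtime_ops;

static void probe(void)
{
        runtime_ops.open  = generic_open;   /* would fault if const */
        runtime_ops.close = generic_close;
}

int main(void)
{
        probe();
        return fixed_ops.open() + runtime_ops.close();
}
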
48437diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
48438index a2515887..6d13233 100644
48439--- a/drivers/net/usb/sierra_net.c
48440+++ b/drivers/net/usb/sierra_net.c
48441@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
48442 /* atomic counter partially included in MAC address to make sure 2 devices
48443 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
48444 */
48445-static atomic_t iface_counter = ATOMIC_INIT(0);
48446+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
48447
48448 /*
48449 * SYNC Timer Delay definition used to set the expiry time
48450@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
48451 dev->net->netdev_ops = &sierra_net_device_ops;
48452
48453 /* change MAC addr to include, ifacenum, and to be unique */
48454- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
48455+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
48456 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
48457
48458 /* we will have to manufacture ethernet headers, prepare template */
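
atomic_unchecked_t is the opt-out from PaX's REFCOUNT hardening, which traps on atomic_t overflow to stop reference-count exploits. iface_counter is meant to wrap, only its low byte lands in the MAC address, so overflow here is by design. A userspace analogue with a deliberately wrapping counter, names illustrative:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint iface_counter;       /* wrapping is fine here */

static unsigned char next_mac_byte(void)
{
        /* mirrors atomic_inc_return_unchecked(); only the low
         * 8 bits are used, so overflow is harmless by design */
        return (unsigned char)(atomic_fetch_add(&iface_counter, 1) + 1);
}

int main(void)
{
        for (int i = 0; i < 3; i++)
                printf("dev_addr[4] = %u\n", next_mac_byte());
        return 0;
}
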
48459diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
48460index 059fdf1..7543217 100644
48461--- a/drivers/net/virtio_net.c
48462+++ b/drivers/net/virtio_net.c
48463@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
48464 #define RECEIVE_AVG_WEIGHT 64
48465
48466 /* Minimum alignment for mergeable packet buffers. */
48467-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
48468+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
48469
48470 #define VIRTNET_DRIVER_VERSION "1.0.0"
48471
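
The kernel's max() insists both operands have the same type; L1_CACHE_BYTES is unsigned long on many configs while a bare 256 is int, and mixed signed/unsigned comparison is where bugs hide. The UL suffix aligns the types. What mixed-type max can do with a naive macro:

#include <stdio.h>

#define naive_max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        unsigned long cache_bytes = 64UL;  /* stand-in for L1_CACHE_BYTES */
        int neg = -1;

        /* -1 converts to ULONG_MAX in the comparison, so the "max"
         * of 64 and -1 comes out as a huge value: */
        printf("mixed:   %lu\n", (unsigned long)naive_max(cache_bytes, neg));
        /* with both operands unsigned long the result is sane: */
        printf("matched: %lu\n", naive_max(cache_bytes, 256UL));
        return 0;
}
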
48472diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
48473index a8c755d..a988b71 100644
48474--- a/drivers/net/vxlan.c
48475+++ b/drivers/net/vxlan.c
48476@@ -2702,7 +2702,7 @@ nla_put_failure:
48477 return -EMSGSIZE;
48478 }
48479
48480-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
48481+static struct rtnl_link_ops vxlan_link_ops = {
48482 .kind = "vxlan",
48483 .maxtype = IFLA_VXLAN_MAX,
48484 .policy = vxlan_policy,
48485@@ -2749,7 +2749,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
48486 return NOTIFY_DONE;
48487 }
48488
48489-static struct notifier_block vxlan_notifier_block __read_mostly = {
48490+static struct notifier_block vxlan_notifier_block = {
48491 .notifier_call = vxlan_lowerdev_event,
48492 };
48493
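
__read_mostly places a variable in a writable .data..read_mostly section for cache locality, which conflicts with constification: an ops table that is never written after registration can instead be made const and live in .rodata, where stray writes fault. A rough illustration of the placement difference, assuming GCC attribute syntax; the section name and struct below are illustrative:

struct link_ops {
        const char *kind;
        int (*setup)(void);
};

static int dummy_setup(void) { return 0; }

/* writable, cache-friendly placement: */
static struct link_ops rw_ops
        __attribute__((section(".data.read_mostly"))) = {
        .kind = "demo", .setup = dummy_setup,
};

/* read-only placement; any runtime write is a fault: */
static const struct link_ops ro_ops = {
        .kind = "demo", .setup = dummy_setup,
};

int main(void)
{
        return rw_ops.setup() + ro_ops.setup();
}
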
48494diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
48495index 5920c99..ff2e4a5 100644
48496--- a/drivers/net/wan/lmc/lmc_media.c
48497+++ b/drivers/net/wan/lmc/lmc_media.c
48498@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
48499 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
48500
48501 lmc_media_t lmc_ds3_media = {
48502- lmc_ds3_init, /* special media init stuff */
48503- lmc_ds3_default, /* reset to default state */
48504- lmc_ds3_set_status, /* reset status to state provided */
48505- lmc_dummy_set_1, /* set clock source */
48506- lmc_dummy_set2_1, /* set line speed */
48507- lmc_ds3_set_100ft, /* set cable length */
48508- lmc_ds3_set_scram, /* set scrambler */
48509- lmc_ds3_get_link_status, /* get link status */
48510- lmc_dummy_set_1, /* set link status */
48511- lmc_ds3_set_crc_length, /* set CRC length */
48512- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48513- lmc_ds3_watchdog
48514+ .init = lmc_ds3_init, /* special media init stuff */
48515+ .defaults = lmc_ds3_default, /* reset to default state */
48516+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
48517+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
48518+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48519+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
48520+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
48521+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
48522+ .set_link_status = lmc_dummy_set_1, /* set link status */
48523+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
48524+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48525+ .watchdog = lmc_ds3_watchdog
48526 };
48527
48528 lmc_media_t lmc_hssi_media = {
48529- lmc_hssi_init, /* special media init stuff */
48530- lmc_hssi_default, /* reset to default state */
48531- lmc_hssi_set_status, /* reset status to state provided */
48532- lmc_hssi_set_clock, /* set clock source */
48533- lmc_dummy_set2_1, /* set line speed */
48534- lmc_dummy_set_1, /* set cable length */
48535- lmc_dummy_set_1, /* set scrambler */
48536- lmc_hssi_get_link_status, /* get link status */
48537- lmc_hssi_set_link_status, /* set link status */
48538- lmc_hssi_set_crc_length, /* set CRC length */
48539- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48540- lmc_hssi_watchdog
48541+ .init = lmc_hssi_init, /* special media init stuff */
48542+ .defaults = lmc_hssi_default, /* reset to default state */
48543+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
48544+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
48545+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48546+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48547+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48548+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
48549+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
48550+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
48551+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48552+ .watchdog = lmc_hssi_watchdog
48553 };
48554
48555-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
48556- lmc_ssi_default, /* reset to default state */
48557- lmc_ssi_set_status, /* reset status to state provided */
48558- lmc_ssi_set_clock, /* set clock source */
48559- lmc_ssi_set_speed, /* set line speed */
48560- lmc_dummy_set_1, /* set cable length */
48561- lmc_dummy_set_1, /* set scrambler */
48562- lmc_ssi_get_link_status, /* get link status */
48563- lmc_ssi_set_link_status, /* set link status */
48564- lmc_ssi_set_crc_length, /* set CRC length */
48565- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48566- lmc_ssi_watchdog
48567+lmc_media_t lmc_ssi_media = {
48568+ .init = lmc_ssi_init, /* special media init stuff */
48569+ .defaults = lmc_ssi_default, /* reset to default state */
48570+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
48571+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
48572+ .set_speed = lmc_ssi_set_speed, /* set line speed */
48573+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48574+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48575+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
48576+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
48577+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
48578+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48579+ .watchdog = lmc_ssi_watchdog
48580 };
48581
48582 lmc_media_t lmc_t1_media = {
48583- lmc_t1_init, /* special media init stuff */
48584- lmc_t1_default, /* reset to default state */
48585- lmc_t1_set_status, /* reset status to state provided */
48586- lmc_t1_set_clock, /* set clock source */
48587- lmc_dummy_set2_1, /* set line speed */
48588- lmc_dummy_set_1, /* set cable length */
48589- lmc_dummy_set_1, /* set scrambler */
48590- lmc_t1_get_link_status, /* get link status */
48591- lmc_dummy_set_1, /* set link status */
48592- lmc_t1_set_crc_length, /* set CRC length */
48593- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48594- lmc_t1_watchdog
48595+ .init = lmc_t1_init, /* special media init stuff */
48596+ .defaults = lmc_t1_default, /* reset to default state */
48597+ .set_status = lmc_t1_set_status, /* reset status to state provided */
48598+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
48599+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48600+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48601+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48602+ .get_link_status = lmc_t1_get_link_status, /* get link status */
48603+ .set_link_status = lmc_dummy_set_1, /* set link status */
48604+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
48605+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48606+ .watchdog = lmc_t1_watchdog
48607 };
48608
48609 static void
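
The lmc_media.c rewrite converts positional initializers to designated ones. Positional initialization binds values by declaration order, so any reordering of lmc_media_t's members, including deliberate reordering by a struct-layout-randomizing plugin, silently wires the wrong function into each slot; designated initializers bind by name. A compact demonstration:

#include <stdio.h>

struct media_ops {
        void (*init)(void);
        void (*watchdog)(void);
};

static void ds3_init(void)     { puts("init"); }
static void ds3_watchdog(void) { puts("watchdog"); }

/* fragile: breaks if 'watchdog' is ever declared before 'init' */
static struct media_ops positional = { ds3_init, ds3_watchdog };

/* robust: each slot is named, declaration order is irrelevant */
static struct media_ops designated = {
        .init     = ds3_init,
        .watchdog = ds3_watchdog,
};

int main(void)
{
        positional.init();
        designated.watchdog();
        return 0;
}
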
48610diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
48611index feacc3b..5bac0de 100644
48612--- a/drivers/net/wan/z85230.c
48613+++ b/drivers/net/wan/z85230.c
48614@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
48615
48616 struct z8530_irqhandler z8530_sync =
48617 {
48618- z8530_rx,
48619- z8530_tx,
48620- z8530_status
48621+ .rx = z8530_rx,
48622+ .tx = z8530_tx,
48623+ .status = z8530_status
48624 };
48625
48626 EXPORT_SYMBOL(z8530_sync);
48627@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
48628 }
48629
48630 static struct z8530_irqhandler z8530_dma_sync = {
48631- z8530_dma_rx,
48632- z8530_dma_tx,
48633- z8530_dma_status
48634+ .rx = z8530_dma_rx,
48635+ .tx = z8530_dma_tx,
48636+ .status = z8530_dma_status
48637 };
48638
48639 static struct z8530_irqhandler z8530_txdma_sync = {
48640- z8530_rx,
48641- z8530_dma_tx,
48642- z8530_dma_status
48643+ .rx = z8530_rx,
48644+ .tx = z8530_dma_tx,
48645+ .status = z8530_dma_status
48646 };
48647
48648 /**
48649@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
48650
48651 struct z8530_irqhandler z8530_nop=
48652 {
48653- z8530_rx_clear,
48654- z8530_tx_clear,
48655- z8530_status_clear
48656+ .rx = z8530_rx_clear,
48657+ .tx = z8530_tx_clear,
48658+ .status = z8530_status_clear
48659 };
48660
48661
48662diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
48663index 0b60295..b8bfa5b 100644
48664--- a/drivers/net/wimax/i2400m/rx.c
48665+++ b/drivers/net/wimax/i2400m/rx.c
48666@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
48667 if (i2400m->rx_roq == NULL)
48668 goto error_roq_alloc;
48669
48670- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
48671+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
48672 GFP_KERNEL);
48673 if (rd == NULL) {
48674 result = -ENOMEM;
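
kcalloc(n, size, flags) performs an overflow-checked multiply of its first two arguments, so the swap in the i2400m hunk leaves behaviour identical; it appears aimed at the size_overflow plugin's tracking of which argument carries the element size, though that rationale is inferred, not stated in the patch. The overflow check itself, in userspace terms:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *checked_calloc(size_t n, size_t size)
{
        if (size != 0 && n > SIZE_MAX / size)
                return NULL;            /* n * size would wrap */
        return calloc(n, size);
}

int main(void)
{
        void *ok  = checked_calloc(16, 64);
        void *bad = checked_calloc(SIZE_MAX / 2, 4);

        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
}
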
48675diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
48676index e71a2ce..2268d61 100644
48677--- a/drivers/net/wireless/airo.c
48678+++ b/drivers/net/wireless/airo.c
48679@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
48680 struct airo_info *ai = dev->ml_priv;
48681 int ridcode;
48682 int enabled;
48683- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48684+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48685 unsigned char *iobuf;
48686
48687 /* Only super-user can write RIDs */
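
Dropping static from the local writer pointer in writerids() fixes two things at once: a static local lives in writable .data and is shared by all concurrent ioctl callers, so two tasks can clobber each other's selected writer, and a long-lived writable function pointer is a convenient corruption target. An automatic variable is private to each call. Sketch, names illustrative:

#include <stdio.h>

typedef int (*writer_fn)(const char *);

static int wr_a(const char *s) { printf("A:%s\n", s); return 0; }
static int wr_b(const char *s) { printf("B:%s\n", s); return 1; }

static int write_rid(int ridcode, const char *buf)
{
        writer_fn writer;   /* per-call; not 'static writer_fn writer;' */

        writer = (ridcode & 1) ? wr_b : wr_a;
        return writer(buf);
}

int main(void)
{
        write_rid(0, "even rid");
        write_rid(1, "odd rid");
        return 0;
}
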
48688diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
48689index da92bfa..5a9001a 100644
48690--- a/drivers/net/wireless/at76c50x-usb.c
48691+++ b/drivers/net/wireless/at76c50x-usb.c
48692@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
48693 }
48694
48695 /* Convert timeout from the DFU status to jiffies */
48696-static inline unsigned long at76_get_timeout(struct dfu_status *s)
48697+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
48698 {
48699 return msecs_to_jiffies((s->poll_timeout[2] << 16)
48700 | (s->poll_timeout[1] << 8)
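
__intentional_overflow() marks functions whose arithmetic is allowed to wrap, so the size_overflow plugin does not instrument them; here the timeout assembly and jiffies conversion may legitimately exceed naive bounds. The classic intentional-wrap idiom such a plugin must tolerate:

#include <stdint.h>
#include <stdio.h>

/* jiffies-style tick arithmetic relies on unsigned wraparound:
 * (later - earlier) is correct even across the wrap point */
static uint32_t ticks_elapsed(uint32_t now, uint32_t then)
{
        return now - then;      /* intentional modular arithmetic */
}

int main(void)
{
        uint32_t then = 0xFFFFFFF0u;    /* just before the wrap */
        uint32_t now  = 0x00000010u;    /* just after it        */

        printf("elapsed = %u ticks\n", ticks_elapsed(now, then)); /* 32 */
        return 0;
}
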
48701diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
48702index f1946a6..cd367fb 100644
48703--- a/drivers/net/wireless/ath/ath10k/htc.c
48704+++ b/drivers/net/wireless/ath/ath10k/htc.c
48705@@ -851,7 +851,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
48706 /* registered target arrival callback from the HIF layer */
48707 int ath10k_htc_init(struct ath10k *ar)
48708 {
48709- struct ath10k_hif_cb htc_callbacks;
48710+ static struct ath10k_hif_cb htc_callbacks = {
48711+ .rx_completion = ath10k_htc_rx_completion_handler,
48712+ .tx_completion = ath10k_htc_tx_completion_handler,
48713+ };
48714 struct ath10k_htc_ep *ep = NULL;
48715 struct ath10k_htc *htc = &ar->htc;
48716
48717@@ -860,8 +863,6 @@ int ath10k_htc_init(struct ath10k *ar)
48718 ath10k_htc_reset_endpoint_states(htc);
48719
48720 /* setup HIF layer callbacks */
48721- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
48722- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
48723 htc->ar = ar;
48724
48725 /* Get HIF default pipe for HTC message exchange */
48726diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
48727index 527179c..a890150 100644
48728--- a/drivers/net/wireless/ath/ath10k/htc.h
48729+++ b/drivers/net/wireless/ath/ath10k/htc.h
48730@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
48731
48732 struct ath10k_htc_ops {
48733 void (*target_send_suspend_complete)(struct ath10k *ar);
48734-};
48735+} __no_const;
48736
48737 struct ath10k_htc_ep_ops {
48738 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
48739 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
48740 void (*ep_tx_credits)(struct ath10k *);
48741-};
48742+} __no_const;
48743
48744 /* service connection information */
48745 struct ath10k_htc_svc_conn_req {
48746diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48747index f816909..e56cd8b 100644
48748--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48749+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48750@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48751 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
48752 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
48753
48754- ACCESS_ONCE(ads->ds_link) = i->link;
48755- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
48756+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
48757+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
48758
48759 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
48760 ctl6 = SM(i->keytype, AR_EncrType);
48761@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48762
48763 if ((i->is_first || i->is_last) &&
48764 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
48765- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
48766+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
48767 | set11nTries(i->rates, 1)
48768 | set11nTries(i->rates, 2)
48769 | set11nTries(i->rates, 3)
48770 | (i->dur_update ? AR_DurUpdateEna : 0)
48771 | SM(0, AR_BurstDur);
48772
48773- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
48774+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
48775 | set11nRate(i->rates, 1)
48776 | set11nRate(i->rates, 2)
48777 | set11nRate(i->rates, 3);
48778 } else {
48779- ACCESS_ONCE(ads->ds_ctl2) = 0;
48780- ACCESS_ONCE(ads->ds_ctl3) = 0;
48781+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
48782+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
48783 }
48784
48785 if (!i->is_first) {
48786- ACCESS_ONCE(ads->ds_ctl0) = 0;
48787- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48788- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48789+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
48790+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48791+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48792 return;
48793 }
48794
48795@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48796 break;
48797 }
48798
48799- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48800+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48801 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48802 | SM(i->txpower[0], AR_XmitPower0)
48803 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48804@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48805 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
48806 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
48807
48808- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48809- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48810+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48811+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48812
48813 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
48814 return;
48815
48816- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48817+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48818 | set11nPktDurRTSCTS(i->rates, 1);
48819
48820- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48821+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48822 | set11nPktDurRTSCTS(i->rates, 3);
48823
48824- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48825+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48826 | set11nRateFlags(i->rates, 1)
48827 | set11nRateFlags(i->rates, 2)
48828 | set11nRateFlags(i->rates, 3)
48829 | SM(i->rtscts_rate, AR_RTSCTSRate);
48830
48831- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48832- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48833- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48834+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48835+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48836+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48837 }
48838
48839 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
48840diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48841index da84b70..83e4978 100644
48842--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48843+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48844@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48845 (i->qcu << AR_TxQcuNum_S) | desc_len;
48846
48847 checksum += val;
48848- ACCESS_ONCE(ads->info) = val;
48849+ ACCESS_ONCE_RW(ads->info) = val;
48850
48851 checksum += i->link;
48852- ACCESS_ONCE(ads->link) = i->link;
48853+ ACCESS_ONCE_RW(ads->link) = i->link;
48854
48855 checksum += i->buf_addr[0];
48856- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
48857+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
48858 checksum += i->buf_addr[1];
48859- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
48860+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
48861 checksum += i->buf_addr[2];
48862- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
48863+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
48864 checksum += i->buf_addr[3];
48865- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
48866+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
48867
48868 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
48869- ACCESS_ONCE(ads->ctl3) = val;
48870+ ACCESS_ONCE_RW(ads->ctl3) = val;
48871 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
48872- ACCESS_ONCE(ads->ctl5) = val;
48873+ ACCESS_ONCE_RW(ads->ctl5) = val;
48874 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
48875- ACCESS_ONCE(ads->ctl7) = val;
48876+ ACCESS_ONCE_RW(ads->ctl7) = val;
48877 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
48878- ACCESS_ONCE(ads->ctl9) = val;
48879+ ACCESS_ONCE_RW(ads->ctl9) = val;
48880
48881 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
48882- ACCESS_ONCE(ads->ctl10) = checksum;
48883+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
48884
48885 if (i->is_first || i->is_last) {
48886- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
48887+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
48888 | set11nTries(i->rates, 1)
48889 | set11nTries(i->rates, 2)
48890 | set11nTries(i->rates, 3)
48891 | (i->dur_update ? AR_DurUpdateEna : 0)
48892 | SM(0, AR_BurstDur);
48893
48894- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
48895+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
48896 | set11nRate(i->rates, 1)
48897 | set11nRate(i->rates, 2)
48898 | set11nRate(i->rates, 3);
48899 } else {
48900- ACCESS_ONCE(ads->ctl13) = 0;
48901- ACCESS_ONCE(ads->ctl14) = 0;
48902+ ACCESS_ONCE_RW(ads->ctl13) = 0;
48903+ ACCESS_ONCE_RW(ads->ctl14) = 0;
48904 }
48905
48906 ads->ctl20 = 0;
48907@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48908
48909 ctl17 = SM(i->keytype, AR_EncrType);
48910 if (!i->is_first) {
48911- ACCESS_ONCE(ads->ctl11) = 0;
48912- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
48913- ACCESS_ONCE(ads->ctl15) = 0;
48914- ACCESS_ONCE(ads->ctl16) = 0;
48915- ACCESS_ONCE(ads->ctl17) = ctl17;
48916- ACCESS_ONCE(ads->ctl18) = 0;
48917- ACCESS_ONCE(ads->ctl19) = 0;
48918+ ACCESS_ONCE_RW(ads->ctl11) = 0;
48919+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
48920+ ACCESS_ONCE_RW(ads->ctl15) = 0;
48921+ ACCESS_ONCE_RW(ads->ctl16) = 0;
48922+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
48923+ ACCESS_ONCE_RW(ads->ctl18) = 0;
48924+ ACCESS_ONCE_RW(ads->ctl19) = 0;
48925 return;
48926 }
48927
48928- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
48929+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
48930 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48931 | SM(i->txpower[0], AR_XmitPower0)
48932 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48933@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48934 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
48935 ctl12 |= SM(val, AR_PAPRDChainMask);
48936
48937- ACCESS_ONCE(ads->ctl12) = ctl12;
48938- ACCESS_ONCE(ads->ctl17) = ctl17;
48939+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
48940+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
48941
48942- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
48943+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
48944 | set11nPktDurRTSCTS(i->rates, 1);
48945
48946- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
48947+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
48948 | set11nPktDurRTSCTS(i->rates, 3);
48949
48950- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
48951+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
48952 | set11nRateFlags(i->rates, 1)
48953 | set11nRateFlags(i->rates, 2)
48954 | set11nRateFlags(i->rates, 3)
48955 | SM(i->rtscts_rate, AR_RTSCTSRate);
48956
48957- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
48958+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
48959
48960- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
48961- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
48962- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
48963+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
48964+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
48965+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
48966 }
48967
48968 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
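
Across this hunk and the ar9002 one above, every descriptor store moves to ACCESS_ONCE_RW(). Under grsecurity, ACCESS_ONCE() is redefined with a const-qualified cast so it can only read, and the audited write sites are converted to the RW form; an unconverted write then fails to compile instead of silently bypassing the hardening. A sketch of the macro pair, assuming the const-volatile definition the patch relies on:

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
        unsigned int ds_link = 0;

        ACCESS_ONCE_RW(ds_link) = 0xdeadbeef;   /* write form: allowed  */
        printf("%x\n", ACCESS_ONCE(ds_link));   /* read form: allowed   */
        /* ACCESS_ONCE(ds_link) = 0; */         /* rejected: const lvalue */
        return 0;
}
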
48969diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
48970index 1cbd335..27dfb40 100644
48971--- a/drivers/net/wireless/ath/ath9k/hw.h
48972+++ b/drivers/net/wireless/ath/ath9k/hw.h
48973@@ -640,7 +640,7 @@ struct ath_hw_private_ops {
48974
48975 /* ANI */
48976 void (*ani_cache_ini_regs)(struct ath_hw *ah);
48977-};
48978+} __no_const;
48979
48980 /**
48981 * struct ath_spec_scan - parameters for Atheros spectral scan
48982@@ -716,7 +716,7 @@ struct ath_hw_ops {
48983 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
48984 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
48985 #endif
48986-};
48987+} __no_const;
48988
48989 struct ath_nf_limits {
48990 s16 max;
48991diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
48992index 62b0bf4..4ae094c 100644
48993--- a/drivers/net/wireless/ath/ath9k/main.c
48994+++ b/drivers/net/wireless/ath/ath9k/main.c
48995@@ -2546,16 +2546,18 @@ void ath9k_fill_chanctx_ops(void)
48996 if (!ath9k_is_chanctx_enabled())
48997 return;
48998
48999- ath9k_ops.hw_scan = ath9k_hw_scan;
49000- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49001- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49002- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49003- ath9k_ops.add_chanctx = ath9k_add_chanctx;
49004- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49005- ath9k_ops.change_chanctx = ath9k_change_chanctx;
49006- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49007- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49008- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49009+ pax_open_kernel();
49010+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
49011+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49012+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49013+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49014+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
49015+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49016+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
49017+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49018+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49019+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49020+ pax_close_kernel();
49021 }
49022
49023 #endif
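
ath9k_ops is const after constification, so the chanctx hooks cannot be assigned directly; pax_open_kernel()/pax_close_kernel() briefly lift kernel write protection (on x86, by toggling CR0.WP) around writes made through a non-const-cast pointer. The same pattern recurs below for il3945, mac80211_hwsim, the wl1251/wl12xx/wl18xx drivers, of/fdt and several PCI hotplug drivers. A userspace analogue of the open-write-close shape, using mprotect():

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *page = mmap(NULL, pagesz, PROT_READ,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (page == MAP_FAILED)
                return 1;

        mprotect(page, pagesz, PROT_READ | PROT_WRITE); /* "open kernel"  */
        strcpy(page, "ops slot updated");
        mprotect(page, pagesz, PROT_READ);              /* "close kernel" */

        puts(page);     /* reads still work; another write would fault */
        return 0;
}
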
49024diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
49025index 058a9f2..d5cb1ba 100644
49026--- a/drivers/net/wireless/b43/phy_lp.c
49027+++ b/drivers/net/wireless/b43/phy_lp.c
49028@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
49029 {
49030 struct ssb_bus *bus = dev->dev->sdev->bus;
49031
49032- static const struct b206x_channel *chandata = NULL;
49033+ const struct b206x_channel *chandata = NULL;
49034 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
49035 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
49036 u16 old_comm15, scale;
49037diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
49038index dc1d20c..f7a4f06 100644
49039--- a/drivers/net/wireless/iwlegacy/3945-mac.c
49040+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
49041@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
49042 */
49043 if (il3945_mod_params.disable_hw_scan) {
49044 D_INFO("Disabling hw_scan\n");
49045- il3945_mac_ops.hw_scan = NULL;
49046+ pax_open_kernel();
49047+ *(void **)&il3945_mac_ops.hw_scan = NULL;
49048+ pax_close_kernel();
49049 }
49050
49051 D_INFO("*** LOAD DRIVER ***\n");
49052diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49053index 0ffb6ff..c0b7f0e 100644
49054--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49055+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49056@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
49057 {
49058 struct iwl_priv *priv = file->private_data;
49059 char buf[64];
49060- int buf_size;
49061+ size_t buf_size;
49062 u32 offset, len;
49063
49064 memset(buf, 0, sizeof(buf));
49065@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
49066 struct iwl_priv *priv = file->private_data;
49067
49068 char buf[8];
49069- int buf_size;
49070+ size_t buf_size;
49071 u32 reset_flag;
49072
49073 memset(buf, 0, sizeof(buf));
49074@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
49075 {
49076 struct iwl_priv *priv = file->private_data;
49077 char buf[8];
49078- int buf_size;
49079+ size_t buf_size;
49080 int ht40;
49081
49082 memset(buf, 0, sizeof(buf));
49083@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
49084 {
49085 struct iwl_priv *priv = file->private_data;
49086 char buf[8];
49087- int buf_size;
49088+ size_t buf_size;
49089 int value;
49090
49091 memset(buf, 0, sizeof(buf));
49092@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
49093 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
49094 DEBUGFS_READ_FILE_OPS(current_sleep_command);
49095
49096-static const char *fmt_value = " %-30s %10u\n";
49097-static const char *fmt_hex = " %-30s 0x%02X\n";
49098-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
49099-static const char *fmt_header =
49100+static const char fmt_value[] = " %-30s %10u\n";
49101+static const char fmt_hex[] = " %-30s 0x%02X\n";
49102+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
49103+static const char fmt_header[] =
49104 "%-32s current cumulative delta max\n";
49105
49106 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
49107@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
49108 {
49109 struct iwl_priv *priv = file->private_data;
49110 char buf[8];
49111- int buf_size;
49112+ size_t buf_size;
49113 int clear;
49114
49115 memset(buf, 0, sizeof(buf));
49116@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
49117 {
49118 struct iwl_priv *priv = file->private_data;
49119 char buf[8];
49120- int buf_size;
49121+ size_t buf_size;
49122 int trace;
49123
49124 memset(buf, 0, sizeof(buf));
49125@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
49126 {
49127 struct iwl_priv *priv = file->private_data;
49128 char buf[8];
49129- int buf_size;
49130+ size_t buf_size;
49131 int missed;
49132
49133 memset(buf, 0, sizeof(buf));
49134@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
49135
49136 struct iwl_priv *priv = file->private_data;
49137 char buf[8];
49138- int buf_size;
49139+ size_t buf_size;
49140 int plcp;
49141
49142 memset(buf, 0, sizeof(buf));
49143@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
49144
49145 struct iwl_priv *priv = file->private_data;
49146 char buf[8];
49147- int buf_size;
49148+ size_t buf_size;
49149 int flush;
49150
49151 memset(buf, 0, sizeof(buf));
49152@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
49153
49154 struct iwl_priv *priv = file->private_data;
49155 char buf[8];
49156- int buf_size;
49157+ size_t buf_size;
49158 int rts;
49159
49160 if (!priv->cfg->ht_params)
49161@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
49162 {
49163 struct iwl_priv *priv = file->private_data;
49164 char buf[8];
49165- int buf_size;
49166+ size_t buf_size;
49167
49168 memset(buf, 0, sizeof(buf));
49169 buf_size = min(count, sizeof(buf) - 1);
49170@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
49171 struct iwl_priv *priv = file->private_data;
49172 u32 event_log_flag;
49173 char buf[8];
49174- int buf_size;
49175+ size_t buf_size;
49176
49177 /* check that the interface is up */
49178 if (!iwl_is_ready(priv))
49179@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
49180 struct iwl_priv *priv = file->private_data;
49181 char buf[8];
49182 u32 calib_disabled;
49183- int buf_size;
49184+ size_t buf_size;
49185
49186 memset(buf, 0, sizeof(buf));
49187 buf_size = min(count, sizeof(buf) - 1);
49188diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
49189index 523fe0c..0d9473b 100644
49190--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
49191+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
49192@@ -1781,7 +1781,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
49193 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
49194
49195 char buf[8];
49196- int buf_size;
49197+ size_t buf_size;
49198 u32 reset_flag;
49199
49200 memset(buf, 0, sizeof(buf));
49201@@ -1802,7 +1802,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
49202 {
49203 struct iwl_trans *trans = file->private_data;
49204 char buf[8];
49205- int buf_size;
49206+ size_t buf_size;
49207 int csr;
49208
49209 memset(buf, 0, sizeof(buf));
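
Every one of the iwlwifi debugfs write handlers above computes buf_size = min(count, sizeof(buf) - 1); both operands are size_t, so storing the result in an int mixes signedness in every later comparison and copy length. Keeping size_t end-to-end means no possibly-negative value ever reaches a bound check. What a stray signed length does to such a check:

#include <stdio.h>

int main(void)
{
        char buf[8];
        int len = -1;           /* a length that went negative */

        /* mixed comparison: the int converts to size_t, -1 becomes
         * SIZE_MAX, and the bound check inverts its meaning */
        printf("signed view:   %d\n", len < (int)sizeof(buf));    /* 1 */
        printf("unsigned view: %d\n", (size_t)len < sizeof(buf)); /* 0 */
        (void)buf;
        return 0;
}
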
49210diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
49211index ef58a88..fafa731 100644
49212--- a/drivers/net/wireless/mac80211_hwsim.c
49213+++ b/drivers/net/wireless/mac80211_hwsim.c
49214@@ -3066,20 +3066,20 @@ static int __init init_mac80211_hwsim(void)
49215 if (channels < 1)
49216 return -EINVAL;
49217
49218- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
49219- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49220- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49221- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49222- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49223- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49224- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49225- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49226- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49227- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49228- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
49229- mac80211_hwsim_assign_vif_chanctx;
49230- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
49231- mac80211_hwsim_unassign_vif_chanctx;
49232+ pax_open_kernel();
49233+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
49234+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49235+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49236+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49237+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49238+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49239+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49240+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49241+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49242+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49243+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
49244+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
49245+ pax_close_kernel();
49246
49247 spin_lock_init(&hwsim_radio_lock);
49248 INIT_LIST_HEAD(&hwsim_radios);
49249diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
49250index 1a4facd..a2ecbbd 100644
49251--- a/drivers/net/wireless/rndis_wlan.c
49252+++ b/drivers/net/wireless/rndis_wlan.c
49253@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
49254
49255 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
49256
49257- if (rts_threshold < 0 || rts_threshold > 2347)
49258+ if (rts_threshold > 2347)
49259 rts_threshold = 2347;
49260
49261 tmp = cpu_to_le32(rts_threshold);
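
rts_threshold is u32, so the removed "rts_threshold < 0" arm was dead code: an unsigned value is never negative, and GCC's -Wtype-limits warns about exactly this comparison. Demonstration:

#include <stdio.h>

int main(void)
{
        unsigned int rts = 0u - 1u;     /* "negative" input wraps to 4294967295 */

        if (rts < 0)                    /* always false for unsigned */
                puts("unreachable");
        if (rts > 2347)                 /* the clamp that actually fires */
                rts = 2347;

        printf("rts = %u\n", rts);
        return 0;
}
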
49262diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
49263index 9bb398b..b0cc047 100644
49264--- a/drivers/net/wireless/rt2x00/rt2x00.h
49265+++ b/drivers/net/wireless/rt2x00/rt2x00.h
49266@@ -375,7 +375,7 @@ struct rt2x00_intf {
49267 * for hardware which doesn't support hardware
49268 * sequence counting.
49269 */
49270- atomic_t seqno;
49271+ atomic_unchecked_t seqno;
49272 };
49273
49274 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
49275diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
49276index 66ff364..3ce34f7 100644
49277--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
49278+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
49279@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
49280 * sequence counter given by mac80211.
49281 */
49282 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
49283- seqno = atomic_add_return(0x10, &intf->seqno);
49284+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
49285 else
49286- seqno = atomic_read(&intf->seqno);
49287+ seqno = atomic_read_unchecked(&intf->seqno);
49288
49289 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
49290 hdr->seq_ctrl |= cpu_to_le16(seqno);
49291diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
49292index b661f896..ddf7d2b 100644
49293--- a/drivers/net/wireless/ti/wl1251/sdio.c
49294+++ b/drivers/net/wireless/ti/wl1251/sdio.c
49295@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
49296
49297 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
49298
49299- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49300- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49301+ pax_open_kernel();
49302+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49303+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49304+ pax_close_kernel();
49305
49306 wl1251_info("using dedicated interrupt line");
49307 } else {
49308- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49309- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49310+ pax_open_kernel();
49311+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49312+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49313+ pax_close_kernel();
49314
49315 wl1251_info("using SDIO interrupt");
49316 }
49317diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
49318index d6d0d6d..60c23a0 100644
49319--- a/drivers/net/wireless/ti/wl12xx/main.c
49320+++ b/drivers/net/wireless/ti/wl12xx/main.c
49321@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49322 sizeof(wl->conf.mem));
49323
49324 /* read data preparation is only needed by wl127x */
49325- wl->ops->prepare_read = wl127x_prepare_read;
49326+ pax_open_kernel();
49327+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49328+ pax_close_kernel();
49329
49330 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49331 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49332@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49333 sizeof(wl->conf.mem));
49334
49335 /* read data preparation is only needed by wl127x */
49336- wl->ops->prepare_read = wl127x_prepare_read;
49337+ pax_open_kernel();
49338+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49339+ pax_close_kernel();
49340
49341 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49342 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49343diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
49344index 8e56261..9140678 100644
49345--- a/drivers/net/wireless/ti/wl18xx/main.c
49346+++ b/drivers/net/wireless/ti/wl18xx/main.c
49347@@ -1916,8 +1916,10 @@ static int wl18xx_setup(struct wl1271 *wl)
49348 }
49349
49350 if (!checksum_param) {
49351- wl18xx_ops.set_rx_csum = NULL;
49352- wl18xx_ops.init_vif = NULL;
49353+ pax_open_kernel();
49354+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
49355+ *(void **)&wl18xx_ops.init_vif = NULL;
49356+ pax_close_kernel();
49357 }
49358
49359 /* Enable 11a Band only if we have 5G antennas */
49360diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
49361index a912dc0..a8225ba 100644
49362--- a/drivers/net/wireless/zd1211rw/zd_usb.c
49363+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
49364@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
49365 {
49366 struct zd_usb *usb = urb->context;
49367 struct zd_usb_interrupt *intr = &usb->intr;
49368- int len;
49369+ unsigned int len;
49370 u16 int_num;
49371
49372 ZD_ASSERT(in_interrupt());
49373diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
49374index ce2e2cf..f81e500 100644
49375--- a/drivers/nfc/nfcwilink.c
49376+++ b/drivers/nfc/nfcwilink.c
49377@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
49378
49379 static int nfcwilink_probe(struct platform_device *pdev)
49380 {
49381- static struct nfcwilink *drv;
49382+ struct nfcwilink *drv;
49383 int rc;
49384 __u32 protocols;
49385
49386diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
49387index f2596c8..50d53af 100644
49388--- a/drivers/nfc/st21nfca/st21nfca.c
49389+++ b/drivers/nfc/st21nfca/st21nfca.c
49390@@ -559,7 +559,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
49391 goto exit;
49392 }
49393
49394- gate = uid_skb->data;
49395+ memcpy(gate, uid_skb->data, uid_skb->len);
49396 *len = uid_skb->len;
49397 exit:
49398 kfree_skb(uid_skb);
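
The st21nfca hunk fixes a real bug rather than adding hardening: "gate = uid_skb->data" only rebinds the callee's local pointer, so the caller's buffer stays unfilled while the skb (and the UID bytes) are freed on exit; memcpy() into the caller-supplied buffer is the fix. The bug class in miniature, names illustrative and error handling elided:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void get_uid_broken(unsigned char *out, size_t *len)
{
        unsigned char *skb_data = malloc(4);

        memcpy(skb_data, "\x01\x02\x03\x04", 4);
        out = skb_data;         /* BUG: rebinds the local copy only */
        *len = 4;
        free(skb_data);         /* and the UID bytes are gone too */
}

static void get_uid_fixed(unsigned char *out, size_t *len)
{
        unsigned char *skb_data = malloc(4);

        memcpy(skb_data, "\x01\x02\x03\x04", 4);
        memcpy(out, skb_data, 4);       /* fill the caller's buffer */
        *len = 4;
        free(skb_data);
}

int main(void)
{
        unsigned char uid[8] = { 0 };
        size_t len = 0;

        get_uid_fixed(uid, &len);
        printf("uid = %02x %02x %02x %02x (len %zu)\n",
               uid[0], uid[1], uid[2], uid[3], len);
        (void)get_uid_broken;
        return 0;
}
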
49399diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
49400index 5100742..6ad4e6d 100644
49401--- a/drivers/of/fdt.c
49402+++ b/drivers/of/fdt.c
49403@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
49404 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
49405 return 0;
49406 }
49407- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49408+ pax_open_kernel();
49409+ *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49410+ pax_close_kernel();
49411 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
49412 }
49413 late_initcall(of_fdt_raw_init);
49414diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
49415index d93b2b6..ae50401 100644
49416--- a/drivers/oprofile/buffer_sync.c
49417+++ b/drivers/oprofile/buffer_sync.c
49418@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
49419 if (cookie == NO_COOKIE)
49420 offset = pc;
49421 if (cookie == INVALID_COOKIE) {
49422- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49423+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49424 offset = pc;
49425 }
49426 if (cookie != last_cookie) {
49427@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
49428 /* add userspace sample */
49429
49430 if (!mm) {
49431- atomic_inc(&oprofile_stats.sample_lost_no_mm);
49432+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
49433 return 0;
49434 }
49435
49436 cookie = lookup_dcookie(mm, s->eip, &offset);
49437
49438 if (cookie == INVALID_COOKIE) {
49439- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49440+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49441 return 0;
49442 }
49443
49444@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
49445 /* ignore backtraces if failed to add a sample */
49446 if (state == sb_bt_start) {
49447 state = sb_bt_ignore;
49448- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
49449+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
49450 }
49451 }
49452 release_mm(mm);
49453diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
49454index c0cc4e7..44d4e54 100644
49455--- a/drivers/oprofile/event_buffer.c
49456+++ b/drivers/oprofile/event_buffer.c
49457@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
49458 }
49459
49460 if (buffer_pos == buffer_size) {
49461- atomic_inc(&oprofile_stats.event_lost_overflow);
49462+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
49463 return;
49464 }
49465
49466diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
49467index ed2c3ec..deda85a 100644
49468--- a/drivers/oprofile/oprof.c
49469+++ b/drivers/oprofile/oprof.c
49470@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
49471 if (oprofile_ops.switch_events())
49472 return;
49473
49474- atomic_inc(&oprofile_stats.multiplex_counter);
49475+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
49476 start_switch_worker();
49477 }
49478
49479diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
49480index ee2cfce..7f8f699 100644
49481--- a/drivers/oprofile/oprofile_files.c
49482+++ b/drivers/oprofile/oprofile_files.c
49483@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
49484
49485 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
49486
49487-static ssize_t timeout_read(struct file *file, char __user *buf,
49488+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
49489 size_t count, loff_t *offset)
49490 {
49491 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
49492diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
49493index 59659ce..6c860a0 100644
49494--- a/drivers/oprofile/oprofile_stats.c
49495+++ b/drivers/oprofile/oprofile_stats.c
49496@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
49497 cpu_buf->sample_invalid_eip = 0;
49498 }
49499
49500- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
49501- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
49502- atomic_set(&oprofile_stats.event_lost_overflow, 0);
49503- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
49504- atomic_set(&oprofile_stats.multiplex_counter, 0);
49505+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
49506+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
49507+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
49508+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
49509+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
49510 }
49511
49512
49513diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
49514index 1fc622b..8c48fc3 100644
49515--- a/drivers/oprofile/oprofile_stats.h
49516+++ b/drivers/oprofile/oprofile_stats.h
49517@@ -13,11 +13,11 @@
49518 #include <linux/atomic.h>
49519
49520 struct oprofile_stat_struct {
49521- atomic_t sample_lost_no_mm;
49522- atomic_t sample_lost_no_mapping;
49523- atomic_t bt_lost_no_mapping;
49524- atomic_t event_lost_overflow;
49525- atomic_t multiplex_counter;
49526+ atomic_unchecked_t sample_lost_no_mm;
49527+ atomic_unchecked_t sample_lost_no_mapping;
49528+ atomic_unchecked_t bt_lost_no_mapping;
49529+ atomic_unchecked_t event_lost_overflow;
49530+ atomic_unchecked_t multiplex_counter;
49531 };
49532
49533 extern struct oprofile_stat_struct oprofile_stats;
49534diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
49535index 3f49345..c750d0b 100644
49536--- a/drivers/oprofile/oprofilefs.c
49537+++ b/drivers/oprofile/oprofilefs.c
49538@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
49539
49540 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
49541 {
49542- atomic_t *val = file->private_data;
49543- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
49544+ atomic_unchecked_t *val = file->private_data;
49545+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
49546 }
49547
49548
49549@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
49550
49551
49552 int oprofilefs_create_ro_atomic(struct dentry *root,
49553- char const *name, atomic_t *val)
49554+ char const *name, atomic_unchecked_t *val)
49555 {
49556 return __oprofilefs_create_file(root, name,
49557 &atomic_ro_fops, 0444, val);
49558diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
49559index bdef916..88c7dee 100644
49560--- a/drivers/oprofile/timer_int.c
49561+++ b/drivers/oprofile/timer_int.c
49562@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
49563 return NOTIFY_OK;
49564 }
49565
49566-static struct notifier_block __refdata oprofile_cpu_notifier = {
49567+static struct notifier_block oprofile_cpu_notifier = {
49568 .notifier_call = oprofile_cpu_notify,
49569 };
49570
49571diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
49572index 3b47080..6cd05dd 100644
49573--- a/drivers/parport/procfs.c
49574+++ b/drivers/parport/procfs.c
49575@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
49576
49577 *ppos += len;
49578
49579- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
49580+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
49581 }
49582
49583 #ifdef CONFIG_PARPORT_1284
49584@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
49585
49586 *ppos += len;
49587
49588- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
49589+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
49590 }
49591 #endif /* IEEE1284.3 support. */
49592
49593diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
49594index 6ca2399..68d866b 100644
49595--- a/drivers/pci/hotplug/acpiphp_ibm.c
49596+++ b/drivers/pci/hotplug/acpiphp_ibm.c
49597@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
49598 goto init_cleanup;
49599 }
49600
49601- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49602+ pax_open_kernel();
49603+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49604+ pax_close_kernel();
49605 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
49606
49607 return retval;
49608diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
49609index 66b7bbe..26bee78 100644
49610--- a/drivers/pci/hotplug/cpcihp_generic.c
49611+++ b/drivers/pci/hotplug/cpcihp_generic.c
49612@@ -73,7 +73,6 @@ static u16 port;
49613 static unsigned int enum_bit;
49614 static u8 enum_mask;
49615
49616-static struct cpci_hp_controller_ops generic_hpc_ops;
49617 static struct cpci_hp_controller generic_hpc;
49618
49619 static int __init validate_parameters(void)
49620@@ -139,6 +138,10 @@ static int query_enum(void)
49621 return ((value & enum_mask) == enum_mask);
49622 }
49623
49624+static struct cpci_hp_controller_ops generic_hpc_ops = {
49625+ .query_enum = query_enum,
49626+};
49627+
49628 static int __init cpcihp_generic_init(void)
49629 {
49630 int status;
49631@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
49632 pci_dev_put(dev);
49633
49634 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
49635- generic_hpc_ops.query_enum = query_enum;
49636 generic_hpc.ops = &generic_hpc_ops;
49637
49638 status = cpci_hp_register_controller(&generic_hpc);
49639diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
49640index 7ecf34e..effed62 100644
49641--- a/drivers/pci/hotplug/cpcihp_zt5550.c
49642+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
49643@@ -59,7 +59,6 @@
49644 /* local variables */
49645 static bool debug;
49646 static bool poll;
49647-static struct cpci_hp_controller_ops zt5550_hpc_ops;
49648 static struct cpci_hp_controller zt5550_hpc;
49649
49650 /* Primary cPCI bus bridge device */
49651@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
49652 return 0;
49653 }
49654
49655+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
49656+ .query_enum = zt5550_hc_query_enum,
49657+};
49658+
49659 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
49660 {
49661 int status;
49662@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
49663 dbg("returned from zt5550_hc_config");
49664
49665 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
49666- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
49667 zt5550_hpc.ops = &zt5550_hpc_ops;
49668 if (!poll) {
49669 zt5550_hpc.irq = hc_dev->irq;
49670 zt5550_hpc.irq_flags = IRQF_SHARED;
49671 zt5550_hpc.dev_id = hc_dev;
49672
49673- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49674- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49675- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49676+ pax_open_kernel();
49677+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49678+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49679+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49680+ pax_open_kernel();
49681 } else {
49682 info("using ENUM# polling mode");
49683 }
49684diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
49685index 1e08ff8c..3cd145f 100644
49686--- a/drivers/pci/hotplug/cpqphp_nvram.c
49687+++ b/drivers/pci/hotplug/cpqphp_nvram.c
49688@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
49689
49690 void compaq_nvram_init (void __iomem *rom_start)
49691 {
49692+#ifndef CONFIG_PAX_KERNEXEC
49693 if (rom_start)
49694 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
49695+#endif
49696
49697 dbg("int15 entry = %p\n", compaq_int15_entry_point);
49698
49699diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
49700index 56d8486..f26113f 100644
49701--- a/drivers/pci/hotplug/pci_hotplug_core.c
49702+++ b/drivers/pci/hotplug/pci_hotplug_core.c
49703@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
49704 return -EINVAL;
49705 }
49706
49707- slot->ops->owner = owner;
49708- slot->ops->mod_name = mod_name;
49709+ pax_open_kernel();
49710+ *(struct module **)&slot->ops->owner = owner;
49711+ *(const char **)&slot->ops->mod_name = mod_name;
49712+ pax_close_kernel();
49713
49714 mutex_lock(&pci_hp_mutex);
49715 /*
49716diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
49717index 07aa722..84514b4 100644
49718--- a/drivers/pci/hotplug/pciehp_core.c
49719+++ b/drivers/pci/hotplug/pciehp_core.c
49720@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
49721 struct slot *slot = ctrl->slot;
49722 struct hotplug_slot *hotplug = NULL;
49723 struct hotplug_slot_info *info = NULL;
49724- struct hotplug_slot_ops *ops = NULL;
49725+ hotplug_slot_ops_no_const *ops = NULL;
49726 char name[SLOT_NAME_SIZE];
49727 int retval = -ENOMEM;
49728
49729diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
49730index fd60806..ab6c565 100644
49731--- a/drivers/pci/msi.c
49732+++ b/drivers/pci/msi.c
49733@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
49734 {
49735 struct attribute **msi_attrs;
49736 struct attribute *msi_attr;
49737- struct device_attribute *msi_dev_attr;
49738- struct attribute_group *msi_irq_group;
49739+ device_attribute_no_const *msi_dev_attr;
49740+ attribute_group_no_const *msi_irq_group;
49741 const struct attribute_group **msi_irq_groups;
49742 struct msi_desc *entry;
49743 int ret = -ENOMEM;
49744@@ -573,7 +573,7 @@ error_attrs:
49745 count = 0;
49746 msi_attr = msi_attrs[count];
49747 while (msi_attr) {
49748- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
49749+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
49750 kfree(msi_attr->name);
49751 kfree(msi_dev_attr);
49752 ++count;
49753diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
49754index aa012fb..63fac5d 100644
49755--- a/drivers/pci/pci-sysfs.c
49756+++ b/drivers/pci/pci-sysfs.c
49757@@ -1139,7 +1139,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
49758 {
49759 /* allocate attribute structure, piggyback attribute name */
49760 int name_len = write_combine ? 13 : 10;
49761- struct bin_attribute *res_attr;
49762+ bin_attribute_no_const *res_attr;
49763 int retval;
49764
49765 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
49766@@ -1316,7 +1316,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
49767 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
49768 {
49769 int retval;
49770- struct bin_attribute *attr;
49771+ bin_attribute_no_const *attr;
49772
49773 /* If the device has VPD, try to expose it in sysfs. */
49774 if (dev->vpd) {
49775@@ -1363,7 +1363,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
49776 {
49777 int retval;
49778 int rom_size = 0;
49779- struct bin_attribute *attr;
49780+ bin_attribute_no_const *attr;
49781
49782 if (!sysfs_initialized)
49783 return -EACCES;
49784diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
49785index d54632a..198c84d 100644
49786--- a/drivers/pci/pci.h
49787+++ b/drivers/pci/pci.h
49788@@ -93,7 +93,7 @@ struct pci_vpd_ops {
49789 struct pci_vpd {
49790 unsigned int len;
49791 const struct pci_vpd_ops *ops;
49792- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
49793+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
49794 };
49795
49796 int pci_vpd_pci22_init(struct pci_dev *dev);
49797diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
49798index e1e7026..d28dd33 100644
49799--- a/drivers/pci/pcie/aspm.c
49800+++ b/drivers/pci/pcie/aspm.c
49801@@ -27,9 +27,9 @@
49802 #define MODULE_PARAM_PREFIX "pcie_aspm."
49803
49804 /* Note: those are not register definitions */
49805-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
49806-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
49807-#define ASPM_STATE_L1 (4) /* L1 state */
49808+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
49809+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
49810+#define ASPM_STATE_L1 (4U) /* L1 state */
49811 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
49812 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
49813
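
The only change to these macros is the U suffix: the mask bits become unsigned int rather than signed int, so expressions such as `aspm_support &= ~ASPM_STATE_L1` stay unsigned end to end. The value is identical either way; the point, most likely, is to avoid the implicit signed-to-unsigned conversions that grsecurity's integer instrumentation treats as suspect. A compilable illustration of the type difference (macro names adapted from the hunk):

#include <stdio.h>

#define ASPM_STATE_L1       (4)     /* int */
#define ASPM_STATE_L1_U     (4U)    /* unsigned int */

int main(void)
{
    unsigned int support = 0xffu;

    /* ~(4) is a negative int, implicitly converted at the &=;
     * ~(4U) keeps the whole expression unsigned */
    support &= ~ASPM_STATE_L1;
    support &= ~ASPM_STATE_L1_U;

    printf("0x%x\n", support);      /* 0xfb either way on two's complement */
    return 0;
}
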
49814diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
49815index 23212f8..65e945b 100644
49816--- a/drivers/pci/probe.c
49817+++ b/drivers/pci/probe.c
49818@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
49819 u16 orig_cmd;
49820 struct pci_bus_region region, inverted_region;
49821
49822- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
49823+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
49824
49825 /* No printks while decoding is disabled! */
49826 if (!dev->mmio_always_on) {
49827diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
49828index 3f155e7..0f4b1f0 100644
49829--- a/drivers/pci/proc.c
49830+++ b/drivers/pci/proc.c
49831@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
49832 static int __init pci_proc_init(void)
49833 {
49834 struct pci_dev *dev = NULL;
49835+
49836+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49837+#ifdef CONFIG_GRKERNSEC_PROC_USER
49838+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
49839+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49840+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
49841+#endif
49842+#else
49843 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
49844+#endif
49845 proc_create("devices", 0, proc_bus_pci_dir,
49846 &proc_bus_pci_dev_operations);
49847 proc_initialized = 1;
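
Under CONFIG_GRKERNSEC_PROC_USER the /proc/bus/pci directory is created 0500 (root only); under the USERGROUP variant, 0550 so a designated group can still browse it. proc_mkdir_mode() is the stock kernel API used for this; a minimal module sketch of the same restriction (module and entry names are made up for illustration, error handling trimmed):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>

static struct proc_dir_entry *demo_dir;

static int __init restricted_proc_init(void)
{
    /* 0500, mirroring the GRKERNSEC_PROC_USER branch above */
    demo_dir = proc_mkdir_mode("demo_restricted", S_IRUSR | S_IXUSR, NULL);
    return demo_dir ? 0 : -ENOMEM;
}

static void __exit restricted_proc_exit(void)
{
    proc_remove(demo_dir);
}

module_init(restricted_proc_init);
module_exit(restricted_proc_exit);
MODULE_LICENSE("GPL");
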
49848diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
49849index b84fdd6..b89d829 100644
49850--- a/drivers/platform/chrome/chromeos_laptop.c
49851+++ b/drivers/platform/chrome/chromeos_laptop.c
49852@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
49853 .callback = chromeos_laptop_dmi_matched, \
49854 .driver_data = (void *)&board_
49855
49856-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
49857+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
49858 {
49859 .ident = "Samsung Series 5 550",
49860 .matches = {
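
A DMI match table is consulted once during boot and never written, so the patch moves it from __initdata to __initconst: the same init-section discard after boot, but placed read-only (the annotation also requires the object itself be const). Simplified section macros to show the distinction — user-space compilable, with the real macros living in <linux/init.h>:

#include <stdio.h>

#define __initdata  __attribute__((__section__(".init.data")))
#define __initconst __attribute__((__section__(".init.rodata")))

struct dmi_system_id {
    const char *ident;
};

/* read-only from build time on, and discardable after init */
static const struct dmi_system_id demo_dmi_table[] __initconst = {
    { .ident = "Samsung Series 5 550" },
    { }     /* sentinel */
};

int main(void)
{
    printf("%s\n", demo_dmi_table[0].ident);
    return 0;
}
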
49861diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
49862index 1e1e594..8fe59c5 100644
49863--- a/drivers/platform/x86/alienware-wmi.c
49864+++ b/drivers/platform/x86/alienware-wmi.c
49865@@ -150,7 +150,7 @@ struct wmax_led_args {
49866 } __packed;
49867
49868 static struct platform_device *platform_device;
49869-static struct device_attribute *zone_dev_attrs;
49870+static device_attribute_no_const *zone_dev_attrs;
49871 static struct attribute **zone_attrs;
49872 static struct platform_zone *zone_data;
49873
49874@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
49875 }
49876 };
49877
49878-static struct attribute_group zone_attribute_group = {
49879+static attribute_group_no_const zone_attribute_group = {
49880 .name = "rgb_zones",
49881 };
49882
49883diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
49884index 7543a56..367ca8ed 100644
49885--- a/drivers/platform/x86/asus-wmi.c
49886+++ b/drivers/platform/x86/asus-wmi.c
49887@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
49888 int err;
49889 u32 retval = -1;
49890
49891+#ifdef CONFIG_GRKERNSEC_KMEM
49892+ return -EPERM;
49893+#endif
49894+
49895 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
49896
49897 if (err < 0)
49898@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
49899 int err;
49900 u32 retval = -1;
49901
49902+#ifdef CONFIG_GRKERNSEC_KMEM
49903+ return -EPERM;
49904+#endif
49905+
49906 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
49907 &retval);
49908
49909@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
49910 union acpi_object *obj;
49911 acpi_status status;
49912
49913+#ifdef CONFIG_GRKERNSEC_KMEM
49914+ return -EPERM;
49915+#endif
49916+
49917 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
49918 1, asus->debug.method_id,
49919 &input, &output);
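
All three asus-wmi debugfs handlers let root drive arbitrary WMI methods on the firmware, so under CONFIG_GRKERNSEC_KMEM they are reduced to an unconditional -EPERM before touching anything. The pattern generalizes to any debug interface that exposes raw hardware state; a module-sized sketch (DEFINE_SHOW_ATTRIBUTE postdates this 3.19 tree, and every name here is invented):

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *demo_dir;

static int kmem_demo_show(struct seq_file *m, void *data)
{
#ifdef CONFIG_GRKERNSEC_KMEM
    return -EPERM;                        /* hard-disable the interface */
#else
    seq_printf(m, "0x%08x\n", 0x1234u);   /* the "real" debug output */
    return 0;
#endif
}
DEFINE_SHOW_ATTRIBUTE(kmem_demo);

static int __init kmem_demo_init(void)
{
    demo_dir = debugfs_create_dir("kmem_demo", NULL);
    debugfs_create_file("state", 0400, demo_dir, NULL, &kmem_demo_fops);
    return 0;
}

static void __exit kmem_demo_exit(void)
{
    debugfs_remove_recursive(demo_dir);
}

module_init(kmem_demo_init);
module_exit(kmem_demo_exit);
MODULE_LICENSE("GPL");
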
49920diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
49921index 0859877..1cf7d08 100644
49922--- a/drivers/platform/x86/msi-laptop.c
49923+++ b/drivers/platform/x86/msi-laptop.c
49924@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
49925
49926 if (!quirks->ec_read_only) {
49927 /* allow userland write sysfs file */
49928- dev_attr_bluetooth.store = store_bluetooth;
49929- dev_attr_wlan.store = store_wlan;
49930- dev_attr_threeg.store = store_threeg;
49931- dev_attr_bluetooth.attr.mode |= S_IWUSR;
49932- dev_attr_wlan.attr.mode |= S_IWUSR;
49933- dev_attr_threeg.attr.mode |= S_IWUSR;
49934+ pax_open_kernel();
49935+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
49936+ *(void **)&dev_attr_wlan.store = store_wlan;
49937+ *(void **)&dev_attr_threeg.store = store_threeg;
49938+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
49939+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
49940+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
49941+ pax_close_kernel();
49942 }
49943
49944 /* disable hardware control by fn key */
49945diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
49946index 6d2bac0..ec2b029 100644
49947--- a/drivers/platform/x86/msi-wmi.c
49948+++ b/drivers/platform/x86/msi-wmi.c
49949@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
49950 static void msi_wmi_notify(u32 value, void *context)
49951 {
49952 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
49953- static struct key_entry *key;
49954+ struct key_entry *key;
49955 union acpi_object *obj;
49956 acpi_status status;
49957
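
The msi-wmi hunk is a plain bug fix rather than hardening: `key` was declared `static` inside the notify callback, giving every invocation one shared slot, so concurrent or nested notifications could observe each other's pointer. Dropping `static` makes it an ordinary per-call local. The hazard in miniature (function and strings invented):

#include <stdio.h>

static const char *classify(int code)
{
    static const char *key;  /* one slot shared by every call (the bug) */

    key = (code == 1) ? "brightness" : "volume";
    /* anything that runs between the store and the use -- another
     * notification, for instance -- may have overwritten key */
    return key;
}

int main(void)
{
    printf("%s\n", classify(1));
    return 0;
}
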
49958diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
49959index 6dd1c0e..5d602c7 100644
49960--- a/drivers/platform/x86/sony-laptop.c
49961+++ b/drivers/platform/x86/sony-laptop.c
49962@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
49963 }
49964
49965 /* High speed charging function */
49966-static struct device_attribute *hsc_handle;
49967+static device_attribute_no_const *hsc_handle;
49968
49969 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
49970 struct device_attribute *attr,
49971@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
49972 }
49973
49974 /* low battery function */
49975-static struct device_attribute *lowbatt_handle;
49976+static device_attribute_no_const *lowbatt_handle;
49977
49978 static ssize_t sony_nc_lowbatt_store(struct device *dev,
49979 struct device_attribute *attr,
49980@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
49981 }
49982
49983 /* fan speed function */
49984-static struct device_attribute *fan_handle, *hsf_handle;
49985+static device_attribute_no_const *fan_handle, *hsf_handle;
49986
49987 static ssize_t sony_nc_hsfan_store(struct device *dev,
49988 struct device_attribute *attr,
49989@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
49990 }
49991
49992 /* USB charge function */
49993-static struct device_attribute *uc_handle;
49994+static device_attribute_no_const *uc_handle;
49995
49996 static ssize_t sony_nc_usb_charge_store(struct device *dev,
49997 struct device_attribute *attr,
49998@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
49999 }
50000
50001 /* Panel ID function */
50002-static struct device_attribute *panel_handle;
50003+static device_attribute_no_const *panel_handle;
50004
50005 static ssize_t sony_nc_panelid_show(struct device *dev,
50006 struct device_attribute *attr, char *buffer)
50007@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
50008 }
50009
50010 /* smart connect function */
50011-static struct device_attribute *sc_handle;
50012+static device_attribute_no_const *sc_handle;
50013
50014 static ssize_t sony_nc_smart_conn_store(struct device *dev,
50015 struct device_attribute *attr,
50016diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
50017index c3d11fa..f83cded 100644
50018--- a/drivers/platform/x86/thinkpad_acpi.c
50019+++ b/drivers/platform/x86/thinkpad_acpi.c
50020@@ -2092,7 +2092,7 @@ static int hotkey_mask_get(void)
50021 return 0;
50022 }
50023
50024-void static hotkey_mask_warn_incomplete_mask(void)
50025+static void hotkey_mask_warn_incomplete_mask(void)
50026 {
50027 /* log only what the user can fix... */
50028 const u32 wantedmask = hotkey_driver_mask &
50029@@ -2436,10 +2436,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
50030 && !tp_features.bright_unkfw)
50031 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
50032 }
50033+}
50034
50035 #undef TPACPI_COMPARE_KEY
50036 #undef TPACPI_MAY_SEND_KEY
50037-}
50038
50039 /*
50040 * Polling driver
50041diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
50042index 438d4c7..ca8a2fb 100644
50043--- a/drivers/pnp/pnpbios/bioscalls.c
50044+++ b/drivers/pnp/pnpbios/bioscalls.c
50045@@ -59,7 +59,7 @@ do { \
50046 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
50047 } while(0)
50048
50049-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
50050+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
50051 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
50052
50053 /*
50054@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50055
50056 cpu = get_cpu();
50057 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
50058+
50059+ pax_open_kernel();
50060 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
50061+ pax_close_kernel();
50062
50063 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
50064 spin_lock_irqsave(&pnp_bios_lock, flags);
50065@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50066 :"memory");
50067 spin_unlock_irqrestore(&pnp_bios_lock, flags);
50068
50069+ pax_open_kernel();
50070 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
50071+ pax_close_kernel();
50072+
50073 put_cpu();
50074
50075 /* If we get here and this is set then the PnP BIOS faulted on us. */
50076@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
50077 return status;
50078 }
50079
50080-void pnpbios_calls_init(union pnp_bios_install_struct *header)
50081+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
50082 {
50083 int i;
50084
50085@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50086 pnp_bios_callpoint.offset = header->fields.pm16offset;
50087 pnp_bios_callpoint.segment = PNP_CS16;
50088
50089+ pax_open_kernel();
50090+
50091 for_each_possible_cpu(i) {
50092 struct desc_struct *gdt = get_cpu_gdt_table(i);
50093 if (!gdt)
50094@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50095 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
50096 (unsigned long)__va(header->fields.pm16dseg));
50097 }
50098+
50099+ pax_close_kernel();
50100 }
50101diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
50102index 0c52e2a..3421ab7 100644
50103--- a/drivers/power/pda_power.c
50104+++ b/drivers/power/pda_power.c
50105@@ -37,7 +37,11 @@ static int polling;
50106
50107 #if IS_ENABLED(CONFIG_USB_PHY)
50108 static struct usb_phy *transceiver;
50109-static struct notifier_block otg_nb;
50110+static int otg_handle_notification(struct notifier_block *nb,
50111+ unsigned long event, void *unused);
50112+static struct notifier_block otg_nb = {
50113+ .notifier_call = otg_handle_notification
50114+};
50115 #endif
50116
50117 static struct regulator *ac_draw;
50118@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
50119
50120 #if IS_ENABLED(CONFIG_USB_PHY)
50121 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
50122- otg_nb.notifier_call = otg_handle_notification;
50123 ret = usb_register_notifier(transceiver, &otg_nb);
50124 if (ret) {
50125 dev_err(dev, "failure to register otg notifier\n");
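
Rather than filling in otg_nb.notifier_call at probe time, the patch forward-declares the handler and initializes the notifier_block statically, so the object never needs a runtime write and can eventually be constified. The shape of the refactor, reduced to standalone C (struct layout abbreviated from the kernel's):

#include <stdio.h>

struct notifier_block;
typedef int (*notifier_fn_t)(struct notifier_block *nb,
                             unsigned long action, void *data);

struct notifier_block {
    notifier_fn_t notifier_call;
    struct notifier_block *next;
    int priority;
};

/* forward declaration makes compile-time initialization possible */
static int otg_handle_notification(struct notifier_block *nb,
                                   unsigned long event, void *unused);

static struct notifier_block otg_nb = {
    .notifier_call = otg_handle_notification,
};

static int otg_handle_notification(struct notifier_block *nb,
                                   unsigned long event, void *unused)
{
    (void)nb; (void)unused;
    printf("event %lu\n", event);
    return 0;   /* NOTIFY_DONE */
}

int main(void)
{
    return otg_nb.notifier_call(&otg_nb, 7, NULL);
}
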
50126diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
50127index cc439fd..8fa30df 100644
50128--- a/drivers/power/power_supply.h
50129+++ b/drivers/power/power_supply.h
50130@@ -16,12 +16,12 @@ struct power_supply;
50131
50132 #ifdef CONFIG_SYSFS
50133
50134-extern void power_supply_init_attrs(struct device_type *dev_type);
50135+extern void power_supply_init_attrs(void);
50136 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
50137
50138 #else
50139
50140-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
50141+static inline void power_supply_init_attrs(void) {}
50142 #define power_supply_uevent NULL
50143
50144 #endif /* CONFIG_SYSFS */
50145diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
50146index 694e8cd..9f03483 100644
50147--- a/drivers/power/power_supply_core.c
50148+++ b/drivers/power/power_supply_core.c
50149@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
50150 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
50151 EXPORT_SYMBOL_GPL(power_supply_notifier);
50152
50153-static struct device_type power_supply_dev_type;
50154+extern const struct attribute_group *power_supply_attr_groups[];
50155+static struct device_type power_supply_dev_type = {
50156+ .groups = power_supply_attr_groups,
50157+};
50158
50159 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
50160 struct power_supply *supply)
50161@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
50162 return PTR_ERR(power_supply_class);
50163
50164 power_supply_class->dev_uevent = power_supply_uevent;
50165- power_supply_init_attrs(&power_supply_dev_type);
50166+ power_supply_init_attrs();
50167
50168 return 0;
50169 }
50170diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
50171index 62653f5..d0bb485 100644
50172--- a/drivers/power/power_supply_sysfs.c
50173+++ b/drivers/power/power_supply_sysfs.c
50174@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
50175 .is_visible = power_supply_attr_is_visible,
50176 };
50177
50178-static const struct attribute_group *power_supply_attr_groups[] = {
50179+const struct attribute_group *power_supply_attr_groups[] = {
50180 &power_supply_attr_group,
50181 NULL,
50182 };
50183
50184-void power_supply_init_attrs(struct device_type *dev_type)
50185+void power_supply_init_attrs(void)
50186 {
50187 int i;
50188
50189- dev_type->groups = power_supply_attr_groups;
50190-
50191 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
50192 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
50193 }
50194diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
50195index 84419af..268ede8 100644
50196--- a/drivers/powercap/powercap_sys.c
50197+++ b/drivers/powercap/powercap_sys.c
50198@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
50199 struct device_attribute name_attr;
50200 };
50201
50202+static ssize_t show_constraint_name(struct device *dev,
50203+ struct device_attribute *dev_attr,
50204+ char *buf);
50205+
50206 static struct powercap_constraint_attr
50207- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
50208+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
50209+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
50210+ .power_limit_attr = {
50211+ .attr = {
50212+ .name = NULL,
50213+ .mode = S_IWUSR | S_IRUGO
50214+ },
50215+ .show = show_constraint_power_limit_uw,
50216+ .store = store_constraint_power_limit_uw
50217+ },
50218+
50219+ .time_window_attr = {
50220+ .attr = {
50221+ .name = NULL,
50222+ .mode = S_IWUSR | S_IRUGO
50223+ },
50224+ .show = show_constraint_time_window_us,
50225+ .store = store_constraint_time_window_us
50226+ },
50227+
50228+ .max_power_attr = {
50229+ .attr = {
50230+ .name = NULL,
50231+ .mode = S_IRUGO
50232+ },
50233+ .show = show_constraint_max_power_uw,
50234+ .store = NULL
50235+ },
50236+
50237+ .min_power_attr = {
50238+ .attr = {
50239+ .name = NULL,
50240+ .mode = S_IRUGO
50241+ },
50242+ .show = show_constraint_min_power_uw,
50243+ .store = NULL
50244+ },
50245+
50246+ .max_time_window_attr = {
50247+ .attr = {
50248+ .name = NULL,
50249+ .mode = S_IRUGO
50250+ },
50251+ .show = show_constraint_max_time_window_us,
50252+ .store = NULL
50253+ },
50254+
50255+ .min_time_window_attr = {
50256+ .attr = {
50257+ .name = NULL,
50258+ .mode = S_IRUGO
50259+ },
50260+ .show = show_constraint_min_time_window_us,
50261+ .store = NULL
50262+ },
50263+
50264+ .name_attr = {
50265+ .attr = {
50266+ .name = NULL,
50267+ .mode = S_IRUGO
50268+ },
50269+ .show = show_constraint_name,
50270+ .store = NULL
50271+ }
50272+ }
50273+};
50274
50275 /* A list of powercap control_types */
50276 static LIST_HEAD(powercap_cntrl_list);
50277@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
50278 }
50279
50280 static int create_constraint_attribute(int id, const char *name,
50281- int mode,
50282- struct device_attribute *dev_attr,
50283- ssize_t (*show)(struct device *,
50284- struct device_attribute *, char *),
50285- ssize_t (*store)(struct device *,
50286- struct device_attribute *,
50287- const char *, size_t)
50288- )
50289+ struct device_attribute *dev_attr)
50290 {
50291+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
50292
50293- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
50294- id, name);
50295- if (!dev_attr->attr.name)
50296+ if (!name)
50297 return -ENOMEM;
50298- dev_attr->attr.mode = mode;
50299- dev_attr->show = show;
50300- dev_attr->store = store;
50301+
50302+ pax_open_kernel();
50303+ *(const char **)&dev_attr->attr.name = name;
50304+ pax_close_kernel();
50305
50306 return 0;
50307 }
50308@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
50309
50310 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
50311 ret = create_constraint_attribute(i, "power_limit_uw",
50312- S_IWUSR | S_IRUGO,
50313- &constraint_attrs[i].power_limit_attr,
50314- show_constraint_power_limit_uw,
50315- store_constraint_power_limit_uw);
50316+ &constraint_attrs[i].power_limit_attr);
50317 if (ret)
50318 goto err_alloc;
50319 ret = create_constraint_attribute(i, "time_window_us",
50320- S_IWUSR | S_IRUGO,
50321- &constraint_attrs[i].time_window_attr,
50322- show_constraint_time_window_us,
50323- store_constraint_time_window_us);
50324+ &constraint_attrs[i].time_window_attr);
50325 if (ret)
50326 goto err_alloc;
50327- ret = create_constraint_attribute(i, "name", S_IRUGO,
50328- &constraint_attrs[i].name_attr,
50329- show_constraint_name,
50330- NULL);
50331+ ret = create_constraint_attribute(i, "name",
50332+ &constraint_attrs[i].name_attr);
50333 if (ret)
50334 goto err_alloc;
50335- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
50336- &constraint_attrs[i].max_power_attr,
50337- show_constraint_max_power_uw,
50338- NULL);
50339+ ret = create_constraint_attribute(i, "max_power_uw",
50340+ &constraint_attrs[i].max_power_attr);
50341 if (ret)
50342 goto err_alloc;
50343- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
50344- &constraint_attrs[i].min_power_attr,
50345- show_constraint_min_power_uw,
50346- NULL);
50347+ ret = create_constraint_attribute(i, "min_power_uw",
50348+ &constraint_attrs[i].min_power_attr);
50349 if (ret)
50350 goto err_alloc;
50351 ret = create_constraint_attribute(i, "max_time_window_us",
50352- S_IRUGO,
50353- &constraint_attrs[i].max_time_window_attr,
50354- show_constraint_max_time_window_us,
50355- NULL);
50356+ &constraint_attrs[i].max_time_window_attr);
50357 if (ret)
50358 goto err_alloc;
50359 ret = create_constraint_attribute(i, "min_time_window_us",
50360- S_IRUGO,
50361- &constraint_attrs[i].min_time_window_attr,
50362- show_constraint_min_time_window_us,
50363- NULL);
50364+ &constraint_attrs[i].min_time_window_attr);
50365 if (ret)
50366 goto err_alloc;
50367
50368@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
50369 power_zone->zone_dev_attrs[count++] =
50370 &dev_attr_max_energy_range_uj.attr;
50371 if (power_zone->ops->get_energy_uj) {
50372+ pax_open_kernel();
50373 if (power_zone->ops->reset_energy_uj)
50374- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50375+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50376 else
50377- dev_attr_energy_uj.attr.mode = S_IRUGO;
50378+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
50379+ pax_close_kernel();
50380 power_zone->zone_dev_attrs[count++] =
50381 &dev_attr_energy_uj.attr;
50382 }
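
The powercap rewrite pre-populates every slot of constraint_attrs[] at compile time with GCC's `[first ... last] =` range designator, leaving only the kasprintf()'d name to be patched in through the pax window. The extension in isolation (struct dev_attr abbreviates the kernel's device_attribute):

#include <stdio.h>

#define MAX_CONSTRAINTS_PER_ZONE 8
#define S_IRUGO 0444

struct dev_attr {
    const char *name;   /* filled in at runtime */
    unsigned int mode;
};

static struct dev_attr constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
    /* GCC extension: one initializer applied to a whole index range */
    [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
        .name = NULL,
        .mode = S_IRUGO,
    },
};

int main(void)
{
    printf("%o\n", constraint_attrs[5].mode);   /* 444 */
    return 0;
}
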
50383diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
50384index 9c5d414..c7900ce 100644
50385--- a/drivers/ptp/ptp_private.h
50386+++ b/drivers/ptp/ptp_private.h
50387@@ -51,7 +51,7 @@ struct ptp_clock {
50388 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
50389 wait_queue_head_t tsev_wq;
50390 int defunct; /* tells readers to go away when clock is being removed */
50391- struct device_attribute *pin_dev_attr;
50392+ device_attribute_no_const *pin_dev_attr;
50393 struct attribute **pin_attr;
50394 struct attribute_group pin_attr_group;
50395 };
50396diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
50397index 302e626..12579af 100644
50398--- a/drivers/ptp/ptp_sysfs.c
50399+++ b/drivers/ptp/ptp_sysfs.c
50400@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
50401 goto no_pin_attr;
50402
50403 for (i = 0; i < n_pins; i++) {
50404- struct device_attribute *da = &ptp->pin_dev_attr[i];
50405+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
50406 sysfs_attr_init(&da->attr);
50407 da->attr.name = info->pin_config[i].name;
50408 da->attr.mode = 0644;
50409diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
50410index 9c48fb3..5b494fa 100644
50411--- a/drivers/regulator/core.c
50412+++ b/drivers/regulator/core.c
50413@@ -3587,7 +3587,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50414 {
50415 const struct regulation_constraints *constraints = NULL;
50416 const struct regulator_init_data *init_data;
50417- static atomic_t regulator_no = ATOMIC_INIT(0);
50418+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
50419 struct regulator_dev *rdev;
50420 struct device *dev;
50421 int ret, i;
50422@@ -3661,7 +3661,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50423 rdev->dev.class = &regulator_class;
50424 rdev->dev.parent = dev;
50425 dev_set_name(&rdev->dev, "regulator.%d",
50426- atomic_inc_return(&regulator_no) - 1);
50427+ atomic_inc_return_unchecked(&regulator_no) - 1);
50428 ret = device_register(&rdev->dev);
50429 if (ret != 0) {
50430 put_device(&rdev->dev);
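
regulator_no only mints device names; it is never a reference count, so wrapping is harmless and the patch downgrades it to atomic_unchecked_t, the variant that PAX_REFCOUNT leaves uninstrumented. The same split — checked for object lifetimes, unchecked for IDs and statistics — recurs through the fcoe, hosts, and lpfc hunks below. In portable C the counter is nothing more than:

#include <stdatomic.h>
#include <stdio.h>

/* a monotonic ID source: overflow would be odd but not exploitable,
 * unlike an over/underflowed reference count */
static atomic_int regulator_no;

static int next_regulator_id(void)
{
    /* kernel spelling: atomic_inc_return_unchecked(&regulator_no) - 1 */
    return atomic_fetch_add(&regulator_no, 1);
}

int main(void)
{
    printf("regulator.%d\n", next_regulator_id());
    printf("regulator.%d\n", next_regulator_id());
    return 0;
}
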
50431diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
50432index 7eee2ca..4024513 100644
50433--- a/drivers/regulator/max8660.c
50434+++ b/drivers/regulator/max8660.c
50435@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
50436 max8660->shadow_regs[MAX8660_OVER1] = 5;
50437 } else {
50438 /* Otherwise devices can be toggled via software */
50439- max8660_dcdc_ops.enable = max8660_dcdc_enable;
50440- max8660_dcdc_ops.disable = max8660_dcdc_disable;
50441+ pax_open_kernel();
50442+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
50443+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
50444+ pax_close_kernel();
50445 }
50446
50447 /*
50448diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
50449index c3d55c2..0dddfe6 100644
50450--- a/drivers/regulator/max8973-regulator.c
50451+++ b/drivers/regulator/max8973-regulator.c
50452@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
50453 if (!pdata || !pdata->enable_ext_control) {
50454 max->desc.enable_reg = MAX8973_VOUT;
50455 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
50456- max->ops.enable = regulator_enable_regmap;
50457- max->ops.disable = regulator_disable_regmap;
50458- max->ops.is_enabled = regulator_is_enabled_regmap;
50459+ pax_open_kernel();
50460+ *(void **)&max->ops.enable = regulator_enable_regmap;
50461+ *(void **)&max->ops.disable = regulator_disable_regmap;
50462+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
50463+ pax_close_kernel();
50464 }
50465
50466 if (pdata) {
50467diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
50468index 0d17c92..a29f627 100644
50469--- a/drivers/regulator/mc13892-regulator.c
50470+++ b/drivers/regulator/mc13892-regulator.c
50471@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
50472 mc13xxx_unlock(mc13892);
50473
50474 /* update mc13892_vcam ops */
50475- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50476+ pax_open_kernel();
50477+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50478 sizeof(struct regulator_ops));
50479- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50480- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50481+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50482+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50483+ pax_close_kernel();
50484 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
50485
50486 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
50487diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
50488index 5b2e761..c8c8a4a 100644
50489--- a/drivers/rtc/rtc-cmos.c
50490+++ b/drivers/rtc/rtc-cmos.c
50491@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
50492 hpet_rtc_timer_init();
50493
50494 /* export at least the first block of NVRAM */
50495- nvram.size = address_space - NVRAM_OFFSET;
50496+ pax_open_kernel();
50497+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
50498+ pax_close_kernel();
50499 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
50500 if (retval < 0) {
50501 dev_dbg(dev, "can't create nvram file? %d\n", retval);
50502diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
50503index d049393..bb20be0 100644
50504--- a/drivers/rtc/rtc-dev.c
50505+++ b/drivers/rtc/rtc-dev.c
50506@@ -16,6 +16,7 @@
50507 #include <linux/module.h>
50508 #include <linux/rtc.h>
50509 #include <linux/sched.h>
50510+#include <linux/grsecurity.h>
50511 #include "rtc-core.h"
50512
50513 static dev_t rtc_devt;
50514@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
50515 if (copy_from_user(&tm, uarg, sizeof(tm)))
50516 return -EFAULT;
50517
50518+ gr_log_timechange();
50519+
50520 return rtc_set_time(rtc, &tm);
50521
50522 case RTC_PIE_ON:
50523diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
50524index 4ffabb3..1f87fca 100644
50525--- a/drivers/rtc/rtc-ds1307.c
50526+++ b/drivers/rtc/rtc-ds1307.c
50527@@ -107,7 +107,7 @@ struct ds1307 {
50528 u8 offset; /* register's offset */
50529 u8 regs[11];
50530 u16 nvram_offset;
50531- struct bin_attribute *nvram;
50532+ bin_attribute_no_const *nvram;
50533 enum ds_type type;
50534 unsigned long flags;
50535 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
50536diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
50537index 90abb5b..e0bf6dd 100644
50538--- a/drivers/rtc/rtc-m48t59.c
50539+++ b/drivers/rtc/rtc-m48t59.c
50540@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
50541 if (IS_ERR(m48t59->rtc))
50542 return PTR_ERR(m48t59->rtc);
50543
50544- m48t59_nvram_attr.size = pdata->offset;
50545+ pax_open_kernel();
50546+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
50547+ pax_close_kernel();
50548
50549 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
50550 if (ret)
50551diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
50552index e693af6..2e525b6 100644
50553--- a/drivers/scsi/bfa/bfa_fcpim.h
50554+++ b/drivers/scsi/bfa/bfa_fcpim.h
50555@@ -36,7 +36,7 @@ struct bfa_iotag_s {
50556
50557 struct bfa_itn_s {
50558 bfa_isr_func_t isr;
50559-};
50560+} __no_const;
50561
50562 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
50563 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
50564diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
50565index 0f19455..ef7adb5 100644
50566--- a/drivers/scsi/bfa/bfa_fcs.c
50567+++ b/drivers/scsi/bfa/bfa_fcs.c
50568@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
50569 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
50570
50571 static struct bfa_fcs_mod_s fcs_modules[] = {
50572- { bfa_fcs_port_attach, NULL, NULL },
50573- { bfa_fcs_uf_attach, NULL, NULL },
50574- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
50575- bfa_fcs_fabric_modexit },
50576+ {
50577+ .attach = bfa_fcs_port_attach,
50578+ .modinit = NULL,
50579+ .modexit = NULL
50580+ },
50581+ {
50582+ .attach = bfa_fcs_uf_attach,
50583+ .modinit = NULL,
50584+ .modexit = NULL
50585+ },
50586+ {
50587+ .attach = bfa_fcs_fabric_attach,
50588+ .modinit = bfa_fcs_fabric_modinit,
50589+ .modexit = bfa_fcs_fabric_modexit
50590+ },
50591 };
50592
50593 /*
50594diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
50595index ff75ef8..2dfe00a 100644
50596--- a/drivers/scsi/bfa/bfa_fcs_lport.c
50597+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
50598@@ -89,15 +89,26 @@ static struct {
50599 void (*offline) (struct bfa_fcs_lport_s *port);
50600 } __port_action[] = {
50601 {
50602- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
50603- bfa_fcs_lport_unknown_offline}, {
50604- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
50605- bfa_fcs_lport_fab_offline}, {
50606- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
50607- bfa_fcs_lport_n2n_offline}, {
50608- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
50609- bfa_fcs_lport_loop_offline},
50610- };
50611+ .init = bfa_fcs_lport_unknown_init,
50612+ .online = bfa_fcs_lport_unknown_online,
50613+ .offline = bfa_fcs_lport_unknown_offline
50614+ },
50615+ {
50616+ .init = bfa_fcs_lport_fab_init,
50617+ .online = bfa_fcs_lport_fab_online,
50618+ .offline = bfa_fcs_lport_fab_offline
50619+ },
50620+ {
50621+ .init = bfa_fcs_lport_n2n_init,
50622+ .online = bfa_fcs_lport_n2n_online,
50623+ .offline = bfa_fcs_lport_n2n_offline
50624+ },
50625+ {
50626+ .init = bfa_fcs_lport_loop_init,
50627+ .online = bfa_fcs_lport_loop_online,
50628+ .offline = bfa_fcs_lport_loop_offline
50629+ },
50630+};
50631
50632 /*
50633 * fcs_port_sm FCS logical port state machine
50634diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
50635index a38aafa0..fe8f03b 100644
50636--- a/drivers/scsi/bfa/bfa_ioc.h
50637+++ b/drivers/scsi/bfa/bfa_ioc.h
50638@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
50639 bfa_ioc_disable_cbfn_t disable_cbfn;
50640 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
50641 bfa_ioc_reset_cbfn_t reset_cbfn;
50642-};
50643+} __no_const;
50644
50645 /*
50646 * IOC event notification mechanism.
50647@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
50648 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
50649 enum bfi_ioc_state fwstate);
50650 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
50651-};
50652+} __no_const;
50653
50654 /*
50655 * Queue element to wait for room in request queue. FIFO order is
50656diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
50657index a14c784..6de6790 100644
50658--- a/drivers/scsi/bfa/bfa_modules.h
50659+++ b/drivers/scsi/bfa/bfa_modules.h
50660@@ -78,12 +78,12 @@ enum {
50661 \
50662 extern struct bfa_module_s hal_mod_ ## __mod; \
50663 struct bfa_module_s hal_mod_ ## __mod = { \
50664- bfa_ ## __mod ## _meminfo, \
50665- bfa_ ## __mod ## _attach, \
50666- bfa_ ## __mod ## _detach, \
50667- bfa_ ## __mod ## _start, \
50668- bfa_ ## __mod ## _stop, \
50669- bfa_ ## __mod ## _iocdisable, \
50670+ .meminfo = bfa_ ## __mod ## _meminfo, \
50671+ .attach = bfa_ ## __mod ## _attach, \
50672+ .detach = bfa_ ## __mod ## _detach, \
50673+ .start = bfa_ ## __mod ## _start, \
50674+ .stop = bfa_ ## __mod ## _stop, \
50675+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
50676 }
50677
50678 #define BFA_CACHELINE_SZ (256)
50679diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
50680index 045c4e1..13de803 100644
50681--- a/drivers/scsi/fcoe/fcoe_sysfs.c
50682+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
50683@@ -33,8 +33,8 @@
50684 */
50685 #include "libfcoe.h"
50686
50687-static atomic_t ctlr_num;
50688-static atomic_t fcf_num;
50689+static atomic_unchecked_t ctlr_num;
50690+static atomic_unchecked_t fcf_num;
50691
50692 /*
50693 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
50694@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
50695 if (!ctlr)
50696 goto out;
50697
50698- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
50699+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
50700 ctlr->f = f;
50701 ctlr->mode = FIP_CONN_TYPE_FABRIC;
50702 INIT_LIST_HEAD(&ctlr->fcfs);
50703@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
50704 fcf->dev.parent = &ctlr->dev;
50705 fcf->dev.bus = &fcoe_bus_type;
50706 fcf->dev.type = &fcoe_fcf_device_type;
50707- fcf->id = atomic_inc_return(&fcf_num) - 1;
50708+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
50709 fcf->state = FCOE_FCF_STATE_UNKNOWN;
50710
50711 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
50712@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
50713 {
50714 int error;
50715
50716- atomic_set(&ctlr_num, 0);
50717- atomic_set(&fcf_num, 0);
50718+ atomic_set_unchecked(&ctlr_num, 0);
50719+ atomic_set_unchecked(&fcf_num, 0);
50720
50721 error = bus_register(&fcoe_bus_type);
50722 if (error)
50723diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
50724index 8bb173e..20236b4 100644
50725--- a/drivers/scsi/hosts.c
50726+++ b/drivers/scsi/hosts.c
50727@@ -42,7 +42,7 @@
50728 #include "scsi_logging.h"
50729
50730
50731-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50732+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50733
50734
50735 static void scsi_host_cls_release(struct device *dev)
50736@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
50737 * subtract one because we increment first then return, but we need to
50738 * know what the next host number was before increment
50739 */
50740- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
50741+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
50742 shost->dma_channel = 0xff;
50743
50744 /* These three are default values which can be overridden */
50745diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
50746index 6bb4611..0203251 100644
50747--- a/drivers/scsi/hpsa.c
50748+++ b/drivers/scsi/hpsa.c
50749@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
50750 struct reply_queue_buffer *rq = &h->reply_queue[q];
50751
50752 if (h->transMethod & CFGTBL_Trans_io_accel1)
50753- return h->access.command_completed(h, q);
50754+ return h->access->command_completed(h, q);
50755
50756 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
50757- return h->access.command_completed(h, q);
50758+ return h->access->command_completed(h, q);
50759
50760 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
50761 a = rq->head[rq->current_entry];
50762@@ -5360,7 +5360,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50763 while (!list_empty(&h->reqQ)) {
50764 c = list_entry(h->reqQ.next, struct CommandList, list);
50765 /* can't do anything if fifo is full */
50766- if ((h->access.fifo_full(h))) {
50767+ if ((h->access->fifo_full(h))) {
50768 h->fifo_recently_full = 1;
50769 dev_warn(&h->pdev->dev, "fifo full\n");
50770 break;
50771@@ -5376,7 +5376,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50772 atomic_inc(&h->commands_outstanding);
50773 spin_unlock_irqrestore(&h->lock, *flags);
50774 /* Tell the controller execute command */
50775- h->access.submit_command(h, c);
50776+ h->access->submit_command(h, c);
50777 spin_lock_irqsave(&h->lock, *flags);
50778 }
50779 }
50780@@ -5392,17 +5392,17 @@ static void lock_and_start_io(struct ctlr_info *h)
50781
50782 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
50783 {
50784- return h->access.command_completed(h, q);
50785+ return h->access->command_completed(h, q);
50786 }
50787
50788 static inline bool interrupt_pending(struct ctlr_info *h)
50789 {
50790- return h->access.intr_pending(h);
50791+ return h->access->intr_pending(h);
50792 }
50793
50794 static inline long interrupt_not_for_us(struct ctlr_info *h)
50795 {
50796- return (h->access.intr_pending(h) == 0) ||
50797+ return (h->access->intr_pending(h) == 0) ||
50798 (h->interrupts_enabled == 0);
50799 }
50800
50801@@ -6343,7 +6343,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
50802 if (prod_index < 0)
50803 return -ENODEV;
50804 h->product_name = products[prod_index].product_name;
50805- h->access = *(products[prod_index].access);
50806+ h->access = products[prod_index].access;
50807
50808 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
50809 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
50810@@ -6690,7 +6690,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
50811 unsigned long flags;
50812 u32 lockup_detected;
50813
50814- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50815+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50816 spin_lock_irqsave(&h->lock, flags);
50817 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
50818 if (!lockup_detected) {
50819@@ -6937,7 +6937,7 @@ reinit_after_soft_reset:
50820 }
50821
50822 /* make sure the board interrupts are off */
50823- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50824+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50825
50826 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
50827 goto clean2;
50828@@ -6972,7 +6972,7 @@ reinit_after_soft_reset:
50829 * fake ones to scoop up any residual completions.
50830 */
50831 spin_lock_irqsave(&h->lock, flags);
50832- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50833+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50834 spin_unlock_irqrestore(&h->lock, flags);
50835 free_irqs(h);
50836 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
50837@@ -6991,9 +6991,9 @@ reinit_after_soft_reset:
50838 dev_info(&h->pdev->dev, "Board READY.\n");
50839 dev_info(&h->pdev->dev,
50840 "Waiting for stale completions to drain.\n");
50841- h->access.set_intr_mask(h, HPSA_INTR_ON);
50842+ h->access->set_intr_mask(h, HPSA_INTR_ON);
50843 msleep(10000);
50844- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50845+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50846
50847 rc = controller_reset_failed(h->cfgtable);
50848 if (rc)
50849@@ -7019,7 +7019,7 @@ reinit_after_soft_reset:
50850 h->drv_req_rescan = 0;
50851
50852 /* Turn the interrupts on so we can service requests */
50853- h->access.set_intr_mask(h, HPSA_INTR_ON);
50854+ h->access->set_intr_mask(h, HPSA_INTR_ON);
50855
50856 hpsa_hba_inquiry(h);
50857 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
50858@@ -7084,7 +7084,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
50859 * To write all data in the battery backed cache to disks
50860 */
50861 hpsa_flush_cache(h);
50862- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50863+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50864 hpsa_free_irqs_and_disable_msix(h);
50865 }
50866
50867@@ -7202,7 +7202,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50868 CFGTBL_Trans_enable_directed_msix |
50869 (trans_support & (CFGTBL_Trans_io_accel1 |
50870 CFGTBL_Trans_io_accel2));
50871- struct access_method access = SA5_performant_access;
50872+ struct access_method *access = &SA5_performant_access;
50873
50874 /* This is a bit complicated. There are 8 registers on
50875 * the controller which we write to to tell it 8 different
50876@@ -7244,7 +7244,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50877 * perform the superfluous readl() after each command submission.
50878 */
50879 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
50880- access = SA5_performant_access_no_read;
50881+ access = &SA5_performant_access_no_read;
50882
50883 /* Controller spec: zero out this buffer. */
50884 for (i = 0; i < h->nreply_queues; i++)
50885@@ -7274,12 +7274,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50886 * enable outbound interrupt coalescing in accelerator mode;
50887 */
50888 if (trans_support & CFGTBL_Trans_io_accel1) {
50889- access = SA5_ioaccel_mode1_access;
50890+ access = &SA5_ioaccel_mode1_access;
50891 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
50892 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
50893 } else {
50894 if (trans_support & CFGTBL_Trans_io_accel2) {
50895- access = SA5_ioaccel_mode2_access;
50896+ access = &SA5_ioaccel_mode2_access;
50897 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
50898 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
50899 }
50900diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
50901index 8e06d9e..396e0a1 100644
50902--- a/drivers/scsi/hpsa.h
50903+++ b/drivers/scsi/hpsa.h
50904@@ -127,7 +127,7 @@ struct ctlr_info {
50905 unsigned int msix_vector;
50906 unsigned int msi_vector;
50907 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
50908- struct access_method access;
50909+ struct access_method *access;
50910 char hba_mode_enabled;
50911
50912 /* queue and queue Info */
50913@@ -523,43 +523,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
50914 }
50915
50916 static struct access_method SA5_access = {
50917- SA5_submit_command,
50918- SA5_intr_mask,
50919- SA5_fifo_full,
50920- SA5_intr_pending,
50921- SA5_completed,
50922+ .submit_command = SA5_submit_command,
50923+ .set_intr_mask = SA5_intr_mask,
50924+ .fifo_full = SA5_fifo_full,
50925+ .intr_pending = SA5_intr_pending,
50926+ .command_completed = SA5_completed,
50927 };
50928
50929 static struct access_method SA5_ioaccel_mode1_access = {
50930- SA5_submit_command,
50931- SA5_performant_intr_mask,
50932- SA5_fifo_full,
50933- SA5_ioaccel_mode1_intr_pending,
50934- SA5_ioaccel_mode1_completed,
50935+ .submit_command = SA5_submit_command,
50936+ .set_intr_mask = SA5_performant_intr_mask,
50937+ .fifo_full = SA5_fifo_full,
50938+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
50939+ .command_completed = SA5_ioaccel_mode1_completed,
50940 };
50941
50942 static struct access_method SA5_ioaccel_mode2_access = {
50943- SA5_submit_command_ioaccel2,
50944- SA5_performant_intr_mask,
50945- SA5_fifo_full,
50946- SA5_performant_intr_pending,
50947- SA5_performant_completed,
50948+ .submit_command = SA5_submit_command_ioaccel2,
50949+ .set_intr_mask = SA5_performant_intr_mask,
50950+ .fifo_full = SA5_fifo_full,
50951+ .intr_pending = SA5_performant_intr_pending,
50952+ .command_completed = SA5_performant_completed,
50953 };
50954
50955 static struct access_method SA5_performant_access = {
50956- SA5_submit_command,
50957- SA5_performant_intr_mask,
50958- SA5_fifo_full,
50959- SA5_performant_intr_pending,
50960- SA5_performant_completed,
50961+ .submit_command = SA5_submit_command,
50962+ .set_intr_mask = SA5_performant_intr_mask,
50963+ .fifo_full = SA5_fifo_full,
50964+ .intr_pending = SA5_performant_intr_pending,
50965+ .command_completed = SA5_performant_completed,
50966 };
50967
50968 static struct access_method SA5_performant_access_no_read = {
50969- SA5_submit_command_no_read,
50970- SA5_performant_intr_mask,
50971- SA5_fifo_full,
50972- SA5_performant_intr_pending,
50973- SA5_performant_completed,
50974+ .submit_command = SA5_submit_command_no_read,
50975+ .set_intr_mask = SA5_performant_intr_mask,
50976+ .fifo_full = SA5_fifo_full,
50977+ .intr_pending = SA5_performant_intr_pending,
50978+ .command_completed = SA5_performant_completed,
50979 };
50980
50981 struct board_type {
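
Two related changes run through the hpsa hunks: every access_method table gains designated initializers (positional lists silently misbind if the struct ever gains or reorders a field), and ctlr_info stops embedding a mutable copy of the table in favor of a pointer to the shared definition, which can then live in read-only memory. Reduced to standalone C, with the sa5_* stubs invented:

#include <stdio.h>

struct access_method {
    void (*submit_command)(void);
    int  (*intr_pending)(void);
};

static void sa5_submit(void) { puts("submitted"); }
static int  sa5_pending(void) { return 0; }

/* designated initializers bind by name, and the table can be const */
static const struct access_method SA5_access = {
    .submit_command = sa5_submit,
    .intr_pending   = sa5_pending,
};

struct ctlr_info {
    const struct access_method *access; /* pointer, not a copy */
};

int main(void)
{
    struct ctlr_info h = { .access = &SA5_access };

    h.access->submit_command();         /* h->access->... in the driver */
    return h.access->intr_pending();
}
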
50982diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
50983index 1b3a094..068e683 100644
50984--- a/drivers/scsi/libfc/fc_exch.c
50985+++ b/drivers/scsi/libfc/fc_exch.c
50986@@ -101,12 +101,12 @@ struct fc_exch_mgr {
50987 u16 pool_max_index;
50988
50989 struct {
50990- atomic_t no_free_exch;
50991- atomic_t no_free_exch_xid;
50992- atomic_t xid_not_found;
50993- atomic_t xid_busy;
50994- atomic_t seq_not_found;
50995- atomic_t non_bls_resp;
50996+ atomic_unchecked_t no_free_exch;
50997+ atomic_unchecked_t no_free_exch_xid;
50998+ atomic_unchecked_t xid_not_found;
50999+ atomic_unchecked_t xid_busy;
51000+ atomic_unchecked_t seq_not_found;
51001+ atomic_unchecked_t non_bls_resp;
51002 } stats;
51003 };
51004
51005@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
51006 /* allocate memory for exchange */
51007 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
51008 if (!ep) {
51009- atomic_inc(&mp->stats.no_free_exch);
51010+ atomic_inc_unchecked(&mp->stats.no_free_exch);
51011 goto out;
51012 }
51013 memset(ep, 0, sizeof(*ep));
51014@@ -874,7 +874,7 @@ out:
51015 return ep;
51016 err:
51017 spin_unlock_bh(&pool->lock);
51018- atomic_inc(&mp->stats.no_free_exch_xid);
51019+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
51020 mempool_free(ep, mp->ep_pool);
51021 return NULL;
51022 }
51023@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51024 xid = ntohs(fh->fh_ox_id); /* we originated exch */
51025 ep = fc_exch_find(mp, xid);
51026 if (!ep) {
51027- atomic_inc(&mp->stats.xid_not_found);
51028+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51029 reject = FC_RJT_OX_ID;
51030 goto out;
51031 }
51032@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51033 ep = fc_exch_find(mp, xid);
51034 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
51035 if (ep) {
51036- atomic_inc(&mp->stats.xid_busy);
51037+ atomic_inc_unchecked(&mp->stats.xid_busy);
51038 reject = FC_RJT_RX_ID;
51039 goto rel;
51040 }
51041@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51042 }
51043 xid = ep->xid; /* get our XID */
51044 } else if (!ep) {
51045- atomic_inc(&mp->stats.xid_not_found);
51046+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51047 reject = FC_RJT_RX_ID; /* XID not found */
51048 goto out;
51049 }
51050@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51051 } else {
51052 sp = &ep->seq;
51053 if (sp->id != fh->fh_seq_id) {
51054- atomic_inc(&mp->stats.seq_not_found);
51055+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51056 if (f_ctl & FC_FC_END_SEQ) {
51057 /*
51058 * Update sequence_id based on incoming last
51059@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51060
51061 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
51062 if (!ep) {
51063- atomic_inc(&mp->stats.xid_not_found);
51064+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51065 goto out;
51066 }
51067 if (ep->esb_stat & ESB_ST_COMPLETE) {
51068- atomic_inc(&mp->stats.xid_not_found);
51069+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51070 goto rel;
51071 }
51072 if (ep->rxid == FC_XID_UNKNOWN)
51073 ep->rxid = ntohs(fh->fh_rx_id);
51074 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
51075- atomic_inc(&mp->stats.xid_not_found);
51076+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51077 goto rel;
51078 }
51079 if (ep->did != ntoh24(fh->fh_s_id) &&
51080 ep->did != FC_FID_FLOGI) {
51081- atomic_inc(&mp->stats.xid_not_found);
51082+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51083 goto rel;
51084 }
51085 sof = fr_sof(fp);
51086@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51087 sp->ssb_stat |= SSB_ST_RESP;
51088 sp->id = fh->fh_seq_id;
51089 } else if (sp->id != fh->fh_seq_id) {
51090- atomic_inc(&mp->stats.seq_not_found);
51091+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51092 goto rel;
51093 }
51094
51095@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51096 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
51097
51098 if (!sp)
51099- atomic_inc(&mp->stats.xid_not_found);
51100+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51101 else
51102- atomic_inc(&mp->stats.non_bls_resp);
51103+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
51104
51105 fc_frame_free(fp);
51106 }
51107@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
51108
51109 list_for_each_entry(ema, &lport->ema_list, ema_list) {
51110 mp = ema->mp;
51111- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
51112+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
51113 st->fc_no_free_exch_xid +=
51114- atomic_read(&mp->stats.no_free_exch_xid);
51115- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
51116- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
51117- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
51118- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
51119+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
51120+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
51121+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
51122+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
51123+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
51124 }
51125 }
51126 EXPORT_SYMBOL(fc_exch_update_stats);
51127diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
51128index 932d9cc..50c7ee9 100644
51129--- a/drivers/scsi/libsas/sas_ata.c
51130+++ b/drivers/scsi/libsas/sas_ata.c
51131@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
51132 .postreset = ata_std_postreset,
51133 .error_handler = ata_std_error_handler,
51134 .post_internal_cmd = sas_ata_post_internal,
51135- .qc_defer = ata_std_qc_defer,
51136+ .qc_defer = ata_std_qc_defer,
51137 .qc_prep = ata_noop_qc_prep,
51138 .qc_issue = sas_ata_qc_issue,
51139 .qc_fill_rtf = sas_ata_qc_fill_rtf,
51140diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
51141index 434e903..5a4a79b 100644
51142--- a/drivers/scsi/lpfc/lpfc.h
51143+++ b/drivers/scsi/lpfc/lpfc.h
51144@@ -430,7 +430,7 @@ struct lpfc_vport {
51145 struct dentry *debug_nodelist;
51146 struct dentry *vport_debugfs_root;
51147 struct lpfc_debugfs_trc *disc_trc;
51148- atomic_t disc_trc_cnt;
51149+ atomic_unchecked_t disc_trc_cnt;
51150 #endif
51151 uint8_t stat_data_enabled;
51152 uint8_t stat_data_blocked;
51153@@ -880,8 +880,8 @@ struct lpfc_hba {
51154 struct timer_list fabric_block_timer;
51155 unsigned long bit_flags;
51156 #define FABRIC_COMANDS_BLOCKED 0
51157- atomic_t num_rsrc_err;
51158- atomic_t num_cmd_success;
51159+ atomic_unchecked_t num_rsrc_err;
51160+ atomic_unchecked_t num_cmd_success;
51161 unsigned long last_rsrc_error_time;
51162 unsigned long last_ramp_down_time;
51163 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
51164@@ -916,7 +916,7 @@ struct lpfc_hba {
51165
51166 struct dentry *debug_slow_ring_trc;
51167 struct lpfc_debugfs_trc *slow_ring_trc;
51168- atomic_t slow_ring_trc_cnt;
51169+ atomic_unchecked_t slow_ring_trc_cnt;
51170 /* iDiag debugfs sub-directory */
51171 struct dentry *idiag_root;
51172 struct dentry *idiag_pci_cfg;
51173diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
51174index 5633e7d..8272114 100644
51175--- a/drivers/scsi/lpfc/lpfc_debugfs.c
51176+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
51177@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
51178
51179 #include <linux/debugfs.h>
51180
51181-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51182+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51183 static unsigned long lpfc_debugfs_start_time = 0L;
51184
51185 /* iDiag */
51186@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
51187 lpfc_debugfs_enable = 0;
51188
51189 len = 0;
51190- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
51191+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
51192 (lpfc_debugfs_max_disc_trc - 1);
51193 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
51194 dtp = vport->disc_trc + i;
51195@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
51196 lpfc_debugfs_enable = 0;
51197
51198 len = 0;
51199- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
51200+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
51201 (lpfc_debugfs_max_slow_ring_trc - 1);
51202 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
51203 dtp = phba->slow_ring_trc + i;
51204@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
51205 !vport || !vport->disc_trc)
51206 return;
51207
51208- index = atomic_inc_return(&vport->disc_trc_cnt) &
51209+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
51210 (lpfc_debugfs_max_disc_trc - 1);
51211 dtp = vport->disc_trc + index;
51212 dtp->fmt = fmt;
51213 dtp->data1 = data1;
51214 dtp->data2 = data2;
51215 dtp->data3 = data3;
51216- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51217+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51218 dtp->jif = jiffies;
51219 #endif
51220 return;
51221@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
51222 !phba || !phba->slow_ring_trc)
51223 return;
51224
51225- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
51226+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
51227 (lpfc_debugfs_max_slow_ring_trc - 1);
51228 dtp = phba->slow_ring_trc + index;
51229 dtp->fmt = fmt;
51230 dtp->data1 = data1;
51231 dtp->data2 = data2;
51232 dtp->data3 = data3;
51233- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51234+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51235 dtp->jif = jiffies;
51236 #endif
51237 return;
51238@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51239 "slow_ring buffer\n");
51240 goto debug_failed;
51241 }
51242- atomic_set(&phba->slow_ring_trc_cnt, 0);
51243+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
51244 memset(phba->slow_ring_trc, 0,
51245 (sizeof(struct lpfc_debugfs_trc) *
51246 lpfc_debugfs_max_slow_ring_trc));
51247@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51248 "buffer\n");
51249 goto debug_failed;
51250 }
51251- atomic_set(&vport->disc_trc_cnt, 0);
51252+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
51253
51254 snprintf(name, sizeof(name), "discovery_trace");
51255 vport->debug_disc_trc =
51256diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
51257index 0b2c53a..aec2b45 100644
51258--- a/drivers/scsi/lpfc/lpfc_init.c
51259+++ b/drivers/scsi/lpfc/lpfc_init.c
51260@@ -11290,8 +11290,10 @@ lpfc_init(void)
51261 "misc_register returned with status %d", error);
51262
51263 if (lpfc_enable_npiv) {
51264- lpfc_transport_functions.vport_create = lpfc_vport_create;
51265- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51266+ pax_open_kernel();
51267+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
51268+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51269+ pax_close_kernel();
51270 }
51271 lpfc_transport_template =
51272 fc_attach_transport(&lpfc_transport_functions);
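
The pax_open_kernel()/pax_close_kernel() pair above exists because, with this patch's constification in effect, structures like lpfc_transport_functions live in read-only memory; the window functions briefly permit the write, and the *(void **)& cast bypasses the const qualifier the plugin added. A rough userspace analogue using mprotect(2) — illustrative only, with made-up names:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct transport_ops_like {
	int (*vport_create)(void);
};

static int vport_create_impl(void) { return 42; }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	struct transport_ops_like *ops;

	/* the "ops" structure lives on its own page, normally read-only */
	ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ops == MAP_FAILED)
		return 1;
	mprotect(ops, pagesz, PROT_READ);

	mprotect(ops, pagesz, PROT_READ | PROT_WRITE);	/* "open" */
	ops->vport_create = vport_create_impl;		/* patch one pointer */
	mprotect(ops, pagesz, PROT_READ);		/* "close" */

	printf("%d\n", ops->vport_create());
	return 0;
}
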
51273diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
51274index 4f9222e..f1850e3 100644
51275--- a/drivers/scsi/lpfc/lpfc_scsi.c
51276+++ b/drivers/scsi/lpfc/lpfc_scsi.c
51277@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
51278 unsigned long expires;
51279
51280 spin_lock_irqsave(&phba->hbalock, flags);
51281- atomic_inc(&phba->num_rsrc_err);
51282+ atomic_inc_unchecked(&phba->num_rsrc_err);
51283 phba->last_rsrc_error_time = jiffies;
51284
51285 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
51286@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51287 unsigned long num_rsrc_err, num_cmd_success;
51288 int i;
51289
51290- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
51291- num_cmd_success = atomic_read(&phba->num_cmd_success);
51292+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
51293+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
51294
51295 /*
51296 * The error and success command counters are global per
51297@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51298 }
51299 }
51300 lpfc_destroy_vport_work_array(phba, vports);
51301- atomic_set(&phba->num_rsrc_err, 0);
51302- atomic_set(&phba->num_cmd_success, 0);
51303+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
51304+ atomic_set_unchecked(&phba->num_cmd_success, 0);
51305 }
51306
51307 /**
51308diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51309index 6a1c036..38e0e8d 100644
51310--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51311+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51312@@ -1508,7 +1508,7 @@ _scsih_get_resync(struct device *dev)
51313 {
51314 struct scsi_device *sdev = to_scsi_device(dev);
51315 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51316- static struct _raid_device *raid_device;
51317+ struct _raid_device *raid_device;
51318 unsigned long flags;
51319 Mpi2RaidVolPage0_t vol_pg0;
51320 Mpi2ConfigReply_t mpi_reply;
51321@@ -1560,7 +1560,7 @@ _scsih_get_state(struct device *dev)
51322 {
51323 struct scsi_device *sdev = to_scsi_device(dev);
51324 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51325- static struct _raid_device *raid_device;
51326+ struct _raid_device *raid_device;
51327 unsigned long flags;
51328 Mpi2RaidVolPage0_t vol_pg0;
51329 Mpi2ConfigReply_t mpi_reply;
51330@@ -6602,7 +6602,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
51331 Mpi2EventDataIrOperationStatus_t *event_data =
51332 (Mpi2EventDataIrOperationStatus_t *)
51333 fw_event->event_data;
51334- static struct _raid_device *raid_device;
51335+ struct _raid_device *raid_device;
51336 unsigned long flags;
51337 u16 handle;
51338
51339@@ -7073,7 +7073,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
51340 u64 sas_address;
51341 struct _sas_device *sas_device;
51342 struct _sas_node *expander_device;
51343- static struct _raid_device *raid_device;
51344+ struct _raid_device *raid_device;
51345 u8 retry_count;
51346 unsigned long flags;
51347
51348diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
51349index 8c27b6a..607f56e 100644
51350--- a/drivers/scsi/pmcraid.c
51351+++ b/drivers/scsi/pmcraid.c
51352@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
51353 res->scsi_dev = scsi_dev;
51354 scsi_dev->hostdata = res;
51355 res->change_detected = 0;
51356- atomic_set(&res->read_failures, 0);
51357- atomic_set(&res->write_failures, 0);
51358+ atomic_set_unchecked(&res->read_failures, 0);
51359+ atomic_set_unchecked(&res->write_failures, 0);
51360 rc = 0;
51361 }
51362 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
51363@@ -2646,9 +2646,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
51364
51365 /* If this was a SCSI read/write command keep count of errors */
51366 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
51367- atomic_inc(&res->read_failures);
51368+ atomic_inc_unchecked(&res->read_failures);
51369 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
51370- atomic_inc(&res->write_failures);
51371+ atomic_inc_unchecked(&res->write_failures);
51372
51373 if (!RES_IS_GSCSI(res->cfg_entry) &&
51374 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
51375@@ -3474,7 +3474,7 @@ static int pmcraid_queuecommand_lck(
51376 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51377 * hrrq_id assigned here in queuecommand
51378 */
51379- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51380+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51381 pinstance->num_hrrq;
51382 cmd->cmd_done = pmcraid_io_done;
51383
51384@@ -3788,7 +3788,7 @@ static long pmcraid_ioctl_passthrough(
51385 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51386 * hrrq_id assigned here in queuecommand
51387 */
51388- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51389+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51390 pinstance->num_hrrq;
51391
51392 if (request_size) {
51393@@ -4426,7 +4426,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
51394
51395 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
51396 /* add resources only after host is added into system */
51397- if (!atomic_read(&pinstance->expose_resources))
51398+ if (!atomic_read_unchecked(&pinstance->expose_resources))
51399 return;
51400
51401 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
51402@@ -5243,8 +5243,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
51403 init_waitqueue_head(&pinstance->reset_wait_q);
51404
51405 atomic_set(&pinstance->outstanding_cmds, 0);
51406- atomic_set(&pinstance->last_message_id, 0);
51407- atomic_set(&pinstance->expose_resources, 0);
51408+ atomic_set_unchecked(&pinstance->last_message_id, 0);
51409+ atomic_set_unchecked(&pinstance->expose_resources, 0);
51410
51411 INIT_LIST_HEAD(&pinstance->free_res_q);
51412 INIT_LIST_HEAD(&pinstance->used_res_q);
51413@@ -5957,7 +5957,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
51414 /* Schedule worker thread to handle CCN and take care of adding and
51415 * removing devices to OS
51416 */
51417- atomic_set(&pinstance->expose_resources, 1);
51418+ atomic_set_unchecked(&pinstance->expose_resources, 1);
51419 schedule_work(&pinstance->worker_q);
51420 return rc;
51421
51422diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
51423index e1d150f..6c6df44 100644
51424--- a/drivers/scsi/pmcraid.h
51425+++ b/drivers/scsi/pmcraid.h
51426@@ -748,7 +748,7 @@ struct pmcraid_instance {
51427 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
51428
51429 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
51430- atomic_t last_message_id;
51431+ atomic_unchecked_t last_message_id;
51432
51433 /* configuration table */
51434 struct pmcraid_config_table *cfg_table;
51435@@ -777,7 +777,7 @@ struct pmcraid_instance {
51436 atomic_t outstanding_cmds;
51437
51438 /* should add/delete resources to mid-layer now ?*/
51439- atomic_t expose_resources;
51440+ atomic_unchecked_t expose_resources;
51441
51442
51443
51444@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
51445 struct pmcraid_config_table_entry_ext cfg_entry_ext;
51446 };
51447 struct scsi_device *scsi_dev; /* Link scsi_device structure */
51448- atomic_t read_failures; /* count of failed READ commands */
51449- atomic_t write_failures; /* count of failed WRITE commands */
51450+ atomic_unchecked_t read_failures; /* count of failed READ commands */
51451+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
51452
51453 /* To indicate add/delete/modify during CCN */
51454 u8 change_detected;
51455diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
51456index 82b92c4..3178171 100644
51457--- a/drivers/scsi/qla2xxx/qla_attr.c
51458+++ b/drivers/scsi/qla2xxx/qla_attr.c
51459@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
51460 return 0;
51461 }
51462
51463-struct fc_function_template qla2xxx_transport_functions = {
51464+fc_function_template_no_const qla2xxx_transport_functions = {
51465
51466 .show_host_node_name = 1,
51467 .show_host_port_name = 1,
51468@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
51469 .bsg_timeout = qla24xx_bsg_timeout,
51470 };
51471
51472-struct fc_function_template qla2xxx_transport_vport_functions = {
51473+fc_function_template_no_const qla2xxx_transport_vport_functions = {
51474
51475 .show_host_node_name = 1,
51476 .show_host_port_name = 1,
51477diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
51478index 7686bfe..4710893 100644
51479--- a/drivers/scsi/qla2xxx/qla_gbl.h
51480+++ b/drivers/scsi/qla2xxx/qla_gbl.h
51481@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
51482 struct device_attribute;
51483 extern struct device_attribute *qla2x00_host_attrs[];
51484 struct fc_function_template;
51485-extern struct fc_function_template qla2xxx_transport_functions;
51486-extern struct fc_function_template qla2xxx_transport_vport_functions;
51487+extern fc_function_template_no_const qla2xxx_transport_functions;
51488+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
51489 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
51490 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
51491 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
51492diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
51493index cce1cbc..5b9f0fe 100644
51494--- a/drivers/scsi/qla2xxx/qla_os.c
51495+++ b/drivers/scsi/qla2xxx/qla_os.c
51496@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
51497 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
51498 /* Ok, a 64bit DMA mask is applicable. */
51499 ha->flags.enable_64bit_addressing = 1;
51500- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51501- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51502+ pax_open_kernel();
51503+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51504+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51505+ pax_close_kernel();
51506 return;
51507 }
51508 }
51509diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
51510index 8f6d0fb..1b21097 100644
51511--- a/drivers/scsi/qla4xxx/ql4_def.h
51512+++ b/drivers/scsi/qla4xxx/ql4_def.h
51513@@ -305,7 +305,7 @@ struct ddb_entry {
51514 * (4000 only) */
51515 atomic_t relogin_timer; /* Max Time to wait for
51516 * relogin to complete */
51517- atomic_t relogin_retry_count; /* Num of times relogin has been
51518+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
51519 * retried */
51520 uint32_t default_time2wait; /* Default Min time between
51521 * relogins (+aens) */
51522diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
51523index 6d25879..3031a9f 100644
51524--- a/drivers/scsi/qla4xxx/ql4_os.c
51525+++ b/drivers/scsi/qla4xxx/ql4_os.c
51526@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
51527 */
51528 if (!iscsi_is_session_online(cls_sess)) {
51529 /* Reset retry relogin timer */
51530- atomic_inc(&ddb_entry->relogin_retry_count);
51531+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
51532 DEBUG2(ql4_printk(KERN_INFO, ha,
51533 "%s: index[%d] relogin timed out-retrying"
51534 " relogin (%d), retry (%d)\n", __func__,
51535 ddb_entry->fw_ddb_index,
51536- atomic_read(&ddb_entry->relogin_retry_count),
51537+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
51538 ddb_entry->default_time2wait + 4));
51539 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
51540 atomic_set(&ddb_entry->retry_relogin_timer,
51541@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
51542
51543 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
51544 atomic_set(&ddb_entry->relogin_timer, 0);
51545- atomic_set(&ddb_entry->relogin_retry_count, 0);
51546+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
51547 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
51548 ddb_entry->default_relogin_timeout =
51549 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
51550diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
51551index 17bb541..85f4508 100644
51552--- a/drivers/scsi/scsi_lib.c
51553+++ b/drivers/scsi/scsi_lib.c
51554@@ -1595,7 +1595,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
51555 shost = sdev->host;
51556 scsi_init_cmd_errh(cmd);
51557 cmd->result = DID_NO_CONNECT << 16;
51558- atomic_inc(&cmd->device->iorequest_cnt);
51559+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51560
51561 /*
51562 * SCSI request completion path will do scsi_device_unbusy(),
51563@@ -1618,9 +1618,9 @@ static void scsi_softirq_done(struct request *rq)
51564
51565 INIT_LIST_HEAD(&cmd->eh_entry);
51566
51567- atomic_inc(&cmd->device->iodone_cnt);
51568+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
51569 if (cmd->result)
51570- atomic_inc(&cmd->device->ioerr_cnt);
51571+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
51572
51573 disposition = scsi_decide_disposition(cmd);
51574 if (disposition != SUCCESS &&
51575@@ -1661,7 +1661,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
51576 struct Scsi_Host *host = cmd->device->host;
51577 int rtn = 0;
51578
51579- atomic_inc(&cmd->device->iorequest_cnt);
51580+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51581
51582 /* check if the device is still usable */
51583 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
51584diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
51585index 1ac38e7..6acc656 100644
51586--- a/drivers/scsi/scsi_sysfs.c
51587+++ b/drivers/scsi/scsi_sysfs.c
51588@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
51589 char *buf) \
51590 { \
51591 struct scsi_device *sdev = to_scsi_device(dev); \
51592- unsigned long long count = atomic_read(&sdev->field); \
51593+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
51594 return snprintf(buf, 20, "0x%llx\n", count); \
51595 } \
51596 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
51597diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
51598index 5d6f348..18778a6b 100644
51599--- a/drivers/scsi/scsi_transport_fc.c
51600+++ b/drivers/scsi/scsi_transport_fc.c
51601@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
51602 * Netlink Infrastructure
51603 */
51604
51605-static atomic_t fc_event_seq;
51606+static atomic_unchecked_t fc_event_seq;
51607
51608 /**
51609 * fc_get_event_number - Obtain the next sequential FC event number
51610@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
51611 u32
51612 fc_get_event_number(void)
51613 {
51614- return atomic_add_return(1, &fc_event_seq);
51615+ return atomic_add_return_unchecked(1, &fc_event_seq);
51616 }
51617 EXPORT_SYMBOL(fc_get_event_number);
51618
51619@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
51620 {
51621 int error;
51622
51623- atomic_set(&fc_event_seq, 0);
51624+ atomic_set_unchecked(&fc_event_seq, 0);
51625
51626 error = transport_class_register(&fc_host_class);
51627 if (error)
51628@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
51629 char *cp;
51630
51631 *val = simple_strtoul(buf, &cp, 0);
51632- if ((*cp && (*cp != '\n')) || (*val < 0))
51633+ if (*cp && (*cp != '\n'))
51634 return -EINVAL;
51635 /*
51636 * Check for overflow; dev_loss_tmo is u32
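
The deleted (*val < 0) test above is dead code: *val is an unsigned long, so the comparison is always false (GCC flags it under -Wtype-limits). A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long v = (unsigned long)-1;	/* largest possible value */

	printf("%d\n", v < 0);	/* prints 0: unsigned is never negative */
	return 0;
}
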
51637diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
51638index 67d43e3..8cee73c 100644
51639--- a/drivers/scsi/scsi_transport_iscsi.c
51640+++ b/drivers/scsi/scsi_transport_iscsi.c
51641@@ -79,7 +79,7 @@ struct iscsi_internal {
51642 struct transport_container session_cont;
51643 };
51644
51645-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
51646+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
51647 static struct workqueue_struct *iscsi_eh_timer_workq;
51648
51649 static DEFINE_IDA(iscsi_sess_ida);
51650@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
51651 int err;
51652
51653 ihost = shost->shost_data;
51654- session->sid = atomic_add_return(1, &iscsi_session_nr);
51655+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
51656
51657 if (target_id == ISCSI_MAX_TARGET) {
51658 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
51659@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
51660 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
51661 ISCSI_TRANSPORT_VERSION);
51662
51663- atomic_set(&iscsi_session_nr, 0);
51664+ atomic_set_unchecked(&iscsi_session_nr, 0);
51665
51666 err = class_register(&iscsi_transport_class);
51667 if (err)
51668diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
51669index ae45bd9..c32a586 100644
51670--- a/drivers/scsi/scsi_transport_srp.c
51671+++ b/drivers/scsi/scsi_transport_srp.c
51672@@ -35,7 +35,7 @@
51673 #include "scsi_priv.h"
51674
51675 struct srp_host_attrs {
51676- atomic_t next_port_id;
51677+ atomic_unchecked_t next_port_id;
51678 };
51679 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
51680
51681@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
51682 struct Scsi_Host *shost = dev_to_shost(dev);
51683 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
51684
51685- atomic_set(&srp_host->next_port_id, 0);
51686+ atomic_set_unchecked(&srp_host->next_port_id, 0);
51687 return 0;
51688 }
51689
51690@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
51691 rport_fast_io_fail_timedout);
51692 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
51693
51694- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
51695+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
51696 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
51697
51698 transport_setup_device(&rport->dev);
51699diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
51700index 05ea0d4..5af8049 100644
51701--- a/drivers/scsi/sd.c
51702+++ b/drivers/scsi/sd.c
51703@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
51704 sdkp->disk = gd;
51705 sdkp->index = index;
51706 atomic_set(&sdkp->openers, 0);
51707- atomic_set(&sdkp->device->ioerr_cnt, 0);
51708+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
51709
51710 if (!sdp->request_queue->rq_timeout) {
51711 if (sdp->type != TYPE_MOD)
51712diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
51713index 763bffe..e0eacf4 100644
51714--- a/drivers/scsi/sg.c
51715+++ b/drivers/scsi/sg.c
51716@@ -1098,7 +1098,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
51717 sdp->disk->disk_name,
51718 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
51719 NULL,
51720- (char *)arg);
51721+ (char __user *)arg);
51722 case BLKTRACESTART:
51723 return blk_trace_startstop(sdp->device->request_queue, 1);
51724 case BLKTRACESTOP:
51725diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
51726index 011a336..fb2b7a0 100644
51727--- a/drivers/soc/tegra/fuse/fuse-tegra.c
51728+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
51729@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
51730 return i;
51731 }
51732
51733-static struct bin_attribute fuse_bin_attr = {
51734+static bin_attribute_no_const fuse_bin_attr = {
51735 .attr = { .name = "fuse", .mode = S_IRUGO, },
51736 .read = fuse_read,
51737 };
51738diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
51739index 66a70e9..f82cea4 100644
51740--- a/drivers/spi/spi.c
51741+++ b/drivers/spi/spi.c
51742@@ -2238,7 +2238,7 @@ int spi_bus_unlock(struct spi_master *master)
51743 EXPORT_SYMBOL_GPL(spi_bus_unlock);
51744
51745 /* portable code must never pass more than 32 bytes */
51746-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
51747+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
51748
51749 static u8 *buf;
51750
51751diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
51752index b41429f..2de5373 100644
51753--- a/drivers/staging/android/timed_output.c
51754+++ b/drivers/staging/android/timed_output.c
51755@@ -25,7 +25,7 @@
51756 #include "timed_output.h"
51757
51758 static struct class *timed_output_class;
51759-static atomic_t device_count;
51760+static atomic_unchecked_t device_count;
51761
51762 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
51763 char *buf)
51764@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
51765 timed_output_class = class_create(THIS_MODULE, "timed_output");
51766 if (IS_ERR(timed_output_class))
51767 return PTR_ERR(timed_output_class);
51768- atomic_set(&device_count, 0);
51769+ atomic_set_unchecked(&device_count, 0);
51770 timed_output_class->dev_groups = timed_output_groups;
51771 }
51772
51773@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
51774 if (ret < 0)
51775 return ret;
51776
51777- tdev->index = atomic_inc_return(&device_count);
51778+ tdev->index = atomic_inc_return_unchecked(&device_count);
51779 tdev->dev = device_create(timed_output_class, NULL,
51780 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
51781 if (IS_ERR(tdev->dev))
51782diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
51783index f143cb6..6fb8255 100644
51784--- a/drivers/staging/comedi/comedi_fops.c
51785+++ b/drivers/staging/comedi/comedi_fops.c
51786@@ -273,8 +273,8 @@ static void comedi_file_reset(struct file *file)
51787 }
51788 cfp->last_attached = dev->attached;
51789 cfp->last_detach_count = dev->detach_count;
51790- ACCESS_ONCE(cfp->read_subdev) = read_s;
51791- ACCESS_ONCE(cfp->write_subdev) = write_s;
51792+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
51793+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
51794 }
51795
51796 static void comedi_file_check(struct file *file)
51797@@ -1885,7 +1885,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51798 !(s_old->async->cmd.flags & CMDF_WRITE))
51799 return -EBUSY;
51800
51801- ACCESS_ONCE(cfp->read_subdev) = s_new;
51802+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
51803 return 0;
51804 }
51805
51806@@ -1927,7 +1927,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51807 (s_old->async->cmd.flags & CMDF_WRITE))
51808 return -EBUSY;
51809
51810- ACCESS_ONCE(cfp->write_subdev) = s_new;
51811+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
51812 return 0;
51813 }
51814
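
ACCESS_ONCE_RW in the comedi hunks is, as far as this patch shows, the counterpart needed once ACCESS_ONCE routes accesses through a pointer-to-const: reads still compile, but deliberate lockless writes need the RW variant. A hedged reconstruction of the idea — these macro bodies are illustrative, not the patch's exact definitions:

#include <stdio.h>

#define MY_ACCESS_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))
#define MY_ACCESS_ONCE_RW(x)	(*(volatile __typeof__(x) *)&(x))

int main(void)
{
	int v = 1;

	printf("%d\n", MY_ACCESS_ONCE(v));	/* reads are fine */
	/* MY_ACCESS_ONCE(v) = 2;	   rejected: assigning to const */
	MY_ACCESS_ONCE_RW(v) = 2;		/* explicit lockless write */
	printf("%d\n", v);
	return 0;
}
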
51815diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
51816index 001348c..cfaac8a 100644
51817--- a/drivers/staging/gdm724x/gdm_tty.c
51818+++ b/drivers/staging/gdm724x/gdm_tty.c
51819@@ -44,7 +44,7 @@
51820 #define gdm_tty_send_control(n, r, v, d, l) (\
51821 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
51822
51823-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
51824+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
51825
51826 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
51827 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
51828diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
51829index 503b2d7..c904931 100644
51830--- a/drivers/staging/line6/driver.c
51831+++ b/drivers/staging/line6/driver.c
51832@@ -463,7 +463,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
51833 {
51834 struct usb_device *usbdev = line6->usbdev;
51835 int ret;
51836- unsigned char len;
51837+ unsigned char *plen;
51838
51839 /* query the serial number: */
51840 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
51841@@ -476,27 +476,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
51842 return ret;
51843 }
51844
51845+ plen = kmalloc(1, GFP_KERNEL);
51846+ if (plen == NULL)
51847+ return -ENOMEM;
51848+
51849 /* Wait for data length. We'll get 0xff until length arrives. */
51850 do {
51851 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
51852 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
51853 USB_DIR_IN,
51854- 0x0012, 0x0000, &len, 1,
51855+ 0x0012, 0x0000, plen, 1,
51856 LINE6_TIMEOUT * HZ);
51857 if (ret < 0) {
51858 dev_err(line6->ifcdev,
51859 "receive length failed (error %d)\n", ret);
51860+ kfree(plen);
51861 return ret;
51862 }
51863- } while (len == 0xff);
51864+ } while (*plen == 0xff);
51865
51866- if (len != datalen) {
51867+ if (*plen != datalen) {
51868 /* should be equal or something went wrong */
51869 dev_err(line6->ifcdev,
51870 "length mismatch (expected %d, got %d)\n",
51871- (int)datalen, (int)len);
51872+ (int)datalen, (int)*plen);
51873+ kfree(plen);
51874 return -EINVAL;
51875 }
51876+ kfree(plen);
51877
51878 /* receive the result: */
51879 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
51880@@ -520,7 +527,7 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
51881 {
51882 struct usb_device *usbdev = line6->usbdev;
51883 int ret;
51884- unsigned char status;
51885+ unsigned char *status;
51886
51887 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
51888 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
51889@@ -533,26 +540,34 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
51890 return ret;
51891 }
51892
51893+ status = kmalloc(1, GFP_KERNEL);
51894+ if (status == NULL)
51895+ return -ENOMEM;
51896+
51897 do {
51898 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
51899 0x67,
51900 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
51901 USB_DIR_IN,
51902 0x0012, 0x0000,
51903- &status, 1, LINE6_TIMEOUT * HZ);
51904+ status, 1, LINE6_TIMEOUT * HZ);
51905
51906 if (ret < 0) {
51907 dev_err(line6->ifcdev,
51908 "receiving status failed (error %d)\n", ret);
51909+ kfree(status);
51910 return ret;
51911 }
51912- } while (status == 0xff);
51913+ } while (*status == 0xff);
51914
51915- if (status != 0) {
51916+ if (*status != 0) {
51917 dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
51918+ kfree(status);
51919 return -EINVAL;
51920 }
51921
51922+ kfree(status);
51923+
51924 return 0;
51925 }
51926
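
Beyond the hardening churn, this line6 hunk fixes a real driver bug: usb_control_msg() buffers are handed to the DMA engine and must be heap allocations, so pointing it at an on-stack len/status byte was invalid. Distilled to a sketch — a hypothetical kernel-context helper under that assumption, not code from the driver:

#include <linux/slab.h>
#include <linux/usb.h>

/* hypothetical helper: read one vendor-request byte via a DMA-able
 * heap buffer instead of a stack variable */
static int read_one_byte(struct usb_device *usbdev, unsigned char *out)
{
	unsigned char *buf = kmalloc(1, GFP_KERNEL);	/* DMA-able */
	int ret;

	if (!buf)
		return -ENOMEM;
	ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
			      0x0012, 0x0000, buf, 1, HZ);
	if (ret >= 0)
		*out = *buf;
	kfree(buf);
	return ret;
}
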
51927diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
51928index 6943715..0a93632 100644
51929--- a/drivers/staging/line6/toneport.c
51930+++ b/drivers/staging/line6/toneport.c
51931@@ -11,6 +11,7 @@
51932 */
51933
51934 #include <linux/wait.h>
51935+#include <linux/slab.h>
51936 #include <sound/control.h>
51937
51938 #include "audio.h"
51939@@ -307,14 +308,20 @@ static void toneport_destruct(struct usb_interface *interface)
51940 */
51941 static void toneport_setup(struct usb_line6_toneport *toneport)
51942 {
51943- int ticks;
51944+ int *ticks;
51945 struct usb_line6 *line6 = &toneport->line6;
51946 struct usb_device *usbdev = line6->usbdev;
51947 u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
51948
51949+ ticks = kmalloc(sizeof(int), GFP_KERNEL);
51950+ if (ticks == NULL)
51951+ return;
51952+
51953 /* sync time on device with host: */
51954- ticks = (int)get_seconds();
51955- line6_write_data(line6, 0x80c6, &ticks, 4);
51956+ *ticks = (int)get_seconds();
51957+ line6_write_data(line6, 0x80c6, ticks, sizeof(int));
51958+
51959+ kfree(ticks);
51960
51961 /* enable device: */
51962 toneport_send_cmd(usbdev, 0x0301, 0x0000);
51963diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
51964index 463da07..e791ce9 100644
51965--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
51966+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
51967@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
51968 return 0;
51969 }
51970
51971-sfw_test_client_ops_t brw_test_client;
51972-void brw_init_test_client(void)
51973-{
51974- brw_test_client.tso_init = brw_client_init;
51975- brw_test_client.tso_fini = brw_client_fini;
51976- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
51977- brw_test_client.tso_done_rpc = brw_client_done_rpc;
51978+sfw_test_client_ops_t brw_test_client = {
51979+ .tso_init = brw_client_init,
51980+ .tso_fini = brw_client_fini,
51981+ .tso_prep_rpc = brw_client_prep_rpc,
51982+ .tso_done_rpc = brw_client_done_rpc,
51983 };
51984
51985 srpc_service_t brw_test_service;
51986diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
51987index cc9d182..8fabce3 100644
51988--- a/drivers/staging/lustre/lnet/selftest/framework.c
51989+++ b/drivers/staging/lustre/lnet/selftest/framework.c
51990@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
51991
51992 extern sfw_test_client_ops_t ping_test_client;
51993 extern srpc_service_t ping_test_service;
51994-extern void ping_init_test_client(void);
51995 extern void ping_init_test_service(void);
51996
51997 extern sfw_test_client_ops_t brw_test_client;
51998 extern srpc_service_t brw_test_service;
51999-extern void brw_init_test_client(void);
52000 extern void brw_init_test_service(void);
52001
52002
52003@@ -1675,12 +1673,10 @@ sfw_startup (void)
52004 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
52005 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
52006
52007- brw_init_test_client();
52008 brw_init_test_service();
52009 rc = sfw_register_test(&brw_test_service, &brw_test_client);
52010 LASSERT (rc == 0);
52011
52012- ping_init_test_client();
52013 ping_init_test_service();
52014 rc = sfw_register_test(&ping_test_service, &ping_test_client);
52015 LASSERT (rc == 0);
52016diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
52017index d8c0df6..5041cbb 100644
52018--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
52019+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
52020@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
52021 return 0;
52022 }
52023
52024-sfw_test_client_ops_t ping_test_client;
52025-void ping_init_test_client(void)
52026-{
52027- ping_test_client.tso_init = ping_client_init;
52028- ping_test_client.tso_fini = ping_client_fini;
52029- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
52030- ping_test_client.tso_done_rpc = ping_client_done_rpc;
52031-}
52032+sfw_test_client_ops_t ping_test_client = {
52033+ .tso_init = ping_client_init,
52034+ .tso_fini = ping_client_fini,
52035+ .tso_prep_rpc = ping_client_prep_rpc,
52036+ .tso_done_rpc = ping_client_done_rpc,
52037+};
52038
52039 srpc_service_t ping_test_service;
52040 void ping_init_test_service(void)
52041diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52042index 83bc0a9..12ba00a 100644
52043--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
52044+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52045@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
52046 ldlm_completion_callback lcs_completion;
52047 ldlm_blocking_callback lcs_blocking;
52048 ldlm_glimpse_callback lcs_glimpse;
52049-};
52050+} __no_const;
52051
52052 /* ldlm_lockd.c */
52053 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
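
The __no_const annotation appearing on ldlm_callback_suite here (and on several ops structures below) is the escape hatch for structures the constify plugin would otherwise make implicitly const: the brw/ping conversions above are the write-once case, bound entirely at compile time, while __no_const marks hooks that genuinely must be stored to at runtime. In plain C the trade-off looks like this (illustrative types only):

#include <stdio.h>

struct callback_suite_like {
	void (*completion)(void);
};

static void completion_impl(void) { }

/* constified: bound by designated initializers at compile time, never
 * written again, so the object can live in read-only memory */
static const struct callback_suite_like fixed = {
	.completion = completion_impl,
};

/* the __no_const case: hooks are filled in at runtime, so the object
 * has to stay writable */
static struct callback_suite_like runtime;

int main(void)
{
	runtime.completion = completion_impl;	/* legal: not const */
	/* fixed.completion = completion_impl;	   would not compile */
	fixed.completion();
	runtime.completion();
	return 0;
}
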
52054diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
52055index 2a88b80..62e7e5f 100644
52056--- a/drivers/staging/lustre/lustre/include/obd.h
52057+++ b/drivers/staging/lustre/lustre/include/obd.h
52058@@ -1362,7 +1362,7 @@ struct md_ops {
52059 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
52060 * wrapper function in include/linux/obd_class.h.
52061 */
52062-};
52063+} __no_const;
52064
52065 struct lsm_operations {
52066 void (*lsm_free)(struct lov_stripe_md *);
52067diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52068index a4c252f..b21acac 100644
52069--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52070+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52071@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
52072 int added = (mode == LCK_NL);
52073 int overlaps = 0;
52074 int splitted = 0;
52075- const struct ldlm_callback_suite null_cbs = { NULL };
52076+ const struct ldlm_callback_suite null_cbs = { };
52077
52078 CDEBUG(D_DLMTRACE,
52079 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
52080diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52081index 83d3f08..b03adad 100644
52082--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52083+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52084@@ -236,7 +236,7 @@ int proc_console_max_delay_cs(struct ctl_table *table, int write,
52085 void __user *buffer, size_t *lenp, loff_t *ppos)
52086 {
52087 int rc, max_delay_cs;
52088- struct ctl_table dummy = *table;
52089+ ctl_table_no_const dummy = *table;
52090 long d;
52091
52092 dummy.data = &max_delay_cs;
52093@@ -268,7 +268,7 @@ int proc_console_min_delay_cs(struct ctl_table *table, int write,
52094 void __user *buffer, size_t *lenp, loff_t *ppos)
52095 {
52096 int rc, min_delay_cs;
52097- struct ctl_table dummy = *table;
52098+ ctl_table_no_const dummy = *table;
52099 long d;
52100
52101 dummy.data = &min_delay_cs;
52102@@ -300,7 +300,7 @@ int proc_console_backoff(struct ctl_table *table, int write,
52103 void __user *buffer, size_t *lenp, loff_t *ppos)
52104 {
52105 int rc, backoff;
52106- struct ctl_table dummy = *table;
52107+ ctl_table_no_const dummy = *table;
52108
52109 dummy.data = &backoff;
52110 dummy.proc_handler = &proc_dointvec;
52111diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
52112index 2c4fc74..b04ca79 100644
52113--- a/drivers/staging/lustre/lustre/libcfs/module.c
52114+++ b/drivers/staging/lustre/lustre/libcfs/module.c
52115@@ -315,11 +315,11 @@ out:
52116
52117
52118 struct cfs_psdev_ops libcfs_psdev_ops = {
52119- libcfs_psdev_open,
52120- libcfs_psdev_release,
52121- NULL,
52122- NULL,
52123- libcfs_ioctl
52124+ .p_open = libcfs_psdev_open,
52125+ .p_close = libcfs_psdev_release,
52126+ .p_read = NULL,
52127+ .p_write = NULL,
52128+ .p_ioctl = libcfs_ioctl
52129 };
52130
52131 extern int insert_proc(void);
52132diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
52133index fcbe836..8a7ada4 100644
52134--- a/drivers/staging/octeon/ethernet-rx.c
52135+++ b/drivers/staging/octeon/ethernet-rx.c
52136@@ -352,14 +352,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52137 /* Increment RX stats for virtual ports */
52138 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
52139 #ifdef CONFIG_64BIT
52140- atomic64_add(1,
52141+ atomic64_add_unchecked(1,
52142 (atomic64_t *)&priv->stats.rx_packets);
52143- atomic64_add(skb->len,
52144+ atomic64_add_unchecked(skb->len,
52145 (atomic64_t *)&priv->stats.rx_bytes);
52146 #else
52147- atomic_add(1,
52148+ atomic_add_unchecked(1,
52149 (atomic_t *)&priv->stats.rx_packets);
52150- atomic_add(skb->len,
52151+ atomic_add_unchecked(skb->len,
52152 (atomic_t *)&priv->stats.rx_bytes);
52153 #endif
52154 }
52155@@ -371,10 +371,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52156 dev->name);
52157 */
52158 #ifdef CONFIG_64BIT
52159- atomic64_add(1,
52160+ atomic64_add_unchecked(1,
52161 (atomic64_t *)&priv->stats.rx_dropped);
52162 #else
52163- atomic_add(1,
52164+ atomic_add_unchecked(1,
52165 (atomic_t *)&priv->stats.rx_dropped);
52166 #endif
52167 dev_kfree_skb_irq(skb);
52168diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
52169index ee32149..052d1836 100644
52170--- a/drivers/staging/octeon/ethernet.c
52171+++ b/drivers/staging/octeon/ethernet.c
52172@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
52173 * since the RX tasklet also increments it.
52174 */
52175 #ifdef CONFIG_64BIT
52176- atomic64_add(rx_status.dropped_packets,
52177- (atomic64_t *)&priv->stats.rx_dropped);
52178+ atomic64_add_unchecked(rx_status.dropped_packets,
52179+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
52180 #else
52181- atomic_add(rx_status.dropped_packets,
52182- (atomic_t *)&priv->stats.rx_dropped);
52183+ atomic_add_unchecked(rx_status.dropped_packets,
52184+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
52185 #endif
52186 }
52187
52188diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
52189index 3b476d8..f522d68 100644
52190--- a/drivers/staging/rtl8188eu/include/hal_intf.h
52191+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
52192@@ -225,7 +225,7 @@ struct hal_ops {
52193
52194 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
52195 void (*hal_reset_security_engine)(struct adapter *adapter);
52196-};
52197+} __no_const;
52198
52199 enum rt_eeprom_type {
52200 EEPROM_93C46,
52201diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
52202index 070cc03..6806e37 100644
52203--- a/drivers/staging/rtl8712/rtl871x_io.h
52204+++ b/drivers/staging/rtl8712/rtl871x_io.h
52205@@ -108,7 +108,7 @@ struct _io_ops {
52206 u8 *pmem);
52207 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
52208 u8 *pmem);
52209-};
52210+} __no_const;
52211
52212 struct io_req {
52213 struct list_head list;
52214diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
52215index 46dad63..fe4acdc 100644
52216--- a/drivers/staging/unisys/visorchipset/visorchipset.h
52217+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
52218@@ -226,7 +226,7 @@ struct visorchipset_busdev_notifiers {
52219 void (*device_resume)(ulong bus_no, ulong dev_no);
52220 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
52221 ulong *max_size);
52222-};
52223+} __no_const;
52224
52225 /* These functions live inside visorchipset, and will be called to indicate
52226 * responses to specific events (by code outside of visorchipset).
52227@@ -241,7 +241,7 @@ struct visorchipset_busdev_responders {
52228 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
52229 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
52230 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
52231-};
52232+} __no_const;
52233
52234 /** Register functions (in the bus driver) to get called by visorchipset
52235 * whenever a bus or device appears for which this service partition is
52236diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
52237index 9512af6..045bf5a 100644
52238--- a/drivers/target/sbp/sbp_target.c
52239+++ b/drivers/target/sbp/sbp_target.c
52240@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
52241
52242 #define SESSION_MAINTENANCE_INTERVAL HZ
52243
52244-static atomic_t login_id = ATOMIC_INIT(0);
52245+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
52246
52247 static void session_maintenance_work(struct work_struct *);
52248 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
52249@@ -444,7 +444,7 @@ static void sbp_management_request_login(
52250 login->lun = se_lun;
52251 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
52252 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
52253- login->login_id = atomic_inc_return(&login_id);
52254+ login->login_id = atomic_inc_return_unchecked(&login_id);
52255
52256 login->tgt_agt = sbp_target_agent_register(login);
52257 if (IS_ERR(login->tgt_agt)) {
52258diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
52259index 58f49ff..2669604 100644
52260--- a/drivers/target/target_core_device.c
52261+++ b/drivers/target/target_core_device.c
52262@@ -1469,7 +1469,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
52263 spin_lock_init(&dev->se_tmr_lock);
52264 spin_lock_init(&dev->qf_cmd_lock);
52265 sema_init(&dev->caw_sem, 1);
52266- atomic_set(&dev->dev_ordered_id, 0);
52267+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
52268 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
52269 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
52270 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
52271diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
52272index 0adc0f6..7757bfe 100644
52273--- a/drivers/target/target_core_transport.c
52274+++ b/drivers/target/target_core_transport.c
52275@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
52276 * Used to determine when ORDERED commands should go from
52277 * Dormant to Active status.
52278 */
52279- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
52280+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
52281 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
52282 cmd->se_ordered_id, cmd->sam_task_attr,
52283 dev->transport->name);
52284diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
52285index 65a98a9..d93d3a8 100644
52286--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
52287+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
52288@@ -277,8 +277,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
52289 platform_set_drvdata(pdev, priv);
52290
52291 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
52292- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52293- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52294+ pax_open_kernel();
52295+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52296+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52297+ pax_close_kernel();
52298 }
52299 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
52300 priv, &int3400_thermal_ops,
52301diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
52302index d717f3d..cae1cc3e 100644
52303--- a/drivers/thermal/of-thermal.c
52304+++ b/drivers/thermal/of-thermal.c
52305@@ -31,6 +31,7 @@
52306 #include <linux/export.h>
52307 #include <linux/string.h>
52308 #include <linux/thermal.h>
52309+#include <linux/mm.h>
52310
52311 #include "thermal_core.h"
52312
52313@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
52314 tz->ops = ops;
52315 tz->sensor_data = data;
52316
52317- tzd->ops->get_temp = of_thermal_get_temp;
52318- tzd->ops->get_trend = of_thermal_get_trend;
52319- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52320+ pax_open_kernel();
52321+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
52322+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
52323+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52324+ pax_close_kernel();
52325 mutex_unlock(&tzd->lock);
52326
52327 return tzd;
52328@@ -541,9 +544,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
52329 return;
52330
52331 mutex_lock(&tzd->lock);
52332- tzd->ops->get_temp = NULL;
52333- tzd->ops->get_trend = NULL;
52334- tzd->ops->set_emul_temp = NULL;
52335+ pax_open_kernel();
52336+ *(void **)&tzd->ops->get_temp = NULL;
52337+ *(void **)&tzd->ops->get_trend = NULL;
52338+ *(void **)&tzd->ops->set_emul_temp = NULL;
52339+ pax_close_kernel();
52340
52341 tz->ops = NULL;
52342 tz->sensor_data = NULL;
52343diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
52344index fd66f57..48e6376 100644
52345--- a/drivers/tty/cyclades.c
52346+++ b/drivers/tty/cyclades.c
52347@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
52348 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
52349 info->port.count);
52350 #endif
52351- info->port.count++;
52352+ atomic_inc(&info->port.count);
52353 #ifdef CY_DEBUG_COUNT
52354 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
52355- current->pid, info->port.count);
52356+ current->pid, atomic_read(&info->port.count));
52357 #endif
52358
52359 /*
52360@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
52361 for (j = 0; j < cy_card[i].nports; j++) {
52362 info = &cy_card[i].ports[j];
52363
52364- if (info->port.count) {
52365+ if (atomic_read(&info->port.count)) {
52366 /* XXX is the ldisc num worth this? */
52367 struct tty_struct *tty;
52368 struct tty_ldisc *ld;
52369diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
52370index 4fcec1d..5a036f7 100644
52371--- a/drivers/tty/hvc/hvc_console.c
52372+++ b/drivers/tty/hvc/hvc_console.c
52373@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
52374
52375 spin_lock_irqsave(&hp->port.lock, flags);
52376 /* Check and then increment for fast path open. */
52377- if (hp->port.count++ > 0) {
52378+ if (atomic_inc_return(&hp->port.count) > 1) {
52379 spin_unlock_irqrestore(&hp->port.lock, flags);
52380 hvc_kick();
52381 return 0;
52382@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52383
52384 spin_lock_irqsave(&hp->port.lock, flags);
52385
52386- if (--hp->port.count == 0) {
52387+ if (atomic_dec_return(&hp->port.count) == 0) {
52388 spin_unlock_irqrestore(&hp->port.lock, flags);
52389 /* We are done with the tty pointer now. */
52390 tty_port_tty_set(&hp->port, NULL);
52391@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52392 */
52393 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
52394 } else {
52395- if (hp->port.count < 0)
52396+ if (atomic_read(&hp->port.count) < 0)
52397 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
52398- hp->vtermno, hp->port.count);
52399+ hp->vtermno, atomic_read(&hp->port.count));
52400 spin_unlock_irqrestore(&hp->port.lock, flags);
52401 }
52402 }
52403@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
52404 * open->hangup case this can be called after the final close so prevent
52405 * that from happening for now.
52406 */
52407- if (hp->port.count <= 0) {
52408+ if (atomic_read(&hp->port.count) <= 0) {
52409 spin_unlock_irqrestore(&hp->port.lock, flags);
52410 return;
52411 }
52412
52413- hp->port.count = 0;
52414+ atomic_set(&hp->port.count, 0);
52415 spin_unlock_irqrestore(&hp->port.lock, flags);
52416 tty_port_tty_set(&hp->port, NULL);
52417
52418@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
52419 return -EPIPE;
52420
52421 /* FIXME what's this (unprotected) check for? */
52422- if (hp->port.count <= 0)
52423+ if (atomic_read(&hp->port.count) <= 0)
52424 return -EIO;
52425
52426 spin_lock_irqsave(&hp->lock, flags);
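
A detail worth pausing on in the tty port.count conversions (cyclades above, hvc here, hvcs/hvsi/ipwireless below): "if (hp->port.count++ > 0)" tests the old value, while the replacement "if (atomic_inc_return(&hp->port.count) > 1)" gets the new value back, so the threshold shifts from 0 to 1 and behaviour is preserved. A toy check, with a plain int standing in for the atomic:

#include <stdio.h>

static int inc_return(int *v)
{
	return ++*v;	/* models atomic_inc_return(): yields new value */
}

int main(void)
{
	int count = 0;

	/* first open: new value 1, fast path not taken */
	printf("first:  %d\n", inc_return(&count) > 1);
	/* second open: new value 2, fast path taken */
	printf("second: %d\n", inc_return(&count) > 1);
	return 0;
}
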
52427diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
52428index 81ff7e1..dfb7b71 100644
52429--- a/drivers/tty/hvc/hvcs.c
52430+++ b/drivers/tty/hvc/hvcs.c
52431@@ -83,6 +83,7 @@
52432 #include <asm/hvcserver.h>
52433 #include <asm/uaccess.h>
52434 #include <asm/vio.h>
52435+#include <asm/local.h>
52436
52437 /*
52438 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
52439@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
52440
52441 spin_lock_irqsave(&hvcsd->lock, flags);
52442
52443- if (hvcsd->port.count > 0) {
52444+ if (atomic_read(&hvcsd->port.count) > 0) {
52445 spin_unlock_irqrestore(&hvcsd->lock, flags);
52446 printk(KERN_INFO "HVCS: vterm state unchanged. "
52447 "The hvcs device node is still in use.\n");
52448@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
52449 }
52450 }
52451
52452- hvcsd->port.count = 0;
52453+ atomic_set(&hvcsd->port.count, 0);
52454 hvcsd->port.tty = tty;
52455 tty->driver_data = hvcsd;
52456
52457@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
52458 unsigned long flags;
52459
52460 spin_lock_irqsave(&hvcsd->lock, flags);
52461- hvcsd->port.count++;
52462+ atomic_inc(&hvcsd->port.count);
52463 hvcsd->todo_mask |= HVCS_SCHED_READ;
52464 spin_unlock_irqrestore(&hvcsd->lock, flags);
52465
52466@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52467 hvcsd = tty->driver_data;
52468
52469 spin_lock_irqsave(&hvcsd->lock, flags);
52470- if (--hvcsd->port.count == 0) {
52471+ if (atomic_dec_and_test(&hvcsd->port.count)) {
52472
52473 vio_disable_interrupts(hvcsd->vdev);
52474
52475@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52476
52477 free_irq(irq, hvcsd);
52478 return;
52479- } else if (hvcsd->port.count < 0) {
52480+ } else if (atomic_read(&hvcsd->port.count) < 0) {
52481 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
52482 " is missmanaged.\n",
52483- hvcsd->vdev->unit_address, hvcsd->port.count);
52484+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
52485 }
52486
52487 spin_unlock_irqrestore(&hvcsd->lock, flags);
52488@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52489
52490 spin_lock_irqsave(&hvcsd->lock, flags);
52491 /* Preserve this so that we know how many kref refs to put */
52492- temp_open_count = hvcsd->port.count;
52493+ temp_open_count = atomic_read(&hvcsd->port.count);
52494
52495 /*
52496 * Don't kref put inside the spinlock because the destruction
52497@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52498 tty->driver_data = NULL;
52499 hvcsd->port.tty = NULL;
52500
52501- hvcsd->port.count = 0;
52502+ atomic_set(&hvcsd->port.count, 0);
52503
52504 /* This will drop any buffered data on the floor which is OK in a hangup
52505 * scenario. */
52506@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
52507 * the middle of a write operation? This is a crummy place to do this
52508 * but we want to keep it all in the spinlock.
52509 */
52510- if (hvcsd->port.count <= 0) {
52511+ if (atomic_read(&hvcsd->port.count) <= 0) {
52512 spin_unlock_irqrestore(&hvcsd->lock, flags);
52513 return -ENODEV;
52514 }
52515@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
52516 {
52517 struct hvcs_struct *hvcsd = tty->driver_data;
52518
52519- if (!hvcsd || hvcsd->port.count <= 0)
52520+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
52521 return 0;
52522
52523 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
52524diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
52525index 4190199..06d5bfa 100644
52526--- a/drivers/tty/hvc/hvsi.c
52527+++ b/drivers/tty/hvc/hvsi.c
52528@@ -85,7 +85,7 @@ struct hvsi_struct {
52529 int n_outbuf;
52530 uint32_t vtermno;
52531 uint32_t virq;
52532- atomic_t seqno; /* HVSI packet sequence number */
52533+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
52534 uint16_t mctrl;
52535 uint8_t state; /* HVSI protocol state */
52536 uint8_t flags;
52537@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
52538
52539 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
52540 packet.hdr.len = sizeof(struct hvsi_query_response);
52541- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52542+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52543 packet.verb = VSV_SEND_VERSION_NUMBER;
52544 packet.u.version = HVSI_VERSION;
52545 packet.query_seqno = query_seqno+1;
52546@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
52547
52548 packet.hdr.type = VS_QUERY_PACKET_HEADER;
52549 packet.hdr.len = sizeof(struct hvsi_query);
52550- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52551+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52552 packet.verb = verb;
52553
52554 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
52555@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
52556 int wrote;
52557
52558 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
52559- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52560+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52561 packet.hdr.len = sizeof(struct hvsi_control);
52562 packet.verb = VSV_SET_MODEM_CTL;
52563 packet.mask = HVSI_TSDTR;
52564@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
52565 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
52566
52567 packet.hdr.type = VS_DATA_PACKET_HEADER;
52568- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52569+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52570 packet.hdr.len = count + sizeof(struct hvsi_header);
52571 memcpy(&packet.data, buf, count);
52572
52573@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
52574 struct hvsi_control packet __ALIGNED__;
52575
52576 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
52577- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52578+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52579 packet.hdr.len = 6;
52580 packet.verb = VSV_CLOSE_PROTOCOL;
52581
52582@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
52583
52584 tty_port_tty_set(&hp->port, tty);
52585 spin_lock_irqsave(&hp->lock, flags);
52586- hp->port.count++;
52587+ atomic_inc(&hp->port.count);
52588 atomic_set(&hp->seqno, 0);
52589 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
52590 spin_unlock_irqrestore(&hp->lock, flags);
52591@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52592
52593 spin_lock_irqsave(&hp->lock, flags);
52594
52595- if (--hp->port.count == 0) {
52596+ if (atomic_dec_return(&hp->port.count) == 0) {
52597 tty_port_tty_set(&hp->port, NULL);
52598 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
52599
52600@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52601
52602 spin_lock_irqsave(&hp->lock, flags);
52603 }
52604- } else if (hp->port.count < 0)
52605+ } else if (atomic_read(&hp->port.count) < 0)
52606 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
52607- hp - hvsi_ports, hp->port.count);
52608+ hp - hvsi_ports, atomic_read(&hp->port.count));
52609
52610 spin_unlock_irqrestore(&hp->lock, flags);
52611 }
52612@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
52613 tty_port_tty_set(&hp->port, NULL);
52614
52615 spin_lock_irqsave(&hp->lock, flags);
52616- hp->port.count = 0;
52617+ atomic_set(&hp->port.count, 0);
52618 hp->n_outbuf = 0;
52619 spin_unlock_irqrestore(&hp->lock, flags);
52620 }
52621diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
52622index a270f04..7c77b5d 100644
52623--- a/drivers/tty/hvc/hvsi_lib.c
52624+++ b/drivers/tty/hvc/hvsi_lib.c
52625@@ -8,7 +8,7 @@
52626
52627 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
52628 {
52629- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
52630+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
52631
52632 /* Assumes that always succeeds, works in practice */
52633 return pv->put_chars(pv->termno, (char *)packet, packet->len);
52634@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
52635
52636 /* Reset state */
52637 pv->established = 0;
52638- atomic_set(&pv->seqno, 0);
52639+ atomic_set_unchecked(&pv->seqno, 0);
52640
52641 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
52642
52643diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
52644index 345cebb..d5a1e9e 100644
52645--- a/drivers/tty/ipwireless/tty.c
52646+++ b/drivers/tty/ipwireless/tty.c
52647@@ -28,6 +28,7 @@
52648 #include <linux/tty_driver.h>
52649 #include <linux/tty_flip.h>
52650 #include <linux/uaccess.h>
52651+#include <asm/local.h>
52652
52653 #include "tty.h"
52654 #include "network.h"
52655@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52656 return -ENODEV;
52657
52658 mutex_lock(&tty->ipw_tty_mutex);
52659- if (tty->port.count == 0)
52660+ if (atomic_read(&tty->port.count) == 0)
52661 tty->tx_bytes_queued = 0;
52662
52663- tty->port.count++;
52664+ atomic_inc(&tty->port.count);
52665
52666 tty->port.tty = linux_tty;
52667 linux_tty->driver_data = tty;
52668@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52669
52670 static void do_ipw_close(struct ipw_tty *tty)
52671 {
52672- tty->port.count--;
52673-
52674- if (tty->port.count == 0) {
52675+ if (atomic_dec_return(&tty->port.count) == 0) {
52676 struct tty_struct *linux_tty = tty->port.tty;
52677
52678 if (linux_tty != NULL) {
52679@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
52680 return;
52681
52682 mutex_lock(&tty->ipw_tty_mutex);
52683- if (tty->port.count == 0) {
52684+ if (atomic_read(&tty->port.count) == 0) {
52685 mutex_unlock(&tty->ipw_tty_mutex);
52686 return;
52687 }
52688@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
52689
52690 mutex_lock(&tty->ipw_tty_mutex);
52691
52692- if (!tty->port.count) {
52693+ if (!atomic_read(&tty->port.count)) {
52694 mutex_unlock(&tty->ipw_tty_mutex);
52695 return;
52696 }
52697@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
52698 return -ENODEV;
52699
52700 mutex_lock(&tty->ipw_tty_mutex);
52701- if (!tty->port.count) {
52702+ if (!atomic_read(&tty->port.count)) {
52703 mutex_unlock(&tty->ipw_tty_mutex);
52704 return -EINVAL;
52705 }
52706@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
52707 if (!tty)
52708 return -ENODEV;
52709
52710- if (!tty->port.count)
52711+ if (!atomic_read(&tty->port.count))
52712 return -EINVAL;
52713
52714 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
52715@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
52716 if (!tty)
52717 return 0;
52718
52719- if (!tty->port.count)
52720+ if (!atomic_read(&tty->port.count))
52721 return 0;
52722
52723 return tty->tx_bytes_queued;
52724@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
52725 if (!tty)
52726 return -ENODEV;
52727
52728- if (!tty->port.count)
52729+ if (!atomic_read(&tty->port.count))
52730 return -EINVAL;
52731
52732 return get_control_lines(tty);
52733@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
52734 if (!tty)
52735 return -ENODEV;
52736
52737- if (!tty->port.count)
52738+ if (!atomic_read(&tty->port.count))
52739 return -EINVAL;
52740
52741 return set_control_lines(tty, set, clear);
52742@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
52743 if (!tty)
52744 return -ENODEV;
52745
52746- if (!tty->port.count)
52747+ if (!atomic_read(&tty->port.count))
52748 return -EINVAL;
52749
52750 /* FIXME: Exactly how is the tty object locked here .. */
52751@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
52752 * are gone */
52753 mutex_lock(&ttyj->ipw_tty_mutex);
52754 }
52755- while (ttyj->port.count)
52756+ while (atomic_read(&ttyj->port.count))
52757 do_ipw_close(ttyj);
52758 ipwireless_disassociate_network_ttys(network,
52759 ttyj->channel_idx);
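/*
 * Editor's note: sketch of the open/close conversion repeated across the
 * tty drivers in this patch (hypothetical userspace model).  The old
 * "count--; if (count == 0)" pair becomes one atomic_dec_return(), which
 * both makes the decrement-and-test a single step and gives the PaX
 * REFCOUNT plugin a recognizable operation to instrument.
 */
#include <stdio.h>

typedef struct { volatile int v; } atomic_t;

static void atomic_inc(atomic_t *a)
{
	__atomic_add_fetch(&a->v, 1, __ATOMIC_SEQ_CST);
}

static int atomic_dec_return(atomic_t *a)
{
	return __atomic_sub_fetch(&a->v, 1, __ATOMIC_SEQ_CST);
}

static atomic_t port_count = { 0 };

static void do_close_model(void)
{
	/* decrement and test in one step, as in do_ipw_close() above */
	if (atomic_dec_return(&port_count) == 0)
		printf("last close: tear down the port\n");
}

int main(void)
{
	atomic_inc(&port_count);	/* first open  */
	atomic_inc(&port_count);	/* second open */
	do_close_model();		/* count 2 -> 1, nothing printed */
	do_close_model();		/* count 1 -> 0, teardown runs   */
	return 0;
}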
52760diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
52761index 14c54e0..1efd4f2 100644
52762--- a/drivers/tty/moxa.c
52763+++ b/drivers/tty/moxa.c
52764@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
52765 }
52766
52767 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
52768- ch->port.count++;
52769+ atomic_inc(&ch->port.count);
52770 tty->driver_data = ch;
52771 tty_port_tty_set(&ch->port, tty);
52772 mutex_lock(&ch->port.mutex);
52773diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
52774index c434376..114ce13 100644
52775--- a/drivers/tty/n_gsm.c
52776+++ b/drivers/tty/n_gsm.c
52777@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
52778 spin_lock_init(&dlci->lock);
52779 mutex_init(&dlci->mutex);
52780 dlci->fifo = &dlci->_fifo;
52781- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
52782+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
52783 kfree(dlci);
52784 return NULL;
52785 }
52786@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
52787 struct gsm_dlci *dlci = tty->driver_data;
52788 struct tty_port *port = &dlci->port;
52789
52790- port->count++;
52791+ atomic_inc(&port->count);
52792 tty_port_tty_set(port, tty);
52793
52794 dlci->modem_rx = 0;
52795diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
52796index 4ddfa60..1b7e112 100644
52797--- a/drivers/tty/n_tty.c
52798+++ b/drivers/tty/n_tty.c
52799@@ -115,7 +115,7 @@ struct n_tty_data {
52800 int minimum_to_wake;
52801
52802 /* consumer-published */
52803- size_t read_tail;
52804+ size_t read_tail __intentional_overflow(-1);
52805 size_t line_start;
52806
52807 /* protected by output lock */
52808@@ -2503,6 +2503,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
52809 {
52810 *ops = tty_ldisc_N_TTY;
52811 ops->owner = NULL;
52812- ops->refcount = ops->flags = 0;
52813+ atomic_set(&ops->refcount, 0);
52814+ ops->flags = 0;
52815 }
52816 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
52817diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
52818index 6e1f150..c3ba598 100644
52819--- a/drivers/tty/pty.c
52820+++ b/drivers/tty/pty.c
52821@@ -850,8 +850,10 @@ static void __init unix98_pty_init(void)
52822 panic("Couldn't register Unix98 pts driver");
52823
52824 /* Now create the /dev/ptmx special device */
52825+ pax_open_kernel();
52826 tty_default_fops(&ptmx_fops);
52827- ptmx_fops.open = ptmx_open;
52828+ *(void **)&ptmx_fops.open = ptmx_open;
52829+ pax_close_kernel();
52830
52831 cdev_init(&ptmx_cdev, &ptmx_fops);
52832 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
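/*
 * Editor's note: userspace model of the pax_open_kernel()/pax_close_kernel()
 * window used in the pty.c hunk above.  Under the patch, structures like
 * ptmx_fops are constified into read-only memory; the helpers briefly allow
 * kernel writes (toggling CR0.WP on x86), and the "*(void **)&..." cast
 * strips the plugin-added const.  The mprotect() mechanics below are only
 * an analogy for that, and every name is a stand-in.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct file_operations_model { int (*open)(void); };

static int ptmx_open_model(void) { return 42; }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	/* one read-only page standing in for the constified fops */
	struct file_operations_model *fops =
		mmap(NULL, pagesz, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (fops == MAP_FAILED)
		return 1;

	mprotect(fops, pagesz, PROT_READ | PROT_WRITE);	/* pax_open_kernel()  */
	fops->open = ptmx_open_model;
	mprotect(fops, pagesz, PROT_READ);		/* pax_close_kernel() */

	printf("open() -> %d\n", fops->open());
	return 0;
}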
52833diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
52834index 383c4c7..d408e21 100644
52835--- a/drivers/tty/rocket.c
52836+++ b/drivers/tty/rocket.c
52837@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
52838 tty->driver_data = info;
52839 tty_port_tty_set(port, tty);
52840
52841- if (port->count++ == 0) {
52842+ if (atomic_inc_return(&port->count) == 1) {
52843 atomic_inc(&rp_num_ports_open);
52844
52845 #ifdef ROCKET_DEBUG_OPEN
52846@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
52847 #endif
52848 }
52849 #ifdef ROCKET_DEBUG_OPEN
52850- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
52851+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
52852 #endif
52853
52854 /*
52855@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
52856 spin_unlock_irqrestore(&info->port.lock, flags);
52857 return;
52858 }
52859- if (info->port.count)
52860+ if (atomic_read(&info->port.count))
52861 atomic_dec(&rp_num_ports_open);
52862 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
52863 spin_unlock_irqrestore(&info->port.lock, flags);
52864diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
52865index aa28209..e08fb85 100644
52866--- a/drivers/tty/serial/ioc4_serial.c
52867+++ b/drivers/tty/serial/ioc4_serial.c
52868@@ -437,7 +437,7 @@ struct ioc4_soft {
52869 } is_intr_info[MAX_IOC4_INTR_ENTS];
52870
52871 /* Number of entries active in the above array */
52872- atomic_t is_num_intrs;
52873+ atomic_unchecked_t is_num_intrs;
52874 } is_intr_type[IOC4_NUM_INTR_TYPES];
52875
52876 /* is_ir_lock must be held while
52877@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
52878 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
52879 || (type == IOC4_OTHER_INTR_TYPE)));
52880
52881- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
52882+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
52883 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
52884
52885 /* Save off the lower level interrupt handler */
52886@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
52887
52888 soft = arg;
52889 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
52890- num_intrs = (int)atomic_read(
52891+ num_intrs = (int)atomic_read_unchecked(
52892 &soft->is_intr_type[intr_type].is_num_intrs);
52893
52894 this_mir = this_ir = pending_intrs(soft, intr_type);
52895diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
52896index 129dc5b..1da5bb8 100644
52897--- a/drivers/tty/serial/kgdb_nmi.c
52898+++ b/drivers/tty/serial/kgdb_nmi.c
52899@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
52900 * I/O utilities that messages sent to the console will automatically
52901 * be displayed on the dbg_io.
52902 */
52903- dbg_io_ops->is_console = true;
52904+ pax_open_kernel();
52905+ *(int *)&dbg_io_ops->is_console = true;
52906+ pax_close_kernel();
52907
52908 return 0;
52909 }
52910diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
52911index a260cde..6b2b5ce 100644
52912--- a/drivers/tty/serial/kgdboc.c
52913+++ b/drivers/tty/serial/kgdboc.c
52914@@ -24,8 +24,9 @@
52915 #define MAX_CONFIG_LEN 40
52916
52917 static struct kgdb_io kgdboc_io_ops;
52918+static struct kgdb_io kgdboc_io_ops_console;
52919
52920-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
52921+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
52922 static int configured = -1;
52923
52924 static char config[MAX_CONFIG_LEN];
52925@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
52926 kgdboc_unregister_kbd();
52927 if (configured == 1)
52928 kgdb_unregister_io_module(&kgdboc_io_ops);
52929+ else if (configured == 2)
52930+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
52931 }
52932
52933 static int configure_kgdboc(void)
52934@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
52935 int err;
52936 char *cptr = config;
52937 struct console *cons;
52938+ int is_console = 0;
52939
52940 err = kgdboc_option_setup(config);
52941 if (err || !strlen(config) || isspace(config[0]))
52942 goto noconfig;
52943
52944 err = -ENODEV;
52945- kgdboc_io_ops.is_console = 0;
52946 kgdb_tty_driver = NULL;
52947
52948 kgdboc_use_kms = 0;
52949@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
52950 int idx;
52951 if (cons->device && cons->device(cons, &idx) == p &&
52952 idx == tty_line) {
52953- kgdboc_io_ops.is_console = 1;
52954+ is_console = 1;
52955 break;
52956 }
52957 cons = cons->next;
52958@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
52959 kgdb_tty_line = tty_line;
52960
52961 do_register:
52962- err = kgdb_register_io_module(&kgdboc_io_ops);
52963+ if (is_console) {
52964+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
52965+ configured = 2;
52966+ } else {
52967+ err = kgdb_register_io_module(&kgdboc_io_ops);
52968+ configured = 1;
52969+ }
52970 if (err)
52971 goto noconfig;
52972
52973@@ -205,8 +214,6 @@ do_register:
52974 if (err)
52975 goto nmi_con_failed;
52976
52977- configured = 1;
52978-
52979 return 0;
52980
52981 nmi_con_failed:
52982@@ -223,7 +230,7 @@ noconfig:
52983 static int __init init_kgdboc(void)
52984 {
52985 /* Already configured? */
52986- if (configured == 1)
52987+ if (configured >= 1)
52988 return 0;
52989
52990 return configure_kgdboc();
52991@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
52992 if (config[len - 1] == '\n')
52993 config[len - 1] = '\0';
52994
52995- if (configured == 1)
52996+ if (configured >= 1)
52997 cleanup_kgdboc();
52998
52999 /* Go and configure with the new params. */
53000@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
53001 .post_exception = kgdboc_post_exp_handler,
53002 };
53003
53004+static struct kgdb_io kgdboc_io_ops_console = {
53005+ .name = "kgdboc",
53006+ .read_char = kgdboc_get_char,
53007+ .write_char = kgdboc_put_char,
53008+ .pre_exception = kgdboc_pre_exp_handler,
53009+ .post_exception = kgdboc_post_exp_handler,
53010+ .is_console = 1
53011+};
53012+
53013 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
53014 /* This is only available if kgdboc is a built in for early debugging */
53015 static int __init kgdboc_early_init(char *opt)
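/*
 * Editor's note: model of the kgdboc restructuring above.  Once kgdb_io is
 * read-only, "kgdboc_io_ops.is_console = 1" can no longer be patched in at
 * configure time, so the driver keeps two pre-built instances and picks one
 * when registering; "configured" grows a second value (2) to remember which
 * one to unregister.  Hypothetical userspace model:
 */
#include <stdio.h>

struct kgdb_io_model {
	const char *name;
	int is_console;
};

static const struct kgdb_io_model io_ops         = { "kgdboc", 0 };
static const struct kgdb_io_model io_ops_console = { "kgdboc", 1 };

/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured */
static int configured = -1;

static void register_io_model(int is_console)
{
	const struct kgdb_io_model *ops =
		is_console ? &io_ops_console : &io_ops;

	configured = is_console ? 2 : 1;
	printf("registered %s, is_console=%d, configured=%d\n",
	       ops->name, ops->is_console, configured);
}

int main(void)
{
	register_io_model(1);
	return 0;
}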
53016diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
53017index c88b522..e763029 100644
53018--- a/drivers/tty/serial/msm_serial.c
53019+++ b/drivers/tty/serial/msm_serial.c
53020@@ -1028,7 +1028,7 @@ static struct uart_driver msm_uart_driver = {
53021 .cons = MSM_CONSOLE,
53022 };
53023
53024-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
53025+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
53026
53027 static const struct of_device_id msm_uartdm_table[] = {
53028 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
53029@@ -1052,7 +1052,7 @@ static int msm_serial_probe(struct platform_device *pdev)
53030 line = pdev->id;
53031
53032 if (line < 0)
53033- line = atomic_inc_return(&msm_uart_next_id) - 1;
53034+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
53035
53036 if (unlikely(line < 0 || line >= UART_NR))
53037 return -ENXIO;
53038diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
53039index 107e807..d4a02fa 100644
53040--- a/drivers/tty/serial/samsung.c
53041+++ b/drivers/tty/serial/samsung.c
53042@@ -480,11 +480,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
53043 }
53044 }
53045
53046+static int s3c64xx_serial_startup(struct uart_port *port);
53047 static int s3c24xx_serial_startup(struct uart_port *port)
53048 {
53049 struct s3c24xx_uart_port *ourport = to_ourport(port);
53050 int ret;
53051
53052+ /* Startup sequence is different for s3c64xx and higher SoC's */
53053+ if (s3c24xx_serial_has_interrupt_mask(port))
53054+ return s3c64xx_serial_startup(port);
53055+
53056 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
53057 port, (unsigned long long)port->mapbase, port->membase);
53058
53059@@ -1169,10 +1174,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
53060 /* setup info for port */
53061 port->dev = &platdev->dev;
53062
53063- /* Startup sequence is different for s3c64xx and higher SoC's */
53064- if (s3c24xx_serial_has_interrupt_mask(port))
53065- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
53066-
53067 port->uartclk = 1;
53068
53069 if (cfg->uart_flags & UPF_CONS_FLOW) {
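/*
 * Editor's note: the samsung.c hunks above show the second constification
 * idiom in this patch.  The original code rewrote the shared ops table at
 * probe time ("s3c24xx_serial_ops.startup = s3c64xx_serial_startup"); with
 * the table read-only, the patch branches inside ->startup at call time
 * instead.  Hypothetical userspace model of that dispatch:
 */
#include <stdio.h>

static int has_interrupt_mask; /* stand-in for s3c24xx_serial_has_interrupt_mask() */

static int s3c64xx_startup_model(void)
{
	printf("s3c64xx startup path\n");
	return 0;
}

static int s3c24xx_startup_model(void)
{
	/* startup differs on s3c64xx and later SoCs: branch here rather
	 * than patching the const ops table at probe time */
	if (has_interrupt_mask)
		return s3c64xx_startup_model();
	printf("s3c24xx startup path\n");
	return 0;
}

int main(void)
{
	has_interrupt_mask = 1;
	return s3c24xx_startup_model();
}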
53070diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
53071index 984605b..e538330 100644
53072--- a/drivers/tty/serial/serial_core.c
53073+++ b/drivers/tty/serial/serial_core.c
53074@@ -1396,7 +1396,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53075 state = drv->state + tty->index;
53076 port = &state->port;
53077 spin_lock_irq(&port->lock);
53078- --port->count;
53079+ atomic_dec(&port->count);
53080 spin_unlock_irq(&port->lock);
53081 return;
53082 }
53083@@ -1406,7 +1406,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53084
53085 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
53086
53087- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
53088+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
53089 return;
53090
53091 /*
53092@@ -1530,7 +1530,7 @@ static void uart_hangup(struct tty_struct *tty)
53093 uart_flush_buffer(tty);
53094 uart_shutdown(tty, state);
53095 spin_lock_irqsave(&port->lock, flags);
53096- port->count = 0;
53097+ atomic_set(&port->count, 0);
53098 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
53099 spin_unlock_irqrestore(&port->lock, flags);
53100 tty_port_tty_set(port, NULL);
53101@@ -1617,7 +1617,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
53102 pr_debug("uart_open(%d) called\n", line);
53103
53104 spin_lock_irq(&port->lock);
53105- ++port->count;
53106+ atomic_inc(&port->count);
53107 spin_unlock_irq(&port->lock);
53108
53109 /*
53110diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
53111index b799170..87dafd5 100644
53112--- a/drivers/tty/synclink.c
53113+++ b/drivers/tty/synclink.c
53114@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53115
53116 if (debug_level >= DEBUG_LEVEL_INFO)
53117 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
53118- __FILE__,__LINE__, info->device_name, info->port.count);
53119+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53120
53121 if (tty_port_close_start(&info->port, tty, filp) == 0)
53122 goto cleanup;
53123@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53124 cleanup:
53125 if (debug_level >= DEBUG_LEVEL_INFO)
53126 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
53127- tty->driver->name, info->port.count);
53128+ tty->driver->name, atomic_read(&info->port.count));
53129
53130 } /* end of mgsl_close() */
53131
53132@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
53133
53134 mgsl_flush_buffer(tty);
53135 shutdown(info);
53136-
53137- info->port.count = 0;
53138+
53139+ atomic_set(&info->port.count, 0);
53140 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53141 info->port.tty = NULL;
53142
53143@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53144
53145 if (debug_level >= DEBUG_LEVEL_INFO)
53146 printk("%s(%d):block_til_ready before block on %s count=%d\n",
53147- __FILE__,__LINE__, tty->driver->name, port->count );
53148+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53149
53150 spin_lock_irqsave(&info->irq_spinlock, flags);
53151- port->count--;
53152+ atomic_dec(&port->count);
53153 spin_unlock_irqrestore(&info->irq_spinlock, flags);
53154 port->blocked_open++;
53155
53156@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53157
53158 if (debug_level >= DEBUG_LEVEL_INFO)
53159 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
53160- __FILE__,__LINE__, tty->driver->name, port->count );
53161+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53162
53163 tty_unlock(tty);
53164 schedule();
53165@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53166
53167 /* FIXME: Racy on hangup during close wait */
53168 if (!tty_hung_up_p(filp))
53169- port->count++;
53170+ atomic_inc(&port->count);
53171 port->blocked_open--;
53172
53173 if (debug_level >= DEBUG_LEVEL_INFO)
53174 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
53175- __FILE__,__LINE__, tty->driver->name, port->count );
53176+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53177
53178 if (!retval)
53179 port->flags |= ASYNC_NORMAL_ACTIVE;
53180@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53181
53182 if (debug_level >= DEBUG_LEVEL_INFO)
53183 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
53184- __FILE__,__LINE__,tty->driver->name, info->port.count);
53185+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53186
53187 /* If port is closing, signal caller to try again */
53188 if (info->port.flags & ASYNC_CLOSING){
53189@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53190 spin_unlock_irqrestore(&info->netlock, flags);
53191 goto cleanup;
53192 }
53193- info->port.count++;
53194+ atomic_inc(&info->port.count);
53195 spin_unlock_irqrestore(&info->netlock, flags);
53196
53197- if (info->port.count == 1) {
53198+ if (atomic_read(&info->port.count) == 1) {
53199 /* 1st open on this device, init hardware */
53200 retval = startup(info);
53201 if (retval < 0)
53202@@ -3442,8 +3442,8 @@ cleanup:
53203 if (retval) {
53204 if (tty->count == 1)
53205 info->port.tty = NULL; /* tty layer will release tty struct */
53206- if(info->port.count)
53207- info->port.count--;
53208+ if (atomic_read(&info->port.count))
53209+ atomic_dec(&info->port.count);
53210 }
53211
53212 return retval;
53213@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53214 unsigned short new_crctype;
53215
53216 /* return error if TTY interface open */
53217- if (info->port.count)
53218+ if (atomic_read(&info->port.count))
53219 return -EBUSY;
53220
53221 switch (encoding)
53222@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
53223
53224 /* arbitrate between network and tty opens */
53225 spin_lock_irqsave(&info->netlock, flags);
53226- if (info->port.count != 0 || info->netcount != 0) {
53227+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53228 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53229 spin_unlock_irqrestore(&info->netlock, flags);
53230 return -EBUSY;
53231@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53232 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53233
53234 /* return error if TTY interface open */
53235- if (info->port.count)
53236+ if (atomic_read(&info->port.count))
53237 return -EBUSY;
53238
53239 if (cmd != SIOCWANDEV)
53240diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
53241index 0e8c39b..e0cb171 100644
53242--- a/drivers/tty/synclink_gt.c
53243+++ b/drivers/tty/synclink_gt.c
53244@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53245 tty->driver_data = info;
53246 info->port.tty = tty;
53247
53248- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
53249+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
53250
53251 /* If port is closing, signal caller to try again */
53252 if (info->port.flags & ASYNC_CLOSING){
53253@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53254 mutex_unlock(&info->port.mutex);
53255 goto cleanup;
53256 }
53257- info->port.count++;
53258+ atomic_inc(&info->port.count);
53259 spin_unlock_irqrestore(&info->netlock, flags);
53260
53261- if (info->port.count == 1) {
53262+ if (atomic_read(&info->port.count) == 1) {
53263 /* 1st open on this device, init hardware */
53264 retval = startup(info);
53265 if (retval < 0) {
53266@@ -715,8 +715,8 @@ cleanup:
53267 if (retval) {
53268 if (tty->count == 1)
53269 info->port.tty = NULL; /* tty layer will release tty struct */
53270- if(info->port.count)
53271- info->port.count--;
53272+ if(atomic_read(&info->port.count))
53273+ atomic_dec(&info->port.count);
53274 }
53275
53276 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
53277@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53278
53279 if (sanity_check(info, tty->name, "close"))
53280 return;
53281- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
53282+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
53283
53284 if (tty_port_close_start(&info->port, tty, filp) == 0)
53285 goto cleanup;
53286@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53287 tty_port_close_end(&info->port, tty);
53288 info->port.tty = NULL;
53289 cleanup:
53290- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
53291+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
53292 }
53293
53294 static void hangup(struct tty_struct *tty)
53295@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
53296 shutdown(info);
53297
53298 spin_lock_irqsave(&info->port.lock, flags);
53299- info->port.count = 0;
53300+ atomic_set(&info->port.count, 0);
53301 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53302 info->port.tty = NULL;
53303 spin_unlock_irqrestore(&info->port.lock, flags);
53304@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53305 unsigned short new_crctype;
53306
53307 /* return error if TTY interface open */
53308- if (info->port.count)
53309+ if (atomic_read(&info->port.count))
53310 return -EBUSY;
53311
53312 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
53313@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
53314
53315 /* arbitrate between network and tty opens */
53316 spin_lock_irqsave(&info->netlock, flags);
53317- if (info->port.count != 0 || info->netcount != 0) {
53318+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53319 DBGINFO(("%s hdlc_open busy\n", dev->name));
53320 spin_unlock_irqrestore(&info->netlock, flags);
53321 return -EBUSY;
53322@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53323 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
53324
53325 /* return error if TTY interface open */
53326- if (info->port.count)
53327+ if (atomic_read(&info->port.count))
53328 return -EBUSY;
53329
53330 if (cmd != SIOCWANDEV)
53331@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
53332 if (port == NULL)
53333 continue;
53334 spin_lock(&port->lock);
53335- if ((port->port.count || port->netcount) &&
53336+ if ((atomic_read(&port->port.count) || port->netcount) &&
53337 port->pending_bh && !port->bh_running &&
53338 !port->bh_requested) {
53339 DBGISR(("%s bh queued\n", port->device_name));
53340@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53341 add_wait_queue(&port->open_wait, &wait);
53342
53343 spin_lock_irqsave(&info->lock, flags);
53344- port->count--;
53345+ atomic_dec(&port->count);
53346 spin_unlock_irqrestore(&info->lock, flags);
53347 port->blocked_open++;
53348
53349@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53350 remove_wait_queue(&port->open_wait, &wait);
53351
53352 if (!tty_hung_up_p(filp))
53353- port->count++;
53354+ atomic_inc(&port->count);
53355 port->blocked_open--;
53356
53357 if (!retval)
53358diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
53359index c3f9091..abe4601 100644
53360--- a/drivers/tty/synclinkmp.c
53361+++ b/drivers/tty/synclinkmp.c
53362@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53363
53364 if (debug_level >= DEBUG_LEVEL_INFO)
53365 printk("%s(%d):%s open(), old ref count = %d\n",
53366- __FILE__,__LINE__,tty->driver->name, info->port.count);
53367+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53368
53369 /* If port is closing, signal caller to try again */
53370 if (info->port.flags & ASYNC_CLOSING){
53371@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53372 spin_unlock_irqrestore(&info->netlock, flags);
53373 goto cleanup;
53374 }
53375- info->port.count++;
53376+ atomic_inc(&info->port.count);
53377 spin_unlock_irqrestore(&info->netlock, flags);
53378
53379- if (info->port.count == 1) {
53380+ if (atomic_read(&info->port.count) == 1) {
53381 /* 1st open on this device, init hardware */
53382 retval = startup(info);
53383 if (retval < 0)
53384@@ -796,8 +796,8 @@ cleanup:
53385 if (retval) {
53386 if (tty->count == 1)
53387 info->port.tty = NULL; /* tty layer will release tty struct */
53388- if(info->port.count)
53389- info->port.count--;
53390+ if(atomic_read(&info->port.count))
53391+ atomic_dec(&info->port.count);
53392 }
53393
53394 return retval;
53395@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53396
53397 if (debug_level >= DEBUG_LEVEL_INFO)
53398 printk("%s(%d):%s close() entry, count=%d\n",
53399- __FILE__,__LINE__, info->device_name, info->port.count);
53400+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53401
53402 if (tty_port_close_start(&info->port, tty, filp) == 0)
53403 goto cleanup;
53404@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53405 cleanup:
53406 if (debug_level >= DEBUG_LEVEL_INFO)
53407 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
53408- tty->driver->name, info->port.count);
53409+ tty->driver->name, atomic_read(&info->port.count));
53410 }
53411
53412 /* Called by tty_hangup() when a hangup is signaled.
53413@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
53414 shutdown(info);
53415
53416 spin_lock_irqsave(&info->port.lock, flags);
53417- info->port.count = 0;
53418+ atomic_set(&info->port.count, 0);
53419 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53420 info->port.tty = NULL;
53421 spin_unlock_irqrestore(&info->port.lock, flags);
53422@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53423 unsigned short new_crctype;
53424
53425 /* return error if TTY interface open */
53426- if (info->port.count)
53427+ if (atomic_read(&info->port.count))
53428 return -EBUSY;
53429
53430 switch (encoding)
53431@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
53432
53433 /* arbitrate between network and tty opens */
53434 spin_lock_irqsave(&info->netlock, flags);
53435- if (info->port.count != 0 || info->netcount != 0) {
53436+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53437 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53438 spin_unlock_irqrestore(&info->netlock, flags);
53439 return -EBUSY;
53440@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53441 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53442
53443 /* return error if TTY interface open */
53444- if (info->port.count)
53445+ if (atomic_read(&info->port.count))
53446 return -EBUSY;
53447
53448 if (cmd != SIOCWANDEV)
53449@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
53450 * do not request bottom half processing if the
53451 * device is not open in a normal mode.
53452 */
53453- if ( port && (port->port.count || port->netcount) &&
53454+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
53455 port->pending_bh && !port->bh_running &&
53456 !port->bh_requested ) {
53457 if ( debug_level >= DEBUG_LEVEL_ISR )
53458@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53459
53460 if (debug_level >= DEBUG_LEVEL_INFO)
53461 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
53462- __FILE__,__LINE__, tty->driver->name, port->count );
53463+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53464
53465 spin_lock_irqsave(&info->lock, flags);
53466- port->count--;
53467+ atomic_dec(&port->count);
53468 spin_unlock_irqrestore(&info->lock, flags);
53469 port->blocked_open++;
53470
53471@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53472
53473 if (debug_level >= DEBUG_LEVEL_INFO)
53474 printk("%s(%d):%s block_til_ready() count=%d\n",
53475- __FILE__,__LINE__, tty->driver->name, port->count );
53476+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53477
53478 tty_unlock(tty);
53479 schedule();
53480@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53481 set_current_state(TASK_RUNNING);
53482 remove_wait_queue(&port->open_wait, &wait);
53483 if (!tty_hung_up_p(filp))
53484- port->count++;
53485+ atomic_inc(&port->count);
53486 port->blocked_open--;
53487
53488 if (debug_level >= DEBUG_LEVEL_INFO)
53489 printk("%s(%d):%s block_til_ready() after, count=%d\n",
53490- __FILE__,__LINE__, tty->driver->name, port->count );
53491+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53492
53493 if (!retval)
53494 port->flags |= ASYNC_NORMAL_ACTIVE;
53495diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
53496index 42bad18..447d7a2 100644
53497--- a/drivers/tty/sysrq.c
53498+++ b/drivers/tty/sysrq.c
53499@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
53500 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
53501 size_t count, loff_t *ppos)
53502 {
53503- if (count) {
53504+ if (count && capable(CAP_SYS_ADMIN)) {
53505 char c;
53506
53507 if (get_user(c, buf))
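/*
 * Editor's note: the sysrq hunk above adds a privilege gate, so a write to
 * /proc/sysrq-trigger only fires the handler when the writer holds
 * CAP_SYS_ADMIN; the write still returns success either way, so probing
 * processes learn nothing.  Userspace model, with geteuid() standing in
 * (loosely) for the kernel's capable(CAP_SYS_ADMIN):
 */
#include <stdio.h>
#include <unistd.h>

static int capable_admin_model(void) { return geteuid() == 0; }

static long write_sysrq_trigger_model(const char *buf, long count)
{
	if (count && capable_admin_model())
		printf("sysrq '%c' handled\n", buf[0]);
	return count;	/* returned either way, as in the hunk above */
}

int main(void)
{
	write_sysrq_trigger_model("h", 1);
	return 0;
}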
53508diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
53509index 51f066a..a7f6e86 100644
53510--- a/drivers/tty/tty_io.c
53511+++ b/drivers/tty/tty_io.c
53512@@ -1028,8 +1028,8 @@ EXPORT_SYMBOL(start_tty);
53513 /* We limit tty time update visibility to every 8 seconds or so. */
53514 static void tty_update_time(struct timespec *time)
53515 {
53516- unsigned long sec = get_seconds() & ~7;
53517- if ((long)(sec - time->tv_sec) > 0)
53518+ unsigned long sec = get_seconds();
53519+ if (abs(sec - time->tv_sec) & ~7)
53520 time->tv_sec = sec;
53521 }
53522
53523@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
53524
53525 void tty_default_fops(struct file_operations *fops)
53526 {
53527- *fops = tty_fops;
53528+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
53529 }
53530
53531 /*
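/*
 * Editor's note: the tty_update_time() change above, unpacked.  The old
 * code rounded down to an 8-second grid and only ever moved the timestamp
 * forward; the new test "abs(sec - time->tv_sec) & ~7" is nonzero exactly
 * when the two values differ by 8 seconds or more in either direction, so
 * the ~8 s visibility limit is kept but a clock that stepped backwards now
 * also refreshes the timestamp.  Runnable check of that claim:
 */
#include <stdio.h>
#include <stdlib.h>

static int needs_update(long sec, long tv_sec)
{
	return (abs((int)(sec - tv_sec)) & ~7) != 0;
}

int main(void)
{
	printf("%d\n", needs_update(107, 100));	/*  7 s apart -> 0, skip   */
	printf("%d\n", needs_update(108, 100));	/*  8 s apart -> 1, update */
	printf("%d\n", needs_update(92, 100));	/* -8 s (backwards) -> 1   */
	return 0;
}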
53532diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
53533index 1787fa4..552076b 100644
53534--- a/drivers/tty/tty_ioctl.c
53535+++ b/drivers/tty/tty_ioctl.c
53536@@ -217,11 +217,17 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout)
53537 #endif
53538 if (!timeout)
53539 timeout = MAX_SCHEDULE_TIMEOUT;
53540+
53541 if (wait_event_interruptible_timeout(tty->write_wait,
53542- !tty_chars_in_buffer(tty), timeout) >= 0) {
53543- if (tty->ops->wait_until_sent)
53544- tty->ops->wait_until_sent(tty, timeout);
53545+ !tty_chars_in_buffer(tty), timeout) < 0) {
53546+ return;
53547 }
53548+
53549+ if (timeout == MAX_SCHEDULE_TIMEOUT)
53550+ timeout = 0;
53551+
53552+ if (tty->ops->wait_until_sent)
53553+ tty->ops->wait_until_sent(tty, timeout);
53554 }
53555 EXPORT_SYMBOL(tty_wait_until_sent);
53556
53557diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
53558index 3737f55..7cef448 100644
53559--- a/drivers/tty/tty_ldisc.c
53560+++ b/drivers/tty/tty_ldisc.c
53561@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
53562 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53563 tty_ldiscs[disc] = new_ldisc;
53564 new_ldisc->num = disc;
53565- new_ldisc->refcount = 0;
53566+ atomic_set(&new_ldisc->refcount, 0);
53567 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53568
53569 return ret;
53570@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
53571 return -EINVAL;
53572
53573 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53574- if (tty_ldiscs[disc]->refcount)
53575+ if (atomic_read(&tty_ldiscs[disc]->refcount))
53576 ret = -EBUSY;
53577 else
53578 tty_ldiscs[disc] = NULL;
53579@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
53580 if (ldops) {
53581 ret = ERR_PTR(-EAGAIN);
53582 if (try_module_get(ldops->owner)) {
53583- ldops->refcount++;
53584+ atomic_inc(&ldops->refcount);
53585 ret = ldops;
53586 }
53587 }
53588@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
53589 unsigned long flags;
53590
53591 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53592- ldops->refcount--;
53593+ atomic_dec(&ldops->refcount);
53594 module_put(ldops->owner);
53595 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53596 }
53597diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
53598index 40b31835..94d92ae 100644
53599--- a/drivers/tty/tty_port.c
53600+++ b/drivers/tty/tty_port.c
53601@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
53602 unsigned long flags;
53603
53604 spin_lock_irqsave(&port->lock, flags);
53605- port->count = 0;
53606+ atomic_set(&port->count, 0);
53607 port->flags &= ~ASYNC_NORMAL_ACTIVE;
53608 tty = port->tty;
53609 if (tty)
53610@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53611
53612 /* The port lock protects the port counts */
53613 spin_lock_irqsave(&port->lock, flags);
53614- port->count--;
53615+ atomic_dec(&port->count);
53616 port->blocked_open++;
53617 spin_unlock_irqrestore(&port->lock, flags);
53618
53619@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53620 we must not mess that up further */
53621 spin_lock_irqsave(&port->lock, flags);
53622 if (!tty_hung_up_p(filp))
53623- port->count++;
53624+ atomic_inc(&port->count);
53625 port->blocked_open--;
53626 if (retval == 0)
53627 port->flags |= ASYNC_NORMAL_ACTIVE;
53628@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
53629 return 0;
53630
53631 spin_lock_irqsave(&port->lock, flags);
53632- if (tty->count == 1 && port->count != 1) {
53633+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
53634 printk(KERN_WARNING
53635 "tty_port_close_start: tty->count = 1 port count = %d.\n",
53636- port->count);
53637- port->count = 1;
53638+ atomic_read(&port->count));
53639+ atomic_set(&port->count, 1);
53640 }
53641- if (--port->count < 0) {
53642+ if (atomic_dec_return(&port->count) < 0) {
53643 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
53644- port->count);
53645- port->count = 0;
53646+ atomic_read(&port->count));
53647+ atomic_set(&port->count, 0);
53648 }
53649
53650- if (port->count) {
53651+ if (atomic_read(&port->count)) {
53652 spin_unlock_irqrestore(&port->lock, flags);
53653 return 0;
53654 }
53655@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
53656 struct file *filp)
53657 {
53658 spin_lock_irq(&port->lock);
53659- ++port->count;
53660+ atomic_inc(&port->count);
53661 spin_unlock_irq(&port->lock);
53662 tty_port_tty_set(port, tty);
53663
53664diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
53665index 8a89f6e..50b32af 100644
53666--- a/drivers/tty/vt/keyboard.c
53667+++ b/drivers/tty/vt/keyboard.c
53668@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
53669 kbd->kbdmode == VC_OFF) &&
53670 value != KVAL(K_SAK))
53671 return; /* SAK is allowed even in raw mode */
53672+
53673+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53674+ {
53675+ void *func = fn_handler[value];
53676+ if (func == fn_show_state || func == fn_show_ptregs ||
53677+ func == fn_show_mem)
53678+ return;
53679+ }
53680+#endif
53681+
53682 fn_handler[value](vc);
53683 }
53684
53685@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53686 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
53687 return -EFAULT;
53688
53689- if (!capable(CAP_SYS_TTY_CONFIG))
53690- perm = 0;
53691-
53692 switch (cmd) {
53693 case KDGKBENT:
53694 /* Ensure another thread doesn't free it under us */
53695@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53696 spin_unlock_irqrestore(&kbd_event_lock, flags);
53697 return put_user(val, &user_kbe->kb_value);
53698 case KDSKBENT:
53699+ if (!capable(CAP_SYS_TTY_CONFIG))
53700+ perm = 0;
53701+
53702 if (!perm)
53703 return -EPERM;
53704 if (!i && v == K_NOSUCHMAP) {
53705@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53706 int i, j, k;
53707 int ret;
53708
53709- if (!capable(CAP_SYS_TTY_CONFIG))
53710- perm = 0;
53711-
53712 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
53713 if (!kbs) {
53714 ret = -ENOMEM;
53715@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53716 kfree(kbs);
53717 return ((p && *p) ? -EOVERFLOW : 0);
53718 case KDSKBSENT:
53719+ if (!capable(CAP_SYS_TTY_CONFIG))
53720+ perm = 0;
53721+
53722 if (!perm) {
53723 ret = -EPERM;
53724 goto reterr;
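/*
 * Editor's note: the keyboard.c hunks above narrow a privilege check.
 * Instead of degrading "perm" for every caller at function entry, the
 * CAP_SYS_TTY_CONFIG test now sits only in the KDSKBENT/KDSKBSENT (set)
 * branches, so unprivileged reads of the keymap keep working while writes
 * still require the capability.  Hypothetical userspace model of that
 * control flow:
 */
#include <stdio.h>

enum { KDGKBENT_M, KDSKBENT_M };

static int has_cap_sys_tty_config;	/* stand-in for capable() */

static int kdsk_ioctl_model(int cmd, int perm)
{
	switch (cmd) {
	case KDGKBENT_M:
		return 0;		/* reads need no capability */
	case KDSKBENT_M:
		if (!has_cap_sys_tty_config)
			perm = 0;	/* the moved check */
		if (!perm)
			return -1;	/* -EPERM in the kernel */
		return 0;
	}
	return -1;
}

int main(void)
{
	printf("get: %d\n", kdsk_ioctl_model(KDGKBENT_M, 0)); /*  0: allowed */
	printf("set: %d\n", kdsk_ioctl_model(KDSKBENT_M, 1)); /* -1: no cap  */
	return 0;
}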
53725diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
53726index 6276f13..84f2449 100644
53727--- a/drivers/uio/uio.c
53728+++ b/drivers/uio/uio.c
53729@@ -25,6 +25,7 @@
53730 #include <linux/kobject.h>
53731 #include <linux/cdev.h>
53732 #include <linux/uio_driver.h>
53733+#include <asm/local.h>
53734
53735 #define UIO_MAX_DEVICES (1U << MINORBITS)
53736
53737@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
53738 struct device_attribute *attr, char *buf)
53739 {
53740 struct uio_device *idev = dev_get_drvdata(dev);
53741- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
53742+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
53743 }
53744 static DEVICE_ATTR_RO(event);
53745
53746@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
53747 {
53748 struct uio_device *idev = info->uio_dev;
53749
53750- atomic_inc(&idev->event);
53751+ atomic_inc_unchecked(&idev->event);
53752 wake_up_interruptible(&idev->wait);
53753 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
53754 }
53755@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
53756 }
53757
53758 listener->dev = idev;
53759- listener->event_count = atomic_read(&idev->event);
53760+ listener->event_count = atomic_read_unchecked(&idev->event);
53761 filep->private_data = listener;
53762
53763 if (idev->info->open) {
53764@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
53765 return -EIO;
53766
53767 poll_wait(filep, &idev->wait, wait);
53768- if (listener->event_count != atomic_read(&idev->event))
53769+ if (listener->event_count != atomic_read_unchecked(&idev->event))
53770 return POLLIN | POLLRDNORM;
53771 return 0;
53772 }
53773@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
53774 do {
53775 set_current_state(TASK_INTERRUPTIBLE);
53776
53777- event_count = atomic_read(&idev->event);
53778+ event_count = atomic_read_unchecked(&idev->event);
53779 if (event_count != listener->event_count) {
53780 if (copy_to_user(buf, &event_count, count))
53781 retval = -EFAULT;
53782@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
53783 static int uio_find_mem_index(struct vm_area_struct *vma)
53784 {
53785 struct uio_device *idev = vma->vm_private_data;
53786+ unsigned long size;
53787
53788 if (vma->vm_pgoff < MAX_UIO_MAPS) {
53789- if (idev->info->mem[vma->vm_pgoff].size == 0)
53790+ size = idev->info->mem[vma->vm_pgoff].size;
53791+ if (size == 0)
53792+ return -1;
53793+ if (vma->vm_end - vma->vm_start > size)
53794 return -1;
53795 return (int)vma->vm_pgoff;
53796 }
53797@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
53798 idev->owner = owner;
53799 idev->info = info;
53800 init_waitqueue_head(&idev->wait);
53801- atomic_set(&idev->event, 0);
53802+ atomic_set_unchecked(&idev->event, 0);
53803
53804 ret = uio_get_minor(idev);
53805 if (ret)
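/*
 * Editor's note: the uio_find_mem_index() hunk above closes a missing
 * bounds check -- an mmap() of a UIO region must not span more bytes than
 * the region actually has, or userspace could reach adjacent memory.
 * Minimal model of the fixed lookup (all names are stand-ins):
 */
#include <stdio.h>

#define MAX_UIO_MAPS 5

struct mem_region_model { unsigned long size; };

static int find_mem_index_model(unsigned long pgoff, unsigned long vm_len,
				const struct mem_region_model *mem)
{
	if (pgoff < MAX_UIO_MAPS) {
		unsigned long size = mem[pgoff].size;

		if (size == 0)
			return -1;
		if (vm_len > size)	/* the added check */
			return -1;
		return (int)pgoff;
	}
	return -1;
}

int main(void)
{
	struct mem_region_model mem[MAX_UIO_MAPS] = { { 4096 } };

	printf("%d\n", find_mem_index_model(0, 4096, mem));	/*  0: fits    */
	printf("%d\n", find_mem_index_model(0, 8192, mem));	/* -1: too big */
	return 0;
}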
53806diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
53807index 813d4d3..a71934f 100644
53808--- a/drivers/usb/atm/cxacru.c
53809+++ b/drivers/usb/atm/cxacru.c
53810@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
53811 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
53812 if (ret < 2)
53813 return -EINVAL;
53814- if (index < 0 || index > 0x7f)
53815+ if (index > 0x7f)
53816 return -EINVAL;
53817 pos += tmp;
53818
53819diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
53820index dada014..1d0d517 100644
53821--- a/drivers/usb/atm/usbatm.c
53822+++ b/drivers/usb/atm/usbatm.c
53823@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53824 if (printk_ratelimit())
53825 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
53826 __func__, vpi, vci);
53827- atomic_inc(&vcc->stats->rx_err);
53828+ atomic_inc_unchecked(&vcc->stats->rx_err);
53829 return;
53830 }
53831
53832@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53833 if (length > ATM_MAX_AAL5_PDU) {
53834 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
53835 __func__, length, vcc);
53836- atomic_inc(&vcc->stats->rx_err);
53837+ atomic_inc_unchecked(&vcc->stats->rx_err);
53838 goto out;
53839 }
53840
53841@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53842 if (sarb->len < pdu_length) {
53843 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
53844 __func__, pdu_length, sarb->len, vcc);
53845- atomic_inc(&vcc->stats->rx_err);
53846+ atomic_inc_unchecked(&vcc->stats->rx_err);
53847 goto out;
53848 }
53849
53850 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
53851 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
53852 __func__, vcc);
53853- atomic_inc(&vcc->stats->rx_err);
53854+ atomic_inc_unchecked(&vcc->stats->rx_err);
53855 goto out;
53856 }
53857
53858@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53859 if (printk_ratelimit())
53860 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
53861 __func__, length);
53862- atomic_inc(&vcc->stats->rx_drop);
53863+ atomic_inc_unchecked(&vcc->stats->rx_drop);
53864 goto out;
53865 }
53866
53867@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53868
53869 vcc->push(vcc, skb);
53870
53871- atomic_inc(&vcc->stats->rx);
53872+ atomic_inc_unchecked(&vcc->stats->rx);
53873 out:
53874 skb_trim(sarb, 0);
53875 }
53876@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
53877 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
53878
53879 usbatm_pop(vcc, skb);
53880- atomic_inc(&vcc->stats->tx);
53881+ atomic_inc_unchecked(&vcc->stats->tx);
53882
53883 skb = skb_dequeue(&instance->sndqueue);
53884 }
53885@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
53886 if (!left--)
53887 return sprintf(page,
53888 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
53889- atomic_read(&atm_dev->stats.aal5.tx),
53890- atomic_read(&atm_dev->stats.aal5.tx_err),
53891- atomic_read(&atm_dev->stats.aal5.rx),
53892- atomic_read(&atm_dev->stats.aal5.rx_err),
53893- atomic_read(&atm_dev->stats.aal5.rx_drop));
53894+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
53895+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
53896+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
53897+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
53898+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
53899
53900 if (!left--) {
53901 if (instance->disconnected)
53902diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
53903index 2a3bbdf..91d72cf 100644
53904--- a/drivers/usb/core/devices.c
53905+++ b/drivers/usb/core/devices.c
53906@@ -126,7 +126,7 @@ static const char format_endpt[] =
53907 * time it gets called.
53908 */
53909 static struct device_connect_event {
53910- atomic_t count;
53911+ atomic_unchecked_t count;
53912 wait_queue_head_t wait;
53913 } device_event = {
53914 .count = ATOMIC_INIT(1),
53915@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
53916
53917 void usbfs_conn_disc_event(void)
53918 {
53919- atomic_add(2, &device_event.count);
53920+ atomic_add_unchecked(2, &device_event.count);
53921 wake_up(&device_event.wait);
53922 }
53923
53924@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
53925
53926 poll_wait(file, &device_event.wait, wait);
53927
53928- event_count = atomic_read(&device_event.count);
53929+ event_count = atomic_read_unchecked(&device_event.count);
53930 if (file->f_version != event_count) {
53931 file->f_version = event_count;
53932 return POLLIN | POLLRDNORM;
53933diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
53934index 0b59731..46ee7d1 100644
53935--- a/drivers/usb/core/devio.c
53936+++ b/drivers/usb/core/devio.c
53937@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
53938 struct usb_dev_state *ps = file->private_data;
53939 struct usb_device *dev = ps->dev;
53940 ssize_t ret = 0;
53941- unsigned len;
53942+ size_t len;
53943 loff_t pos;
53944 int i;
53945
53946@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
53947 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
53948 struct usb_config_descriptor *config =
53949 (struct usb_config_descriptor *)dev->rawdescriptors[i];
53950- unsigned int length = le16_to_cpu(config->wTotalLength);
53951+ size_t length = le16_to_cpu(config->wTotalLength);
53952
53953 if (*ppos < pos + length) {
53954
53955 /* The descriptor may claim to be longer than it
53956 * really is. Here is the actual allocated length. */
53957- unsigned alloclen =
53958+ size_t alloclen =
53959 le16_to_cpu(dev->config[i].desc.wTotalLength);
53960
53961- len = length - (*ppos - pos);
53962+ len = length + pos - *ppos;
53963 if (len > nbytes)
53964 len = nbytes;
53965
53966 /* Simply don't write (skip over) unallocated parts */
53967 if (alloclen > (*ppos - pos)) {
53968- alloclen -= (*ppos - pos);
53969+ alloclen = alloclen + pos - *ppos;
53970 if (copy_to_user(buf,
53971 dev->rawdescriptors[i] + (*ppos - pos),
53972 min(len, alloclen))) {
53973diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
53974index 45a915c..09f9735 100644
53975--- a/drivers/usb/core/hcd.c
53976+++ b/drivers/usb/core/hcd.c
53977@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
53978 */
53979 usb_get_urb(urb);
53980 atomic_inc(&urb->use_count);
53981- atomic_inc(&urb->dev->urbnum);
53982+ atomic_inc_unchecked(&urb->dev->urbnum);
53983 usbmon_urb_submit(&hcd->self, urb);
53984
53985 /* NOTE requirements on root-hub callers (usbfs and the hub
53986@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
53987 urb->hcpriv = NULL;
53988 INIT_LIST_HEAD(&urb->urb_list);
53989 atomic_dec(&urb->use_count);
53990- atomic_dec(&urb->dev->urbnum);
53991+ atomic_dec_unchecked(&urb->dev->urbnum);
53992 if (atomic_read(&urb->reject))
53993 wake_up(&usb_kill_urb_queue);
53994 usb_put_urb(urb);
53995diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
53996index b4bfa3a..008f926 100644
53997--- a/drivers/usb/core/hub.c
53998+++ b/drivers/usb/core/hub.c
53999@@ -26,6 +26,7 @@
54000 #include <linux/mutex.h>
54001 #include <linux/random.h>
54002 #include <linux/pm_qos.h>
54003+#include <linux/grsecurity.h>
54004
54005 #include <asm/uaccess.h>
54006 #include <asm/byteorder.h>
54007@@ -4664,6 +4665,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
54008 goto done;
54009 return;
54010 }
54011+
54012+ if (gr_handle_new_usb())
54013+ goto done;
54014+
54015 if (hub_is_superspeed(hub->hdev))
54016 unit_load = 150;
54017 else
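/*
 * Editor's note: gr_handle_new_usb(), called from hub_port_connect() above,
 * is grsecurity's GRKERNSEC_DENYUSB hook: once a sysctl latch is flipped,
 * newly connected USB devices are refused before enumeration.  Minimal
 * model of a one-way deny latch; the sysctl name and the latch mechanics
 * here are assumptions, not taken from this patch excerpt:
 */
#include <stdio.h>

static int deny_new_usb;	/* the latch, e.g. kernel.grsecurity.deny_new_usb */

static int gr_handle_new_usb_model(void)
{
	if (deny_new_usb) {
		printf("new USB device denied\n");
		return 1;	/* nonzero: caller jumps to done, device skipped */
	}
	return 0;
}

int main(void)
{
	deny_new_usb = 1;	/* latched by the admin */
	return gr_handle_new_usb_model() ? 0 : 1;
}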
54018diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
54019index f368d20..0c30ac5 100644
54020--- a/drivers/usb/core/message.c
54021+++ b/drivers/usb/core/message.c
54022@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
54023 * Return: If successful, the number of bytes transferred. Otherwise, a negative
54024 * error number.
54025 */
54026-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54027+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54028 __u8 requesttype, __u16 value, __u16 index, void *data,
54029 __u16 size, int timeout)
54030 {
54031@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
54032 * If successful, 0. Otherwise a negative error number. The number of actual
54033 * bytes transferred will be stored in the @actual_length parameter.
54034 */
54035-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54036+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54037 void *data, int len, int *actual_length, int timeout)
54038 {
54039 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
54040@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
54041 * bytes transferred will be stored in the @actual_length parameter.
54042 *
54043 */
54044-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54045+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54046 void *data, int len, int *actual_length, int timeout)
54047 {
54048 struct urb *urb;
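/*
 * Editor's note: __intentional_overflow(-1), applied above to
 * usb_control_msg() and friends (and earlier to n_tty's read_tail), is a
 * PaX size_overflow plugin annotation: it exempts the marked function or
 * field from overflow instrumentation because wraparound there is expected
 * and audited (-1 conventionally designating the return value).  Sketch of
 * how such a marker can degrade to a no-op when the plugin is absent -- an
 * assumption about the fallback, modeled on the kernel's compiler.h style:
 */
#include <stdio.h>

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)	/* plugin absent: expands to nothing */
#endif

static unsigned int __intentional_overflow(-1)
wrapping_sum(unsigned int a, unsigned int b)
{
	return a + b;	/* may wrap; deliberately left unchecked */
}

int main(void)
{
	printf("%u\n", wrapping_sum(4294967295u, 2u));	/* prints 1 */
	return 0;
}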
54049diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
54050index d269738..7340cd7 100644
54051--- a/drivers/usb/core/sysfs.c
54052+++ b/drivers/usb/core/sysfs.c
54053@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
54054 struct usb_device *udev;
54055
54056 udev = to_usb_device(dev);
54057- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
54058+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
54059 }
54060 static DEVICE_ATTR_RO(urbnum);
54061
54062diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
54063index b1fb9ae..4224885 100644
54064--- a/drivers/usb/core/usb.c
54065+++ b/drivers/usb/core/usb.c
54066@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
54067 set_dev_node(&dev->dev, dev_to_node(bus->controller));
54068 dev->state = USB_STATE_ATTACHED;
54069 dev->lpm_disable_count = 1;
54070- atomic_set(&dev->urbnum, 0);
54071+ atomic_set_unchecked(&dev->urbnum, 0);
54072
54073 INIT_LIST_HEAD(&dev->ep0.urb_list);
54074 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
54075diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
54076index 8cfc319..4868255 100644
54077--- a/drivers/usb/early/ehci-dbgp.c
54078+++ b/drivers/usb/early/ehci-dbgp.c
54079@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
54080
54081 #ifdef CONFIG_KGDB
54082 static struct kgdb_io kgdbdbgp_io_ops;
54083-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
54084+static struct kgdb_io kgdbdbgp_io_ops_console;
54085+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
54086 #else
54087 #define dbgp_kgdb_mode (0)
54088 #endif
54089@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
54090 .write_char = kgdbdbgp_write_char,
54091 };
54092
54093+static struct kgdb_io kgdbdbgp_io_ops_console = {
54094+ .name = "kgdbdbgp",
54095+ .read_char = kgdbdbgp_read_char,
54096+ .write_char = kgdbdbgp_write_char,
54097+ .is_console = 1
54098+};
54099+
54100 static int kgdbdbgp_wait_time;
54101
54102 static int __init kgdbdbgp_parse_config(char *str)
54103@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
54104 ptr++;
54105 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
54106 }
54107- kgdb_register_io_module(&kgdbdbgp_io_ops);
54108- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
54109+ if (early_dbgp_console.index != -1)
54110+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
54111+ else
54112+ kgdb_register_io_module(&kgdbdbgp_io_ops);
54113
54114 return 0;
54115 }
54116diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
54117index e971584..03495ab 100644
54118--- a/drivers/usb/gadget/function/f_uac1.c
54119+++ b/drivers/usb/gadget/function/f_uac1.c
54120@@ -14,6 +14,7 @@
54121 #include <linux/module.h>
54122 #include <linux/device.h>
54123 #include <linux/atomic.h>
54124+#include <linux/module.h>
54125
54126 #include "u_uac1.h"
54127
54128diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
54129index 491082a..dfd7d17 100644
54130--- a/drivers/usb/gadget/function/u_serial.c
54131+++ b/drivers/usb/gadget/function/u_serial.c
54132@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54133 spin_lock_irq(&port->port_lock);
54134
54135 /* already open? Great. */
54136- if (port->port.count) {
54137+ if (atomic_read(&port->port.count)) {
54138 status = 0;
54139- port->port.count++;
54140+ atomic_inc(&port->port.count);
54141
54142 /* currently opening/closing? wait ... */
54143 } else if (port->openclose) {
54144@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54145 tty->driver_data = port;
54146 port->port.tty = tty;
54147
54148- port->port.count = 1;
54149+ atomic_set(&port->port.count, 1);
54150 port->openclose = false;
54151
54152 /* if connected, start the I/O stream */
54153@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54154
54155 spin_lock_irq(&port->port_lock);
54156
54157- if (port->port.count != 1) {
54158- if (port->port.count == 0)
54159+ if (atomic_read(&port->port.count) != 1) {
54160+ if (atomic_read(&port->port.count) == 0)
54161 WARN_ON(1);
54162 else
54163- --port->port.count;
54164+ atomic_dec(&port->port.count);
54165 goto exit;
54166 }
54167
54168@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54169 * and sleep if necessary
54170 */
54171 port->openclose = true;
54172- port->port.count = 0;
54173+ atomic_set(&port->port.count, 0);
54174
54175 gser = port->port_usb;
54176 if (gser && gser->disconnect)
54177@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
54178 int cond;
54179
54180 spin_lock_irq(&port->port_lock);
54181- cond = (port->port.count == 0) && !port->openclose;
54182+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
54183 spin_unlock_irq(&port->port_lock);
54184 return cond;
54185 }
54186@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
54187 /* if it's already open, start I/O ... and notify the serial
54188 * protocol about open/close status (connect/disconnect).
54189 */
54190- if (port->port.count) {
54191+ if (atomic_read(&port->port.count)) {
54192 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
54193 gs_start_io(port);
54194 if (gser->connect)
54195@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
54196
54197 port->port_usb = NULL;
54198 gser->ioport = NULL;
54199- if (port->port.count > 0 || port->openclose) {
54200+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
54201 wake_up_interruptible(&port->drain_wait);
54202 if (port->port.tty)
54203 tty_hangup(port->port.tty);
54204@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
54205
54206 /* finally, free any unused/unusable I/O buffers */
54207 spin_lock_irqsave(&port->port_lock, flags);
54208- if (port->port.count == 0 && !port->openclose)
54209+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
54210 gs_buf_free(&port->port_write_buf);
54211 gs_free_requests(gser->out, &port->read_pool, NULL);
54212 gs_free_requests(gser->out, &port->read_queue, NULL);
54213diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
54214index 53842a1..2bef3b6 100644
54215--- a/drivers/usb/gadget/function/u_uac1.c
54216+++ b/drivers/usb/gadget/function/u_uac1.c
54217@@ -17,6 +17,7 @@
54218 #include <linux/ctype.h>
54219 #include <linux/random.h>
54220 #include <linux/syscalls.h>
54221+#include <linux/module.h>
54222
54223 #include "u_uac1.h"
54224
54225diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
54226index 118edb7..7a6415f 100644
54227--- a/drivers/usb/host/ehci-hub.c
54228+++ b/drivers/usb/host/ehci-hub.c
54229@@ -769,7 +769,7 @@ static struct urb *request_single_step_set_feature_urb(
54230 urb->transfer_flags = URB_DIR_IN;
54231 usb_get_urb(urb);
54232 atomic_inc(&urb->use_count);
54233- atomic_inc(&urb->dev->urbnum);
54234+ atomic_inc_unchecked(&urb->dev->urbnum);
54235 urb->setup_dma = dma_map_single(
54236 hcd->self.controller,
54237 urb->setup_packet,
54238@@ -836,7 +836,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
54239 urb->status = -EINPROGRESS;
54240 usb_get_urb(urb);
54241 atomic_inc(&urb->use_count);
54242- atomic_inc(&urb->dev->urbnum);
54243+ atomic_inc_unchecked(&urb->dev->urbnum);
54244 retval = submit_single_step_set_feature(hcd, urb, 0);
54245 if (!retval && !wait_for_completion_timeout(&done,
54246 msecs_to_jiffies(2000))) {
54247diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
54248index 1db0626..4948782 100644
54249--- a/drivers/usb/host/hwa-hc.c
54250+++ b/drivers/usb/host/hwa-hc.c
54251@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54252 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
54253 struct wahc *wa = &hwahc->wa;
54254 struct device *dev = &wa->usb_iface->dev;
54255- u8 mas_le[UWB_NUM_MAS/8];
54256+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
54257+
54258+ if (mas_le == NULL)
54259+ return -ENOMEM;
54260
54261 /* Set the stream index */
54262 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
54263@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54264 WUSB_REQ_SET_WUSB_MAS,
54265 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
54266 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
54267- mas_le, 32, USB_CTRL_SET_TIMEOUT);
54268+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
54269 if (result < 0)
54270 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
54271 out:
54272+ kfree(mas_le);
54273+
54274 return result;
54275 }
54276
54277diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
54278index b3d245e..99549ed 100644
54279--- a/drivers/usb/misc/appledisplay.c
54280+++ b/drivers/usb/misc/appledisplay.c
54281@@ -84,7 +84,7 @@ struct appledisplay {
54282 struct mutex sysfslock; /* concurrent read and write */
54283 };
54284
54285-static atomic_t count_displays = ATOMIC_INIT(0);
54286+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
54287 static struct workqueue_struct *wq;
54288
54289 static void appledisplay_complete(struct urb *urb)
54290@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
54291
54292 /* Register backlight device */
54293 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
54294- atomic_inc_return(&count_displays) - 1);
54295+ atomic_inc_return_unchecked(&count_displays) - 1);
54296 memset(&props, 0, sizeof(struct backlight_properties));
54297 props.type = BACKLIGHT_RAW;
54298 props.max_brightness = 0xff;
54299diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
54300index 29fa1c3..a57b08e 100644
54301--- a/drivers/usb/serial/console.c
54302+++ b/drivers/usb/serial/console.c
54303@@ -125,7 +125,7 @@ static int usb_console_setup(struct console *co, char *options)
54304
54305 info->port = port;
54306
54307- ++port->port.count;
54308+ atomic_inc(&port->port.count);
54309 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
54310 if (serial->type->set_termios) {
54311 /*
54312@@ -173,7 +173,7 @@ static int usb_console_setup(struct console *co, char *options)
54313 }
54314 /* Now that any required fake tty operations are completed restore
54315 * the tty port count */
54316- --port->port.count;
54317+ atomic_dec(&port->port.count);
54318 /* The console is special in terms of closing the device so
54319 * indicate this port is now acting as a system console. */
54320 port->port.console = 1;
54321@@ -186,7 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
54322 put_tty:
54323 tty_kref_put(tty);
54324 reset_open_count:
54325- port->port.count = 0;
54326+ atomic_set(&port->port.count, 0);
54327 usb_autopm_put_interface(serial->interface);
54328 error_get_interface:
54329 usb_serial_put(serial);
54330@@ -197,7 +197,7 @@ static int usb_console_setup(struct console *co, char *options)
54331 static void usb_console_write(struct console *co,
54332 const char *buf, unsigned count)
54333 {
54334- static struct usbcons_info *info = &usbcons_info;
54335+ struct usbcons_info *info = &usbcons_info;
54336 struct usb_serial_port *port = info->port;
54337 struct usb_serial *serial;
54338 int retval = -ENODEV;
54339diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
54340index ccf1df7..54e170d 100644
54341--- a/drivers/usb/serial/generic.c
54342+++ b/drivers/usb/serial/generic.c
54343@@ -258,7 +258,8 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
54344 * character or at least one jiffy.
54345 */
54346 period = max_t(unsigned long, (10 * HZ / bps), 1);
54347- period = min_t(unsigned long, period, timeout);
54348+ if (timeout)
54349+ period = min_t(unsigned long, period, timeout);
54350
54351 dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
54352 __func__, jiffies_to_msecs(timeout),
54353@@ -268,7 +269,7 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
54354 schedule_timeout_interruptible(period);
54355 if (signal_pending(current))
54356 break;
54357- if (time_after(jiffies, expire))
54358+ if (timeout && time_after(jiffies, expire))
54359 break;
54360 }
54361 }
54362diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
54363index 307e339..6aa97cb 100644
54364--- a/drivers/usb/storage/usb.h
54365+++ b/drivers/usb/storage/usb.h
54366@@ -63,7 +63,7 @@ struct us_unusual_dev {
54367 __u8 useProtocol;
54368 __u8 useTransport;
54369 int (*initFunction)(struct us_data *);
54370-};
54371+} __do_const;
54372
54373
54374 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
54375diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
54376index a863a98..d272795 100644
54377--- a/drivers/usb/usbip/vhci.h
54378+++ b/drivers/usb/usbip/vhci.h
54379@@ -83,7 +83,7 @@ struct vhci_hcd {
54380 unsigned resuming:1;
54381 unsigned long re_timeout;
54382
54383- atomic_t seqnum;
54384+ atomic_unchecked_t seqnum;
54385
54386 /*
54387 * NOTE:
54388diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
54389index 1ae9d40..c62604b 100644
54390--- a/drivers/usb/usbip/vhci_hcd.c
54391+++ b/drivers/usb/usbip/vhci_hcd.c
54392@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
54393
54394 spin_lock(&vdev->priv_lock);
54395
54396- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
54397+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54398 if (priv->seqnum == 0xffff)
54399 dev_info(&urb->dev->dev, "seqnum max\n");
54400
54401@@ -684,7 +684,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
54402 return -ENOMEM;
54403 }
54404
54405- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
54406+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54407 if (unlink->seqnum == 0xffff)
54408 pr_info("seqnum max\n");
54409
54410@@ -888,7 +888,7 @@ static int vhci_start(struct usb_hcd *hcd)
54411 vdev->rhport = rhport;
54412 }
54413
54414- atomic_set(&vhci->seqnum, 0);
54415+ atomic_set_unchecked(&vhci->seqnum, 0);
54416 spin_lock_init(&vhci->lock);
54417
54418 hcd->power_budget = 0; /* no limit */
54419diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
54420index 00e4a54..d676f85 100644
54421--- a/drivers/usb/usbip/vhci_rx.c
54422+++ b/drivers/usb/usbip/vhci_rx.c
54423@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
54424 if (!urb) {
54425 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
54426 pr_info("max seqnum %d\n",
54427- atomic_read(&the_controller->seqnum));
54428+ atomic_read_unchecked(&the_controller->seqnum));
54429 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
54430 return;
54431 }
54432diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
54433index edc7267..9f65ce2 100644
54434--- a/drivers/usb/wusbcore/wa-hc.h
54435+++ b/drivers/usb/wusbcore/wa-hc.h
54436@@ -240,7 +240,7 @@ struct wahc {
54437 spinlock_t xfer_list_lock;
54438 struct work_struct xfer_enqueue_work;
54439 struct work_struct xfer_error_work;
54440- atomic_t xfer_id_count;
54441+ atomic_unchecked_t xfer_id_count;
54442
54443 kernel_ulong_t quirks;
54444 };
54445@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
54446 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
54447 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
54448 wa->dto_in_use = 0;
54449- atomic_set(&wa->xfer_id_count, 1);
54450+ atomic_set_unchecked(&wa->xfer_id_count, 1);
54451 /* init the buf in URBs */
54452 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
54453 usb_init_urb(&(wa->buf_in_urbs[index]));
54454diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
54455index 69af4fd..da390d7 100644
54456--- a/drivers/usb/wusbcore/wa-xfer.c
54457+++ b/drivers/usb/wusbcore/wa-xfer.c
54458@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
54459 */
54460 static void wa_xfer_id_init(struct wa_xfer *xfer)
54461 {
54462- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
54463+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
54464 }
54465
54466 /* Return the xfer's ID. */
54467diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
54468index f018d8d..ccab63f 100644
54469--- a/drivers/vfio/vfio.c
54470+++ b/drivers/vfio/vfio.c
54471@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
54472 return 0;
54473
54474 /* TODO Prevent device auto probing */
54475- WARN("Device %s added to live group %d!\n", dev_name(dev),
54476+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
54477 iommu_group_id(group->iommu_group));
54478
54479 return 0;
54480diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
54481index 9484d56..d415d69 100644
54482--- a/drivers/vhost/net.c
54483+++ b/drivers/vhost/net.c
54484@@ -650,10 +650,8 @@ static void handle_rx(struct vhost_net *net)
54485 break;
54486 }
54487 /* TODO: Should check and handle checksum. */
54488-
54489- hdr.num_buffers = cpu_to_vhost16(vq, headcount);
54490 if (likely(mergeable) &&
54491- memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers,
54492+ memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
54493 offsetof(typeof(hdr), num_buffers),
54494 sizeof hdr.num_buffers)) {
54495 vq_err(vq, "Failed num_buffers write");
54496diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
54497index 3bb02c6..a01ff38 100644
54498--- a/drivers/vhost/vringh.c
54499+++ b/drivers/vhost/vringh.c
54500@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
54501 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
54502 {
54503 __virtio16 v = 0;
54504- int rc = get_user(v, (__force __virtio16 __user *)p);
54505+ int rc = get_user(v, (__force_user __virtio16 *)p);
54506 *val = vringh16_to_cpu(vrh, v);
54507 return rc;
54508 }
54509@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
54510 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
54511 {
54512 __virtio16 v = cpu_to_vringh16(vrh, val);
54513- return put_user(v, (__force __virtio16 __user *)p);
54514+ return put_user(v, (__force_user __virtio16 *)p);
54515 }
54516
54517 static inline int copydesc_user(void *dst, const void *src, size_t len)
54518 {
54519- return copy_from_user(dst, (__force void __user *)src, len) ?
54520+ return copy_from_user(dst, (void __force_user *)src, len) ?
54521 -EFAULT : 0;
54522 }
54523
54524@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
54525 const struct vring_used_elem *src,
54526 unsigned int num)
54527 {
54528- return copy_to_user((__force void __user *)dst, src,
54529+ return copy_to_user((void __force_user *)dst, src,
54530 sizeof(*dst) * num) ? -EFAULT : 0;
54531 }
54532
54533 static inline int xfer_from_user(void *src, void *dst, size_t len)
54534 {
54535- return copy_from_user(dst, (__force void __user *)src, len) ?
54536+ return copy_from_user(dst, (void __force_user *)src, len) ?
54537 -EFAULT : 0;
54538 }
54539
54540 static inline int xfer_to_user(void *dst, void *src, size_t len)
54541 {
54542- return copy_to_user((__force void __user *)dst, src, len) ?
54543+ return copy_to_user((void __force_user *)dst, src, len) ?
54544 -EFAULT : 0;
54545 }
54546
54547@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
54548 vrh->last_used_idx = 0;
54549 vrh->vring.num = num;
54550 /* vring expects kernel addresses, but only used via accessors. */
54551- vrh->vring.desc = (__force struct vring_desc *)desc;
54552- vrh->vring.avail = (__force struct vring_avail *)avail;
54553- vrh->vring.used = (__force struct vring_used *)used;
54554+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
54555+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
54556+ vrh->vring.used = (__force_kernel struct vring_used *)used;
54557 return 0;
54558 }
54559 EXPORT_SYMBOL(vringh_init_user);
54560@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
54561
54562 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
54563 {
54564- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
54565+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
54566 return 0;
54567 }
54568
54569diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
54570index 84a110a..96312c3 100644
54571--- a/drivers/video/backlight/kb3886_bl.c
54572+++ b/drivers/video/backlight/kb3886_bl.c
54573@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
54574 static unsigned long kb3886bl_flags;
54575 #define KB3886BL_SUSPENDED 0x01
54576
54577-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
54578+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
54579 {
54580 .ident = "Sahara Touch-iT",
54581 .matches = {
54582diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
54583index 1b0b233..6f34c2c 100644
54584--- a/drivers/video/fbdev/arcfb.c
54585+++ b/drivers/video/fbdev/arcfb.c
54586@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
54587 return -ENOSPC;
54588
54589 err = 0;
54590- if ((count + p) > fbmemlength) {
54591+ if (count > (fbmemlength - p)) {
54592 count = fbmemlength - p;
54593 err = -ENOSPC;
54594 }
54595diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
54596index aedf2fb..47c9aca 100644
54597--- a/drivers/video/fbdev/aty/aty128fb.c
54598+++ b/drivers/video/fbdev/aty/aty128fb.c
54599@@ -149,7 +149,7 @@ enum {
54600 };
54601
54602 /* Must match above enum */
54603-static char * const r128_family[] = {
54604+static const char * const r128_family[] = {
54605 "AGP",
54606 "PCI",
54607 "PRO AGP",
54608diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
54609index 37ec09b..98f8862 100644
54610--- a/drivers/video/fbdev/aty/atyfb_base.c
54611+++ b/drivers/video/fbdev/aty/atyfb_base.c
54612@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
54613 par->accel_flags = var->accel_flags; /* hack */
54614
54615 if (var->accel_flags) {
54616- info->fbops->fb_sync = atyfb_sync;
54617+ pax_open_kernel();
54618+ *(void **)&info->fbops->fb_sync = atyfb_sync;
54619+ pax_close_kernel();
54620 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54621 } else {
54622- info->fbops->fb_sync = NULL;
54623+ pax_open_kernel();
54624+ *(void **)&info->fbops->fb_sync = NULL;
54625+ pax_close_kernel();
54626 info->flags |= FBINFO_HWACCEL_DISABLED;
54627 }
54628
54629diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
54630index 2fa0317..4983f2a 100644
54631--- a/drivers/video/fbdev/aty/mach64_cursor.c
54632+++ b/drivers/video/fbdev/aty/mach64_cursor.c
54633@@ -8,6 +8,7 @@
54634 #include "../core/fb_draw.h"
54635
54636 #include <asm/io.h>
54637+#include <asm/pgtable.h>
54638
54639 #ifdef __sparc__
54640 #include <asm/fbio.h>
54641@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
54642 info->sprite.buf_align = 16; /* and 64 lines tall. */
54643 info->sprite.flags = FB_PIXMAP_IO;
54644
54645- info->fbops->fb_cursor = atyfb_cursor;
54646+ pax_open_kernel();
54647+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
54648+ pax_close_kernel();
54649
54650 return 0;
54651 }
54652diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
54653index d6cab1f..112f680 100644
54654--- a/drivers/video/fbdev/core/fb_defio.c
54655+++ b/drivers/video/fbdev/core/fb_defio.c
54656@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
54657
54658 BUG_ON(!fbdefio);
54659 mutex_init(&fbdefio->lock);
54660- info->fbops->fb_mmap = fb_deferred_io_mmap;
54661+ pax_open_kernel();
54662+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
54663+ pax_close_kernel();
54664 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
54665 INIT_LIST_HEAD(&fbdefio->pagelist);
54666 if (fbdefio->delay == 0) /* set a default of 1 s */
54667@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
54668 page->mapping = NULL;
54669 }
54670
54671- info->fbops->fb_mmap = NULL;
54672+ *(void **)&info->fbops->fb_mmap = NULL;
54673 mutex_destroy(&fbdefio->lock);
54674 }
54675 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
54676diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
54677index 0705d88..d9429bf 100644
54678--- a/drivers/video/fbdev/core/fbmem.c
54679+++ b/drivers/video/fbdev/core/fbmem.c
54680@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
54681 __u32 data;
54682 int err;
54683
54684- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
54685+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
54686
54687 data = (__u32) (unsigned long) fix->smem_start;
54688 err |= put_user(data, &fix32->smem_start);
54689diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
54690index 4254336..282567e 100644
54691--- a/drivers/video/fbdev/hyperv_fb.c
54692+++ b/drivers/video/fbdev/hyperv_fb.c
54693@@ -240,7 +240,7 @@ static uint screen_fb_size;
54694 static inline int synthvid_send(struct hv_device *hdev,
54695 struct synthvid_msg *msg)
54696 {
54697- static atomic64_t request_id = ATOMIC64_INIT(0);
54698+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
54699 int ret;
54700
54701 msg->pipe_hdr.type = PIPE_MSG_DATA;
54702@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
54703
54704 ret = vmbus_sendpacket(hdev->channel, msg,
54705 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
54706- atomic64_inc_return(&request_id),
54707+ atomic64_inc_return_unchecked(&request_id),
54708 VM_PKT_DATA_INBAND, 0);
54709
54710 if (ret)
54711diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
54712index 7672d2e..b56437f 100644
54713--- a/drivers/video/fbdev/i810/i810_accel.c
54714+++ b/drivers/video/fbdev/i810/i810_accel.c
54715@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
54716 }
54717 }
54718 printk("ringbuffer lockup!!!\n");
54719+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
54720 i810_report_error(mmio);
54721 par->dev_flags |= LOCKUP;
54722 info->pixmap.scan_align = 1;
54723diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54724index a01147f..5d896f8 100644
54725--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54726+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54727@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
54728
54729 #ifdef CONFIG_FB_MATROX_MYSTIQUE
54730 struct matrox_switch matrox_mystique = {
54731- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
54732+ .preinit = MGA1064_preinit,
54733+ .reset = MGA1064_reset,
54734+ .init = MGA1064_init,
54735+ .restore = MGA1064_restore,
54736 };
54737 EXPORT_SYMBOL(matrox_mystique);
54738 #endif
54739
54740 #ifdef CONFIG_FB_MATROX_G
54741 struct matrox_switch matrox_G100 = {
54742- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
54743+ .preinit = MGAG100_preinit,
54744+ .reset = MGAG100_reset,
54745+ .init = MGAG100_init,
54746+ .restore = MGAG100_restore,
54747 };
54748 EXPORT_SYMBOL(matrox_G100);
54749 #endif
54750diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54751index 195ad7c..09743fc 100644
54752--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54753+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54754@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
54755 }
54756
54757 struct matrox_switch matrox_millennium = {
54758- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
54759+ .preinit = Ti3026_preinit,
54760+ .reset = Ti3026_reset,
54761+ .init = Ti3026_init,
54762+ .restore = Ti3026_restore
54763 };
54764 EXPORT_SYMBOL(matrox_millennium);
54765 #endif
54766diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54767index fe92eed..106e085 100644
54768--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54769+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54770@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
54771 struct mb862xxfb_par *par = info->par;
54772
54773 if (info->var.bits_per_pixel == 32) {
54774- info->fbops->fb_fillrect = cfb_fillrect;
54775- info->fbops->fb_copyarea = cfb_copyarea;
54776- info->fbops->fb_imageblit = cfb_imageblit;
54777+ pax_open_kernel();
54778+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54779+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54780+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54781+ pax_close_kernel();
54782 } else {
54783 outreg(disp, GC_L0EM, 3);
54784- info->fbops->fb_fillrect = mb86290fb_fillrect;
54785- info->fbops->fb_copyarea = mb86290fb_copyarea;
54786- info->fbops->fb_imageblit = mb86290fb_imageblit;
54787+ pax_open_kernel();
54788+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
54789+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
54790+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
54791+ pax_close_kernel();
54792 }
54793 outreg(draw, GDC_REG_DRAW_BASE, 0);
54794 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
54795diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
54796index def0412..fed6529 100644
54797--- a/drivers/video/fbdev/nvidia/nvidia.c
54798+++ b/drivers/video/fbdev/nvidia/nvidia.c
54799@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
54800 info->fix.line_length = (info->var.xres_virtual *
54801 info->var.bits_per_pixel) >> 3;
54802 if (info->var.accel_flags) {
54803- info->fbops->fb_imageblit = nvidiafb_imageblit;
54804- info->fbops->fb_fillrect = nvidiafb_fillrect;
54805- info->fbops->fb_copyarea = nvidiafb_copyarea;
54806- info->fbops->fb_sync = nvidiafb_sync;
54807+ pax_open_kernel();
54808+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
54809+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
54810+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
54811+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
54812+ pax_close_kernel();
54813 info->pixmap.scan_align = 4;
54814 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54815 info->flags |= FBINFO_READS_FAST;
54816 NVResetGraphics(info);
54817 } else {
54818- info->fbops->fb_imageblit = cfb_imageblit;
54819- info->fbops->fb_fillrect = cfb_fillrect;
54820- info->fbops->fb_copyarea = cfb_copyarea;
54821- info->fbops->fb_sync = NULL;
54822+ pax_open_kernel();
54823+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54824+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54825+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54826+ *(void **)&info->fbops->fb_sync = NULL;
54827+ pax_close_kernel();
54828 info->pixmap.scan_align = 1;
54829 info->flags |= FBINFO_HWACCEL_DISABLED;
54830 info->flags &= ~FBINFO_READS_FAST;
54831@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
54832 info->pixmap.size = 8 * 1024;
54833 info->pixmap.flags = FB_PIXMAP_SYSTEM;
54834
54835- if (!hwcur)
54836- info->fbops->fb_cursor = NULL;
54837+ if (!hwcur) {
54838+ pax_open_kernel();
54839+ *(void **)&info->fbops->fb_cursor = NULL;
54840+ pax_close_kernel();
54841+ }
54842
54843 info->var.accel_flags = (!noaccel);
54844
54845diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
54846index 2412a0d..294215b 100644
54847--- a/drivers/video/fbdev/omap2/dss/display.c
54848+++ b/drivers/video/fbdev/omap2/dss/display.c
54849@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
54850 if (dssdev->name == NULL)
54851 dssdev->name = dssdev->alias;
54852
54853+ pax_open_kernel();
54854 if (drv && drv->get_resolution == NULL)
54855- drv->get_resolution = omapdss_default_get_resolution;
54856+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
54857 if (drv && drv->get_recommended_bpp == NULL)
54858- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54859+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54860 if (drv && drv->get_timings == NULL)
54861- drv->get_timings = omapdss_default_get_timings;
54862+ *(void **)&drv->get_timings = omapdss_default_get_timings;
54863+ pax_close_kernel();
54864
54865 mutex_lock(&panel_list_mutex);
54866 list_add_tail(&dssdev->panel_list, &panel_list);
54867diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
54868index 83433cb..71e9b98 100644
54869--- a/drivers/video/fbdev/s1d13xxxfb.c
54870+++ b/drivers/video/fbdev/s1d13xxxfb.c
54871@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
54872
54873 switch(prod_id) {
54874 case S1D13506_PROD_ID: /* activate acceleration */
54875- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54876- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54877+ pax_open_kernel();
54878+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54879+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54880+ pax_close_kernel();
54881 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
54882 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
54883 break;
54884diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
54885index d3013cd..95b8285 100644
54886--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
54887+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
54888@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
54889 }
54890
54891 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
54892- lcdc_sys_write_index,
54893- lcdc_sys_write_data,
54894- lcdc_sys_read_data,
54895+ .write_index = lcdc_sys_write_index,
54896+ .write_data = lcdc_sys_write_data,
54897+ .read_data = lcdc_sys_read_data,
54898 };
54899
54900 static int sh_mobile_lcdc_sginit(struct fb_info *info,
54901diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
54902index 9279e5f..d5f5276 100644
54903--- a/drivers/video/fbdev/smscufx.c
54904+++ b/drivers/video/fbdev/smscufx.c
54905@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
54906 fb_deferred_io_cleanup(info);
54907 kfree(info->fbdefio);
54908 info->fbdefio = NULL;
54909- info->fbops->fb_mmap = ufx_ops_mmap;
54910+ pax_open_kernel();
54911+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
54912+ pax_close_kernel();
54913 }
54914
54915 pr_debug("released /dev/fb%d user=%d count=%d",
54916diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
54917index ff2b873..626a8d5 100644
54918--- a/drivers/video/fbdev/udlfb.c
54919+++ b/drivers/video/fbdev/udlfb.c
54920@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
54921 dlfb_urb_completion(urb);
54922
54923 error:
54924- atomic_add(bytes_sent, &dev->bytes_sent);
54925- atomic_add(bytes_identical, &dev->bytes_identical);
54926- atomic_add(width*height*2, &dev->bytes_rendered);
54927+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54928+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54929+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
54930 end_cycles = get_cycles();
54931- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54932+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54933 >> 10)), /* Kcycles */
54934 &dev->cpu_kcycles_used);
54935
54936@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
54937 dlfb_urb_completion(urb);
54938
54939 error:
54940- atomic_add(bytes_sent, &dev->bytes_sent);
54941- atomic_add(bytes_identical, &dev->bytes_identical);
54942- atomic_add(bytes_rendered, &dev->bytes_rendered);
54943+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54944+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54945+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
54946 end_cycles = get_cycles();
54947- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54948+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54949 >> 10)), /* Kcycles */
54950 &dev->cpu_kcycles_used);
54951 }
54952@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
54953 fb_deferred_io_cleanup(info);
54954 kfree(info->fbdefio);
54955 info->fbdefio = NULL;
54956- info->fbops->fb_mmap = dlfb_ops_mmap;
54957+ pax_open_kernel();
54958+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
54959+ pax_close_kernel();
54960 }
54961
54962 pr_warn("released /dev/fb%d user=%d count=%d\n",
54963@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
54964 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54965 struct dlfb_data *dev = fb_info->par;
54966 return snprintf(buf, PAGE_SIZE, "%u\n",
54967- atomic_read(&dev->bytes_rendered));
54968+ atomic_read_unchecked(&dev->bytes_rendered));
54969 }
54970
54971 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
54972@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
54973 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54974 struct dlfb_data *dev = fb_info->par;
54975 return snprintf(buf, PAGE_SIZE, "%u\n",
54976- atomic_read(&dev->bytes_identical));
54977+ atomic_read_unchecked(&dev->bytes_identical));
54978 }
54979
54980 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
54981@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
54982 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54983 struct dlfb_data *dev = fb_info->par;
54984 return snprintf(buf, PAGE_SIZE, "%u\n",
54985- atomic_read(&dev->bytes_sent));
54986+ atomic_read_unchecked(&dev->bytes_sent));
54987 }
54988
54989 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
54990@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
54991 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54992 struct dlfb_data *dev = fb_info->par;
54993 return snprintf(buf, PAGE_SIZE, "%u\n",
54994- atomic_read(&dev->cpu_kcycles_used));
54995+ atomic_read_unchecked(&dev->cpu_kcycles_used));
54996 }
54997
54998 static ssize_t edid_show(
54999@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
55000 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55001 struct dlfb_data *dev = fb_info->par;
55002
55003- atomic_set(&dev->bytes_rendered, 0);
55004- atomic_set(&dev->bytes_identical, 0);
55005- atomic_set(&dev->bytes_sent, 0);
55006- atomic_set(&dev->cpu_kcycles_used, 0);
55007+ atomic_set_unchecked(&dev->bytes_rendered, 0);
55008+ atomic_set_unchecked(&dev->bytes_identical, 0);
55009+ atomic_set_unchecked(&dev->bytes_sent, 0);
55010+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
55011
55012 return count;
55013 }
55014diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
55015index d32d1c4..46722e6 100644
55016--- a/drivers/video/fbdev/uvesafb.c
55017+++ b/drivers/video/fbdev/uvesafb.c
55018@@ -19,6 +19,7 @@
55019 #include <linux/io.h>
55020 #include <linux/mutex.h>
55021 #include <linux/slab.h>
55022+#include <linux/moduleloader.h>
55023 #include <video/edid.h>
55024 #include <video/uvesafb.h>
55025 #ifdef CONFIG_X86
55026@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
55027 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
55028 par->pmi_setpal = par->ypan = 0;
55029 } else {
55030+
55031+#ifdef CONFIG_PAX_KERNEXEC
55032+#ifdef CONFIG_MODULES
55033+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
55034+#endif
55035+ if (!par->pmi_code) {
55036+ par->pmi_setpal = par->ypan = 0;
55037+ return 0;
55038+ }
55039+#endif
55040+
55041 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
55042 + task->t.regs.edi);
55043+
55044+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55045+ pax_open_kernel();
55046+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
55047+ pax_close_kernel();
55048+
55049+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
55050+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
55051+#else
55052 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
55053 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
55054+#endif
55055+
55056 printk(KERN_INFO "uvesafb: protected mode interface info at "
55057 "%04x:%04x\n",
55058 (u16)task->t.regs.es, (u16)task->t.regs.edi);
55059@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
55060 par->ypan = ypan;
55061
55062 if (par->pmi_setpal || par->ypan) {
55063+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
55064 if (__supported_pte_mask & _PAGE_NX) {
55065 par->pmi_setpal = par->ypan = 0;
55066 printk(KERN_WARNING "uvesafb: NX protection is active, "
55067 "better not use the PMI.\n");
55068- } else {
55069+ } else
55070+#endif
55071 uvesafb_vbe_getpmi(task, par);
55072- }
55073 }
55074 #else
55075 /* The protected mode interface is not available on non-x86. */
55076@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55077 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
55078
55079 /* Disable blanking if the user requested so. */
55080- if (!blank)
55081- info->fbops->fb_blank = NULL;
55082+ if (!blank) {
55083+ pax_open_kernel();
55084+ *(void **)&info->fbops->fb_blank = NULL;
55085+ pax_close_kernel();
55086+ }
55087
55088 /*
55089 * Find out how much IO memory is required for the mode with
55090@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55091 info->flags = FBINFO_FLAG_DEFAULT |
55092 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
55093
55094- if (!par->ypan)
55095- info->fbops->fb_pan_display = NULL;
55096+ if (!par->ypan) {
55097+ pax_open_kernel();
55098+ *(void **)&info->fbops->fb_pan_display = NULL;
55099+ pax_close_kernel();
55100+ }
55101 }
55102
55103 static void uvesafb_init_mtrr(struct fb_info *info)
55104@@ -1786,6 +1816,11 @@ out_mode:
55105 out:
55106 kfree(par->vbe_modes);
55107
55108+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55109+ if (par->pmi_code)
55110+ module_memfree_exec(par->pmi_code);
55111+#endif
55112+
55113 framebuffer_release(info);
55114 return err;
55115 }
55116@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
55117 kfree(par->vbe_state_orig);
55118 kfree(par->vbe_state_saved);
55119
55120+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55121+ if (par->pmi_code)
55122+ module_memfree_exec(par->pmi_code);
55123+#endif
55124+
55125 framebuffer_release(info);
55126 }
55127 return 0;
55128diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
55129index d79a0ac..2d0c3d4 100644
55130--- a/drivers/video/fbdev/vesafb.c
55131+++ b/drivers/video/fbdev/vesafb.c
55132@@ -9,6 +9,7 @@
55133 */
55134
55135 #include <linux/module.h>
55136+#include <linux/moduleloader.h>
55137 #include <linux/kernel.h>
55138 #include <linux/errno.h>
55139 #include <linux/string.h>
55140@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
55141 static int vram_total; /* Set total amount of memory */
55142 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
55143 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
55144-static void (*pmi_start)(void) __read_mostly;
55145-static void (*pmi_pal) (void) __read_mostly;
55146+static void (*pmi_start)(void) __read_only;
55147+static void (*pmi_pal) (void) __read_only;
55148 static int depth __read_mostly;
55149 static int vga_compat __read_mostly;
55150 /* --------------------------------------------------------------------- */
55151@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
55152 unsigned int size_remap;
55153 unsigned int size_total;
55154 char *option = NULL;
55155+ void *pmi_code = NULL;
55156
55157 /* ignore error return of fb_get_options */
55158 fb_get_options("vesafb", &option);
55159@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
55160 size_remap = size_total;
55161 vesafb_fix.smem_len = size_remap;
55162
55163-#ifndef __i386__
55164- screen_info.vesapm_seg = 0;
55165-#endif
55166-
55167 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
55168 printk(KERN_WARNING
55169 "vesafb: cannot reserve video memory at 0x%lx\n",
55170@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
55171 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
55172 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
55173
55174+#ifdef __i386__
55175+
55176+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55177+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
55178+ if (!pmi_code)
55179+#elif !defined(CONFIG_PAX_KERNEXEC)
55180+ if (0)
55181+#endif
55182+
55183+#endif
55184+ screen_info.vesapm_seg = 0;
55185+
55186 if (screen_info.vesapm_seg) {
55187- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
55188- screen_info.vesapm_seg,screen_info.vesapm_off);
55189+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
55190+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
55191 }
55192
55193 if (screen_info.vesapm_seg < 0xc000)
55194@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
55195
55196 if (ypan || pmi_setpal) {
55197 unsigned short *pmi_base;
55198+
55199 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
55200- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
55201- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
55202+
55203+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55204+ pax_open_kernel();
55205+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
55206+#else
55207+ pmi_code = pmi_base;
55208+#endif
55209+
55210+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
55211+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
55212+
55213+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55214+ pmi_start = ktva_ktla(pmi_start);
55215+ pmi_pal = ktva_ktla(pmi_pal);
55216+ pax_close_kernel();
55217+#endif
55218+
55219 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
55220 if (pmi_base[3]) {
55221 printk(KERN_INFO "vesafb: pmi: ports = ");
55222@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
55223 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
55224 (ypan ? FBINFO_HWACCEL_YPAN : 0);
55225
55226- if (!ypan)
55227- info->fbops->fb_pan_display = NULL;
55228+ if (!ypan) {
55229+ pax_open_kernel();
55230+ *(void **)&info->fbops->fb_pan_display = NULL;
55231+ pax_close_kernel();
55232+ }
55233
55234 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
55235 err = -ENOMEM;
55236@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
55237 fb_info(info, "%s frame buffer device\n", info->fix.id);
55238 return 0;
55239 err:
55240+
55241+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55242+ module_memfree_exec(pmi_code);
55243+#endif
55244+
55245 if (info->screen_base)
55246 iounmap(info->screen_base);
55247 framebuffer_release(info);
55248diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
55249index 88714ae..16c2e11 100644
55250--- a/drivers/video/fbdev/via/via_clock.h
55251+++ b/drivers/video/fbdev/via/via_clock.h
55252@@ -56,7 +56,7 @@ struct via_clock {
55253
55254 void (*set_engine_pll_state)(u8 state);
55255 void (*set_engine_pll)(struct via_pll_config config);
55256-};
55257+} __no_const;
55258
55259
55260 static inline u32 get_pll_internal_frequency(u32 ref_freq,
55261diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
55262index 3c14e43..2630570 100644
55263--- a/drivers/video/logo/logo_linux_clut224.ppm
55264+++ b/drivers/video/logo/logo_linux_clut224.ppm
55265@@ -2,1603 +2,1123 @@ P3
55266 # Standard 224-color Linux logo
55267 80 80
55268 255
55269- 0 0 0 0 0 0 0 0 0 0 0 0
55270- 0 0 0 0 0 0 0 0 0 0 0 0
55271- 0 0 0 0 0 0 0 0 0 0 0 0
55272- 0 0 0 0 0 0 0 0 0 0 0 0
55273- 0 0 0 0 0 0 0 0 0 0 0 0
55274- 0 0 0 0 0 0 0 0 0 0 0 0
55275- 0 0 0 0 0 0 0 0 0 0 0 0
55276- 0 0 0 0 0 0 0 0 0 0 0 0
55277- 0 0 0 0 0 0 0 0 0 0 0 0
55278- 6 6 6 6 6 6 10 10 10 10 10 10
55279- 10 10 10 6 6 6 6 6 6 6 6 6
55280- 0 0 0 0 0 0 0 0 0 0 0 0
55281- 0 0 0 0 0 0 0 0 0 0 0 0
55282- 0 0 0 0 0 0 0 0 0 0 0 0
55283- 0 0 0 0 0 0 0 0 0 0 0 0
55284- 0 0 0 0 0 0 0 0 0 0 0 0
55285- 0 0 0 0 0 0 0 0 0 0 0 0
55286- 0 0 0 0 0 0 0 0 0 0 0 0
55287- 0 0 0 0 0 0 0 0 0 0 0 0
55288- 0 0 0 0 0 0 0 0 0 0 0 0
55289- 0 0 0 0 0 0 0 0 0 0 0 0
55290- 0 0 0 0 0 0 0 0 0 0 0 0
55291- 0 0 0 0 0 0 0 0 0 0 0 0
55292- 0 0 0 0 0 0 0 0 0 0 0 0
55293- 0 0 0 0 0 0 0 0 0 0 0 0
55294- 0 0 0 0 0 0 0 0 0 0 0 0
55295- 0 0 0 0 0 0 0 0 0 0 0 0
55296- 0 0 0 0 0 0 0 0 0 0 0 0
55297- 0 0 0 6 6 6 10 10 10 14 14 14
55298- 22 22 22 26 26 26 30 30 30 34 34 34
55299- 30 30 30 30 30 30 26 26 26 18 18 18
55300- 14 14 14 10 10 10 6 6 6 0 0 0
55301- 0 0 0 0 0 0 0 0 0 0 0 0
55302- 0 0 0 0 0 0 0 0 0 0 0 0
55303- 0 0 0 0 0 0 0 0 0 0 0 0
55304- 0 0 0 0 0 0 0 0 0 0 0 0
55305- 0 0 0 0 0 0 0 0 0 0 0 0
55306- 0 0 0 0 0 0 0 0 0 0 0 0
55307- 0 0 0 0 0 0 0 0 0 0 0 0
55308- 0 0 0 0 0 0 0 0 0 0 0 0
55309- 0 0 0 0 0 0 0 0 0 0 0 0
55310- 0 0 0 0 0 1 0 0 1 0 0 0
55311- 0 0 0 0 0 0 0 0 0 0 0 0
55312- 0 0 0 0 0 0 0 0 0 0 0 0
55313- 0 0 0 0 0 0 0 0 0 0 0 0
55314- 0 0 0 0 0 0 0 0 0 0 0 0
55315- 0 0 0 0 0 0 0 0 0 0 0 0
55316- 0 0 0 0 0 0 0 0 0 0 0 0
55317- 6 6 6 14 14 14 26 26 26 42 42 42
55318- 54 54 54 66 66 66 78 78 78 78 78 78
55319- 78 78 78 74 74 74 66 66 66 54 54 54
55320- 42 42 42 26 26 26 18 18 18 10 10 10
55321- 6 6 6 0 0 0 0 0 0 0 0 0
55322- 0 0 0 0 0 0 0 0 0 0 0 0
55323- 0 0 0 0 0 0 0 0 0 0 0 0
55324- 0 0 0 0 0 0 0 0 0 0 0 0
55325- 0 0 0 0 0 0 0 0 0 0 0 0
55326- 0 0 0 0 0 0 0 0 0 0 0 0
55327- 0 0 0 0 0 0 0 0 0 0 0 0
55328- 0 0 0 0 0 0 0 0 0 0 0 0
55329- 0 0 0 0 0 0 0 0 0 0 0 0
55330- 0 0 1 0 0 0 0 0 0 0 0 0
55331- 0 0 0 0 0 0 0 0 0 0 0 0
55332- 0 0 0 0 0 0 0 0 0 0 0 0
55333- 0 0 0 0 0 0 0 0 0 0 0 0
55334- 0 0 0 0 0 0 0 0 0 0 0 0
55335- 0 0 0 0 0 0 0 0 0 0 0 0
55336- 0 0 0 0 0 0 0 0 0 10 10 10
55337- 22 22 22 42 42 42 66 66 66 86 86 86
55338- 66 66 66 38 38 38 38 38 38 22 22 22
55339- 26 26 26 34 34 34 54 54 54 66 66 66
55340- 86 86 86 70 70 70 46 46 46 26 26 26
55341- 14 14 14 6 6 6 0 0 0 0 0 0
55342- 0 0 0 0 0 0 0 0 0 0 0 0
55343- 0 0 0 0 0 0 0 0 0 0 0 0
55344- 0 0 0 0 0 0 0 0 0 0 0 0
55345- 0 0 0 0 0 0 0 0 0 0 0 0
55346- 0 0 0 0 0 0 0 0 0 0 0 0
55347- 0 0 0 0 0 0 0 0 0 0 0 0
55348- 0 0 0 0 0 0 0 0 0 0 0 0
55349- 0 0 0 0 0 0 0 0 0 0 0 0
55350- 0 0 1 0 0 1 0 0 1 0 0 0
55351- 0 0 0 0 0 0 0 0 0 0 0 0
55352- 0 0 0 0 0 0 0 0 0 0 0 0
55353- 0 0 0 0 0 0 0 0 0 0 0 0
55354- 0 0 0 0 0 0 0 0 0 0 0 0
55355- 0 0 0 0 0 0 0 0 0 0 0 0
55356- 0 0 0 0 0 0 10 10 10 26 26 26
55357- 50 50 50 82 82 82 58 58 58 6 6 6
55358- 2 2 6 2 2 6 2 2 6 2 2 6
55359- 2 2 6 2 2 6 2 2 6 2 2 6
55360- 6 6 6 54 54 54 86 86 86 66 66 66
55361- 38 38 38 18 18 18 6 6 6 0 0 0
55362- 0 0 0 0 0 0 0 0 0 0 0 0
55363- 0 0 0 0 0 0 0 0 0 0 0 0
55364- 0 0 0 0 0 0 0 0 0 0 0 0
55365- 0 0 0 0 0 0 0 0 0 0 0 0
55366- 0 0 0 0 0 0 0 0 0 0 0 0
55367- 0 0 0 0 0 0 0 0 0 0 0 0
55368- 0 0 0 0 0 0 0 0 0 0 0 0
55369- 0 0 0 0 0 0 0 0 0 0 0 0
55370- 0 0 0 0 0 0 0 0 0 0 0 0
55371- 0 0 0 0 0 0 0 0 0 0 0 0
55372- 0 0 0 0 0 0 0 0 0 0 0 0
55373- 0 0 0 0 0 0 0 0 0 0 0 0
55374- 0 0 0 0 0 0 0 0 0 0 0 0
55375- 0 0 0 0 0 0 0 0 0 0 0 0
55376- 0 0 0 6 6 6 22 22 22 50 50 50
55377- 78 78 78 34 34 34 2 2 6 2 2 6
55378- 2 2 6 2 2 6 2 2 6 2 2 6
55379- 2 2 6 2 2 6 2 2 6 2 2 6
55380- 2 2 6 2 2 6 6 6 6 70 70 70
55381- 78 78 78 46 46 46 22 22 22 6 6 6
55382- 0 0 0 0 0 0 0 0 0 0 0 0
55383- 0 0 0 0 0 0 0 0 0 0 0 0
55384- 0 0 0 0 0 0 0 0 0 0 0 0
55385- 0 0 0 0 0 0 0 0 0 0 0 0
55386- 0 0 0 0 0 0 0 0 0 0 0 0
55387- 0 0 0 0 0 0 0 0 0 0 0 0
55388- 0 0 0 0 0 0 0 0 0 0 0 0
55389- 0 0 0 0 0 0 0 0 0 0 0 0
55390- 0 0 1 0 0 1 0 0 1 0 0 0
55391- 0 0 0 0 0 0 0 0 0 0 0 0
55392- 0 0 0 0 0 0 0 0 0 0 0 0
55393- 0 0 0 0 0 0 0 0 0 0 0 0
55394- 0 0 0 0 0 0 0 0 0 0 0 0
55395- 0 0 0 0 0 0 0 0 0 0 0 0
55396- 6 6 6 18 18 18 42 42 42 82 82 82
55397- 26 26 26 2 2 6 2 2 6 2 2 6
55398- 2 2 6 2 2 6 2 2 6 2 2 6
55399- 2 2 6 2 2 6 2 2 6 14 14 14
55400- 46 46 46 34 34 34 6 6 6 2 2 6
55401- 42 42 42 78 78 78 42 42 42 18 18 18
55402- 6 6 6 0 0 0 0 0 0 0 0 0
55403- 0 0 0 0 0 0 0 0 0 0 0 0
55404- 0 0 0 0 0 0 0 0 0 0 0 0
55405- 0 0 0 0 0 0 0 0 0 0 0 0
55406- 0 0 0 0 0 0 0 0 0 0 0 0
55407- 0 0 0 0 0 0 0 0 0 0 0 0
55408- 0 0 0 0 0 0 0 0 0 0 0 0
55409- 0 0 0 0 0 0 0 0 0 0 0 0
55410- 0 0 1 0 0 0 0 0 1 0 0 0
55411- 0 0 0 0 0 0 0 0 0 0 0 0
55412- 0 0 0 0 0 0 0 0 0 0 0 0
55413- 0 0 0 0 0 0 0 0 0 0 0 0
55414- 0 0 0 0 0 0 0 0 0 0 0 0
55415- 0 0 0 0 0 0 0 0 0 0 0 0
55416- 10 10 10 30 30 30 66 66 66 58 58 58
55417- 2 2 6 2 2 6 2 2 6 2 2 6
55418- 2 2 6 2 2 6 2 2 6 2 2 6
55419- 2 2 6 2 2 6 2 2 6 26 26 26
55420- 86 86 86 101 101 101 46 46 46 10 10 10
55421- 2 2 6 58 58 58 70 70 70 34 34 34
55422- 10 10 10 0 0 0 0 0 0 0 0 0
55423- 0 0 0 0 0 0 0 0 0 0 0 0
55424- 0 0 0 0 0 0 0 0 0 0 0 0
55425- 0 0 0 0 0 0 0 0 0 0 0 0
55426- 0 0 0 0 0 0 0 0 0 0 0 0
55427- 0 0 0 0 0 0 0 0 0 0 0 0
55428- 0 0 0 0 0 0 0 0 0 0 0 0
55429- 0 0 0 0 0 0 0 0 0 0 0 0
55430- 0 0 1 0 0 1 0 0 1 0 0 0
55431- 0 0 0 0 0 0 0 0 0 0 0 0
55432- 0 0 0 0 0 0 0 0 0 0 0 0
55433- 0 0 0 0 0 0 0 0 0 0 0 0
55434- 0 0 0 0 0 0 0 0 0 0 0 0
55435- 0 0 0 0 0 0 0 0 0 0 0 0
55436- 14 14 14 42 42 42 86 86 86 10 10 10
55437- 2 2 6 2 2 6 2 2 6 2 2 6
55438- 2 2 6 2 2 6 2 2 6 2 2 6
55439- 2 2 6 2 2 6 2 2 6 30 30 30
55440- 94 94 94 94 94 94 58 58 58 26 26 26
55441- 2 2 6 6 6 6 78 78 78 54 54 54
55442- 22 22 22 6 6 6 0 0 0 0 0 0
55443- 0 0 0 0 0 0 0 0 0 0 0 0
55444- 0 0 0 0 0 0 0 0 0 0 0 0
55445- 0 0 0 0 0 0 0 0 0 0 0 0
55446- 0 0 0 0 0 0 0 0 0 0 0 0
55447- 0 0 0 0 0 0 0 0 0 0 0 0
55448- 0 0 0 0 0 0 0 0 0 0 0 0
55449- 0 0 0 0 0 0 0 0 0 0 0 0
55450- 0 0 0 0 0 0 0 0 0 0 0 0
55451- 0 0 0 0 0 0 0 0 0 0 0 0
55452- 0 0 0 0 0 0 0 0 0 0 0 0
55453- 0 0 0 0 0 0 0 0 0 0 0 0
55454- 0 0 0 0 0 0 0 0 0 0 0 0
55455- 0 0 0 0 0 0 0 0 0 6 6 6
55456- 22 22 22 62 62 62 62 62 62 2 2 6
55457- 2 2 6 2 2 6 2 2 6 2 2 6
55458- 2 2 6 2 2 6 2 2 6 2 2 6
55459- 2 2 6 2 2 6 2 2 6 26 26 26
55460- 54 54 54 38 38 38 18 18 18 10 10 10
55461- 2 2 6 2 2 6 34 34 34 82 82 82
55462- 38 38 38 14 14 14 0 0 0 0 0 0
55463- 0 0 0 0 0 0 0 0 0 0 0 0
55464- 0 0 0 0 0 0 0 0 0 0 0 0
55465- 0 0 0 0 0 0 0 0 0 0 0 0
55466- 0 0 0 0 0 0 0 0 0 0 0 0
55467- 0 0 0 0 0 0 0 0 0 0 0 0
55468- 0 0 0 0 0 0 0 0 0 0 0 0
55469- 0 0 0 0 0 0 0 0 0 0 0 0
55470- 0 0 0 0 0 1 0 0 1 0 0 0
55471- 0 0 0 0 0 0 0 0 0 0 0 0
55472- 0 0 0 0 0 0 0 0 0 0 0 0
55473- 0 0 0 0 0 0 0 0 0 0 0 0
55474- 0 0 0 0 0 0 0 0 0 0 0 0
55475- 0 0 0 0 0 0 0 0 0 6 6 6
55476- 30 30 30 78 78 78 30 30 30 2 2 6
55477- 2 2 6 2 2 6 2 2 6 2 2 6
55478- 2 2 6 2 2 6 2 2 6 2 2 6
55479- 2 2 6 2 2 6 2 2 6 10 10 10
55480- 10 10 10 2 2 6 2 2 6 2 2 6
55481- 2 2 6 2 2 6 2 2 6 78 78 78
55482- 50 50 50 18 18 18 6 6 6 0 0 0
55483- 0 0 0 0 0 0 0 0 0 0 0 0
55484- 0 0 0 0 0 0 0 0 0 0 0 0
55485- 0 0 0 0 0 0 0 0 0 0 0 0
55486- 0 0 0 0 0 0 0 0 0 0 0 0
55487- 0 0 0 0 0 0 0 0 0 0 0 0
55488- 0 0 0 0 0 0 0 0 0 0 0 0
55489- 0 0 0 0 0 0 0 0 0 0 0 0
55490- 0 0 1 0 0 0 0 0 0 0 0 0
55491- 0 0 0 0 0 0 0 0 0 0 0 0
55492- 0 0 0 0 0 0 0 0 0 0 0 0
55493- 0 0 0 0 0 0 0 0 0 0 0 0
55494- 0 0 0 0 0 0 0 0 0 0 0 0
55495- 0 0 0 0 0 0 0 0 0 10 10 10
55496- 38 38 38 86 86 86 14 14 14 2 2 6
55497- 2 2 6 2 2 6 2 2 6 2 2 6
55498- 2 2 6 2 2 6 2 2 6 2 2 6
55499- 2 2 6 2 2 6 2 2 6 2 2 6
55500- 2 2 6 2 2 6 2 2 6 2 2 6
55501- 2 2 6 2 2 6 2 2 6 54 54 54
55502- 66 66 66 26 26 26 6 6 6 0 0 0
55503- 0 0 0 0 0 0 0 0 0 0 0 0
55504- 0 0 0 0 0 0 0 0 0 0 0 0
55505- 0 0 0 0 0 0 0 0 0 0 0 0
55506- 0 0 0 0 0 0 0 0 0 0 0 0
55507- 0 0 0 0 0 0 0 0 0 0 0 0
55508- 0 0 0 0 0 0 0 0 0 0 0 0
55509- 0 0 0 0 0 0 0 0 0 0 0 0
55510- 0 0 0 0 0 1 0 0 1 0 0 0
55511- 0 0 0 0 0 0 0 0 0 0 0 0
55512- 0 0 0 0 0 0 0 0 0 0 0 0
55513- 0 0 0 0 0 0 0 0 0 0 0 0
55514- 0 0 0 0 0 0 0 0 0 0 0 0
55515- 0 0 0 0 0 0 0 0 0 14 14 14
55516- 42 42 42 82 82 82 2 2 6 2 2 6
55517- 2 2 6 6 6 6 10 10 10 2 2 6
55518- 2 2 6 2 2 6 2 2 6 2 2 6
55519- 2 2 6 2 2 6 2 2 6 6 6 6
55520- 14 14 14 10 10 10 2 2 6 2 2 6
55521- 2 2 6 2 2 6 2 2 6 18 18 18
55522- 82 82 82 34 34 34 10 10 10 0 0 0
55523- 0 0 0 0 0 0 0 0 0 0 0 0
55524- 0 0 0 0 0 0 0 0 0 0 0 0
55525- 0 0 0 0 0 0 0 0 0 0 0 0
55526- 0 0 0 0 0 0 0 0 0 0 0 0
55527- 0 0 0 0 0 0 0 0 0 0 0 0
55528- 0 0 0 0 0 0 0 0 0 0 0 0
55529- 0 0 0 0 0 0 0 0 0 0 0 0
55530- 0 0 1 0 0 0 0 0 0 0 0 0
55531- 0 0 0 0 0 0 0 0 0 0 0 0
55532- 0 0 0 0 0 0 0 0 0 0 0 0
55533- 0 0 0 0 0 0 0 0 0 0 0 0
55534- 0 0 0 0 0 0 0 0 0 0 0 0
55535- 0 0 0 0 0 0 0 0 0 14 14 14
55536- 46 46 46 86 86 86 2 2 6 2 2 6
55537- 6 6 6 6 6 6 22 22 22 34 34 34
55538- 6 6 6 2 2 6 2 2 6 2 2 6
55539- 2 2 6 2 2 6 18 18 18 34 34 34
55540- 10 10 10 50 50 50 22 22 22 2 2 6
55541- 2 2 6 2 2 6 2 2 6 10 10 10
55542- 86 86 86 42 42 42 14 14 14 0 0 0
55543- 0 0 0 0 0 0 0 0 0 0 0 0
55544- 0 0 0 0 0 0 0 0 0 0 0 0
55545- 0 0 0 0 0 0 0 0 0 0 0 0
55546- 0 0 0 0 0 0 0 0 0 0 0 0
55547- 0 0 0 0 0 0 0 0 0 0 0 0
55548- 0 0 0 0 0 0 0 0 0 0 0 0
55549- 0 0 0 0 0 0 0 0 0 0 0 0
[image data omitted: this hunk replaces the raw ASCII RGB pixel rows of an embedded bitmap — patch lines 55550-56868 ('-') remove the old pixel triplets and patch lines 56869 onward ('+') add the new ones; the numeric rows carry no reviewable content beyond the pixel values themselves]
57105+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
57106+4 4 4 4 4 4
57107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57108+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
57109+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
57110+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
57111+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
57112+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
57113+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
57114+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
57115+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
57116+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
57117+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
57118+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
57119+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
57120+0 0 0 4 4 4
57121+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
57122+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
57123+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
57124+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
57125+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
57126+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
57127+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
57128+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
57129+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
57130+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
57131+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
57132+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
57133+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
57134+2 0 0 0 0 0
57135+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
57136+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
57137+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
57138+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
57139+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
57140+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
57141+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
57142+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
57143+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
57144+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
57145+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
57146+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
57147+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
57148+37 38 37 0 0 0
57149+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
57150+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
57151+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
57152+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
57153+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
57154+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
57155+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
57156+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
57157+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
57158+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
57159+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
57160+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
57161+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
57162+85 115 134 4 0 0
57163+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
57164+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
57165+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
57166+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
57167+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
57168+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
57169+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
57170+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
57171+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
57172+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
57173+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
57174+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
57175+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
57176+60 73 81 4 0 0
57177+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
57178+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
57179+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
57180+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
57181+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
57182+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
57183+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
57184+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
57185+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
57186+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
57187+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
57188+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
57189+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
57190+16 19 21 4 0 0
57191+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
57192+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
57193+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
57194+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
57195+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
57196+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
57197+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
57198+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
57199+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
57200+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
57201+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
57202+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
57203+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
57204+4 0 0 4 3 3
57205+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
57206+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
57207+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
57208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
57209+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
57210+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
57211+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
57212+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
57213+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
57214+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
57215+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
57216+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
57217+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
57218+3 2 2 4 4 4
57219+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
57220+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
57221+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
57222+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
57223+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
57224+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
57225+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
57226+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
57227+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
57228+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
57229+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
57230+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
57231+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
57232+4 4 4 4 4 4
57233+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
57234+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
57235+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
57236+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
57237+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
57238+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
57239+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
57240+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
57241+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
57242+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
57243+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
57244+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
57245+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
57246+4 4 4 4 4 4
57247+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
57248+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
57249+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
57250+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
57251+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
57252+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
57253+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
57254+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
57255+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
57256+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
57257+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
57258+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
57259+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
57260+5 5 5 5 5 5
57261+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
57262+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
57263+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
57264+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
57265+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
57266+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57267+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
57268+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
57269+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
57270+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
57271+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
57272+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
57273+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
57274+5 5 5 4 4 4
57275+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
57276+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
57277+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
57278+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
57279+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57280+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
57281+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
57282+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
57283+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
57284+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
57285+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
57286+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
57287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57288+4 4 4 4 4 4
57289+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
57290+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
57291+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
57292+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
57293+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
57294+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57295+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57296+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
57297+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
57298+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
57299+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
57300+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
57301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57302+4 4 4 4 4 4
57303+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
57304+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
57305+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
57306+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
57307+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57308+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
57309+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
57310+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
57311+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
57312+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
57313+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
57314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57316+4 4 4 4 4 4
57317+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
57318+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
57319+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
57320+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
57321+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57322+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57323+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57324+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
57325+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
57326+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
57327+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
57328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57330+4 4 4 4 4 4
57331+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
57332+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
57333+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
57334+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
57335+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57336+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
57337+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
57338+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
57339+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
57340+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
57341+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57344+4 4 4 4 4 4
57345+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
57346+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
57347+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
57348+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
57349+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57350+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
57351+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
57352+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
57353+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
57354+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
57355+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
57356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57358+4 4 4 4 4 4
57359+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
57360+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
57361+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
57362+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
57363+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57364+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
57365+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
57366+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
57367+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
57368+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
57369+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
57370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57372+4 4 4 4 4 4
57373+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
57374+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
57375+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
57376+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
57377+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
57378+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
57379+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
57380+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
57381+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
57382+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
57383+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57384+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57386+4 4 4 4 4 4
57387+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
57388+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
57389+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
57390+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
57391+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57392+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
57393+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
57394+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
57395+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
57396+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
57397+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57398+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57400+4 4 4 4 4 4
57401+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
57402+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
57403+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
57404+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
57405+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57406+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
57407+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
57408+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
57409+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
57410+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
57411+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57414+4 4 4 4 4 4
57415+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
57416+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
57417+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
57418+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
57419+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57420+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
57421+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
57422+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
57423+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
57424+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57425+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57428+4 4 4 4 4 4
57429+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
57430+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
57431+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
57432+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
57433+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
57434+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
57435+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
57436+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
57437+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57438+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57439+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57442+4 4 4 4 4 4
57443+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
57444+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
57445+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
57446+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
57447+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57448+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
57449+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
57450+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
57451+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57452+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57453+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57456+4 4 4 4 4 4
57457+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
57458+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
57459+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
57460+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
57461+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
57462+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
57463+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
57464+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
57465+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57466+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57467+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57470+4 4 4 4 4 4
57471+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
57472+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
57473+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57474+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
57475+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
57476+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
57477+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
57478+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
57479+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
57480+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57481+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57484+4 4 4 4 4 4
57485+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
57486+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
57487+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
57488+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
57489+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
57490+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
57491+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
57492+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
57493+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57494+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57495+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57498+4 4 4 4 4 4
57499+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
57500+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
57501+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57502+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
57503+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
57504+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
57505+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
57506+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
57507+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
57508+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57509+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57512+4 4 4 4 4 4
57513+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
57514+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
57515+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
57516+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
57517+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
57518+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
57519+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
57520+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
57521+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57522+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57523+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57526+4 4 4 4 4 4
57527+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57528+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
57529+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57530+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
57531+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
57532+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
57533+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
57534+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
57535+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57536+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57537+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57540+4 4 4 4 4 4
57541+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
57542+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
57543+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
57544+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
57545+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
57546+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
57547+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57548+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
57549+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57550+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57551+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57554+4 4 4 4 4 4
57555+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57556+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
57557+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
57558+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
57559+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
57560+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
57561+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57562+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
57563+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57564+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57565+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57568+4 4 4 4 4 4
57569+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
57570+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
57571+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
57572+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
57573+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
57574+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
57575+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
57576+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
57577+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
57578+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57579+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57582+4 4 4 4 4 4
57583+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57584+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
57585+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
57586+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
57587+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
57588+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
57589+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
57590+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
57591+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
57592+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57593+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57596+4 4 4 4 4 4
57597+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
57598+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
57599+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
57600+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
57601+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
57602+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
57603+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
57604+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
57605+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
57606+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57607+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57610+4 4 4 4 4 4
57611+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57612+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
57613+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
57614+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
57615+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
57616+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
57617+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
57618+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
57619+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
57620+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57621+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57624+4 4 4 4 4 4
57625+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
57626+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
57627+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
57628+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
57629+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
57630+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
57631+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
57632+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
57633+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
57634+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
57635+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57638+4 4 4 4 4 4
57639+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
57640+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
57641+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
57642+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
57643+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
57644+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
57645+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
57646+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
57647+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
57648+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
57649+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57652+4 4 4 4 4 4
57653+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
57654+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
57655+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
57656+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
57657+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
57658+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
57659+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57660+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
57661+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
57662+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
57663+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57666+4 4 4 4 4 4
57667+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
57668+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
57669+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
57670+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
57671+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
57672+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
57673+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
57674+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
57675+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
57676+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
57677+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57680+4 4 4 4 4 4
57681+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
57682+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
57683+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
57684+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
57685+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
57686+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
57687+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
57688+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
57689+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
57690+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
57691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57694+4 4 4 4 4 4
57695+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
57696+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
57697+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
57698+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
57699+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
57700+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
57701+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
57702+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
57703+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
57704+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
57705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57708+4 4 4 4 4 4
57709+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
57710+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
57711+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
57712+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
57713+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
57714+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
57715+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
57716+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
57717+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
57718+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
57719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57722+4 4 4 4 4 4
57723+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
57724+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
57725+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
57726+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
57727+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
57728+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
57729+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
57730+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
57731+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
57732+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57736+4 4 4 4 4 4
57737+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
57738+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
57739+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
57740+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
57741+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
57742+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
57743+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
57744+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
57745+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
57746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57750+4 4 4 4 4 4
57751+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
57752+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
57753+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
57754+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
57755+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
57756+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
57757+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
57758+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
57759+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
57760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57764+4 4 4 4 4 4
57765+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
57766+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
57767+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
57768+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
57769+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
57770+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
57771+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
57772+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
57773+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57778+4 4 4 4 4 4
57779+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
57780+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
57781+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
57782+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
57783+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
57784+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
57785+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
57786+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
57787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57792+4 4 4 4 4 4
57793+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
57794+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
57795+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
57796+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
57797+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
57798+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
57799+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
57800+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
57801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57806+4 4 4 4 4 4
57807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57808+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
57809+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
57810+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
57811+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
57812+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
57813+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
57814+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
57815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57820+4 4 4 4 4 4
57821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57822+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
57823+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
57824+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
57825+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
57826+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
57827+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
57828+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
57829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57834+4 4 4 4 4 4
57835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57836+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
57837+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
57838+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
57839+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
57840+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
57841+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
57842+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57848+4 4 4 4 4 4
57849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57851+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
57852+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
57853+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
57854+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
57855+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
57856+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57862+4 4 4 4 4 4
57863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57866+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
57867+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
57868+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
57869+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
57870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57876+4 4 4 4 4 4
57877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57880+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
57881+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
57882+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
57883+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
57884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57890+4 4 4 4 4 4
57891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57894+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
57895+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
57896+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
57897+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
57898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57904+4 4 4 4 4 4
57905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57908+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
57909+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
57910+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
57911+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
57912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57918+4 4 4 4 4 4
57919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57923+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
57924+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
57925+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57932+4 4 4 4 4 4
57933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57937+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
57938+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
57939+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
57940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57946+4 4 4 4 4 4
57947+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57951+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
57952+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
57953+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57960+4 4 4 4 4 4
57961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57965+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
57966+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
57967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57974+4 4 4 4 4 4
57975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57979+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
57980+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
57981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57988+4 4 4 4 4 4
57989diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
57990index fef20db..d28b1ab 100644
57991--- a/drivers/xen/xenfs/xenstored.c
57992+++ b/drivers/xen/xenfs/xenstored.c
57993@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
57994 static int xsd_kva_open(struct inode *inode, struct file *file)
57995 {
57996 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
57997+#ifdef CONFIG_GRKERNSEC_HIDESYM
57998+ NULL);
57999+#else
58000 xen_store_interface);
58001+#endif
58002+
58003 if (!file->private_data)
58004 return -ENOMEM;
58005 return 0;
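The xenstored hunk above plugs an address leak: the kva file handed userland a printout of the xen_store_interface kernel pointer, and under CONFIG_GRKERNSEC_HIDESYM the patch formats NULL instead, keeping the file readable while hiding the address. A minimal sketch of the pattern, not the patch's code; HIDE_KERNEL_POINTERS is a hypothetical stand-in for the config switch (mainline's analogous mitigation is the %pK specifier gated by kptr_restrict):

    #include <linux/gfp.h>
    #include <linux/string.h>

    /* Format a kernel virtual address for userland, or NULL when pointer
     * hiding is compiled in, so the string stays well-formed either way. */
    static char *format_kva(gfp_t gfp, const void *kva)
    {
    #ifdef HIDE_KERNEL_POINTERS
            kva = NULL;
    #endif
            return kasprintf(gfp, "0x%p", kva);
    }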
58006diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
58007index eb14e05..5156de7 100644
58008--- a/fs/9p/vfs_addr.c
58009+++ b/fs/9p/vfs_addr.c
58010@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
58011
58012 retval = v9fs_file_write_internal(inode,
58013 v9inode->writeback_fid,
58014- (__force const char __user *)buffer,
58015+ (const char __force_user *)buffer,
58016 len, &offset, 0);
58017 if (retval > 0)
58018 retval = 0;
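The cast rewrite above is cosmetic at runtime but meaningful to sparse: v9fs deliberately pushes a kernel buffer down a path typed for user pointers, and __force_user is the patch's shorthand for that forced address-space change. A sketch of the definition, assuming it matches the compiler.h hunk elsewhere in this patch:

    /* Visible only to the static checker; expands to nothing for gcc. */
    #ifdef __CHECKER__
    # define __force_user   __force __user
    # define __force_kernel __force __kernel
    #else
    # define __force_user
    # define __force_kernel
    #endif

With the annotation, "(const char __force_user *)buffer" reads as a single marker instead of the two-part "(__force const char __user *)" spelling, and a grep for __force_user finds every such deliberate crossing.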
58019diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
58020index 9ee5343..5165e3c 100644
58021--- a/fs/9p/vfs_inode.c
58022+++ b/fs/9p/vfs_inode.c
58023@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
58024 void
58025 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
58026 {
58027- char *s = nd_get_link(nd);
58028+ const char *s = nd_get_link(nd);
58029
58030 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
58031 dentry, IS_ERR(s) ? "<error>" : s);
58032diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
58033index c055d56e..a46f4f5 100644
58034--- a/fs/Kconfig.binfmt
58035+++ b/fs/Kconfig.binfmt
58036@@ -106,7 +106,7 @@ config HAVE_AOUT
58037
58038 config BINFMT_AOUT
58039 tristate "Kernel support for a.out and ECOFF binaries"
58040- depends on HAVE_AOUT
58041+ depends on HAVE_AOUT && BROKEN
58042 ---help---
58043 A.out (Assembler.OUTput) is a set of formats for libraries and
58044 executables used in the earliest versions of UNIX. Linux used
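Adding "&& BROKEN" is the usual Kconfig idiom for neutering an option without deleting it: the BROKEN symbol is never set in a normal configuration, so BINFMT_AOUT becomes unselectable, which is how this patch retires the a.out loader and its fragile core-dump path. The shape of the idiom, with hypothetical symbol names:

    config EXAMPLE_FEATURE
            tristate "Feature kept in-tree but not buildable"
            depends on EXAMPLE_ARCH_SUPPORT && BROKEN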
58045diff --git a/fs/afs/inode.c b/fs/afs/inode.c
58046index 8a1d38e..300a14e 100644
58047--- a/fs/afs/inode.c
58048+++ b/fs/afs/inode.c
58049@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58050 struct afs_vnode *vnode;
58051 struct super_block *sb;
58052 struct inode *inode;
58053- static atomic_t afs_autocell_ino;
58054+ static atomic_unchecked_t afs_autocell_ino;
58055
58056 _enter("{%x:%u},%*.*s,",
58057 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
58058@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58059 data.fid.unique = 0;
58060 data.fid.vnode = 0;
58061
58062- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
58063+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
58064 afs_iget5_autocell_test, afs_iget5_set,
58065 &data);
58066 if (!inode) {
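afs_autocell_ino is a free-running inode-number source, not a reference count, so the patch moves it to atomic_unchecked_t: under PaX's REFCOUNT hardening the plain atomic_t operations trap on overflow to stop refcount wraparound, and counters that may legitimately wrap must opt out via the _unchecked type and helpers. A minimal sketch of the split, assuming the types this patch introduces:

    static atomic_t           obj_refs;   /* overflow is a bug: trap on wrap */
    static atomic_unchecked_t ino_seq;    /* ID generator: wrap is harmless  */

    static inline unsigned long next_ino(void)
    {
            /* _unchecked variant skips the overflow trap */
            return atomic_inc_return_unchecked(&ino_seq);
    }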
58067diff --git a/fs/aio.c b/fs/aio.c
58068index c428871..3f3041b 100644
58069--- a/fs/aio.c
58070+++ b/fs/aio.c
58071@@ -413,7 +413,7 @@ static int aio_setup_ring(struct kioctx *ctx)
58072 size += sizeof(struct io_event) * nr_events;
58073
58074 nr_pages = PFN_UP(size);
58075- if (nr_pages < 0)
58076+ if (nr_pages <= 0)
58077 return -EINVAL;
58078
58079 file = aio_private_file(ctx, nr_pages);
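The aio change widens "nr_pages < 0" to "nr_pages <= 0". size is derived from the caller-supplied nr_events, and after integer wrap or truncation into the int nr_pages the result can come out zero as well as negative; a zero-page ring must be rejected before aio_private_file() is asked to back it. A user-space sketch of the check, with PAGE_SIZE and PFN_UP re-declared locally for the example:

    #include <stddef.h>

    #define PAGE_SIZE 4096UL
    #define PFN_UP(x) (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

    static int ring_pages_or_error(size_t size)
    {
            int nr_pages = (int)PFN_UP(size);

            if (nr_pages <= 0)   /* reject zero as well as negative */
                    return -1;   /* -EINVAL in the kernel           */
            return nr_pages;
    }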
58080diff --git a/fs/attr.c b/fs/attr.c
58081index 6530ced..4a827e2 100644
58082--- a/fs/attr.c
58083+++ b/fs/attr.c
58084@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
58085 unsigned long limit;
58086
58087 limit = rlimit(RLIMIT_FSIZE);
58088+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
58089 if (limit != RLIM_INFINITY && offset > limit)
58090 goto out_sig;
58091 if (offset > inode->i_sb->s_maxbytes)
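gr_learn_resource() is the RBAC learning hook this patch drops in front of nearly every rlimit comparison: it records which resource the task touched and the value it attempted, so grsecurity's learning mode can propose per-subject limits later. The hook always precedes, and never replaces, the enforcement test. A no-op stub matching the call sites above; the meaning of the final flag (how the caller compares against the limit) is inferred from usage, not documented here:

    struct task_struct;

    /* Hypothetical stand-in so the call sites read on their own. */
    static inline void gr_learn_resource(const struct task_struct *task,
                                         const int res,
                                         const unsigned long wanted,
                                         const int gt)
    {
            /* learning mode would log (task, res, wanted, gt) here */
    }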
58092diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
58093index aaf96cb..ac7d921 100644
58094--- a/fs/autofs4/dev-ioctl.c
58095+++ b/fs/autofs4/dev-ioctl.c
58096@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
58097 */
58098 static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
58099 {
58100- struct autofs_dev_ioctl tmp;
58101+ struct autofs_dev_ioctl tmp, *res;
58102
58103 if (copy_from_user(&tmp, in, sizeof(tmp)))
58104 return ERR_PTR(-EFAULT);
58105@@ -106,7 +106,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i
58106 if (tmp.size > (PATH_MAX + sizeof(tmp)))
58107 return ERR_PTR(-ENAMETOOLONG);
58108
58109- return memdup_user(in, tmp.size);
58110+ res = memdup_user(in, tmp.size);
58111+ if (!IS_ERR(res))
58112+ res->size = tmp.size;
58113+
58114+ return res;
58115 }
58116
58117 static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
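The copy_dev_ioctl() change closes a classic double-fetch window: the header is copied once so tmp.size can be validated, then memdup_user() fetches the whole buffer a second time, and userspace can rewrite the size field in between the two copies. Re-imposing the validated tmp.size on the duplicated buffer makes every later length check trust only the vetted value. The pattern, sketched with a hypothetical header struct and bound:

    #include <linux/err.h>
    #include <linux/limits.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct hdr { u32 size; /* payload follows */ };
    #define HDR_MAX (PATH_MAX + sizeof(struct hdr)) /* bound as in autofs */

    static struct hdr *copy_arg(struct hdr __user *in)
    {
            struct hdr tmp, *res;

            if (copy_from_user(&tmp, in, sizeof(tmp)))      /* fetch #1 */
                    return ERR_PTR(-EFAULT);
            if (tmp.size < sizeof(tmp) || tmp.size > HDR_MAX)
                    return ERR_PTR(-EINVAL);

            res = memdup_user(in, tmp.size);                /* fetch #2 */
            if (!IS_ERR(res))
                    res->size = tmp.size;  /* pin the validated length */
            return res;
    }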
58118diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
58119index 116fd38..c04182da 100644
58120--- a/fs/autofs4/waitq.c
58121+++ b/fs/autofs4/waitq.c
58122@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
58123 {
58124 unsigned long sigpipe, flags;
58125 mm_segment_t fs;
58126- const char *data = (const char *)addr;
58127+ const char __user *data = (const char __force_user *)addr;
58128 ssize_t wr = 0;
58129
58130 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
58131@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
58132 return 1;
58133 }
58134
58135+#ifdef CONFIG_GRKERNSEC_HIDESYM
58136+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
58137+#endif
58138+
58139 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58140 enum autofs_notify notify)
58141 {
58142@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58143
58144 /* If this is a direct mount request create a dummy name */
58145 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
58146+#ifdef CONFIG_GRKERNSEC_HIDESYM
58147+ /* this name does get written to userland via autofs4_write() */
58148+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
58149+#else
58150 qstr.len = sprintf(name, "%p", dentry);
58151+#endif
58152 else {
58153 qstr.len = autofs4_getpath(sbi, dentry, &name);
58154 if (!qstr.len) {
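Two leaks close in waitq.c. The __force_user cast marks another deliberate kernel-to-user pointer crossing for sparse, and the dummy-name change stops handing the automount daemon a dentry heap address: under HIDESYM the name is built from a wrap-allowed global counter instead, which preserves the uniqueness the wait protocol needs without revealing layout. Formatting a 32-bit counter with %08x also keeps the generated name a fixed eight characters. A sketch of the substitution:

    /* Counter-derived token in place of an address-derived one. */
    static atomic_unchecked_t dummy_name_id = ATOMIC_INIT(0);

    static int make_dummy_name(char *name)
    {
            return sprintf(name, "%08x",
                           atomic_inc_return_unchecked(&dummy_name_id));
    }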
58155diff --git a/fs/befs/endian.h b/fs/befs/endian.h
58156index 2722387..56059b5 100644
58157--- a/fs/befs/endian.h
58158+++ b/fs/befs/endian.h
58159@@ -11,7 +11,7 @@
58160
58161 #include <asm/byteorder.h>
58162
58163-static inline u64
58164+static inline u64 __intentional_overflow(-1)
58165 fs64_to_cpu(const struct super_block *sb, fs64 n)
58166 {
58167 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58168@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
58169 return (__force fs64)cpu_to_be64(n);
58170 }
58171
58172-static inline u32
58173+static inline u32 __intentional_overflow(-1)
58174 fs32_to_cpu(const struct super_block *sb, fs32 n)
58175 {
58176 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58177@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
58178 return (__force fs32)cpu_to_be32(n);
58179 }
58180
58181-static inline u16
58182+static inline u16 __intentional_overflow(-1)
58183 fs16_to_cpu(const struct super_block *sb, fs16 n)
58184 {
58185 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
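__intentional_overflow(-1) is consumed by the size_overflow GCC plugin shipped elsewhere in this patch: it marks the endian-conversion helpers as functions whose arithmetic may legitimately wrap, so the plugin skips instrumenting them. A sketch of how such an annotation macro is typically plumbed (the exact attribute name and guard are assumptions; the real definitions live in the plugin headers):

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) \
        __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)             /* compiles away without the plugin */
#endif

/* example use, mirroring fs/befs/endian.h above: byte swapping is
 * expected to "overflow" in the plugin's value-range analysis */
static inline unsigned long long __intentional_overflow(-1)
swap64(unsigned long long n)
{
        return __builtin_bswap64(n);
}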
58186diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
58187index 4c55668..eeae150 100644
58188--- a/fs/binfmt_aout.c
58189+++ b/fs/binfmt_aout.c
58190@@ -16,6 +16,7 @@
58191 #include <linux/string.h>
58192 #include <linux/fs.h>
58193 #include <linux/file.h>
58194+#include <linux/security.h>
58195 #include <linux/stat.h>
58196 #include <linux/fcntl.h>
58197 #include <linux/ptrace.h>
58198@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
58199 #endif
58200 # define START_STACK(u) ((void __user *)u.start_stack)
58201
58202+ memset(&dump, 0, sizeof(dump));
58203+
58204 fs = get_fs();
58205 set_fs(KERNEL_DS);
58206 has_dumped = 1;
58207@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
58208
58209 /* If the size of the dump file exceeds the rlimit, then see what would happen
58210 if we wrote the stack, but not the data area. */
58211+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
58212 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
58213 dump.u_dsize = 0;
58214
58215 /* Make sure we have enough room to write the stack and data areas. */
58216+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
58217 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
58218 dump.u_ssize = 0;
58219
58220@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
58221 rlim = rlimit(RLIMIT_DATA);
58222 if (rlim >= RLIM_INFINITY)
58223 rlim = ~0;
58224+
58225+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
58226 if (ex.a_data + ex.a_bss > rlim)
58227 return -ENOMEM;
58228
58229@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
58230
58231 install_exec_creds(bprm);
58232
58233+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58234+ current->mm->pax_flags = 0UL;
58235+#endif
58236+
58237+#ifdef CONFIG_PAX_PAGEEXEC
58238+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
58239+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
58240+
58241+#ifdef CONFIG_PAX_EMUTRAMP
58242+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
58243+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
58244+#endif
58245+
58246+#ifdef CONFIG_PAX_MPROTECT
58247+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
58248+ current->mm->pax_flags |= MF_PAX_MPROTECT;
58249+#endif
58250+
58251+ }
58252+#endif
58253+
58254 if (N_MAGIC(ex) == OMAGIC) {
58255 unsigned long text_addr, map_size;
58256 loff_t pos;
58257@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
58258 return error;
58259
58260 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
58261- PROT_READ | PROT_WRITE | PROT_EXEC,
58262+ PROT_READ | PROT_WRITE,
58263 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
58264 fd_offset + ex.a_text);
58265 if (error != N_DATADDR(ex))
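For a.out the patch zeroes the core-dump state before use, routes the RLIMIT_CORE and RLIMIT_DATA decisions through the learning hook, derives PAGEEXEC/EMUTRAMP/MPROTECT from the binary's N_FLAGS, and drops PROT_EXEC from the data mapping so the data segment is no longer executable. A compilable sketch of the flag translation implemented above (the bit values are illustrative, not the kernel's):

#define F_PAX_PAGEEXEC  0x01                    /* illustrative bit assignments */
#define F_PAX_EMUTRAMP  0x02
#define F_PAX_MPROTECT  0x04

#define MF_PAX_PAGEEXEC 0x01
#define MF_PAX_EMUTRAMP 0x02
#define MF_PAX_MPROTECT 0x04

/* opt-out semantics, as in the hunk: a set F_PAX_PAGEEXEC bit disables
 * the feature; EMUTRAMP is opt-in and only meaningful under PAGEEXEC */
static unsigned long aout_pax_flags(unsigned long n_flags)
{
        unsigned long mf = 0;

        if (!(n_flags & F_PAX_PAGEEXEC)) {
                mf |= MF_PAX_PAGEEXEC;
                if (n_flags & F_PAX_EMUTRAMP)
                        mf |= MF_PAX_EMUTRAMP;
                if (!(n_flags & F_PAX_MPROTECT))
                        mf |= MF_PAX_MPROTECT;
        }
        return mf;
}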
58266diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
58267index 995986b..dcc4ef2 100644
58268--- a/fs/binfmt_elf.c
58269+++ b/fs/binfmt_elf.c
58270@@ -34,6 +34,7 @@
58271 #include <linux/utsname.h>
58272 #include <linux/coredump.h>
58273 #include <linux/sched.h>
58274+#include <linux/xattr.h>
58275 #include <asm/uaccess.h>
58276 #include <asm/param.h>
58277 #include <asm/page.h>
58278@@ -47,7 +48,7 @@
58279
58280 static int load_elf_binary(struct linux_binprm *bprm);
58281 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
58282- int, int, unsigned long);
58283+ int, int, unsigned long) __intentional_overflow(-1);
58284
58285 #ifdef CONFIG_USELIB
58286 static int load_elf_library(struct file *);
58287@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
58288 #define elf_core_dump NULL
58289 #endif
58290
58291+#ifdef CONFIG_PAX_MPROTECT
58292+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
58293+#endif
58294+
58295+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58296+static void elf_handle_mmap(struct file *file);
58297+#endif
58298+
58299 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
58300 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
58301 #else
58302@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
58303 .load_binary = load_elf_binary,
58304 .load_shlib = load_elf_library,
58305 .core_dump = elf_core_dump,
58306+
58307+#ifdef CONFIG_PAX_MPROTECT
58308+ .handle_mprotect= elf_handle_mprotect,
58309+#endif
58310+
58311+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58312+ .handle_mmap = elf_handle_mmap,
58313+#endif
58314+
58315 .min_coredump = ELF_EXEC_PAGESIZE,
58316 };
58317
58318@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
58319
58320 static int set_brk(unsigned long start, unsigned long end)
58321 {
58322+ unsigned long e = end;
58323+
58324 start = ELF_PAGEALIGN(start);
58325 end = ELF_PAGEALIGN(end);
58326 if (end > start) {
58327@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
58328 if (BAD_ADDR(addr))
58329 return addr;
58330 }
58331- current->mm->start_brk = current->mm->brk = end;
58332+ current->mm->start_brk = current->mm->brk = e;
58333 return 0;
58334 }
58335
58336@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58337 elf_addr_t __user *u_rand_bytes;
58338 const char *k_platform = ELF_PLATFORM;
58339 const char *k_base_platform = ELF_BASE_PLATFORM;
58340- unsigned char k_rand_bytes[16];
58341+ u32 k_rand_bytes[4];
58342 int items;
58343 elf_addr_t *elf_info;
58344 int ei_index = 0;
58345 const struct cred *cred = current_cred();
58346 struct vm_area_struct *vma;
58347+ unsigned long saved_auxv[AT_VECTOR_SIZE];
58348
58349 /*
58350 * In some cases (e.g. Hyper-Threading), we want to avoid L1
58351@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58352 * Generate 16 random bytes for userspace PRNG seeding.
58353 */
58354 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
58355- u_rand_bytes = (elf_addr_t __user *)
58356- STACK_ALLOC(p, sizeof(k_rand_bytes));
58357+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
58358+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
58359+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
58360+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
58361+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
58362+ u_rand_bytes = (elf_addr_t __user *) p;
58363 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
58364 return -EFAULT;
58365
58366@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58367 return -EFAULT;
58368 current->mm->env_end = p;
58369
58370+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
58371+
58372 /* Put the elf_info on the stack in the right place. */
58373 sp = (elf_addr_t __user *)envp + 1;
58374- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
58375+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
58376 return -EFAULT;
58377 return 0;
58378 }
58379@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
58380 an ELF header */
58381
58382 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58383- struct file *interpreter, unsigned long *interp_map_addr,
58384+ struct file *interpreter,
58385 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
58386 {
58387 struct elf_phdr *eppnt;
58388- unsigned long load_addr = 0;
58389+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
58390 int load_addr_set = 0;
58391 unsigned long last_bss = 0, elf_bss = 0;
58392- unsigned long error = ~0UL;
58393+ unsigned long error = -EINVAL;
58394 unsigned long total_size;
58395 int i;
58396
58397@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58398 goto out;
58399 }
58400
58401+#ifdef CONFIG_PAX_SEGMEXEC
58402+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
58403+ pax_task_size = SEGMEXEC_TASK_SIZE;
58404+#endif
58405+
58406 eppnt = interp_elf_phdata;
58407 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
58408 if (eppnt->p_type == PT_LOAD) {
58409@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58410 map_addr = elf_map(interpreter, load_addr + vaddr,
58411 eppnt, elf_prot, elf_type, total_size);
58412 total_size = 0;
58413- if (!*interp_map_addr)
58414- *interp_map_addr = map_addr;
58415 error = map_addr;
58416 if (BAD_ADDR(map_addr))
58417 goto out;
58418@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58419 k = load_addr + eppnt->p_vaddr;
58420 if (BAD_ADDR(k) ||
58421 eppnt->p_filesz > eppnt->p_memsz ||
58422- eppnt->p_memsz > TASK_SIZE ||
58423- TASK_SIZE - eppnt->p_memsz < k) {
58424+ eppnt->p_memsz > pax_task_size ||
58425+ pax_task_size - eppnt->p_memsz < k) {
58426 error = -ENOMEM;
58427 goto out;
58428 }
58429@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58430 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
58431
58432 /* Map the last of the bss segment */
58433- error = vm_brk(elf_bss, last_bss - elf_bss);
58434- if (BAD_ADDR(error))
58435- goto out;
58436+ if (last_bss > elf_bss) {
58437+ error = vm_brk(elf_bss, last_bss - elf_bss);
58438+ if (BAD_ADDR(error))
58439+ goto out;
58440+ }
58441 }
58442
58443 error = load_addr;
58444@@ -634,6 +666,336 @@ out:
58445 return error;
58446 }
58447
58448+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58449+#ifdef CONFIG_PAX_SOFTMODE
58450+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
58451+{
58452+ unsigned long pax_flags = 0UL;
58453+
58454+#ifdef CONFIG_PAX_PAGEEXEC
58455+ if (elf_phdata->p_flags & PF_PAGEEXEC)
58456+ pax_flags |= MF_PAX_PAGEEXEC;
58457+#endif
58458+
58459+#ifdef CONFIG_PAX_SEGMEXEC
58460+ if (elf_phdata->p_flags & PF_SEGMEXEC)
58461+ pax_flags |= MF_PAX_SEGMEXEC;
58462+#endif
58463+
58464+#ifdef CONFIG_PAX_EMUTRAMP
58465+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58466+ pax_flags |= MF_PAX_EMUTRAMP;
58467+#endif
58468+
58469+#ifdef CONFIG_PAX_MPROTECT
58470+ if (elf_phdata->p_flags & PF_MPROTECT)
58471+ pax_flags |= MF_PAX_MPROTECT;
58472+#endif
58473+
58474+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58475+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
58476+ pax_flags |= MF_PAX_RANDMMAP;
58477+#endif
58478+
58479+ return pax_flags;
58480+}
58481+#endif
58482+
58483+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
58484+{
58485+ unsigned long pax_flags = 0UL;
58486+
58487+#ifdef CONFIG_PAX_PAGEEXEC
58488+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
58489+ pax_flags |= MF_PAX_PAGEEXEC;
58490+#endif
58491+
58492+#ifdef CONFIG_PAX_SEGMEXEC
58493+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
58494+ pax_flags |= MF_PAX_SEGMEXEC;
58495+#endif
58496+
58497+#ifdef CONFIG_PAX_EMUTRAMP
58498+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
58499+ pax_flags |= MF_PAX_EMUTRAMP;
58500+#endif
58501+
58502+#ifdef CONFIG_PAX_MPROTECT
58503+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
58504+ pax_flags |= MF_PAX_MPROTECT;
58505+#endif
58506+
58507+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58508+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
58509+ pax_flags |= MF_PAX_RANDMMAP;
58510+#endif
58511+
58512+ return pax_flags;
58513+}
58514+#endif
58515+
58516+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58517+#ifdef CONFIG_PAX_SOFTMODE
58518+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
58519+{
58520+ unsigned long pax_flags = 0UL;
58521+
58522+#ifdef CONFIG_PAX_PAGEEXEC
58523+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
58524+ pax_flags |= MF_PAX_PAGEEXEC;
58525+#endif
58526+
58527+#ifdef CONFIG_PAX_SEGMEXEC
58528+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
58529+ pax_flags |= MF_PAX_SEGMEXEC;
58530+#endif
58531+
58532+#ifdef CONFIG_PAX_EMUTRAMP
58533+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58534+ pax_flags |= MF_PAX_EMUTRAMP;
58535+#endif
58536+
58537+#ifdef CONFIG_PAX_MPROTECT
58538+ if (pax_flags_softmode & MF_PAX_MPROTECT)
58539+ pax_flags |= MF_PAX_MPROTECT;
58540+#endif
58541+
58542+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58543+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
58544+ pax_flags |= MF_PAX_RANDMMAP;
58545+#endif
58546+
58547+ return pax_flags;
58548+}
58549+#endif
58550+
58551+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
58552+{
58553+ unsigned long pax_flags = 0UL;
58554+
58555+#ifdef CONFIG_PAX_PAGEEXEC
58556+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
58557+ pax_flags |= MF_PAX_PAGEEXEC;
58558+#endif
58559+
58560+#ifdef CONFIG_PAX_SEGMEXEC
58561+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
58562+ pax_flags |= MF_PAX_SEGMEXEC;
58563+#endif
58564+
58565+#ifdef CONFIG_PAX_EMUTRAMP
58566+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
58567+ pax_flags |= MF_PAX_EMUTRAMP;
58568+#endif
58569+
58570+#ifdef CONFIG_PAX_MPROTECT
58571+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
58572+ pax_flags |= MF_PAX_MPROTECT;
58573+#endif
58574+
58575+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58576+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
58577+ pax_flags |= MF_PAX_RANDMMAP;
58578+#endif
58579+
58580+ return pax_flags;
58581+}
58582+#endif
58583+
58584+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58585+static unsigned long pax_parse_defaults(void)
58586+{
58587+ unsigned long pax_flags = 0UL;
58588+
58589+#ifdef CONFIG_PAX_SOFTMODE
58590+ if (pax_softmode)
58591+ return pax_flags;
58592+#endif
58593+
58594+#ifdef CONFIG_PAX_PAGEEXEC
58595+ pax_flags |= MF_PAX_PAGEEXEC;
58596+#endif
58597+
58598+#ifdef CONFIG_PAX_SEGMEXEC
58599+ pax_flags |= MF_PAX_SEGMEXEC;
58600+#endif
58601+
58602+#ifdef CONFIG_PAX_MPROTECT
58603+ pax_flags |= MF_PAX_MPROTECT;
58604+#endif
58605+
58606+#ifdef CONFIG_PAX_RANDMMAP
58607+ if (randomize_va_space)
58608+ pax_flags |= MF_PAX_RANDMMAP;
58609+#endif
58610+
58611+ return pax_flags;
58612+}
58613+
58614+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
58615+{
58616+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
58617+
58618+#ifdef CONFIG_PAX_EI_PAX
58619+
58620+#ifdef CONFIG_PAX_SOFTMODE
58621+ if (pax_softmode)
58622+ return pax_flags;
58623+#endif
58624+
58625+ pax_flags = 0UL;
58626+
58627+#ifdef CONFIG_PAX_PAGEEXEC
58628+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
58629+ pax_flags |= MF_PAX_PAGEEXEC;
58630+#endif
58631+
58632+#ifdef CONFIG_PAX_SEGMEXEC
58633+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
58634+ pax_flags |= MF_PAX_SEGMEXEC;
58635+#endif
58636+
58637+#ifdef CONFIG_PAX_EMUTRAMP
58638+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
58639+ pax_flags |= MF_PAX_EMUTRAMP;
58640+#endif
58641+
58642+#ifdef CONFIG_PAX_MPROTECT
58643+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
58644+ pax_flags |= MF_PAX_MPROTECT;
58645+#endif
58646+
58647+#ifdef CONFIG_PAX_ASLR
58648+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
58649+ pax_flags |= MF_PAX_RANDMMAP;
58650+#endif
58651+
58652+#endif
58653+
58654+ return pax_flags;
58655+
58656+}
58657+
58658+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
58659+{
58660+
58661+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58662+ unsigned long i;
58663+
58664+ for (i = 0UL; i < elf_ex->e_phnum; i++)
58665+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
58666+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
58667+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
58668+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
58669+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
58670+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
58671+ return PAX_PARSE_FLAGS_FALLBACK;
58672+
58673+#ifdef CONFIG_PAX_SOFTMODE
58674+ if (pax_softmode)
58675+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
58676+ else
58677+#endif
58678+
58679+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
58680+ break;
58681+ }
58682+#endif
58683+
58684+ return PAX_PARSE_FLAGS_FALLBACK;
58685+}
58686+
58687+static unsigned long pax_parse_xattr_pax(struct file * const file)
58688+{
58689+
58690+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58691+ ssize_t xattr_size, i;
58692+ unsigned char xattr_value[sizeof("pemrs") - 1];
58693+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
58694+
58695+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
58696+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
58697+ return PAX_PARSE_FLAGS_FALLBACK;
58698+
58699+ for (i = 0; i < xattr_size; i++)
58700+ switch (xattr_value[i]) {
58701+ default:
58702+ return PAX_PARSE_FLAGS_FALLBACK;
58703+
58704+#define parse_flag(option1, option2, flag) \
58705+ case option1: \
58706+ if (pax_flags_hardmode & MF_PAX_##flag) \
58707+ return PAX_PARSE_FLAGS_FALLBACK;\
58708+ pax_flags_hardmode |= MF_PAX_##flag; \
58709+ break; \
58710+ case option2: \
58711+ if (pax_flags_softmode & MF_PAX_##flag) \
58712+ return PAX_PARSE_FLAGS_FALLBACK;\
58713+ pax_flags_softmode |= MF_PAX_##flag; \
58714+ break;
58715+
58716+ parse_flag('p', 'P', PAGEEXEC);
58717+ parse_flag('e', 'E', EMUTRAMP);
58718+ parse_flag('m', 'M', MPROTECT);
58719+ parse_flag('r', 'R', RANDMMAP);
58720+ parse_flag('s', 'S', SEGMEXEC);
58721+
58722+#undef parse_flag
58723+ }
58724+
58725+ if (pax_flags_hardmode & pax_flags_softmode)
58726+ return PAX_PARSE_FLAGS_FALLBACK;
58727+
58728+#ifdef CONFIG_PAX_SOFTMODE
58729+ if (pax_softmode)
58730+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
58731+ else
58732+#endif
58733+
58734+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
58735+#else
58736+ return PAX_PARSE_FLAGS_FALLBACK;
58737+#endif
58738+
58739+}
58740+
58741+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
58742+{
58743+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
58744+
58745+ pax_flags = pax_parse_defaults();
58746+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
58747+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
58748+ xattr_pax_flags = pax_parse_xattr_pax(file);
58749+
58750+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58751+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58752+ pt_pax_flags != xattr_pax_flags)
58753+ return -EINVAL;
58754+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58755+ pax_flags = xattr_pax_flags;
58756+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58757+ pax_flags = pt_pax_flags;
58758+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58759+ pax_flags = ei_pax_flags;
58760+
58761+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
58762+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58763+ if ((__supported_pte_mask & _PAGE_NX))
58764+ pax_flags &= ~MF_PAX_SEGMEXEC;
58765+ else
58766+ pax_flags &= ~MF_PAX_PAGEEXEC;
58767+ }
58768+#endif
58769+
58770+ if (0 > pax_check_flags(&pax_flags))
58771+ return -EINVAL;
58772+
58773+ current->mm->pax_flags = pax_flags;
58774+ return 0;
58775+}
58776+#endif
58777+
58778 /*
58779 * These are the functions used to load ELF style executables and shared
58780 * libraries. There is no binary dependent code anywhere else.
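Taken together, the block added above resolves PaX flags from four sources in fixed precedence: compiled-in defaults, then the legacy EI_PAX byte in e_ident, then a PT_PAX_FLAGS program header, then the PaX extended attribute, with the most specific source winning; if PT_PAX and the xattr are both present and disagree, the exec is refused. The resolution logic of pax_parse_pax_flags(), condensed into a standalone sketch:

#define FALLBACK (~0UL)                 /* stands in for PAX_PARSE_FLAGS_FALLBACK */

static long resolve_pax_flags(unsigned long def, unsigned long ei,
                              unsigned long pt, unsigned long xattr,
                              unsigned long *out)
{
        if (pt != FALLBACK && xattr != FALLBACK && pt != xattr)
                return -1;              /* explicit markings disagree: reject */

        if (xattr != FALLBACK)          /* xattr beats PT_PAX_FLAGS ... */
                *out = xattr;
        else if (pt != FALLBACK)        /* ... which beats EI_PAX ... */
                *out = pt;
        else if (ei != FALLBACK)        /* ... which beats the defaults */
                *out = ei;
        else
                *out = def;
        return 0;
}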
58781@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
58782 {
58783 unsigned long random_variable = 0;
58784
58785+#ifdef CONFIG_PAX_RANDUSTACK
58786+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
58787+ return stack_top - current->mm->delta_stack;
58788+#endif
58789+
58790 if ((current->flags & PF_RANDOMIZE) &&
58791 !(current->personality & ADDR_NO_RANDOMIZE)) {
58792 random_variable = (unsigned long) get_random_int();
58793@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58794 unsigned long load_addr = 0, load_bias = 0;
58795 int load_addr_set = 0;
58796 char * elf_interpreter = NULL;
58797- unsigned long error;
58798+ unsigned long error = 0;
58799 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
58800 unsigned long elf_bss, elf_brk;
58801 int retval, i;
58802@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58803 struct elfhdr interp_elf_ex;
58804 } *loc;
58805 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
58806+ unsigned long pax_task_size;
58807
58808 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
58809 if (!loc) {
58810@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
58811 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
58812 may depend on the personality. */
58813 SET_PERSONALITY2(loc->elf_ex, &arch_state);
58814+
58815+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58816+ current->mm->pax_flags = 0UL;
58817+#endif
58818+
58819+#ifdef CONFIG_PAX_DLRESOLVE
58820+ current->mm->call_dl_resolve = 0UL;
58821+#endif
58822+
58823+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
58824+ current->mm->call_syscall = 0UL;
58825+#endif
58826+
58827+#ifdef CONFIG_PAX_ASLR
58828+ current->mm->delta_mmap = 0UL;
58829+ current->mm->delta_stack = 0UL;
58830+#endif
58831+
58832+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58833+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
58834+ send_sig(SIGKILL, current, 0);
58835+ goto out_free_dentry;
58836+ }
58837+#endif
58838+
58839+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
58840+ pax_set_initial_flags(bprm);
58841+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
58842+ if (pax_set_initial_flags_func)
58843+ (pax_set_initial_flags_func)(bprm);
58844+#endif
58845+
58846+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
58847+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
58848+ current->mm->context.user_cs_limit = PAGE_SIZE;
58849+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
58850+ }
58851+#endif
58852+
58853+#ifdef CONFIG_PAX_SEGMEXEC
58854+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
58855+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
58856+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
58857+ pax_task_size = SEGMEXEC_TASK_SIZE;
58858+ current->mm->def_flags |= VM_NOHUGEPAGE;
58859+ } else
58860+#endif
58861+
58862+ pax_task_size = TASK_SIZE;
58863+
58864+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
58865+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58866+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
58867+ put_cpu();
58868+ }
58869+#endif
58870+
58871+#ifdef CONFIG_PAX_ASLR
58872+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
58873+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
58874+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
58875+ }
58876+#endif
58877+
58878+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
58879+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58880+ executable_stack = EXSTACK_DISABLE_X;
58881+ current->personality &= ~READ_IMPLIES_EXEC;
58882+ } else
58883+#endif
58884+
58885 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
58886 current->personality |= READ_IMPLIES_EXEC;
58887
58888@@ -924,6 +1363,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
58889 #else
58890 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
58891 #endif
58892+
58893+#ifdef CONFIG_PAX_RANDMMAP
58894+ /* PaX: randomize base address at the default exe base if requested */
58895+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
58896+#ifdef CONFIG_SPARC64
58897+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
58898+#else
58899+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
58900+#endif
58901+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
58902+ elf_flags |= MAP_FIXED;
58903+ }
58904+#endif
58905+
58906 }
58907
58908 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
58909@@ -955,9 +1408,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
58910 * allowed task size. Note that p_filesz must always be
58911 * <= p_memsz so it is only necessary to check p_memsz.
58912 */
58913- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
58914- elf_ppnt->p_memsz > TASK_SIZE ||
58915- TASK_SIZE - elf_ppnt->p_memsz < k) {
58916+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
58917+ elf_ppnt->p_memsz > pax_task_size ||
58918+ pax_task_size - elf_ppnt->p_memsz < k) {
58919 /* set_brk can never work. Avoid overflows. */
58920 retval = -EINVAL;
58921 goto out_free_dentry;
58922@@ -993,16 +1446,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
58923 if (retval)
58924 goto out_free_dentry;
58925 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
58926- retval = -EFAULT; /* Nobody gets to see this, but.. */
58927- goto out_free_dentry;
58928+ /*
58929+ * This bss-zeroing can fail if the ELF
58930+ * file specifies odd protections. So
58931+ * we don't check the return value
58932+ */
58933 }
58934
58935+#ifdef CONFIG_PAX_RANDMMAP
58936+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
58937+ unsigned long start, size, flags;
58938+ vm_flags_t vm_flags;
58939+
58940+ start = ELF_PAGEALIGN(elf_brk);
58941+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
58942+ flags = MAP_FIXED | MAP_PRIVATE;
58943+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
58944+
58945+ down_write(&current->mm->mmap_sem);
58946+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
58947+ retval = -ENOMEM;
58948+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
58949+// if (current->personality & ADDR_NO_RANDOMIZE)
58950+// vm_flags |= VM_READ | VM_MAYREAD;
58951+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
58952+ retval = IS_ERR_VALUE(start) ? start : 0;
58953+ }
58954+ up_write(&current->mm->mmap_sem);
58955+ if (retval == 0)
58956+ retval = set_brk(start + size, start + size + PAGE_SIZE);
58957+ if (retval < 0)
58958+ goto out_free_dentry;
58959+ }
58960+#endif
58961+
58962 if (elf_interpreter) {
58963- unsigned long interp_map_addr = 0;
58964-
58965 elf_entry = load_elf_interp(&loc->interp_elf_ex,
58966 interpreter,
58967- &interp_map_addr,
58968 load_bias, interp_elf_phdata);
58969 if (!IS_ERR((void *)elf_entry)) {
58970 /*
58971@@ -1230,7 +1710,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
58972 * Decide what to dump of a segment, part, all or none.
58973 */
58974 static unsigned long vma_dump_size(struct vm_area_struct *vma,
58975- unsigned long mm_flags)
58976+ unsigned long mm_flags, long signr)
58977 {
58978 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
58979
58980@@ -1268,7 +1748,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
58981 if (vma->vm_file == NULL)
58982 return 0;
58983
58984- if (FILTER(MAPPED_PRIVATE))
58985+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
58986 goto whole;
58987
58988 /*
58989@@ -1475,9 +1955,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
58990 {
58991 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
58992 int i = 0;
58993- do
58994+ do {
58995 i += 2;
58996- while (auxv[i - 2] != AT_NULL);
58997+ } while (auxv[i - 2] != AT_NULL);
58998 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
58999 }
59000
59001@@ -1486,7 +1966,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
59002 {
59003 mm_segment_t old_fs = get_fs();
59004 set_fs(KERNEL_DS);
59005- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
59006+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
59007 set_fs(old_fs);
59008 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
59009 }
59010@@ -2206,7 +2686,7 @@ static int elf_core_dump(struct coredump_params *cprm)
59011 vma = next_vma(vma, gate_vma)) {
59012 unsigned long dump_size;
59013
59014- dump_size = vma_dump_size(vma, cprm->mm_flags);
59015+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
59016 vma_filesz[i++] = dump_size;
59017 vma_data_size += dump_size;
59018 }
59019@@ -2314,6 +2794,167 @@ out:
59020
59021 #endif /* CONFIG_ELF_CORE */
59022
59023+#ifdef CONFIG_PAX_MPROTECT
59024+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
59025+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
59026+ * we'll remove VM_MAYWRITE for good on RELRO segments.
59027+ *
59028+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
59029+ * basis because we want to allow the common case and not the special ones.
59030+ */
59031+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
59032+{
59033+ struct elfhdr elf_h;
59034+ struct elf_phdr elf_p;
59035+ unsigned long i;
59036+ unsigned long oldflags;
59037+ bool is_textrel_rw, is_textrel_rx, is_relro;
59038+
59039+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
59040+ return;
59041+
59042+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
59043+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
59044+
59045+#ifdef CONFIG_PAX_ELFRELOCS
59046+ /* possible TEXTREL */
59047+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
59048+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
59049+#else
59050+ is_textrel_rw = false;
59051+ is_textrel_rx = false;
59052+#endif
59053+
59054+ /* possible RELRO */
59055+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
59056+
59057+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
59058+ return;
59059+
59060+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59061+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59062+
59063+#ifdef CONFIG_PAX_ETEXECRELOCS
59064+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59065+#else
59066+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
59067+#endif
59068+
59069+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59070+ !elf_check_arch(&elf_h) ||
59071+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59072+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59073+ return;
59074+
59075+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59076+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59077+ return;
59078+ switch (elf_p.p_type) {
59079+ case PT_DYNAMIC:
59080+ if (!is_textrel_rw && !is_textrel_rx)
59081+ continue;
59082+ i = 0UL;
59083+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
59084+ elf_dyn dyn;
59085+
59086+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
59087+ break;
59088+ if (dyn.d_tag == DT_NULL)
59089+ break;
59090+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
59091+ gr_log_textrel(vma);
59092+ if (is_textrel_rw)
59093+ vma->vm_flags |= VM_MAYWRITE;
59094+ else
59095+					/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
59096+ vma->vm_flags &= ~VM_MAYWRITE;
59097+ break;
59098+ }
59099+ i++;
59100+ }
59101+ is_textrel_rw = false;
59102+ is_textrel_rx = false;
59103+ continue;
59104+
59105+ case PT_GNU_RELRO:
59106+ if (!is_relro)
59107+ continue;
59108+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
59109+ vma->vm_flags &= ~VM_MAYWRITE;
59110+ is_relro = false;
59111+ continue;
59112+
59113+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59114+ case PT_PAX_FLAGS: {
59115+ const char *msg_mprotect = "", *msg_emutramp = "";
59116+ char *buffer_lib, *buffer_exe;
59117+
59118+ if (elf_p.p_flags & PF_NOMPROTECT)
59119+ msg_mprotect = "MPROTECT disabled";
59120+
59121+#ifdef CONFIG_PAX_EMUTRAMP
59122+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
59123+ msg_emutramp = "EMUTRAMP enabled";
59124+#endif
59125+
59126+ if (!msg_mprotect[0] && !msg_emutramp[0])
59127+ continue;
59128+
59129+ if (!printk_ratelimit())
59130+ continue;
59131+
59132+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
59133+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
59134+ if (buffer_lib && buffer_exe) {
59135+ char *path_lib, *path_exe;
59136+
59137+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
59138+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
59139+
59140+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
59141+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
59142+
59143+ }
59144+ free_page((unsigned long)buffer_exe);
59145+ free_page((unsigned long)buffer_lib);
59146+ continue;
59147+ }
59148+#endif
59149+
59150+ }
59151+ }
59152+}
59153+#endif
59154+
59155+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59156+
59157+extern int grsec_enable_log_rwxmaps;
59158+
59159+static void elf_handle_mmap(struct file *file)
59160+{
59161+ struct elfhdr elf_h;
59162+ struct elf_phdr elf_p;
59163+ unsigned long i;
59164+
59165+ if (!grsec_enable_log_rwxmaps)
59166+ return;
59167+
59168+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59169+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59170+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
59171+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59172+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59173+ return;
59174+
59175+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59176+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59177+ return;
59178+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
59179+ gr_log_ptgnustack(file);
59180+ }
59181+}
59182+#endif
59183+
59184 static int __init init_elf_binfmt(void)
59185 {
59186 register_binfmt(&elf_format);
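elf_handle_mmap() above is the RWXMAP_LOG hook: whenever an ELF object is mapped it re-reads the headers and reports files whose PT_GNU_STACK header requests an executable stack. A minimal userspace reader performing the same scan with glibc's <elf.h> types (64-bit only, error handling trimmed):

#include <elf.h>
#include <stdio.h>
#include <string.h>

/* returns 1 if any PT_GNU_STACK header requests PF_X, mirroring what
 * elf_handle_mmap() logs via gr_log_ptgnustack() */
static int wants_exec_stack(FILE *f)
{
        Elf64_Ehdr eh;
        Elf64_Phdr ph;
        unsigned int i;

        if (fread(&eh, sizeof(eh), 1, f) != 1 ||
            memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0)
                return 0;
        for (i = 0; i < eh.e_phnum; i++) {
                if (fseek(f, (long)(eh.e_phoff + i * sizeof(ph)), SEEK_SET) ||
                    fread(&ph, sizeof(ph), 1, f) != 1)
                        return 0;
                if (ph.p_type == PT_GNU_STACK && (ph.p_flags & PF_X))
                        return 1;
        }
        return 0;
}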
59187diff --git a/fs/block_dev.c b/fs/block_dev.c
59188index b48c41b..e070416 100644
59189--- a/fs/block_dev.c
59190+++ b/fs/block_dev.c
59191@@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
59192 else if (bdev->bd_contains == bdev)
59193 return true; /* is a whole device which isn't held */
59194
59195- else if (whole->bd_holder == bd_may_claim)
59196+ else if (whole->bd_holder == (void *)bd_may_claim)
59197 return true; /* is a partition of a device that is being partitioned */
59198 else if (whole->bd_holder != NULL)
59199 return false; /* is a partition of a held device */
59200diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
59201index f54511d..58acdec 100644
59202--- a/fs/btrfs/ctree.c
59203+++ b/fs/btrfs/ctree.c
59204@@ -1173,9 +1173,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
59205 free_extent_buffer(buf);
59206 add_root_to_dirty_list(root);
59207 } else {
59208- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
59209- parent_start = parent->start;
59210- else
59211+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
59212+ if (parent)
59213+ parent_start = parent->start;
59214+ else
59215+ parent_start = 0;
59216+ } else
59217 parent_start = 0;
59218
59219 WARN_ON(trans->transid != btrfs_header_generation(parent));
59220diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
59221index de4e70f..b41dc45 100644
59222--- a/fs/btrfs/delayed-inode.c
59223+++ b/fs/btrfs/delayed-inode.c
59224@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
59225
59226 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
59227 {
59228- int seq = atomic_inc_return(&delayed_root->items_seq);
59229+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
59230 if ((atomic_dec_return(&delayed_root->items) <
59231 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
59232 waitqueue_active(&delayed_root->wait))
59233@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
59234
59235 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
59236 {
59237- int val = atomic_read(&delayed_root->items_seq);
59238+ int val = atomic_read_unchecked(&delayed_root->items_seq);
59239
59240 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
59241 return 1;
59242@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
59243 int seq;
59244 int ret;
59245
59246- seq = atomic_read(&delayed_root->items_seq);
59247+ seq = atomic_read_unchecked(&delayed_root->items_seq);
59248
59249 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
59250 if (ret)
59251diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
59252index f70119f..ab5894d 100644
59253--- a/fs/btrfs/delayed-inode.h
59254+++ b/fs/btrfs/delayed-inode.h
59255@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
59256 */
59257 struct list_head prepare_list;
59258 atomic_t items; /* for delayed items */
59259- atomic_t items_seq; /* for delayed items */
59260+ atomic_unchecked_t items_seq; /* for delayed items */
59261 int nodes; /* for delayed nodes */
59262 wait_queue_head_t wait;
59263 };
59264@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
59265 struct btrfs_delayed_root *delayed_root)
59266 {
59267 atomic_set(&delayed_root->items, 0);
59268- atomic_set(&delayed_root->items_seq, 0);
59269+ atomic_set_unchecked(&delayed_root->items_seq, 0);
59270 delayed_root->nodes = 0;
59271 spin_lock_init(&delayed_root->lock);
59272 init_waitqueue_head(&delayed_root->wait);
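items_seq is a wrapping sequence counter rather than a reference count, so under PaX's REFCOUNT hardening, which traps atomic_t overflow to catch refcount bugs, it has to become atomic_unchecked_t, whose operations deliberately omit the overflow trap. The same conversion recurs below for the cachefiles histograms and grave counter, the cifs statistics, and the ceph bdi sequence. The relationship between the two types, sketched with a GCC builtin standing in for the per-arch assembly:

typedef struct {
        int counter;
} atomic_unchecked_t;

/* under PAX_REFCOUNT, atomic_inc() adds and then traps (and undoes the
 * add) on signed overflow; the _unchecked variant is a plain add, since
 * wraparound is harmless for sequence numbers and statistics */
static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}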
59273diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
59274index d49fe8a..2e11037 100644
59275--- a/fs/btrfs/ioctl.c
59276+++ b/fs/btrfs/ioctl.c
59277@@ -3925,9 +3925,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
59278 for (i = 0; i < num_types; i++) {
59279 struct btrfs_space_info *tmp;
59280
59281+ /* Don't copy in more than we allocated */
59282 if (!slot_count)
59283 break;
59284
59285+ slot_count--;
59286+
59287 info = NULL;
59288 rcu_read_lock();
59289 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
59290@@ -3949,10 +3952,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
59291 memcpy(dest, &space, sizeof(space));
59292 dest++;
59293 space_args.total_spaces++;
59294- slot_count--;
59295 }
59296- if (!slot_count)
59297- break;
59298 }
59299 up_read(&info->groups_sem);
59300 }
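The btrfs_ioctl_space_info() fix moves the slot_count decrement ahead of the copy: previously it was only decremented after a successful copy inside the inner loop, and the outer iteration could keep appending entries past the allocation sized from the user's slot count. The guarded-copy shape, reduced to a standalone sketch (struct info is a placeholder):

#include <stddef.h>

struct info { int dummy; };                     /* placeholder payload */

/* copy at most 'slots' entries, counting down before each copy so the
 * destination sized from the user's slot count can never be overrun */
static size_t copy_bounded(struct info *dst, const struct info *src,
                           size_t nsrc, size_t slots)
{
        size_t copied = 0, i;

        for (i = 0; i < nsrc; i++) {
                if (!slots)
                        break;
                slots--;
                dst[copied++] = src[i];
        }
        return copied;
}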
59301diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
59302index 6f49b28..483410f 100644
59303--- a/fs/btrfs/super.c
59304+++ b/fs/btrfs/super.c
59305@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
59306 function, line, errstr);
59307 return;
59308 }
59309- ACCESS_ONCE(trans->transaction->aborted) = errno;
59310+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
59311 /* Wake up anybody who may be waiting on this transaction */
59312 wake_up(&root->fs_info->transaction_wait);
59313 wake_up(&root->fs_info->transaction_blocked_wait);
59314diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
59315index 92db3f6..898a561 100644
59316--- a/fs/btrfs/sysfs.c
59317+++ b/fs/btrfs/sysfs.c
59318@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
59319 for (set = 0; set < FEAT_MAX; set++) {
59320 int i;
59321 struct attribute *attrs[2];
59322- struct attribute_group agroup = {
59323+ attribute_group_no_const agroup = {
59324 .name = "features",
59325 .attrs = attrs,
59326 };
59327diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
59328index 2299bfd..4098e72 100644
59329--- a/fs/btrfs/tests/free-space-tests.c
59330+++ b/fs/btrfs/tests/free-space-tests.c
59331@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59332 * extent entry.
59333 */
59334 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
59335- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59336+ pax_open_kernel();
59337+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59338+ pax_close_kernel();
59339
59340 /*
59341 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
59342@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59343 if (ret)
59344 return ret;
59345
59346- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59347+ pax_open_kernel();
59348+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59349+ pax_close_kernel();
59350 __btrfs_remove_free_space_cache(cache->free_space_ctl);
59351
59352 return 0;
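free_space_op is constified under PaX, so even the btrfs self-test may only swap its use_bitmap hook between pax_open_kernel() and pax_close_kernel(), which temporarily lift kernel write protection (on x86 by toggling CR0.WP), and through a *(void **)& cast that sidesteps the const qualifier. The pattern in miniature (stub hooks; in the kernel the table really is read-only):

struct ops {
        int (*use_bitmap)(void);
};

static struct ops cache_ops;    /* const and in .rodata in the kernel build */

static void pax_open_kernel(void)  { /* arch hook: lift write protection */ }
static void pax_close_kernel(void) { /* arch hook: restore it */ }

static void set_use_bitmap(const struct ops *ops, int (*fn)(void))
{
        pax_open_kernel();
        *(int (**)(void))&ops->use_bitmap = fn; /* sidestep the const qualifier */
        pax_close_kernel();
}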
59353diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
59354index f78e9dc..069ab24 100644
59355--- a/fs/btrfs/tree-log.c
59356+++ b/fs/btrfs/tree-log.c
59357@@ -1010,7 +1010,7 @@ again:
59358 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
59359
59360 while (cur_offset < item_size) {
59361- extref = (struct btrfs_inode_extref *)base + cur_offset;
59362+ extref = (struct btrfs_inode_extref *)(base + cur_offset);
59363
59364 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
59365
59366diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
59367index 154990c..d0cf699 100644
59368--- a/fs/btrfs/tree-log.h
59369+++ b/fs/btrfs/tree-log.h
59370@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
59371 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
59372 struct btrfs_trans_handle *trans)
59373 {
59374- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
59375+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
59376 }
59377
59378 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
59379diff --git a/fs/buffer.c b/fs/buffer.c
59380index 20805db..2e8fc69 100644
59381--- a/fs/buffer.c
59382+++ b/fs/buffer.c
59383@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
59384 bh_cachep = kmem_cache_create("buffer_head",
59385 sizeof(struct buffer_head), 0,
59386 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
59387- SLAB_MEM_SPREAD),
59388+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
59389 NULL);
59390
59391 /*
59392diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
59393index fbb08e9..0fda764 100644
59394--- a/fs/cachefiles/bind.c
59395+++ b/fs/cachefiles/bind.c
59396@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
59397 args);
59398
59399 /* start by checking things over */
59400- ASSERT(cache->fstop_percent >= 0 &&
59401- cache->fstop_percent < cache->fcull_percent &&
59402+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
59403 cache->fcull_percent < cache->frun_percent &&
59404 cache->frun_percent < 100);
59405
59406- ASSERT(cache->bstop_percent >= 0 &&
59407- cache->bstop_percent < cache->bcull_percent &&
59408+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
59409 cache->bcull_percent < cache->brun_percent &&
59410 cache->brun_percent < 100);
59411
59412diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
59413index ce1b115..4a6852c 100644
59414--- a/fs/cachefiles/daemon.c
59415+++ b/fs/cachefiles/daemon.c
59416@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
59417 if (n > buflen)
59418 return -EMSGSIZE;
59419
59420- if (copy_to_user(_buffer, buffer, n) != 0)
59421+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
59422 return -EFAULT;
59423
59424 return n;
59425@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
59426 if (test_bit(CACHEFILES_DEAD, &cache->flags))
59427 return -EIO;
59428
59429- if (datalen < 0 || datalen > PAGE_SIZE - 1)
59430+ if (datalen > PAGE_SIZE - 1)
59431 return -EOPNOTSUPP;
59432
59433 /* drag the command string into the kernel so we can parse it */
59434@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
59435 if (args[0] != '%' || args[1] != '\0')
59436 return -EINVAL;
59437
59438- if (fstop < 0 || fstop >= cache->fcull_percent)
59439+ if (fstop >= cache->fcull_percent)
59440 return cachefiles_daemon_range_error(cache, args);
59441
59442 cache->fstop_percent = fstop;
59443@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
59444 if (args[0] != '%' || args[1] != '\0')
59445 return -EINVAL;
59446
59447- if (bstop < 0 || bstop >= cache->bcull_percent)
59448+ if (bstop >= cache->bcull_percent)
59449 return cachefiles_daemon_range_error(cache, args);
59450
59451 cache->bstop_percent = bstop;
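The cachefiles percentage fields are unsigned, so the deleted `>= 0`/`< 0` tests could never fire and merely obscured the real bounds; the daemon read path additionally gains an explicit n > sizeof(buffer) clamp before copy_to_user(). The underlying pitfall in a few lines of runnable C:

#include <stdio.h>

int main(void)
{
        unsigned int datalen = (unsigned int)-1; /* e.g. a negative int cast up */

        if (datalen < 0)                /* always false: -Wtype-limits warns */
                puts("never reached");
        if (datalen > 4096 - 1)         /* the only bound that can work */
                puts("rejected: too large");
        return 0;
}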
59452diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
59453index 8c52472..c4e3a69 100644
59454--- a/fs/cachefiles/internal.h
59455+++ b/fs/cachefiles/internal.h
59456@@ -66,7 +66,7 @@ struct cachefiles_cache {
59457 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
59458 struct rb_root active_nodes; /* active nodes (can't be culled) */
59459 rwlock_t active_lock; /* lock for active_nodes */
59460- atomic_t gravecounter; /* graveyard uniquifier */
59461+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
59462 unsigned frun_percent; /* when to stop culling (% files) */
59463 unsigned fcull_percent; /* when to start culling (% files) */
59464 unsigned fstop_percent; /* when to stop allocating (% files) */
59465@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
59466 * proc.c
59467 */
59468 #ifdef CONFIG_CACHEFILES_HISTOGRAM
59469-extern atomic_t cachefiles_lookup_histogram[HZ];
59470-extern atomic_t cachefiles_mkdir_histogram[HZ];
59471-extern atomic_t cachefiles_create_histogram[HZ];
59472+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59473+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59474+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
59475
59476 extern int __init cachefiles_proc_init(void);
59477 extern void cachefiles_proc_cleanup(void);
59478 static inline
59479-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
59480+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
59481 {
59482 unsigned long jif = jiffies - start_jif;
59483 if (jif >= HZ)
59484 jif = HZ - 1;
59485- atomic_inc(&histogram[jif]);
59486+ atomic_inc_unchecked(&histogram[jif]);
59487 }
59488
59489 #else
59490diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
59491index 7f8e83f..8951aa4 100644
59492--- a/fs/cachefiles/namei.c
59493+++ b/fs/cachefiles/namei.c
59494@@ -309,7 +309,7 @@ try_again:
59495 /* first step is to make up a grave dentry in the graveyard */
59496 sprintf(nbuffer, "%08x%08x",
59497 (uint32_t) get_seconds(),
59498- (uint32_t) atomic_inc_return(&cache->gravecounter));
59499+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
59500
59501 /* do the multiway lock magic */
59502 trap = lock_rename(cache->graveyard, dir);
59503diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
59504index eccd339..4c1d995 100644
59505--- a/fs/cachefiles/proc.c
59506+++ b/fs/cachefiles/proc.c
59507@@ -14,9 +14,9 @@
59508 #include <linux/seq_file.h>
59509 #include "internal.h"
59510
59511-atomic_t cachefiles_lookup_histogram[HZ];
59512-atomic_t cachefiles_mkdir_histogram[HZ];
59513-atomic_t cachefiles_create_histogram[HZ];
59514+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59515+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59516+atomic_unchecked_t cachefiles_create_histogram[HZ];
59517
59518 /*
59519 * display the latency histogram
59520@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
59521 return 0;
59522 default:
59523 index = (unsigned long) v - 3;
59524- x = atomic_read(&cachefiles_lookup_histogram[index]);
59525- y = atomic_read(&cachefiles_mkdir_histogram[index]);
59526- z = atomic_read(&cachefiles_create_histogram[index]);
59527+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
59528+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
59529+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
59530 if (x == 0 && y == 0 && z == 0)
59531 return 0;
59532
59533diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
59534index c241603..56bae60 100644
59535--- a/fs/ceph/dir.c
59536+++ b/fs/ceph/dir.c
59537@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
59538 struct dentry *dentry, *last;
59539 struct ceph_dentry_info *di;
59540 int err = 0;
59541+ char d_name[DNAME_INLINE_LEN];
59542+ const unsigned char *name;
59543
59544 /* claim ref on last dentry we returned */
59545 last = fi->dentry;
59546@@ -192,7 +194,12 @@ more:
59547
59548 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
59549 dentry, dentry, dentry->d_inode);
59550- if (!dir_emit(ctx, dentry->d_name.name,
59551+ name = dentry->d_name.name;
59552+ if (name == dentry->d_iname) {
59553+ memcpy(d_name, name, dentry->d_name.len);
59554+ name = d_name;
59555+ }
59556+ if (!dir_emit(ctx, name,
59557 dentry->d_name.len,
59558 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
59559 dentry->d_inode->i_mode >> 12)) {
59560@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
59561 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
59562 struct ceph_mds_client *mdsc = fsc->mdsc;
59563 unsigned frag = fpos_frag(ctx->pos);
59564- int off = fpos_off(ctx->pos);
59565+ unsigned int off = fpos_off(ctx->pos);
59566 int err;
59567 u32 ftype;
59568 struct ceph_mds_reply_info_parsed *rinfo;
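In __dcache_readdir(), dentry->d_name.name can point at the dentry's inline storage (d_iname), which a concurrent rename may overwrite while dir_emit() is still reading it, so the hunk snapshots short names into a stack buffer first (the signed off is also widened to unsigned to match fpos_off()). The snapshot idiom, with a hypothetical dentry_view standing in for struct dentry:

#include <stddef.h>
#include <string.h>

struct dentry_view {                    /* hypothetical, not struct dentry */
        const char *name;
        const char *iname;              /* inline short-name storage */
        size_t len;
};

/* snapshot an inline name so a concurrent rename cannot mutate it while
 * it is being emitted; names in external storage are stable enough here */
static const char *stable_name(const struct dentry_view *d,
                               char *buf, size_t buflen)
{
        if (d->name == d->iname && d->len < buflen) {
                memcpy(buf, d->name, d->len);
                buf[d->len] = '\0';
                return buf;
        }
        return d->name;
}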
59569diff --git a/fs/ceph/super.c b/fs/ceph/super.c
59570index 50f06cd..c7eba3e 100644
59571--- a/fs/ceph/super.c
59572+++ b/fs/ceph/super.c
59573@@ -896,7 +896,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
59574 /*
59575 * construct our own bdi so we can control readahead, etc.
59576 */
59577-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
59578+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
59579
59580 static int ceph_register_bdi(struct super_block *sb,
59581 struct ceph_fs_client *fsc)
59582@@ -913,7 +913,7 @@ static int ceph_register_bdi(struct super_block *sb,
59583 default_backing_dev_info.ra_pages;
59584
59585 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
59586- atomic_long_inc_return(&bdi_seq));
59587+ atomic_long_inc_return_unchecked(&bdi_seq));
59588 if (!err)
59589 sb->s_bdi = &fsc->backing_dev_info;
59590 return err;
59591diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
59592index 7febcf2..62a5721 100644
59593--- a/fs/cifs/cifs_debug.c
59594+++ b/fs/cifs/cifs_debug.c
59595@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59596
59597 if (strtobool(&c, &bv) == 0) {
59598 #ifdef CONFIG_CIFS_STATS2
59599- atomic_set(&totBufAllocCount, 0);
59600- atomic_set(&totSmBufAllocCount, 0);
59601+ atomic_set_unchecked(&totBufAllocCount, 0);
59602+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59603 #endif /* CONFIG_CIFS_STATS2 */
59604 spin_lock(&cifs_tcp_ses_lock);
59605 list_for_each(tmp1, &cifs_tcp_ses_list) {
59606@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59607 tcon = list_entry(tmp3,
59608 struct cifs_tcon,
59609 tcon_list);
59610- atomic_set(&tcon->num_smbs_sent, 0);
59611+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
59612 if (server->ops->clear_stats)
59613 server->ops->clear_stats(tcon);
59614 }
59615@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59616 smBufAllocCount.counter, cifs_min_small);
59617 #ifdef CONFIG_CIFS_STATS2
59618 seq_printf(m, "Total Large %d Small %d Allocations\n",
59619- atomic_read(&totBufAllocCount),
59620- atomic_read(&totSmBufAllocCount));
59621+ atomic_read_unchecked(&totBufAllocCount),
59622+ atomic_read_unchecked(&totSmBufAllocCount));
59623 #endif /* CONFIG_CIFS_STATS2 */
59624
59625 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
59626@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59627 if (tcon->need_reconnect)
59628 seq_puts(m, "\tDISCONNECTED ");
59629 seq_printf(m, "\nSMBs: %d",
59630- atomic_read(&tcon->num_smbs_sent));
59631+ atomic_read_unchecked(&tcon->num_smbs_sent));
59632 if (server->ops->print_stats)
59633 server->ops->print_stats(m, tcon);
59634 }
59635diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
59636index d72fe37..ded5511 100644
59637--- a/fs/cifs/cifsfs.c
59638+++ b/fs/cifs/cifsfs.c
59639@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
59640 */
59641 cifs_req_cachep = kmem_cache_create("cifs_request",
59642 CIFSMaxBufSize + max_hdr_size, 0,
59643- SLAB_HWCACHE_ALIGN, NULL);
59644+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
59645 if (cifs_req_cachep == NULL)
59646 return -ENOMEM;
59647
59648@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
59649 efficient to alloc 1 per page off the slab compared to 17K (5page)
59650 alloc of large cifs buffers even when page debugging is on */
59651 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
59652- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
59653+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
59654 NULL);
59655 if (cifs_sm_req_cachep == NULL) {
59656 mempool_destroy(cifs_req_poolp);
59657@@ -1204,8 +1204,8 @@ init_cifs(void)
59658 atomic_set(&bufAllocCount, 0);
59659 atomic_set(&smBufAllocCount, 0);
59660 #ifdef CONFIG_CIFS_STATS2
59661- atomic_set(&totBufAllocCount, 0);
59662- atomic_set(&totSmBufAllocCount, 0);
59663+ atomic_set_unchecked(&totBufAllocCount, 0);
59664+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59665 #endif /* CONFIG_CIFS_STATS2 */
59666
59667 atomic_set(&midCount, 0);
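SLAB_USERCOPY marks the two cifs request caches as legitimate sources and targets of user copies: under PAX_USERCOPY, copy_to_user()/copy_from_user() spans that land in slab memory are refused unless the cache was whitelisted at creation and the span stays inside one object. The conceptual check, not the real implementation:

#include <stdbool.h>
#include <stddef.h>

struct cache_view {                     /* conceptual stand-in for a kmem_cache */
        bool usercopy_whitelisted;      /* created with SLAB_USERCOPY */
        size_t object_size;
};

static bool usercopy_allowed(const struct cache_view *c,
                             size_t off, size_t len)
{
        return c->usercopy_whitelisted &&
               len <= c->object_size &&
               off <= c->object_size - len;     /* span stays inside one object */
}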
59668diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
59669index 22b289a..bbbba08 100644
59670--- a/fs/cifs/cifsglob.h
59671+++ b/fs/cifs/cifsglob.h
59672@@ -823,35 +823,35 @@ struct cifs_tcon {
59673 __u16 Flags; /* optional support bits */
59674 enum statusEnum tidStatus;
59675 #ifdef CONFIG_CIFS_STATS
59676- atomic_t num_smbs_sent;
59677+ atomic_unchecked_t num_smbs_sent;
59678 union {
59679 struct {
59680- atomic_t num_writes;
59681- atomic_t num_reads;
59682- atomic_t num_flushes;
59683- atomic_t num_oplock_brks;
59684- atomic_t num_opens;
59685- atomic_t num_closes;
59686- atomic_t num_deletes;
59687- atomic_t num_mkdirs;
59688- atomic_t num_posixopens;
59689- atomic_t num_posixmkdirs;
59690- atomic_t num_rmdirs;
59691- atomic_t num_renames;
59692- atomic_t num_t2renames;
59693- atomic_t num_ffirst;
59694- atomic_t num_fnext;
59695- atomic_t num_fclose;
59696- atomic_t num_hardlinks;
59697- atomic_t num_symlinks;
59698- atomic_t num_locks;
59699- atomic_t num_acl_get;
59700- atomic_t num_acl_set;
59701+ atomic_unchecked_t num_writes;
59702+ atomic_unchecked_t num_reads;
59703+ atomic_unchecked_t num_flushes;
59704+ atomic_unchecked_t num_oplock_brks;
59705+ atomic_unchecked_t num_opens;
59706+ atomic_unchecked_t num_closes;
59707+ atomic_unchecked_t num_deletes;
59708+ atomic_unchecked_t num_mkdirs;
59709+ atomic_unchecked_t num_posixopens;
59710+ atomic_unchecked_t num_posixmkdirs;
59711+ atomic_unchecked_t num_rmdirs;
59712+ atomic_unchecked_t num_renames;
59713+ atomic_unchecked_t num_t2renames;
59714+ atomic_unchecked_t num_ffirst;
59715+ atomic_unchecked_t num_fnext;
59716+ atomic_unchecked_t num_fclose;
59717+ atomic_unchecked_t num_hardlinks;
59718+ atomic_unchecked_t num_symlinks;
59719+ atomic_unchecked_t num_locks;
59720+ atomic_unchecked_t num_acl_get;
59721+ atomic_unchecked_t num_acl_set;
59722 } cifs_stats;
59723 #ifdef CONFIG_CIFS_SMB2
59724 struct {
59725- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59726- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59727+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59728+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59729 } smb2_stats;
59730 #endif /* CONFIG_CIFS_SMB2 */
59731 } stats;
59732@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
59733 }
59734
59735 #ifdef CONFIG_CIFS_STATS
59736-#define cifs_stats_inc atomic_inc
59737+#define cifs_stats_inc atomic_inc_unchecked
59738
59739 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
59740 unsigned int bytes)
59741@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
59742 /* Various Debug counters */
59743 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
59744 #ifdef CONFIG_CIFS_STATS2
59745-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
59746-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
59747+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
59748+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
59749 #endif
59750 GLOBAL_EXTERN atomic_t smBufAllocCount;
59751 GLOBAL_EXTERN atomic_t midCount;
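
The cifsglob.h conversions above separate wrap-tolerant counters from real reference counts: with PAX_REFCOUNT enabled, atomic_t operations trap on signed overflow, so per-tcon statistics and lifetime allocation totals, which may wrap without harm, move to atomic_unchecked_t. A userspace sketch of the distinction, using a GCC builtin in place of the kernel's asm-level overflow check:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;            /* overflow-checked */
typedef struct { int counter; } atomic_unchecked_t;  /* allowed to wrap  */

static void atomic_inc(atomic_t *v)
{
	int next;

	/* PAX_REFCOUNT detects signed overflow in the atomic op itself;
	 * this builtin merely models that check in userspace */
	if (__builtin_add_overflow(v->counter, 1, &next)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();	/* the kernel kills the offending task */
	}
	v->counter = next;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* statistics such as totBufAllocCount may wrap without being a
	 * sign of exploitation, hence no check */
	v->counter = (int)((unsigned int)v->counter + 1);
}

int main(void)
{
	atomic_unchecked_t stats = { INT_MAX };
	atomic_t ref = { INT_MAX };

	atomic_inc_unchecked(&stats);	/* wraps, tolerated */
	printf("stats wrapped to %d\n", stats.counter);

	atomic_inc(&ref);		/* trips the overflow check */
	return 0;
}
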
59752diff --git a/fs/cifs/file.c b/fs/cifs/file.c
59753index 74f1287..7ef0237 100644
59754--- a/fs/cifs/file.c
59755+++ b/fs/cifs/file.c
59756@@ -2060,10 +2060,14 @@ static int cifs_writepages(struct address_space *mapping,
59757 index = mapping->writeback_index; /* Start from prev offset */
59758 end = -1;
59759 } else {
59760- index = wbc->range_start >> PAGE_CACHE_SHIFT;
59761- end = wbc->range_end >> PAGE_CACHE_SHIFT;
59762- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
59763+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
59764 range_whole = true;
59765+ index = 0;
59766+ end = ULONG_MAX;
59767+ } else {
59768+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
59769+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
59770+ }
59771 scanned = true;
59772 }
59773 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
59774diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
59775index 3379463..3af418a 100644
59776--- a/fs/cifs/misc.c
59777+++ b/fs/cifs/misc.c
59778@@ -170,7 +170,7 @@ cifs_buf_get(void)
59779 memset(ret_buf, 0, buf_size + 3);
59780 atomic_inc(&bufAllocCount);
59781 #ifdef CONFIG_CIFS_STATS2
59782- atomic_inc(&totBufAllocCount);
59783+ atomic_inc_unchecked(&totBufAllocCount);
59784 #endif /* CONFIG_CIFS_STATS2 */
59785 }
59786
59787@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
59788 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
59789 atomic_inc(&smBufAllocCount);
59790 #ifdef CONFIG_CIFS_STATS2
59791- atomic_inc(&totSmBufAllocCount);
59792+ atomic_inc_unchecked(&totSmBufAllocCount);
59793 #endif /* CONFIG_CIFS_STATS2 */
59794
59795 }
59796diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
59797index d297903..1cb7516 100644
59798--- a/fs/cifs/smb1ops.c
59799+++ b/fs/cifs/smb1ops.c
59800@@ -622,27 +622,27 @@ static void
59801 cifs_clear_stats(struct cifs_tcon *tcon)
59802 {
59803 #ifdef CONFIG_CIFS_STATS
59804- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
59805- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
59806- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
59807- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59808- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
59809- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
59810- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59811- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
59812- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
59813- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
59814- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
59815- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
59816- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
59817- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
59818- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
59819- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
59820- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
59821- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
59822- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
59823- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
59824- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
59825+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
59826+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
59827+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
59828+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59829+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
59830+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
59831+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59832+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
59833+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
59834+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
59835+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
59836+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
59837+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
59838+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
59839+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
59840+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
59841+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
59842+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
59843+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
59844+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
59845+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
59846 #endif
59847 }
59848
59849@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59850 {
59851 #ifdef CONFIG_CIFS_STATS
59852 seq_printf(m, " Oplocks breaks: %d",
59853- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
59854+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
59855 seq_printf(m, "\nReads: %d Bytes: %llu",
59856- atomic_read(&tcon->stats.cifs_stats.num_reads),
59857+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
59858 (long long)(tcon->bytes_read));
59859 seq_printf(m, "\nWrites: %d Bytes: %llu",
59860- atomic_read(&tcon->stats.cifs_stats.num_writes),
59861+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
59862 (long long)(tcon->bytes_written));
59863 seq_printf(m, "\nFlushes: %d",
59864- atomic_read(&tcon->stats.cifs_stats.num_flushes));
59865+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
59866 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
59867- atomic_read(&tcon->stats.cifs_stats.num_locks),
59868- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
59869- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
59870+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
59871+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
59872+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
59873 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
59874- atomic_read(&tcon->stats.cifs_stats.num_opens),
59875- atomic_read(&tcon->stats.cifs_stats.num_closes),
59876- atomic_read(&tcon->stats.cifs_stats.num_deletes));
59877+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
59878+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
59879+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
59880 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
59881- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
59882- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
59883+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
59884+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
59885 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
59886- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
59887- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
59888+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
59889+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
59890 seq_printf(m, "\nRenames: %d T2 Renames %d",
59891- atomic_read(&tcon->stats.cifs_stats.num_renames),
59892- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
59893+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
59894+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
59895 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
59896- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
59897- atomic_read(&tcon->stats.cifs_stats.num_fnext),
59898- atomic_read(&tcon->stats.cifs_stats.num_fclose));
59899+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
59900+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
59901+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
59902 #endif
59903 }
59904
59905diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
59906index 96b5d40..e5db0c1 100644
59907--- a/fs/cifs/smb2ops.c
59908+++ b/fs/cifs/smb2ops.c
59909@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
59910 #ifdef CONFIG_CIFS_STATS
59911 int i;
59912 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
59913- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59914- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59915+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59916+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59917 }
59918 #endif
59919 }
59920@@ -459,65 +459,65 @@ static void
59921 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59922 {
59923 #ifdef CONFIG_CIFS_STATS
59924- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59925- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59926+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59927+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59928 seq_printf(m, "\nNegotiates: %d sent %d failed",
59929- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
59930- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
59931+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
59932+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
59933 seq_printf(m, "\nSessionSetups: %d sent %d failed",
59934- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
59935- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
59936+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
59937+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
59938 seq_printf(m, "\nLogoffs: %d sent %d failed",
59939- atomic_read(&sent[SMB2_LOGOFF_HE]),
59940- atomic_read(&failed[SMB2_LOGOFF_HE]));
59941+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
59942+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
59943 seq_printf(m, "\nTreeConnects: %d sent %d failed",
59944- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
59945- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
59946+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
59947+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
59948 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
59949- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
59950- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
59951+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
59952+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
59953 seq_printf(m, "\nCreates: %d sent %d failed",
59954- atomic_read(&sent[SMB2_CREATE_HE]),
59955- atomic_read(&failed[SMB2_CREATE_HE]));
59956+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
59957+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
59958 seq_printf(m, "\nCloses: %d sent %d failed",
59959- atomic_read(&sent[SMB2_CLOSE_HE]),
59960- atomic_read(&failed[SMB2_CLOSE_HE]));
59961+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
59962+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
59963 seq_printf(m, "\nFlushes: %d sent %d failed",
59964- atomic_read(&sent[SMB2_FLUSH_HE]),
59965- atomic_read(&failed[SMB2_FLUSH_HE]));
59966+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
59967+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
59968 seq_printf(m, "\nReads: %d sent %d failed",
59969- atomic_read(&sent[SMB2_READ_HE]),
59970- atomic_read(&failed[SMB2_READ_HE]));
59971+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
59972+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
59973 seq_printf(m, "\nWrites: %d sent %d failed",
59974- atomic_read(&sent[SMB2_WRITE_HE]),
59975- atomic_read(&failed[SMB2_WRITE_HE]));
59976+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
59977+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
59978 seq_printf(m, "\nLocks: %d sent %d failed",
59979- atomic_read(&sent[SMB2_LOCK_HE]),
59980- atomic_read(&failed[SMB2_LOCK_HE]));
59981+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
59982+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
59983 seq_printf(m, "\nIOCTLs: %d sent %d failed",
59984- atomic_read(&sent[SMB2_IOCTL_HE]),
59985- atomic_read(&failed[SMB2_IOCTL_HE]));
59986+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
59987+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
59988 seq_printf(m, "\nCancels: %d sent %d failed",
59989- atomic_read(&sent[SMB2_CANCEL_HE]),
59990- atomic_read(&failed[SMB2_CANCEL_HE]));
59991+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
59992+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
59993 seq_printf(m, "\nEchos: %d sent %d failed",
59994- atomic_read(&sent[SMB2_ECHO_HE]),
59995- atomic_read(&failed[SMB2_ECHO_HE]));
59996+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
59997+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
59998 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
59999- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
60000- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
60001+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
60002+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
60003 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
60004- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
60005- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
60006+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
60007+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
60008 seq_printf(m, "\nQueryInfos: %d sent %d failed",
60009- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
60010- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
60011+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
60012+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
60013 seq_printf(m, "\nSetInfos: %d sent %d failed",
60014- atomic_read(&sent[SMB2_SET_INFO_HE]),
60015- atomic_read(&failed[SMB2_SET_INFO_HE]));
60016+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
60017+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
60018 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
60019- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
60020- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
60021+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
60022+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
60023 #endif
60024 }
60025
60026diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
60027index 3417340..b942390 100644
60028--- a/fs/cifs/smb2pdu.c
60029+++ b/fs/cifs/smb2pdu.c
60030@@ -2144,8 +2144,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
60031 default:
60032 cifs_dbg(VFS, "info level %u isn't supported\n",
60033 srch_inf->info_level);
60034- rc = -EINVAL;
60035- goto qdir_exit;
60036+ return -EINVAL;
60037 }
60038
60039 req->FileIndex = cpu_to_le32(index);
60040diff --git a/fs/coda/cache.c b/fs/coda/cache.c
60041index 46ee6f2..89a9e7f 100644
60042--- a/fs/coda/cache.c
60043+++ b/fs/coda/cache.c
60044@@ -24,7 +24,7 @@
60045 #include "coda_linux.h"
60046 #include "coda_cache.h"
60047
60048-static atomic_t permission_epoch = ATOMIC_INIT(0);
60049+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
60050
60051 /* replace or extend an acl cache hit */
60052 void coda_cache_enter(struct inode *inode, int mask)
60053@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
60054 struct coda_inode_info *cii = ITOC(inode);
60055
60056 spin_lock(&cii->c_lock);
60057- cii->c_cached_epoch = atomic_read(&permission_epoch);
60058+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
60059 if (!uid_eq(cii->c_uid, current_fsuid())) {
60060 cii->c_uid = current_fsuid();
60061 cii->c_cached_perm = mask;
60062@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
60063 {
60064 struct coda_inode_info *cii = ITOC(inode);
60065 spin_lock(&cii->c_lock);
60066- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
60067+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
60068 spin_unlock(&cii->c_lock);
60069 }
60070
60071 /* remove all acl caches */
60072 void coda_cache_clear_all(struct super_block *sb)
60073 {
60074- atomic_inc(&permission_epoch);
60075+ atomic_inc_unchecked(&permission_epoch);
60076 }
60077
60078
60079@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
60080 spin_lock(&cii->c_lock);
60081 hit = (mask & cii->c_cached_perm) == mask &&
60082 uid_eq(cii->c_uid, current_fsuid()) &&
60083- cii->c_cached_epoch == atomic_read(&permission_epoch);
60084+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
60085 spin_unlock(&cii->c_lock);
60086
60087 return hit;
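
The coda hunks above keep the epoch-based invalidation scheme intact while switching the counter type: each cached ACL entry records the global permission_epoch at fill time, and coda_cache_clear_all() invalidates every entry at once by bumping the epoch. A compact userspace model of that scheme, with C11 atomics standing in for the kernel's atomic ops:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int permission_epoch;

struct cache_entry {
	int cached_epoch;
	int cached_perm;
};

/* fill: remember the epoch the permission was learned under */
static void cache_enter(struct cache_entry *e, int mask)
{
	e->cached_epoch = atomic_load(&permission_epoch);
	e->cached_perm = mask;
}

/* hit only if the mask is covered AND the epoch is still current */
static bool cache_check(const struct cache_entry *e, int mask)
{
	return (mask & e->cached_perm) == mask &&
	       e->cached_epoch == atomic_load(&permission_epoch);
}

/* O(1) invalidation of every cached entry, as in coda_cache_clear_all() */
static void cache_clear_all(void)
{
	atomic_fetch_add(&permission_epoch, 1);
}

int main(void)
{
	struct cache_entry e;

	cache_enter(&e, 4);
	printf("hit before flush: %d\n", cache_check(&e, 4));
	cache_clear_all();
	printf("hit after flush:  %d\n", cache_check(&e, 4));
	return 0;
}
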
60088diff --git a/fs/compat.c b/fs/compat.c
60089index 6fd272d..dd34ba2 100644
60090--- a/fs/compat.c
60091+++ b/fs/compat.c
60092@@ -54,7 +54,7 @@
60093 #include <asm/ioctls.h>
60094 #include "internal.h"
60095
60096-int compat_log = 1;
60097+int compat_log = 0;
60098
60099 int compat_printk(const char *fmt, ...)
60100 {
60101@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
60102
60103 set_fs(KERNEL_DS);
60104 /* The __user pointer cast is valid because of the set_fs() */
60105- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
60106+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
60107 set_fs(oldfs);
60108 /* truncating is ok because it's a user address */
60109 if (!ret)
60110@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
60111 goto out;
60112
60113 ret = -EINVAL;
60114- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
60115+ if (nr_segs > UIO_MAXIOV)
60116 goto out;
60117 if (nr_segs > fast_segs) {
60118 ret = -ENOMEM;
60119@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
60120 struct compat_readdir_callback {
60121 struct dir_context ctx;
60122 struct compat_old_linux_dirent __user *dirent;
60123+ struct file * file;
60124 int result;
60125 };
60126
60127@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
60128 buf->result = -EOVERFLOW;
60129 return -EOVERFLOW;
60130 }
60131+
60132+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60133+ return 0;
60134+
60135 buf->result++;
60136 dirent = buf->dirent;
60137 if (!access_ok(VERIFY_WRITE, dirent,
60138@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60139 if (!f.file)
60140 return -EBADF;
60141
60142+ buf.file = f.file;
60143 error = iterate_dir(f.file, &buf.ctx);
60144 if (buf.result)
60145 error = buf.result;
60146@@ -913,6 +919,7 @@ struct compat_getdents_callback {
60147 struct dir_context ctx;
60148 struct compat_linux_dirent __user *current_dir;
60149 struct compat_linux_dirent __user *previous;
60150+ struct file * file;
60151 int count;
60152 int error;
60153 };
60154@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
60155 buf->error = -EOVERFLOW;
60156 return -EOVERFLOW;
60157 }
60158+
60159+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60160+ return 0;
60161+
60162 dirent = buf->previous;
60163 if (dirent) {
60164 if (__put_user(offset, &dirent->d_off))
60165@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
60166 if (!f.file)
60167 return -EBADF;
60168
60169+ buf.file = f.file;
60170 error = iterate_dir(f.file, &buf.ctx);
60171 if (error >= 0)
60172 error = buf.error;
60173@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
60174 struct dir_context ctx;
60175 struct linux_dirent64 __user *current_dir;
60176 struct linux_dirent64 __user *previous;
60177+ struct file * file;
60178 int count;
60179 int error;
60180 };
60181@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
60182 buf->error = -EINVAL; /* only used if we fail.. */
60183 if (reclen > buf->count)
60184 return -EINVAL;
60185+
60186+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60187+ return 0;
60188+
60189 dirent = buf->previous;
60190
60191 if (dirent) {
60192@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60193 if (!f.file)
60194 return -EBADF;
60195
60196+ buf.file = f.file;
60197 error = iterate_dir(f.file, &buf.ctx);
60198 if (error >= 0)
60199 error = buf.error;
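
All three compat getdents variants above gain a struct file pointer in their callback state so the gr_acl_handle_filldir() hook can consult the open file when deciding whether a directory entry may be revealed; denying an entry means returning 0 early, which skips the name while letting iteration continue. A userspace sketch of that filtering pattern, with a hidden-name check standing in for the real ACL lookup:

#include <stdio.h>
#include <string.h>

struct dir_walk {
	const char *hidden;	/* stands in for the per-file ACL state
				 * reachable through buf->file */
	int emitted;
};

/* dir_context actor convention: return 0 to keep iterating; a denied
 * entry returns 0 *without* emitting anything */
static int fillonedir(struct dir_walk *w, const char *name)
{
	if (w->hidden && strcmp(name, w->hidden) == 0)
		return 0;	/* filtered: silently skipped */
	printf("%s\n", name);
	w->emitted++;
	return 0;
}

int main(void)
{
	const char *entries[] = { ".", "..", "secret", "readme" };
	struct dir_walk w = { .hidden = "secret", .emitted = 0 };
	unsigned i;

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		fillonedir(&w, entries[i]);
	printf("emitted %d of 4\n", w.emitted);
	return 0;
}
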
60200diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
60201index 4d24d17..4f8c09e 100644
60202--- a/fs/compat_binfmt_elf.c
60203+++ b/fs/compat_binfmt_elf.c
60204@@ -30,11 +30,13 @@
60205 #undef elf_phdr
60206 #undef elf_shdr
60207 #undef elf_note
60208+#undef elf_dyn
60209 #undef elf_addr_t
60210 #define elfhdr elf32_hdr
60211 #define elf_phdr elf32_phdr
60212 #define elf_shdr elf32_shdr
60213 #define elf_note elf32_note
60214+#define elf_dyn Elf32_Dyn
60215 #define elf_addr_t Elf32_Addr
60216
60217 /*
60218diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
60219index afec645..9c65620 100644
60220--- a/fs/compat_ioctl.c
60221+++ b/fs/compat_ioctl.c
60222@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
60223 return -EFAULT;
60224 if (__get_user(udata, &ss32->iomem_base))
60225 return -EFAULT;
60226- ss.iomem_base = compat_ptr(udata);
60227+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
60228 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
60229 __get_user(ss.port_high, &ss32->port_high))
60230 return -EFAULT;
60231@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
60232 for (i = 0; i < nmsgs; i++) {
60233 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
60234 return -EFAULT;
60235- if (get_user(datap, &umsgs[i].buf) ||
60236- put_user(compat_ptr(datap), &tmsgs[i].buf))
60237+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
60238+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
60239 return -EFAULT;
60240 }
60241 return sys_ioctl(fd, cmd, (unsigned long)tdata);
60242@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
60243 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
60244 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
60245 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
60246- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
60247+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
60248 return -EFAULT;
60249
60250 return ioctl_preallocate(file, p);
60251@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
60252 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
60253 {
60254 unsigned int a, b;
60255- a = *(unsigned int *)p;
60256- b = *(unsigned int *)q;
60257+ a = *(const unsigned int *)p;
60258+ b = *(const unsigned int *)q;
60259 if (a > b)
60260 return 1;
60261 if (a < b)
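
In the l_pad hunk above, &p->l_pad (type u32 (*)[4]) and p->l_pad (decayed to u32 *) denote the same address, so behaviour is unchanged; dropping the address-of operator appears to be type hygiene for the __user-annotated copy helpers. The equivalence, demonstrated in plain C:

#include <stdio.h>

struct compat_flock_model {
	unsigned int l_pad[4];
};

int main(void)
{
	struct compat_flock_model p;

	/* same bytes, different types: &p.l_pad is unsigned int (*)[4],
	 * while p.l_pad decays to unsigned int *; the hunk's change is
	 * therefore behaviour-preserving */
	printf("&p.l_pad = %p\n", (void *)&p.l_pad);
	printf(" p.l_pad = %p\n", (void *)p.l_pad);
	return 0;
}
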
60262diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
60263index c9c298b..544d100 100644
60264--- a/fs/configfs/dir.c
60265+++ b/fs/configfs/dir.c
60266@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60267 }
60268 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
60269 struct configfs_dirent *next;
60270- const char *name;
60271+ const unsigned char * name;
60272+ char d_name[sizeof(next->s_dentry->d_iname)];
60273 int len;
60274 struct inode *inode = NULL;
60275
60276@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60277 continue;
60278
60279 name = configfs_get_name(next);
60280- len = strlen(name);
60281+ if (next->s_dentry && name == next->s_dentry->d_iname) {
60282+ len = next->s_dentry->d_name.len;
60283+ memcpy(d_name, name, len);
60284+ name = d_name;
60285+ } else
60286+ len = strlen(name);
60287
60288 /*
60289 * We'll have a dentry and an inode for
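
The configfs_readdir fix above avoids strlen() on a dentry's inline name, which a concurrent rename can rewrite (and un-terminate) underneath the reader; instead it takes the authoritative length from d_name.len and snapshots the bytes into a local buffer. A userspace sketch of that snapshot, with the structure reduced to the relevant fields:

#include <stdio.h>
#include <string.h>

#define DNAME_INLINE_LEN 32	/* illustrative size */

struct dentry_model {
	char d_iname[DNAME_INLINE_LEN];	/* inline name, mutable on rename */
	int d_name_len;			/* authoritative length */
};

/* as in the hunk: when 'name' aliases the mutable inline storage, take
 * the length from the dentry's bookkeeping and copy the bytes out rather
 * than trusting a NUL terminator a racing rename may have moved */
static const char *stable_name(const struct dentry_model *d,
			       const char *name,
			       char buf[DNAME_INLINE_LEN], int *len)
{
	if (name == d->d_iname) {
		*len = d->d_name_len;
		memcpy(buf, name, *len);
		return buf;		/* private snapshot */
	}
	*len = (int)strlen(name);	/* external names are stable */
	return name;
}

int main(void)
{
	struct dentry_model d = { "target", 6 };
	char buf[DNAME_INLINE_LEN];
	int len;
	const char *n = stable_name(&d, d.d_iname, buf, &len);

	printf("%.*s (%d)\n", len, n, len);
	return 0;
}
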
60290diff --git a/fs/coredump.c b/fs/coredump.c
60291index b5c86ff..0dac262 100644
60292--- a/fs/coredump.c
60293+++ b/fs/coredump.c
60294@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
60295 struct pipe_inode_info *pipe = file->private_data;
60296
60297 pipe_lock(pipe);
60298- pipe->readers++;
60299- pipe->writers--;
60300+ atomic_inc(&pipe->readers);
60301+ atomic_dec(&pipe->writers);
60302 wake_up_interruptible_sync(&pipe->wait);
60303 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60304 pipe_unlock(pipe);
60305@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
60306 * We actually want wait_event_freezable() but then we need
60307 * to clear TIF_SIGPENDING and improve dump_interrupted().
60308 */
60309- wait_event_interruptible(pipe->wait, pipe->readers == 1);
60310+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
60311
60312 pipe_lock(pipe);
60313- pipe->readers--;
60314- pipe->writers++;
60315+ atomic_dec(&pipe->readers);
60316+ atomic_inc(&pipe->writers);
60317 pipe_unlock(pipe);
60318 }
60319
60320@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
60321 struct files_struct *displaced;
60322 bool need_nonrelative = false;
60323 bool core_dumped = false;
60324- static atomic_t core_dump_count = ATOMIC_INIT(0);
60325+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
60326+ long signr = siginfo->si_signo;
60327+ int dumpable;
60328 struct coredump_params cprm = {
60329 .siginfo = siginfo,
60330 .regs = signal_pt_regs(),
60331@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
60332 .mm_flags = mm->flags,
60333 };
60334
60335- audit_core_dumps(siginfo->si_signo);
60336+ audit_core_dumps(signr);
60337+
60338+ dumpable = __get_dumpable(cprm.mm_flags);
60339+
60340+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
60341+ gr_handle_brute_attach(dumpable);
60342
60343 binfmt = mm->binfmt;
60344 if (!binfmt || !binfmt->core_dump)
60345 goto fail;
60346- if (!__get_dumpable(cprm.mm_flags))
60347+ if (!dumpable)
60348 goto fail;
60349
60350 cred = prepare_creds();
60351@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
60352 need_nonrelative = true;
60353 }
60354
60355- retval = coredump_wait(siginfo->si_signo, &core_state);
60356+ retval = coredump_wait(signr, &core_state);
60357 if (retval < 0)
60358 goto fail_creds;
60359
60360@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
60361 }
60362 cprm.limit = RLIM_INFINITY;
60363
60364- dump_count = atomic_inc_return(&core_dump_count);
60365+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
60366 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
60367 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
60368 task_tgid_vnr(current), current->comm);
60369@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
60370 } else {
60371 struct inode *inode;
60372
60373+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
60374+
60375 if (cprm.limit < binfmt->min_coredump)
60376 goto fail_unlock;
60377
60378@@ -681,7 +690,7 @@ close_fail:
60379 filp_close(cprm.file, NULL);
60380 fail_dropcount:
60381 if (ispipe)
60382- atomic_dec(&core_dump_count);
60383+ atomic_dec_unchecked(&core_dump_count);
60384 fail_unlock:
60385 kfree(cn.corename);
60386 coredump_finish(mm, core_dumped);
60387@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
60388 struct file *file = cprm->file;
60389 loff_t pos = file->f_pos;
60390 ssize_t n;
60391+
60392+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
60393 if (cprm->written + nr > cprm->limit)
60394 return 0;
60395 while (nr) {
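
Beyond the grsecurity accounting call, dump_emit() above illustrates the bounded-writer pattern the function is built around: every chunk is checked against the core-size limit before being written, so the dump stops exactly at RLIMIT_CORE. A minimal userspace model of that accounting:

#include <stdio.h>

struct dump_state {
	unsigned long written;
	unsigned long limit;	/* RLIMIT_CORE in the kernel */
};

/* mirror of dump_emit()'s guard: refuse the chunk (returning 0) once it
 * would push the running total past the limit */
static int dump_emit_model(struct dump_state *s, const void *buf,
			   unsigned long nr)
{
	(void)buf;		/* a real implementation writes buf[0..nr) */
	if (s->written + nr > s->limit)
		return 0;
	s->written += nr;
	return 1;
}

int main(void)
{
	struct dump_state s = { 0, 100 };
	char chunk[64] = { 0 };

	printf("first 64 bytes:  %d\n", dump_emit_model(&s, chunk, sizeof(chunk)));
	printf("second 64 bytes: %d\n", dump_emit_model(&s, chunk, sizeof(chunk)));
	return 0;
}
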
60396diff --git a/fs/dcache.c b/fs/dcache.c
60397index e368d4f..b40ba59 100644
60398--- a/fs/dcache.c
60399+++ b/fs/dcache.c
60400@@ -508,7 +508,7 @@ static void __dentry_kill(struct dentry *dentry)
60401 * dentry_iput drops the locks, at which point nobody (except
60402 * transient RCU lookups) can reach this dentry.
60403 */
60404- BUG_ON((int)dentry->d_lockref.count > 0);
60405+ BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0);
60406 this_cpu_dec(nr_dentry);
60407 if (dentry->d_op && dentry->d_op->d_release)
60408 dentry->d_op->d_release(dentry);
60409@@ -561,7 +561,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
60410 struct dentry *parent = dentry->d_parent;
60411 if (IS_ROOT(dentry))
60412 return NULL;
60413- if (unlikely((int)dentry->d_lockref.count < 0))
60414+ if (unlikely((int)__lockref_read(&dentry->d_lockref) < 0))
60415 return NULL;
60416 if (likely(spin_trylock(&parent->d_lock)))
60417 return parent;
60418@@ -638,7 +638,7 @@ repeat:
60419 dentry->d_flags |= DCACHE_REFERENCED;
60420 dentry_lru_add(dentry);
60421
60422- dentry->d_lockref.count--;
60423+ __lockref_dec(&dentry->d_lockref);
60424 spin_unlock(&dentry->d_lock);
60425 return;
60426
60427@@ -653,7 +653,7 @@ EXPORT_SYMBOL(dput);
60428 /* This must be called with d_lock held */
60429 static inline void __dget_dlock(struct dentry *dentry)
60430 {
60431- dentry->d_lockref.count++;
60432+ __lockref_inc(&dentry->d_lockref);
60433 }
60434
60435 static inline void __dget(struct dentry *dentry)
60436@@ -694,8 +694,8 @@ repeat:
60437 goto repeat;
60438 }
60439 rcu_read_unlock();
60440- BUG_ON(!ret->d_lockref.count);
60441- ret->d_lockref.count++;
60442+ BUG_ON(!__lockref_read(&ret->d_lockref));
60443+ __lockref_inc(&ret->d_lockref);
60444 spin_unlock(&ret->d_lock);
60445 return ret;
60446 }
60447@@ -773,9 +773,9 @@ restart:
60448 spin_lock(&inode->i_lock);
60449 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
60450 spin_lock(&dentry->d_lock);
60451- if (!dentry->d_lockref.count) {
60452+ if (!__lockref_read(&dentry->d_lockref)) {
60453 struct dentry *parent = lock_parent(dentry);
60454- if (likely(!dentry->d_lockref.count)) {
60455+ if (likely(!__lockref_read(&dentry->d_lockref))) {
60456 __dentry_kill(dentry);
60457 dput(parent);
60458 goto restart;
60459@@ -810,7 +810,7 @@ static void shrink_dentry_list(struct list_head *list)
60460 * We found an inuse dentry which was not removed from
60461 * the LRU because of laziness during lookup. Do not free it.
60462 */
60463- if ((int)dentry->d_lockref.count > 0) {
60464+ if ((int)__lockref_read(&dentry->d_lockref) > 0) {
60465 spin_unlock(&dentry->d_lock);
60466 if (parent)
60467 spin_unlock(&parent->d_lock);
60468@@ -848,8 +848,8 @@ static void shrink_dentry_list(struct list_head *list)
60469 dentry = parent;
60470 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
60471 parent = lock_parent(dentry);
60472- if (dentry->d_lockref.count != 1) {
60473- dentry->d_lockref.count--;
60474+ if (__lockref_read(&dentry->d_lockref) != 1) {
60475+ __lockref_inc(&dentry->d_lockref);
60476 spin_unlock(&dentry->d_lock);
60477 if (parent)
60478 spin_unlock(&parent->d_lock);
60479@@ -889,7 +889,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
60480 * counts, just remove them from the LRU. Otherwise give them
60481 * another pass through the LRU.
60482 */
60483- if (dentry->d_lockref.count) {
60484+ if (__lockref_read(&dentry->d_lockref) > 0) {
60485 d_lru_isolate(dentry);
60486 spin_unlock(&dentry->d_lock);
60487 return LRU_REMOVED;
60488@@ -1225,7 +1225,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
60489 } else {
60490 if (dentry->d_flags & DCACHE_LRU_LIST)
60491 d_lru_del(dentry);
60492- if (!dentry->d_lockref.count) {
60493+ if (!__lockref_read(&dentry->d_lockref)) {
60494 d_shrink_add(dentry, &data->dispose);
60495 data->found++;
60496 }
60497@@ -1273,7 +1273,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60498 return D_WALK_CONTINUE;
60499
60500 /* root with refcount 1 is fine */
60501- if (dentry == _data && dentry->d_lockref.count == 1)
60502+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
60503 return D_WALK_CONTINUE;
60504
60505 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
60506@@ -1282,7 +1282,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60507 dentry->d_inode ?
60508 dentry->d_inode->i_ino : 0UL,
60509 dentry,
60510- dentry->d_lockref.count,
60511+ __lockref_read(&dentry->d_lockref),
60512 dentry->d_sb->s_type->name,
60513 dentry->d_sb->s_id);
60514 WARN_ON(1);
60515@@ -1423,7 +1423,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60516 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
60517 if (name->len > DNAME_INLINE_LEN-1) {
60518 size_t size = offsetof(struct external_name, name[1]);
60519- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
60520+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
60521 if (!p) {
60522 kmem_cache_free(dentry_cache, dentry);
60523 return NULL;
60524@@ -1443,7 +1443,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60525 smp_wmb();
60526 dentry->d_name.name = dname;
60527
60528- dentry->d_lockref.count = 1;
60529+ __lockref_set(&dentry->d_lockref, 1);
60530 dentry->d_flags = 0;
60531 spin_lock_init(&dentry->d_lock);
60532 seqcount_init(&dentry->d_seq);
60533@@ -1452,6 +1452,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60534 dentry->d_sb = sb;
60535 dentry->d_op = NULL;
60536 dentry->d_fsdata = NULL;
60537+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
60538+ atomic_set(&dentry->chroot_refcnt, 0);
60539+#endif
60540 INIT_HLIST_BL_NODE(&dentry->d_hash);
60541 INIT_LIST_HEAD(&dentry->d_lru);
60542 INIT_LIST_HEAD(&dentry->d_subdirs);
60543@@ -2151,7 +2154,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
60544 goto next;
60545 }
60546
60547- dentry->d_lockref.count++;
60548+ __lockref_inc(&dentry->d_lockref);
60549 found = dentry;
60550 spin_unlock(&dentry->d_lock);
60551 break;
60552@@ -2250,7 +2253,7 @@ again:
60553 spin_lock(&dentry->d_lock);
60554 inode = dentry->d_inode;
60555 isdir = S_ISDIR(inode->i_mode);
60556- if (dentry->d_lockref.count == 1) {
60557+ if (__lockref_read(&dentry->d_lockref) == 1) {
60558 if (!spin_trylock(&inode->i_lock)) {
60559 spin_unlock(&dentry->d_lock);
60560 cpu_relax();
60561@@ -3203,7 +3206,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
60562
60563 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
60564 dentry->d_flags |= DCACHE_GENOCIDE;
60565- dentry->d_lockref.count--;
60566+ __lockref_dec(&dentry->d_lockref);
60567 }
60568 }
60569 return D_WALK_CONTINUE;
60570@@ -3319,7 +3322,8 @@ void __init vfs_caches_init(unsigned long mempages)
60571 mempages -= reserve;
60572
60573 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
60574- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
60575+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
60576+ SLAB_NO_SANITIZE, NULL);
60577
60578 dcache_init();
60579 inode_init();
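
After the dcache.c conversions above, nothing touches d_lockref.count directly any more: all reads and updates go through __lockref_read/__lockref_set/__lockref_inc/__lockref_dec, so the count's representation can be hardened (for instance as an overflow-checked atomic) without editing every call site. A userspace sketch of that accessor indirection; the struct layout is an assumption for illustration (build with -pthread):

#include <pthread.h>
#include <stdio.h>

/* illustrative layout assumption: a lock paired with a count, as in the
 * kernel's struct lockref */
struct lockref {
	pthread_spinlock_t lock;
	int count;
};

/* every call site goes through these, so the count's representation can
 * later be swapped in one place */
#define __lockref_read(l)	((l)->count)
#define __lockref_set(l, v)	((l)->count = (v))
#define __lockref_inc(l)	((l)->count++)
#define __lockref_dec(l)	((l)->count--)

int main(void)
{
	struct lockref ref;

	pthread_spin_init(&ref.lock, PTHREAD_PROCESS_PRIVATE);
	__lockref_set(&ref, 1);
	__lockref_inc(&ref);
	printf("count: %d\n", __lockref_read(&ref));	/* 2 */
	pthread_spin_destroy(&ref.lock);
	return 0;
}
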
60580diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
60581index 05f2960..780f4f8 100644
60582--- a/fs/debugfs/inode.c
60583+++ b/fs/debugfs/inode.c
60584@@ -246,10 +246,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
60585 return 0;
60586 }
60587
60588+static void debugfs_evict_inode(struct inode *inode)
60589+{
60590+ truncate_inode_pages_final(&inode->i_data);
60591+ clear_inode(inode);
60592+ if (S_ISLNK(inode->i_mode))
60593+ kfree(inode->i_private);
60594+}
60595+
60596 static const struct super_operations debugfs_super_operations = {
60597 .statfs = simple_statfs,
60598 .remount_fs = debugfs_remount,
60599 .show_options = debugfs_show_options,
60600+ .evict_inode = debugfs_evict_inode,
60601 };
60602
60603 static int debug_fill_super(struct super_block *sb, void *data, int silent)
60604@@ -416,7 +425,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
60605 */
60606 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
60607 {
60608+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60609+ return __create_file(name, S_IFDIR | S_IRWXU,
60610+#else
60611 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
60612+#endif
60613 parent, NULL, NULL);
60614 }
60615 EXPORT_SYMBOL_GPL(debugfs_create_dir);
60616@@ -466,23 +479,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
60617 int ret = 0;
60618
60619 if (debugfs_positive(dentry)) {
60620- if (dentry->d_inode) {
60621- dget(dentry);
60622- switch (dentry->d_inode->i_mode & S_IFMT) {
60623- case S_IFDIR:
60624- ret = simple_rmdir(parent->d_inode, dentry);
60625- break;
60626- case S_IFLNK:
60627- kfree(dentry->d_inode->i_private);
60628- /* fall through */
60629- default:
60630- simple_unlink(parent->d_inode, dentry);
60631- break;
60632- }
60633- if (!ret)
60634- d_delete(dentry);
60635- dput(dentry);
60636- }
60637+ dget(dentry);
60638+ if (S_ISDIR(dentry->d_inode->i_mode))
60639+ ret = simple_rmdir(parent->d_inode, dentry);
60640+ else
60641+ simple_unlink(parent->d_inode, dentry);
60642+ if (!ret)
60643+ d_delete(dentry);
60644+ dput(dentry);
60645 }
60646 return ret;
60647 }
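
The debugfs change above centralizes the release of a symlink's target string: instead of the removal path freeing i_private for the S_IFLNK case (leaving other teardown paths, such as unmount, to handle it separately), the new evict_inode hook frees it exactly once when the inode dies. A userspace model of moving the release into the object's single destruction point:

#include <stdlib.h>
#include <string.h>

struct inode_model {
	int is_symlink;
	void *i_private;	/* the symlink's target string */
};

/* single point of release, reached by every teardown path */
static void evict_inode_model(struct inode_model *inode)
{
	if (inode->is_symlink)
		free(inode->i_private);
	inode->i_private = NULL;
}

int main(void)
{
	struct inode_model link = { 1, strdup("/sys/kernel/debug/x") };

	evict_inode_model(&link);	/* the one place the target dies */
	return 0;
}
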
60648diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
60649index 1686dc2..9611c50 100644
60650--- a/fs/ecryptfs/inode.c
60651+++ b/fs/ecryptfs/inode.c
60652@@ -664,7 +664,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
60653 old_fs = get_fs();
60654 set_fs(get_ds());
60655 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
60656- (char __user *)lower_buf,
60657+ (char __force_user *)lower_buf,
60658 PATH_MAX);
60659 set_fs(old_fs);
60660 if (rc < 0)
60661diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
60662index e4141f2..d8263e8 100644
60663--- a/fs/ecryptfs/miscdev.c
60664+++ b/fs/ecryptfs/miscdev.c
60665@@ -304,7 +304,7 @@ check_list:
60666 goto out_unlock_msg_ctx;
60667 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
60668 if (msg_ctx->msg) {
60669- if (copy_to_user(&buf[i], packet_length, packet_length_size))
60670+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
60671 goto out_unlock_msg_ctx;
60672 i += packet_length_size;
60673 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
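
The miscdev hunk above refuses to copy_to_user() more bytes than the kernel-side packet_length array actually holds, even if the computed size claims otherwise. The same defensive shape in plain C, with illustrative names:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* never copy more bytes than the source buffer actually holds, even if
 * the computed length field claims otherwise */
static int copy_header(char *dst, size_t dst_len,
		       const char *pkt, size_t pkt_buf_size, size_t pkt_len)
{
	if (pkt_len > pkt_buf_size || pkt_len > dst_len)
		return -EINVAL;		/* reject oversized claims */
	memcpy(dst, pkt, pkt_len);
	return 0;
}

int main(void)
{
	char dst[8];
	char pkt[4] = "abc";

	printf("sane length:     %d\n",
	       copy_header(dst, sizeof(dst), pkt, sizeof(pkt), 3));
	printf("oversized claim: %d\n",
	       copy_header(dst, sizeof(dst), pkt, sizeof(pkt), 64));
	return 0;
}
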
60674diff --git a/fs/exec.c b/fs/exec.c
60675index ad8798e..5f872c9 100644
60676--- a/fs/exec.c
60677+++ b/fs/exec.c
60678@@ -56,8 +56,20 @@
60679 #include <linux/pipe_fs_i.h>
60680 #include <linux/oom.h>
60681 #include <linux/compat.h>
60682+#include <linux/random.h>
60683+#include <linux/seq_file.h>
60684+#include <linux/coredump.h>
60685+#include <linux/mman.h>
60686+
60687+#ifdef CONFIG_PAX_REFCOUNT
60688+#include <linux/kallsyms.h>
60689+#include <linux/kdebug.h>
60690+#endif
60691+
60692+#include <trace/events/fs.h>
60693
60694 #include <asm/uaccess.h>
60695+#include <asm/sections.h>
60696 #include <asm/mmu_context.h>
60697 #include <asm/tlb.h>
60698
60699@@ -66,19 +78,34 @@
60700
60701 #include <trace/events/sched.h>
60702
60703+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60704+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
60705+{
60706+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
60707+}
60708+#endif
60709+
60710+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
60711+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60712+EXPORT_SYMBOL(pax_set_initial_flags_func);
60713+#endif
60714+
60715 int suid_dumpable = 0;
60716
60717 static LIST_HEAD(formats);
60718 static DEFINE_RWLOCK(binfmt_lock);
60719
60720+extern int gr_process_kernel_exec_ban(void);
60721+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
60722+
60723 void __register_binfmt(struct linux_binfmt * fmt, int insert)
60724 {
60725 BUG_ON(!fmt);
60726 if (WARN_ON(!fmt->load_binary))
60727 return;
60728 write_lock(&binfmt_lock);
60729- insert ? list_add(&fmt->lh, &formats) :
60730- list_add_tail(&fmt->lh, &formats);
60731+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
60732+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
60733 write_unlock(&binfmt_lock);
60734 }
60735
60736@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
60737 void unregister_binfmt(struct linux_binfmt * fmt)
60738 {
60739 write_lock(&binfmt_lock);
60740- list_del(&fmt->lh);
60741+ pax_list_del((struct list_head *)&fmt->lh);
60742 write_unlock(&binfmt_lock);
60743 }
60744
60745@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60746 int write)
60747 {
60748 struct page *page;
60749- int ret;
60750
60751-#ifdef CONFIG_STACK_GROWSUP
60752- if (write) {
60753- ret = expand_downwards(bprm->vma, pos);
60754- if (ret < 0)
60755- return NULL;
60756- }
60757-#endif
60758- ret = get_user_pages(current, bprm->mm, pos,
60759- 1, write, 1, &page, NULL);
60760- if (ret <= 0)
60761+ if (0 > expand_downwards(bprm->vma, pos))
60762+ return NULL;
60763+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
60764 return NULL;
60765
60766 if (write) {
60767@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60768 if (size <= ARG_MAX)
60769 return page;
60770
60771+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60772+ // only allow 512KB for argv+env on suid/sgid binaries
60773+ // to prevent easy ASLR exhaustion
60774+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
60775+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
60776+ (size > (512 * 1024))) {
60777+ put_page(page);
60778+ return NULL;
60779+ }
60780+#endif
60781+
60782 /*
60783 * Limit to 1/4-th the stack size for the argv+env strings.
60784 * This ensures that:
60785@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60786 vma->vm_end = STACK_TOP_MAX;
60787 vma->vm_start = vma->vm_end - PAGE_SIZE;
60788 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
60789+
60790+#ifdef CONFIG_PAX_SEGMEXEC
60791+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
60792+#endif
60793+
60794 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
60795 INIT_LIST_HEAD(&vma->anon_vma_chain);
60796
60797@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60798 arch_bprm_mm_init(mm, vma);
60799 up_write(&mm->mmap_sem);
60800 bprm->p = vma->vm_end - sizeof(void *);
60801+
60802+#ifdef CONFIG_PAX_RANDUSTACK
60803+ if (randomize_va_space)
60804+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
60805+#endif
60806+
60807 return 0;
60808 err:
60809 up_write(&mm->mmap_sem);
60810@@ -396,7 +437,7 @@ struct user_arg_ptr {
60811 } ptr;
60812 };
60813
60814-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60815+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60816 {
60817 const char __user *native;
60818
60819@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60820 compat_uptr_t compat;
60821
60822 if (get_user(compat, argv.ptr.compat + nr))
60823- return ERR_PTR(-EFAULT);
60824+ return (const char __force_user *)ERR_PTR(-EFAULT);
60825
60826 return compat_ptr(compat);
60827 }
60828 #endif
60829
60830 if (get_user(native, argv.ptr.native + nr))
60831- return ERR_PTR(-EFAULT);
60832+ return (const char __force_user *)ERR_PTR(-EFAULT);
60833
60834 return native;
60835 }
60836@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
60837 if (!p)
60838 break;
60839
60840- if (IS_ERR(p))
60841+ if (IS_ERR((const char __force_kernel *)p))
60842 return -EFAULT;
60843
60844 if (i >= max)
60845@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
60846
60847 ret = -EFAULT;
60848 str = get_user_arg_ptr(argv, argc);
60849- if (IS_ERR(str))
60850+ if (IS_ERR((const char __force_kernel *)str))
60851 goto out;
60852
60853 len = strnlen_user(str, MAX_ARG_STRLEN);
60854@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
60855 int r;
60856 mm_segment_t oldfs = get_fs();
60857 struct user_arg_ptr argv = {
60858- .ptr.native = (const char __user *const __user *)__argv,
60859+ .ptr.native = (const char __user * const __force_user *)__argv,
60860 };
60861
60862 set_fs(KERNEL_DS);
60863@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60864 unsigned long new_end = old_end - shift;
60865 struct mmu_gather tlb;
60866
60867- BUG_ON(new_start > new_end);
60868+ if (new_start >= new_end || new_start < mmap_min_addr)
60869+ return -ENOMEM;
60870
60871 /*
60872 * ensure there are no vmas between where we want to go
60873@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60874 if (vma != find_vma(mm, new_start))
60875 return -EFAULT;
60876
60877+#ifdef CONFIG_PAX_SEGMEXEC
60878+ BUG_ON(pax_find_mirror_vma(vma));
60879+#endif
60880+
60881 /*
60882 * cover the whole range: [new_start, old_end)
60883 */
60884@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60885 stack_top = arch_align_stack(stack_top);
60886 stack_top = PAGE_ALIGN(stack_top);
60887
60888- if (unlikely(stack_top < mmap_min_addr) ||
60889- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
60890- return -ENOMEM;
60891-
60892 stack_shift = vma->vm_end - stack_top;
60893
60894 bprm->p -= stack_shift;
60895@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
60896 bprm->exec -= stack_shift;
60897
60898 down_write(&mm->mmap_sem);
60899+
60900+ /* Move stack pages down in memory. */
60901+ if (stack_shift) {
60902+ ret = shift_arg_pages(vma, stack_shift);
60903+ if (ret)
60904+ goto out_unlock;
60905+ }
60906+
60907 vm_flags = VM_STACK_FLAGS;
60908
60909+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
60910+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60911+ vm_flags &= ~VM_EXEC;
60912+
60913+#ifdef CONFIG_PAX_MPROTECT
60914+ if (mm->pax_flags & MF_PAX_MPROTECT)
60915+ vm_flags &= ~VM_MAYEXEC;
60916+#endif
60917+
60918+ }
60919+#endif
60920+
60921 /*
60922 * Adjust stack execute permissions; explicitly enable for
60923 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
60924@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60925 goto out_unlock;
60926 BUG_ON(prev != vma);
60927
60928- /* Move stack pages down in memory. */
60929- if (stack_shift) {
60930- ret = shift_arg_pages(vma, stack_shift);
60931- if (ret)
60932- goto out_unlock;
60933- }
60934-
60935 /* mprotect_fixup is overkill to remove the temporary stack flags */
60936 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
60937
60938@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
60939 #endif
60940 current->mm->start_stack = bprm->p;
60941 ret = expand_stack(vma, stack_base);
60942+
60943+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
60944+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
60945+ unsigned long size;
60946+ vm_flags_t vm_flags;
60947+
60948+ size = STACK_TOP - vma->vm_end;
60949+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
60950+
60951+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
60952+
60953+#ifdef CONFIG_X86
60954+ if (!ret) {
60955+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
60956+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
60957+ }
60958+#endif
60959+
60960+ }
60961+#endif
60962+
60963 if (ret)
60964 ret = -EFAULT;
60965
60966@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
60967 if (err)
60968 goto exit;
60969
60970- if (name->name[0] != '\0')
60971+ if (name->name[0] != '\0') {
60972 fsnotify_open(file);
60973+ trace_open_exec(name->name);
60974+ }
60975
60976 out:
60977 return file;
60978@@ -809,7 +887,7 @@ int kernel_read(struct file *file, loff_t offset,
60979 old_fs = get_fs();
60980 set_fs(get_ds());
60981 /* The cast to a user pointer is valid due to the set_fs() */
60982- result = vfs_read(file, (void __user *)addr, count, &pos);
60983+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
60984 set_fs(old_fs);
60985 return result;
60986 }
60987@@ -854,6 +932,7 @@ static int exec_mmap(struct mm_struct *mm)
60988 tsk->mm = mm;
60989 tsk->active_mm = mm;
60990 activate_mm(active_mm, mm);
60991+ populate_stack();
60992 tsk->mm->vmacache_seqnum = 0;
60993 vmacache_flush(tsk);
60994 task_unlock(tsk);
60995@@ -1252,7 +1331,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
60996 }
60997 rcu_read_unlock();
60998
60999- if (p->fs->users > n_fs)
61000+ if (atomic_read(&p->fs->users) > n_fs)
61001 bprm->unsafe |= LSM_UNSAFE_SHARE;
61002 else
61003 p->fs->in_exec = 1;
61004@@ -1433,6 +1512,31 @@ static int exec_binprm(struct linux_binprm *bprm)
61005 return ret;
61006 }
61007
61008+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61009+static DEFINE_PER_CPU(u64, exec_counter);
61010+static int __init init_exec_counters(void)
61011+{
61012+ unsigned int cpu;
61013+
61014+ for_each_possible_cpu(cpu) {
61015+ per_cpu(exec_counter, cpu) = (u64)cpu;
61016+ }
61017+
61018+ return 0;
61019+}
61020+early_initcall(init_exec_counters);
61021+static inline void increment_exec_counter(void)
61022+{
61023+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
61024+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
61025+}
61026+#else
61027+static inline void increment_exec_counter(void) {}
61028+#endif
61029+
61030+extern void gr_handle_exec_args(struct linux_binprm *bprm,
61031+ struct user_arg_ptr argv);
61032+
61033 /*
61034 * sys_execve() executes a new program.
61035 */
61036@@ -1441,6 +1545,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61037 struct user_arg_ptr envp,
61038 int flags)
61039 {
61040+#ifdef CONFIG_GRKERNSEC
61041+ struct file *old_exec_file;
61042+ struct acl_subject_label *old_acl;
61043+ struct rlimit old_rlim[RLIM_NLIMITS];
61044+#endif
61045 char *pathbuf = NULL;
61046 struct linux_binprm *bprm;
61047 struct file *file;
61048@@ -1450,6 +1559,8 @@ static int do_execveat_common(int fd, struct filename *filename,
61049 if (IS_ERR(filename))
61050 return PTR_ERR(filename);
61051
61052+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
61053+
61054 /*
61055 * We move the actual failure in case of RLIMIT_NPROC excess from
61056 * set*uid() to execve() because too many poorly written programs
61057@@ -1487,6 +1598,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61058 if (IS_ERR(file))
61059 goto out_unmark;
61060
61061+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
61062+ retval = -EPERM;
61063+ goto out_unmark;
61064+ }
61065+
61066 sched_exec();
61067
61068 bprm->file = file;
61069@@ -1513,6 +1629,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61070 }
61071 bprm->interp = bprm->filename;
61072
61073+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
61074+ retval = -EACCES;
61075+ goto out_unmark;
61076+ }
61077+
61078 retval = bprm_mm_init(bprm);
61079 if (retval)
61080 goto out_unmark;
61081@@ -1529,24 +1650,70 @@ static int do_execveat_common(int fd, struct filename *filename,
61082 if (retval < 0)
61083 goto out;
61084
61085+#ifdef CONFIG_GRKERNSEC
61086+ old_acl = current->acl;
61087+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
61088+ old_exec_file = current->exec_file;
61089+ get_file(file);
61090+ current->exec_file = file;
61091+#endif
61092+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61093+ /* limit suid stack to 8MB
61094+ * we saved the old limits above and will restore them if this exec fails
61095+ */
61096+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
61097+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
61098+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
61099+#endif
61100+
61101+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
61102+ retval = -EPERM;
61103+ goto out_fail;
61104+ }
61105+
61106+ if (!gr_tpe_allow(file)) {
61107+ retval = -EACCES;
61108+ goto out_fail;
61109+ }
61110+
61111+ if (gr_check_crash_exec(file)) {
61112+ retval = -EACCES;
61113+ goto out_fail;
61114+ }
61115+
61116+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
61117+ bprm->unsafe);
61118+ if (retval < 0)
61119+ goto out_fail;
61120+
61121 retval = copy_strings_kernel(1, &bprm->filename, bprm);
61122 if (retval < 0)
61123- goto out;
61124+ goto out_fail;
61125
61126 bprm->exec = bprm->p;
61127 retval = copy_strings(bprm->envc, envp, bprm);
61128 if (retval < 0)
61129- goto out;
61130+ goto out_fail;
61131
61132 retval = copy_strings(bprm->argc, argv, bprm);
61133 if (retval < 0)
61134- goto out;
61135+ goto out_fail;
61136+
61137+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
61138+
61139+ gr_handle_exec_args(bprm, argv);
61140
61141 retval = exec_binprm(bprm);
61142 if (retval < 0)
61143- goto out;
61144+ goto out_fail;
61145+#ifdef CONFIG_GRKERNSEC
61146+ if (old_exec_file)
61147+ fput(old_exec_file);
61148+#endif
61149
61150 /* execve succeeded */
61151+
61152+ increment_exec_counter();
61153 current->fs->in_exec = 0;
61154 current->in_execve = 0;
61155 acct_update_integrals(current);
61156@@ -1558,6 +1725,14 @@ static int do_execveat_common(int fd, struct filename *filename,
61157 put_files_struct(displaced);
61158 return retval;
61159
61160+out_fail:
61161+#ifdef CONFIG_GRKERNSEC
61162+ current->acl = old_acl;
61163+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
61164+ fput(current->exec_file);
61165+ current->exec_file = old_exec_file;
61166+#endif
61167+
61168 out:
61169 if (bprm->mm) {
61170 acct_arg_size(bprm, 0);
61171@@ -1704,3 +1879,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
61172 argv, envp, flags);
61173 }
61174 #endif
61175+
61176+int pax_check_flags(unsigned long *flags)
61177+{
61178+ int retval = 0;
61179+
61180+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
61181+ if (*flags & MF_PAX_SEGMEXEC)
61182+ {
61183+ *flags &= ~MF_PAX_SEGMEXEC;
61184+ retval = -EINVAL;
61185+ }
61186+#endif
61187+
61188+ if ((*flags & MF_PAX_PAGEEXEC)
61189+
61190+#ifdef CONFIG_PAX_PAGEEXEC
61191+ && (*flags & MF_PAX_SEGMEXEC)
61192+#endif
61193+
61194+ )
61195+ {
61196+ *flags &= ~MF_PAX_PAGEEXEC;
61197+ retval = -EINVAL;
61198+ }
61199+
61200+ if ((*flags & MF_PAX_MPROTECT)
61201+
61202+#ifdef CONFIG_PAX_MPROTECT
61203+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61204+#endif
61205+
61206+ )
61207+ {
61208+ *flags &= ~MF_PAX_MPROTECT;
61209+ retval = -EINVAL;
61210+ }
61211+
61212+ if ((*flags & MF_PAX_EMUTRAMP)
61213+
61214+#ifdef CONFIG_PAX_EMUTRAMP
61215+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61216+#endif
61217+
61218+ )
61219+ {
61220+ *flags &= ~MF_PAX_EMUTRAMP;
61221+ retval = -EINVAL;
61222+ }
61223+
61224+ return retval;
61225+}
61226+
61227+EXPORT_SYMBOL(pax_check_flags);
61228+
61229+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61230+char *pax_get_path(const struct path *path, char *buf, int buflen)
61231+{
61232+ char *pathname = d_path(path, buf, buflen);
61233+
61234+ if (IS_ERR(pathname))
61235+ goto toolong;
61236+
61237+ pathname = mangle_path(buf, pathname, "\t\n\\");
61238+ if (!pathname)
61239+ goto toolong;
61240+
61241+ *pathname = 0;
61242+ return buf;
61243+
61244+toolong:
61245+ return "<path too long>";
61246+}
61247+EXPORT_SYMBOL(pax_get_path);
61248+
61249+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
61250+{
61251+ struct task_struct *tsk = current;
61252+ struct mm_struct *mm = current->mm;
61253+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
61254+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
61255+ char *path_exec = NULL;
61256+ char *path_fault = NULL;
61257+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
61258+ siginfo_t info = { };
61259+
61260+ if (buffer_exec && buffer_fault) {
61261+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
61262+
61263+ down_read(&mm->mmap_sem);
61264+ vma = mm->mmap;
61265+ while (vma && (!vma_exec || !vma_fault)) {
61266+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
61267+ vma_exec = vma;
61268+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
61269+ vma_fault = vma;
61270+ vma = vma->vm_next;
61271+ }
61272+ if (vma_exec)
61273+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
61274+ if (vma_fault) {
61275+ start = vma_fault->vm_start;
61276+ end = vma_fault->vm_end;
61277+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
61278+ if (vma_fault->vm_file)
61279+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
61280+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
61281+ path_fault = "<heap>";
61282+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
61283+ path_fault = "<stack>";
61284+ else
61285+ path_fault = "<anonymous mapping>";
61286+ }
61287+ up_read(&mm->mmap_sem);
61288+ }
61289+ if (tsk->signal->curr_ip)
61290+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
61291+ else
61292+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
61293+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
61294+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
61295+ free_page((unsigned long)buffer_exec);
61296+ free_page((unsigned long)buffer_fault);
61297+ pax_report_insns(regs, pc, sp);
61298+ info.si_signo = SIGKILL;
61299+ info.si_errno = 0;
61300+ info.si_code = SI_KERNEL;
61301+ info.si_pid = 0;
61302+ info.si_uid = 0;
61303+ do_coredump(&info);
61304+}
61305+#endif
61306+
61307+#ifdef CONFIG_PAX_REFCOUNT
61308+void pax_report_refcount_overflow(struct pt_regs *regs)
61309+{
61310+ if (current->signal->curr_ip)
61311+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
61312+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
61313+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61314+ else
61315+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
61316+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61317+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
61318+ preempt_disable();
61319+ show_regs(regs);
61320+ preempt_enable();
61321+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
61322+}
61323+#endif
61324+
61325+#ifdef CONFIG_PAX_USERCOPY
61326+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
61327+static noinline int check_stack_object(const void *obj, unsigned long len)
61328+{
61329+ const void * const stack = task_stack_page(current);
61330+ const void * const stackend = stack + THREAD_SIZE;
61331+
61332+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61333+ const void *frame = NULL;
61334+ const void *oldframe;
61335+#endif
61336+
61337+ if (obj + len < obj)
61338+ return -1;
61339+
61340+ if (obj + len <= stack || stackend <= obj)
61341+ return 0;
61342+
61343+ if (obj < stack || stackend < obj + len)
61344+ return -1;
61345+
61346+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61347+ oldframe = __builtin_frame_address(1);
61348+ if (oldframe)
61349+ frame = __builtin_frame_address(2);
61350+ /*
61351+ low ----------------------------------------------> high
61352+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
61353+ ^----------------^
61354+ allow copies only within here
61355+ */
61356+ while (stack <= frame && frame < stackend) {
61357+ /* if obj + len extends past the last frame, this
61358+ check won't pass and the next frame will be 0,
61359+ causing us to bail out and correctly report
61360+ the copy as invalid
61361+ */
61362+ if (obj + len <= frame)
61363+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
61364+ oldframe = frame;
61365+ frame = *(const void * const *)frame;
61366+ }
61367+ return -1;
61368+#else
61369+ return 1;
61370+#endif
61371+}
61372+
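+/*
+ * Editorial note (not part of the original patch): in the frame walk above,
+ * "oldframe + 2 * sizeof(void *)" skips the saved frame pointer and return
+ * address stored at the base of the previous frame, so a copy is only
+ * classified as "fully inside frame" (2) when it cannot touch a saved
+ * return address; anything overlapping those two slots is rejected (-1).
+ */
+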
61373+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
61374+{
61375+ if (current->signal->curr_ip)
61376+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61377+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61378+ else
61379+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61380+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61381+ dump_stack();
61382+ gr_handle_kernel_exploit();
61383+ do_group_exit(SIGKILL);
61384+}
61385+#endif
61386+
61387+#ifdef CONFIG_PAX_USERCOPY
61388+
61389+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
61390+{
61391+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61392+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
61393+#ifdef CONFIG_MODULES
61394+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
61395+#else
61396+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
61397+#endif
61398+
61399+#else
61400+ unsigned long textlow = (unsigned long)_stext;
61401+ unsigned long texthigh = (unsigned long)_etext;
61402+
61403+#ifdef CONFIG_X86_64
61404+ /* check against linear mapping as well */
61405+ if (high > (unsigned long)__va(__pa(textlow)) &&
61406+ low < (unsigned long)__va(__pa(texthigh)))
61407+ return true;
61408+#endif
61409+
61410+#endif
61411+
61412+ if (high <= textlow || low >= texthigh)
61413+ return false;
61414+ else
61415+ return true;
61416+}
61417+#endif
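+/*
+ * Editorial sketch (not part of the original patch): on amd64 the kernel
+ * image is reachable both at its image-space address and through the
+ * linear mapping, hence the extra __va(__pa()) test above; in effect the
+ * copy range [low, high) is checked against both aliases of the text:
+ *
+ *	unsigned long alias_low  = (unsigned long)__va(__pa(textlow));
+ *	unsigned long alias_high = (unsigned long)__va(__pa(texthigh));
+ *	bool hits_alias = high > alias_low && low < alias_high;
+ */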
61418+
61419+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
61420+{
61421+#ifdef CONFIG_PAX_USERCOPY
61422+ const char *type;
61423+#endif
61424+
61425+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
61426+ unsigned long stackstart = (unsigned long)task_stack_page(current);
61427+ unsigned long currentsp = (unsigned long)&stackstart;
61428+ if (unlikely((currentsp < stackstart + 512 ||
61429+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
61430+ BUG();
61431+#endif
61432+
61433+#ifndef CONFIG_PAX_USERCOPY_DEBUG
61434+ if (const_size)
61435+ return;
61436+#endif
61437+
61438+#ifdef CONFIG_PAX_USERCOPY
61439+ if (!n)
61440+ return;
61441+
61442+ type = check_heap_object(ptr, n);
61443+ if (!type) {
61444+ int ret = check_stack_object(ptr, n);
61445+ if (ret == 1 || ret == 2)
61446+ return;
61447+ if (ret == 0) {
61448+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
61449+ type = "<kernel text>";
61450+ else
61451+ return;
61452+ } else
61453+ type = "<process stack>";
61454+ }
61455+
61456+ pax_report_usercopy(ptr, n, to_user, type);
61457+#endif
61458+
61459+}
61460+EXPORT_SYMBOL(__check_object_size);
61461+
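+/*
+ * Minimal usage sketch (editorial, not part of the original patch; the
+ * real call sites are the uaccess wrappers modified elsewhere in this
+ * patch, and the wrapper name below is invented for illustration).  For a
+ * copy towards userspace, to_user is true and const_size reflects whether
+ * the length was a compile-time constant:
+ *
+ *	static inline unsigned long
+ *	copy_to_user_checked(void __user *to, const void *from, unsigned long n)
+ *	{
+ *		__check_object_size(from, n, true, __builtin_constant_p(n));
+ *		return copy_to_user(to, from, n);
+ *	}
+ */
+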
61462+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
61463+void pax_track_stack(void)
61464+{
61465+ unsigned long sp = (unsigned long)&sp;
61466+ if (sp < current_thread_info()->lowest_stack &&
61467+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
61468+ current_thread_info()->lowest_stack = sp;
61469+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
61470+ BUG();
61471+}
61472+EXPORT_SYMBOL(pax_track_stack);
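+/*
+ * Editorial note (not part of the original patch): pax_track_stack()
+ * records the lowest stack pointer observed so the STACKLEAK plugin can
+ * later erase only the used part of the kernel stack; the final check
+ * evidently aims to BUG() once stack headroom falls below THREAD_SIZE/16.
+ */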
61473+#endif
61474+
61475+#ifdef CONFIG_PAX_SIZE_OVERFLOW
61476+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
61477+{
61478+	printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s\n", func, file, line, ssa_name);
61479+ dump_stack();
61480+ do_group_exit(SIGKILL);
61481+}
61482+EXPORT_SYMBOL(report_size_overflow);
61483+#endif
61484diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
61485index 9f9992b..8b59411 100644
61486--- a/fs/ext2/balloc.c
61487+++ b/fs/ext2/balloc.c
61488@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
61489
61490 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61491 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61492- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61493+ if (free_blocks < root_blocks + 1 &&
61494 !uid_eq(sbi->s_resuid, current_fsuid()) &&
61495 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61496- !in_group_p (sbi->s_resgid))) {
61497+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61498 return 0;
61499 }
61500 return 1;
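Editorial note: in this hunk (and the matching ext3/ext4 hunks below) the capability test moves to the end of the condition and becomes capable_nolog(), grsecurity's non-logging variant of capable(), so CAP_SYS_RESOURCE is only consulted, and never spuriously audited, once the resuid/resgid reservation checks have already failed.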
61501diff --git a/fs/ext2/super.c b/fs/ext2/super.c
61502index ae55fdd..5e64c27 100644
61503--- a/fs/ext2/super.c
61504+++ b/fs/ext2/super.c
61505@@ -268,10 +268,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
61506 #ifdef CONFIG_EXT2_FS_XATTR
61507 if (test_opt(sb, XATTR_USER))
61508 seq_puts(seq, ",user_xattr");
61509- if (!test_opt(sb, XATTR_USER) &&
61510- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
61511+ if (!test_opt(sb, XATTR_USER))
61512 seq_puts(seq, ",nouser_xattr");
61513- }
61514 #endif
61515
61516 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61517@@ -850,8 +848,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
61518 if (def_mount_opts & EXT2_DEFM_UID16)
61519 set_opt(sbi->s_mount_opt, NO_UID32);
61520 #ifdef CONFIG_EXT2_FS_XATTR
61521- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
61522- set_opt(sbi->s_mount_opt, XATTR_USER);
61523+ /* always enable user xattrs */
61524+ set_opt(sbi->s_mount_opt, XATTR_USER);
61525 #endif
61526 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61527 if (def_mount_opts & EXT2_DEFM_ACL)
61528diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
61529index 9142614..97484fa 100644
61530--- a/fs/ext2/xattr.c
61531+++ b/fs/ext2/xattr.c
61532@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
61533 struct buffer_head *bh = NULL;
61534 struct ext2_xattr_entry *entry;
61535 char *end;
61536- size_t rest = buffer_size;
61537+ size_t rest = buffer_size, total_size = 0;
61538 int error;
61539
61540 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
61541@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
61542 buffer += size;
61543 }
61544 rest -= size;
61545+ total_size += size;
61546 }
61547 }
61548- error = buffer_size - rest; /* total size */
61549+ error = total_size;
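+	/* total_size is accumulated explicitly, so the size-probe case
+	 * (buffer == NULL) no longer relies on unsigned wraparound of
+	 * 'rest'; ext3 and ext4 below get the same change (editorial
+	 * comment, not in the original patch) */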
61550
61551 cleanup:
61552 brelse(bh);
61553diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
61554index 158b5d4..2432610 100644
61555--- a/fs/ext3/balloc.c
61556+++ b/fs/ext3/balloc.c
61557@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
61558
61559 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61560 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61561- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61562+ if (free_blocks < root_blocks + 1 &&
61563 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
61564 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61565- !in_group_p (sbi->s_resgid))) {
61566+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61567 return 0;
61568 }
61569 return 1;
61570diff --git a/fs/ext3/super.c b/fs/ext3/super.c
61571index 9b4e7d7..048d025 100644
61572--- a/fs/ext3/super.c
61573+++ b/fs/ext3/super.c
61574@@ -653,10 +653,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
61575 #ifdef CONFIG_EXT3_FS_XATTR
61576 if (test_opt(sb, XATTR_USER))
61577 seq_puts(seq, ",user_xattr");
61578- if (!test_opt(sb, XATTR_USER) &&
61579- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
61580+ if (!test_opt(sb, XATTR_USER))
61581 seq_puts(seq, ",nouser_xattr");
61582- }
61583 #endif
61584 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61585 if (test_opt(sb, POSIX_ACL))
61586@@ -1758,8 +1756,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
61587 if (def_mount_opts & EXT3_DEFM_UID16)
61588 set_opt(sbi->s_mount_opt, NO_UID32);
61589 #ifdef CONFIG_EXT3_FS_XATTR
61590- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
61591- set_opt(sbi->s_mount_opt, XATTR_USER);
61592+ /* always enable user xattrs */
61593+ set_opt(sbi->s_mount_opt, XATTR_USER);
61594 #endif
61595 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61596 if (def_mount_opts & EXT3_DEFM_ACL)
61597diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
61598index c6874be..f8a6ae8 100644
61599--- a/fs/ext3/xattr.c
61600+++ b/fs/ext3/xattr.c
61601@@ -330,7 +330,7 @@ static int
61602 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61603 char *buffer, size_t buffer_size)
61604 {
61605- size_t rest = buffer_size;
61606+ size_t rest = buffer_size, total_size = 0;
61607
61608 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
61609 const struct xattr_handler *handler =
61610@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61611 buffer += size;
61612 }
61613 rest -= size;
61614+ total_size += size;
61615 }
61616 }
61617- return buffer_size - rest;
61618+ return total_size;
61619 }
61620
61621 static int
61622diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
61623index 83a6f49..d4e4d03 100644
61624--- a/fs/ext4/balloc.c
61625+++ b/fs/ext4/balloc.c
61626@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
61627 /* Hm, nope. Are (enough) root reserved clusters available? */
61628 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
61629 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
61630- capable(CAP_SYS_RESOURCE) ||
61631- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
61632+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
61633+ capable_nolog(CAP_SYS_RESOURCE)) {
61634
61635 if (free_clusters >= (nclusters + dirty_clusters +
61636 resv_clusters))
61637diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
61638index a75fba6..8235fca 100644
61639--- a/fs/ext4/ext4.h
61640+++ b/fs/ext4/ext4.h
61641@@ -1274,19 +1274,19 @@ struct ext4_sb_info {
61642 unsigned long s_mb_last_start;
61643
61644 /* stats for buddy allocator */
61645- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
61646- atomic_t s_bal_success; /* we found long enough chunks */
61647- atomic_t s_bal_allocated; /* in blocks */
61648- atomic_t s_bal_ex_scanned; /* total extents scanned */
61649- atomic_t s_bal_goals; /* goal hits */
61650- atomic_t s_bal_breaks; /* too long searches */
61651- atomic_t s_bal_2orders; /* 2^order hits */
61652+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
61653+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
61654+ atomic_unchecked_t s_bal_allocated; /* in blocks */
61655+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
61656+ atomic_unchecked_t s_bal_goals; /* goal hits */
61657+ atomic_unchecked_t s_bal_breaks; /* too long searches */
61658+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
61659 spinlock_t s_bal_lock;
61660 unsigned long s_mb_buddies_generated;
61661 unsigned long long s_mb_generation_time;
61662- atomic_t s_mb_lost_chunks;
61663- atomic_t s_mb_preallocated;
61664- atomic_t s_mb_discarded;
61665+ atomic_unchecked_t s_mb_lost_chunks;
61666+ atomic_unchecked_t s_mb_preallocated;
61667+ atomic_unchecked_t s_mb_discarded;
61668 atomic_t s_lock_busy;
61669
61670 /* locality groups */
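Editorial sketch (not part of the original patch): under CONFIG_PAX_REFCOUNT the ordinary atomic_t helpers trap on signed overflow, so counters that are pure statistics and may legitimately wrap are converted to atomic_unchecked_t, whose helpers skip that check. Roughly, for the x86 increment:

	typedef struct {
		int counter;
	} atomic_unchecked_t;

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		/* plain locked increment, no overflow-detection sequence */
		asm volatile("lock; incl %0" : "+m" (v->counter));
	}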
61671diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
61672index 8d1e602..abf497b 100644
61673--- a/fs/ext4/mballoc.c
61674+++ b/fs/ext4/mballoc.c
61675@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
61676 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
61677
61678 if (EXT4_SB(sb)->s_mb_stats)
61679- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
61680+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
61681
61682 break;
61683 }
61684@@ -2211,7 +2211,7 @@ repeat:
61685 ac->ac_status = AC_STATUS_CONTINUE;
61686 ac->ac_flags |= EXT4_MB_HINT_FIRST;
61687 cr = 3;
61688- atomic_inc(&sbi->s_mb_lost_chunks);
61689+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
61690 goto repeat;
61691 }
61692 }
61693@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
61694 if (sbi->s_mb_stats) {
61695 ext4_msg(sb, KERN_INFO,
61696 "mballoc: %u blocks %u reqs (%u success)",
61697- atomic_read(&sbi->s_bal_allocated),
61698- atomic_read(&sbi->s_bal_reqs),
61699- atomic_read(&sbi->s_bal_success));
61700+ atomic_read_unchecked(&sbi->s_bal_allocated),
61701+ atomic_read_unchecked(&sbi->s_bal_reqs),
61702+ atomic_read_unchecked(&sbi->s_bal_success));
61703 ext4_msg(sb, KERN_INFO,
61704 "mballoc: %u extents scanned, %u goal hits, "
61705 "%u 2^N hits, %u breaks, %u lost",
61706- atomic_read(&sbi->s_bal_ex_scanned),
61707- atomic_read(&sbi->s_bal_goals),
61708- atomic_read(&sbi->s_bal_2orders),
61709- atomic_read(&sbi->s_bal_breaks),
61710- atomic_read(&sbi->s_mb_lost_chunks));
61711+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
61712+ atomic_read_unchecked(&sbi->s_bal_goals),
61713+ atomic_read_unchecked(&sbi->s_bal_2orders),
61714+ atomic_read_unchecked(&sbi->s_bal_breaks),
61715+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
61716 ext4_msg(sb, KERN_INFO,
61717 "mballoc: %lu generated and it took %Lu",
61718 sbi->s_mb_buddies_generated,
61719 sbi->s_mb_generation_time);
61720 ext4_msg(sb, KERN_INFO,
61721 "mballoc: %u preallocated, %u discarded",
61722- atomic_read(&sbi->s_mb_preallocated),
61723- atomic_read(&sbi->s_mb_discarded));
61724+ atomic_read_unchecked(&sbi->s_mb_preallocated),
61725+ atomic_read_unchecked(&sbi->s_mb_discarded));
61726 }
61727
61728 free_percpu(sbi->s_locality_groups);
61729@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
61730 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
61731
61732 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
61733- atomic_inc(&sbi->s_bal_reqs);
61734- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61735+ atomic_inc_unchecked(&sbi->s_bal_reqs);
61736+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61737 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
61738- atomic_inc(&sbi->s_bal_success);
61739- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
61740+ atomic_inc_unchecked(&sbi->s_bal_success);
61741+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
61742 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
61743 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
61744- atomic_inc(&sbi->s_bal_goals);
61745+ atomic_inc_unchecked(&sbi->s_bal_goals);
61746 if (ac->ac_found > sbi->s_mb_max_to_scan)
61747- atomic_inc(&sbi->s_bal_breaks);
61748+ atomic_inc_unchecked(&sbi->s_bal_breaks);
61749 }
61750
61751 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
61752@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
61753 trace_ext4_mb_new_inode_pa(ac, pa);
61754
61755 ext4_mb_use_inode_pa(ac, pa);
61756- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
61757+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
61758
61759 ei = EXT4_I(ac->ac_inode);
61760 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61761@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
61762 trace_ext4_mb_new_group_pa(ac, pa);
61763
61764 ext4_mb_use_group_pa(ac, pa);
61765- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61766+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61767
61768 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61769 lg = ac->ac_lg;
61770@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
61771 * from the bitmap and continue.
61772 */
61773 }
61774- atomic_add(free, &sbi->s_mb_discarded);
61775+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
61776
61777 return err;
61778 }
61779@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
61780 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
61781 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
61782 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
61783- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61784+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61785 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
61786
61787 return 0;
61788diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
61789index 8313ca3..8a37d08 100644
61790--- a/fs/ext4/mmp.c
61791+++ b/fs/ext4/mmp.c
61792@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
61793 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
61794 const char *function, unsigned int line, const char *msg)
61795 {
61796- __ext4_warning(sb, function, line, msg);
61797+ __ext4_warning(sb, function, line, "%s", msg);
61798 __ext4_warning(sb, function, line,
61799 "MMP failure info: last update time: %llu, last update "
61800 "node: %s, last update device: %s\n",
61801diff --git a/fs/ext4/super.c b/fs/ext4/super.c
61802index fc29b2c..6c8b255 100644
61803--- a/fs/ext4/super.c
61804+++ b/fs/ext4/super.c
61805@@ -1252,7 +1252,7 @@ static ext4_fsblk_t get_sb_block(void **data)
61806 }
61807
61808 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
61809-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61810+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61811 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
61812
61813 #ifdef CONFIG_QUOTA
61814@@ -2440,7 +2440,7 @@ struct ext4_attr {
61815 int offset;
61816 int deprecated_val;
61817 } u;
61818-};
61819+} __do_const;
61820
61821 static int parse_strtoull(const char *buf,
61822 unsigned long long max, unsigned long long *value)
61823diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
61824index 1e09fc7..0400dd4 100644
61825--- a/fs/ext4/xattr.c
61826+++ b/fs/ext4/xattr.c
61827@@ -399,7 +399,7 @@ static int
61828 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61829 char *buffer, size_t buffer_size)
61830 {
61831- size_t rest = buffer_size;
61832+ size_t rest = buffer_size, total_size = 0;
61833
61834 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
61835 const struct xattr_handler *handler =
61836@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61837 buffer += size;
61838 }
61839 rest -= size;
61840+ total_size += size;
61841 }
61842 }
61843- return buffer_size - rest;
61844+ return total_size;
61845 }
61846
61847 static int
61848diff --git a/fs/fcntl.c b/fs/fcntl.c
61849index ee85cd4..9dd0d20 100644
61850--- a/fs/fcntl.c
61851+++ b/fs/fcntl.c
61852@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
61853 int force)
61854 {
61855 security_file_set_fowner(filp);
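+	/* grsecurity: refuse to direct SIGIO/SIGURG at a pid outside the
+	 * caller's chroot or at a protected task (editorial comment, not
+	 * in the original patch) */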
61856+ if (gr_handle_chroot_fowner(pid, type))
61857+ return;
61858+ if (gr_check_protected_task_fowner(pid, type))
61859+ return;
61860 f_modown(filp, pid, type, force);
61861 }
61862 EXPORT_SYMBOL(__f_setown);
61863diff --git a/fs/fhandle.c b/fs/fhandle.c
61864index 999ff5c..2281df9 100644
61865--- a/fs/fhandle.c
61866+++ b/fs/fhandle.c
61867@@ -8,6 +8,7 @@
61868 #include <linux/fs_struct.h>
61869 #include <linux/fsnotify.h>
61870 #include <linux/personality.h>
61871+#include <linux/grsecurity.h>
61872 #include <asm/uaccess.h>
61873 #include "internal.h"
61874 #include "mount.h"
61875@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
61876 } else
61877 retval = 0;
61878 /* copy the mount id */
61879- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
61880- sizeof(*mnt_id)) ||
61881+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
61882 copy_to_user(ufh, handle,
61883 sizeof(struct file_handle) + handle_bytes))
61884 retval = -EFAULT;
61885@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
61886 * the directory. Ideally we would like CAP_DAC_SEARCH.
61887 * But we don't have that
61888 */
61889- if (!capable(CAP_DAC_READ_SEARCH)) {
61890+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
61891 retval = -EPERM;
61892 goto out_err;
61893 }
61894@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
61895 goto out_err;
61896 }
61897 /* copy the full handle */
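+	/* the fixed-size header (f_handle) was already copied in and
+	 * validated above; copying only the variable-length tail keeps
+	 * userspace from racing to change handle_bytes between the two
+	 * fetches (editorial comment, not in the original patch) */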
61898- if (copy_from_user(handle, ufh,
61899- sizeof(struct file_handle) +
61900+ *handle = f_handle;
61901+ if (copy_from_user(&handle->f_handle,
61902+ &ufh->f_handle,
61903 f_handle.handle_bytes)) {
61904 retval = -EFAULT;
61905 goto out_handle;
61906diff --git a/fs/file.c b/fs/file.c
61907index ee738ea..f6c1562 100644
61908--- a/fs/file.c
61909+++ b/fs/file.c
61910@@ -16,6 +16,7 @@
61911 #include <linux/slab.h>
61912 #include <linux/vmalloc.h>
61913 #include <linux/file.h>
61914+#include <linux/security.h>
61915 #include <linux/fdtable.h>
61916 #include <linux/bitops.h>
61917 #include <linux/interrupt.h>
61918@@ -139,7 +140,7 @@ out:
61919 * Return <0 error code on error; 1 on successful completion.
61920 * The files->file_lock should be held on entry, and will be held on exit.
61921 */
61922-static int expand_fdtable(struct files_struct *files, int nr)
61923+static int expand_fdtable(struct files_struct *files, unsigned int nr)
61924 __releases(files->file_lock)
61925 __acquires(files->file_lock)
61926 {
61927@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
61928 * expanded and execution may have blocked.
61929 * The files->file_lock should be held on entry, and will be held on exit.
61930 */
61931-static int expand_files(struct files_struct *files, int nr)
61932+static int expand_files(struct files_struct *files, unsigned int nr)
61933 {
61934 struct fdtable *fdt;
61935
61936@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
61937 if (!file)
61938 return __close_fd(files, fd);
61939
61940+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
61941 if (fd >= rlimit(RLIMIT_NOFILE))
61942 return -EBADF;
61943
61944@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
61945 if (unlikely(oldfd == newfd))
61946 return -EINVAL;
61947
61948+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
61949 if (newfd >= rlimit(RLIMIT_NOFILE))
61950 return -EBADF;
61951
61952@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
61953 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
61954 {
61955 int err;
61956+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
61957 if (from >= rlimit(RLIMIT_NOFILE))
61958 return -EINVAL;
61959 err = alloc_fd(from, flags);
61960diff --git a/fs/filesystems.c b/fs/filesystems.c
61961index 5797d45..7d7d79a 100644
61962--- a/fs/filesystems.c
61963+++ b/fs/filesystems.c
61964@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
61965 int len = dot ? dot - name : strlen(name);
61966
61967 fs = __get_fs_type(name, len);
61968+#ifdef CONFIG_GRKERNSEC_MODHARDEN
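+	/* MODHARDEN: route the "fs-*" autoload through grsecurity's
+	 * ___request_module() so module loading triggered by unprivileged
+	 * mounts can be policed (editorial comment, not in the original
+	 * patch) */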
61969+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
61970+#else
61971 if (!fs && (request_module("fs-%.*s", len, name) == 0))
61972+#endif
61973 fs = __get_fs_type(name, len);
61974
61975 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
61976diff --git a/fs/fs_struct.c b/fs/fs_struct.c
61977index 7dca743..2f2786d 100644
61978--- a/fs/fs_struct.c
61979+++ b/fs/fs_struct.c
61980@@ -4,6 +4,7 @@
61981 #include <linux/path.h>
61982 #include <linux/slab.h>
61983 #include <linux/fs_struct.h>
61984+#include <linux/grsecurity.h>
61985 #include "internal.h"
61986
61987 /*
61988@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
61989 struct path old_root;
61990
61991 path_get(path);
61992+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
61993 spin_lock(&fs->lock);
61994 write_seqcount_begin(&fs->seq);
61995 old_root = fs->root;
61996 fs->root = *path;
61997+ gr_set_chroot_entries(current, path);
61998 write_seqcount_end(&fs->seq);
61999 spin_unlock(&fs->lock);
62000- if (old_root.dentry)
62001+ if (old_root.dentry) {
62002+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
62003 path_put(&old_root);
62004+ }
62005 }
62006
62007 /*
62008@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62009 int hits = 0;
62010 spin_lock(&fs->lock);
62011 write_seqcount_begin(&fs->seq);
62012+ /* this root replacement is only done by pivot_root,
62013+ leave grsec's chroot tagging alone for this task
62014+ so that a pivoted root isn't treated as a chroot
62015+ */
62016 hits += replace_path(&fs->root, old_root, new_root);
62017 hits += replace_path(&fs->pwd, old_root, new_root);
62018 write_seqcount_end(&fs->seq);
62019@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62020
62021 void free_fs_struct(struct fs_struct *fs)
62022 {
62023+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
62024 path_put(&fs->root);
62025 path_put(&fs->pwd);
62026 kmem_cache_free(fs_cachep, fs);
62027@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
62028 task_lock(tsk);
62029 spin_lock(&fs->lock);
62030 tsk->fs = NULL;
62031- kill = !--fs->users;
62032+ gr_clear_chroot_entries(tsk);
62033+ kill = !atomic_dec_return(&fs->users);
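+	/* fs->users is an atomic_t in this patch; atomic_dec_return()
+	 * keeps the old "last user frees" semantics of !--fs->users
+	 * (editorial comment, not in the original patch) */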
62034 spin_unlock(&fs->lock);
62035 task_unlock(tsk);
62036 if (kill)
62037@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62038 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
62039 /* We don't need to lock fs - think why ;-) */
62040 if (fs) {
62041- fs->users = 1;
62042+ atomic_set(&fs->users, 1);
62043 fs->in_exec = 0;
62044 spin_lock_init(&fs->lock);
62045 seqcount_init(&fs->seq);
62046@@ -121,6 +132,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62047 spin_lock(&old->lock);
62048 fs->root = old->root;
62049 path_get(&fs->root);
62050+ /* instead of calling gr_set_chroot_entries here,
62051+ we call it from every caller of this function
62052+ */
62053 fs->pwd = old->pwd;
62054 path_get(&fs->pwd);
62055 spin_unlock(&old->lock);
62056@@ -139,8 +153,9 @@ int unshare_fs_struct(void)
62057
62058 task_lock(current);
62059 spin_lock(&fs->lock);
62060- kill = !--fs->users;
62061+ kill = !atomic_dec_return(&fs->users);
62062 current->fs = new_fs;
62063+ gr_set_chroot_entries(current, &new_fs->root);
62064 spin_unlock(&fs->lock);
62065 task_unlock(current);
62066
62067@@ -153,13 +168,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
62068
62069 int current_umask(void)
62070 {
62071- return current->fs->umask;
62072+ return current->fs->umask | gr_acl_umask();
62073 }
62074 EXPORT_SYMBOL(current_umask);
62075
62076 /* to be mentioned only in INIT_TASK */
62077 struct fs_struct init_fs = {
62078- .users = 1,
62079+ .users = ATOMIC_INIT(1),
62080 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
62081 .seq = SEQCNT_ZERO(init_fs.seq),
62082 .umask = 0022,
62083diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
62084index 89acec7..a575262 100644
62085--- a/fs/fscache/cookie.c
62086+++ b/fs/fscache/cookie.c
62087@@ -19,7 +19,7 @@
62088
62089 struct kmem_cache *fscache_cookie_jar;
62090
62091-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
62092+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
62093
62094 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
62095 static int fscache_alloc_object(struct fscache_cache *cache,
62096@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
62097 parent ? (char *) parent->def->name : "<no-parent>",
62098 def->name, netfs_data, enable);
62099
62100- fscache_stat(&fscache_n_acquires);
62101+ fscache_stat_unchecked(&fscache_n_acquires);
62102
62103 /* if there's no parent cookie, then we don't create one here either */
62104 if (!parent) {
62105- fscache_stat(&fscache_n_acquires_null);
62106+ fscache_stat_unchecked(&fscache_n_acquires_null);
62107 _leave(" [no parent]");
62108 return NULL;
62109 }
62110@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62111 /* allocate and initialise a cookie */
62112 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
62113 if (!cookie) {
62114- fscache_stat(&fscache_n_acquires_oom);
62115+ fscache_stat_unchecked(&fscache_n_acquires_oom);
62116 _leave(" [ENOMEM]");
62117 return NULL;
62118 }
62119@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
62120
62121 switch (cookie->def->type) {
62122 case FSCACHE_COOKIE_TYPE_INDEX:
62123- fscache_stat(&fscache_n_cookie_index);
62124+ fscache_stat_unchecked(&fscache_n_cookie_index);
62125 break;
62126 case FSCACHE_COOKIE_TYPE_DATAFILE:
62127- fscache_stat(&fscache_n_cookie_data);
62128+ fscache_stat_unchecked(&fscache_n_cookie_data);
62129 break;
62130 default:
62131- fscache_stat(&fscache_n_cookie_special);
62132+ fscache_stat_unchecked(&fscache_n_cookie_special);
62133 break;
62134 }
62135
62136@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62137 } else {
62138 atomic_dec(&parent->n_children);
62139 __fscache_cookie_put(cookie);
62140- fscache_stat(&fscache_n_acquires_nobufs);
62141+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
62142 _leave(" = NULL");
62143 return NULL;
62144 }
62145@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62146 }
62147 }
62148
62149- fscache_stat(&fscache_n_acquires_ok);
62150+ fscache_stat_unchecked(&fscache_n_acquires_ok);
62151 _leave(" = %p", cookie);
62152 return cookie;
62153 }
62154@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
62155 cache = fscache_select_cache_for_object(cookie->parent);
62156 if (!cache) {
62157 up_read(&fscache_addremove_sem);
62158- fscache_stat(&fscache_n_acquires_no_cache);
62159+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
62160 _leave(" = -ENOMEDIUM [no cache]");
62161 return -ENOMEDIUM;
62162 }
62163@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
62164 object = cache->ops->alloc_object(cache, cookie);
62165 fscache_stat_d(&fscache_n_cop_alloc_object);
62166 if (IS_ERR(object)) {
62167- fscache_stat(&fscache_n_object_no_alloc);
62168+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
62169 ret = PTR_ERR(object);
62170 goto error;
62171 }
62172
62173- fscache_stat(&fscache_n_object_alloc);
62174+ fscache_stat_unchecked(&fscache_n_object_alloc);
62175
62176- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
62177+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
62178
62179 _debug("ALLOC OBJ%x: %s {%lx}",
62180 object->debug_id, cookie->def->name, object->events);
62181@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
62182
62183 _enter("{%s}", cookie->def->name);
62184
62185- fscache_stat(&fscache_n_invalidates);
62186+ fscache_stat_unchecked(&fscache_n_invalidates);
62187
62188 /* Only permit invalidation of data files. Invalidating an index will
62189 * require the caller to release all its attachments to the tree rooted
62190@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
62191 {
62192 struct fscache_object *object;
62193
62194- fscache_stat(&fscache_n_updates);
62195+ fscache_stat_unchecked(&fscache_n_updates);
62196
62197 if (!cookie) {
62198- fscache_stat(&fscache_n_updates_null);
62199+ fscache_stat_unchecked(&fscache_n_updates_null);
62200 _leave(" [no cookie]");
62201 return;
62202 }
62203@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
62204 */
62205 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
62206 {
62207- fscache_stat(&fscache_n_relinquishes);
62208+ fscache_stat_unchecked(&fscache_n_relinquishes);
62209 if (retire)
62210- fscache_stat(&fscache_n_relinquishes_retire);
62211+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
62212
62213 if (!cookie) {
62214- fscache_stat(&fscache_n_relinquishes_null);
62215+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
62216 _leave(" [no cookie]");
62217 return;
62218 }
62219@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
62220 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
62221 goto inconsistent;
62222
62223- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
62224+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62225
62226 __fscache_use_cookie(cookie);
62227 if (fscache_submit_op(object, op) < 0)
62228diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
62229index 7872a62..d91b19f 100644
62230--- a/fs/fscache/internal.h
62231+++ b/fs/fscache/internal.h
62232@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
62233 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
62234 extern int fscache_wait_for_operation_activation(struct fscache_object *,
62235 struct fscache_operation *,
62236- atomic_t *,
62237- atomic_t *,
62238+ atomic_unchecked_t *,
62239+ atomic_unchecked_t *,
62240 void (*)(struct fscache_operation *));
62241 extern void fscache_invalidate_writes(struct fscache_cookie *);
62242
62243@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
62244 * stats.c
62245 */
62246 #ifdef CONFIG_FSCACHE_STATS
62247-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62248-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62249+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62250+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62251
62252-extern atomic_t fscache_n_op_pend;
62253-extern atomic_t fscache_n_op_run;
62254-extern atomic_t fscache_n_op_enqueue;
62255-extern atomic_t fscache_n_op_deferred_release;
62256-extern atomic_t fscache_n_op_release;
62257-extern atomic_t fscache_n_op_gc;
62258-extern atomic_t fscache_n_op_cancelled;
62259-extern atomic_t fscache_n_op_rejected;
62260+extern atomic_unchecked_t fscache_n_op_pend;
62261+extern atomic_unchecked_t fscache_n_op_run;
62262+extern atomic_unchecked_t fscache_n_op_enqueue;
62263+extern atomic_unchecked_t fscache_n_op_deferred_release;
62264+extern atomic_unchecked_t fscache_n_op_release;
62265+extern atomic_unchecked_t fscache_n_op_gc;
62266+extern atomic_unchecked_t fscache_n_op_cancelled;
62267+extern atomic_unchecked_t fscache_n_op_rejected;
62268
62269-extern atomic_t fscache_n_attr_changed;
62270-extern atomic_t fscache_n_attr_changed_ok;
62271-extern atomic_t fscache_n_attr_changed_nobufs;
62272-extern atomic_t fscache_n_attr_changed_nomem;
62273-extern atomic_t fscache_n_attr_changed_calls;
62274+extern atomic_unchecked_t fscache_n_attr_changed;
62275+extern atomic_unchecked_t fscache_n_attr_changed_ok;
62276+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
62277+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
62278+extern atomic_unchecked_t fscache_n_attr_changed_calls;
62279
62280-extern atomic_t fscache_n_allocs;
62281-extern atomic_t fscache_n_allocs_ok;
62282-extern atomic_t fscache_n_allocs_wait;
62283-extern atomic_t fscache_n_allocs_nobufs;
62284-extern atomic_t fscache_n_allocs_intr;
62285-extern atomic_t fscache_n_allocs_object_dead;
62286-extern atomic_t fscache_n_alloc_ops;
62287-extern atomic_t fscache_n_alloc_op_waits;
62288+extern atomic_unchecked_t fscache_n_allocs;
62289+extern atomic_unchecked_t fscache_n_allocs_ok;
62290+extern atomic_unchecked_t fscache_n_allocs_wait;
62291+extern atomic_unchecked_t fscache_n_allocs_nobufs;
62292+extern atomic_unchecked_t fscache_n_allocs_intr;
62293+extern atomic_unchecked_t fscache_n_allocs_object_dead;
62294+extern atomic_unchecked_t fscache_n_alloc_ops;
62295+extern atomic_unchecked_t fscache_n_alloc_op_waits;
62296
62297-extern atomic_t fscache_n_retrievals;
62298-extern atomic_t fscache_n_retrievals_ok;
62299-extern atomic_t fscache_n_retrievals_wait;
62300-extern atomic_t fscache_n_retrievals_nodata;
62301-extern atomic_t fscache_n_retrievals_nobufs;
62302-extern atomic_t fscache_n_retrievals_intr;
62303-extern atomic_t fscache_n_retrievals_nomem;
62304-extern atomic_t fscache_n_retrievals_object_dead;
62305-extern atomic_t fscache_n_retrieval_ops;
62306-extern atomic_t fscache_n_retrieval_op_waits;
62307+extern atomic_unchecked_t fscache_n_retrievals;
62308+extern atomic_unchecked_t fscache_n_retrievals_ok;
62309+extern atomic_unchecked_t fscache_n_retrievals_wait;
62310+extern atomic_unchecked_t fscache_n_retrievals_nodata;
62311+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
62312+extern atomic_unchecked_t fscache_n_retrievals_intr;
62313+extern atomic_unchecked_t fscache_n_retrievals_nomem;
62314+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
62315+extern atomic_unchecked_t fscache_n_retrieval_ops;
62316+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
62317
62318-extern atomic_t fscache_n_stores;
62319-extern atomic_t fscache_n_stores_ok;
62320-extern atomic_t fscache_n_stores_again;
62321-extern atomic_t fscache_n_stores_nobufs;
62322-extern atomic_t fscache_n_stores_oom;
62323-extern atomic_t fscache_n_store_ops;
62324-extern atomic_t fscache_n_store_calls;
62325-extern atomic_t fscache_n_store_pages;
62326-extern atomic_t fscache_n_store_radix_deletes;
62327-extern atomic_t fscache_n_store_pages_over_limit;
62328+extern atomic_unchecked_t fscache_n_stores;
62329+extern atomic_unchecked_t fscache_n_stores_ok;
62330+extern atomic_unchecked_t fscache_n_stores_again;
62331+extern atomic_unchecked_t fscache_n_stores_nobufs;
62332+extern atomic_unchecked_t fscache_n_stores_oom;
62333+extern atomic_unchecked_t fscache_n_store_ops;
62334+extern atomic_unchecked_t fscache_n_store_calls;
62335+extern atomic_unchecked_t fscache_n_store_pages;
62336+extern atomic_unchecked_t fscache_n_store_radix_deletes;
62337+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
62338
62339-extern atomic_t fscache_n_store_vmscan_not_storing;
62340-extern atomic_t fscache_n_store_vmscan_gone;
62341-extern atomic_t fscache_n_store_vmscan_busy;
62342-extern atomic_t fscache_n_store_vmscan_cancelled;
62343-extern atomic_t fscache_n_store_vmscan_wait;
62344+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
62345+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
62346+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
62347+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
62348+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
62349
62350-extern atomic_t fscache_n_marks;
62351-extern atomic_t fscache_n_uncaches;
62352+extern atomic_unchecked_t fscache_n_marks;
62353+extern atomic_unchecked_t fscache_n_uncaches;
62354
62355-extern atomic_t fscache_n_acquires;
62356-extern atomic_t fscache_n_acquires_null;
62357-extern atomic_t fscache_n_acquires_no_cache;
62358-extern atomic_t fscache_n_acquires_ok;
62359-extern atomic_t fscache_n_acquires_nobufs;
62360-extern atomic_t fscache_n_acquires_oom;
62361+extern atomic_unchecked_t fscache_n_acquires;
62362+extern atomic_unchecked_t fscache_n_acquires_null;
62363+extern atomic_unchecked_t fscache_n_acquires_no_cache;
62364+extern atomic_unchecked_t fscache_n_acquires_ok;
62365+extern atomic_unchecked_t fscache_n_acquires_nobufs;
62366+extern atomic_unchecked_t fscache_n_acquires_oom;
62367
62368-extern atomic_t fscache_n_invalidates;
62369-extern atomic_t fscache_n_invalidates_run;
62370+extern atomic_unchecked_t fscache_n_invalidates;
62371+extern atomic_unchecked_t fscache_n_invalidates_run;
62372
62373-extern atomic_t fscache_n_updates;
62374-extern atomic_t fscache_n_updates_null;
62375-extern atomic_t fscache_n_updates_run;
62376+extern atomic_unchecked_t fscache_n_updates;
62377+extern atomic_unchecked_t fscache_n_updates_null;
62378+extern atomic_unchecked_t fscache_n_updates_run;
62379
62380-extern atomic_t fscache_n_relinquishes;
62381-extern atomic_t fscache_n_relinquishes_null;
62382-extern atomic_t fscache_n_relinquishes_waitcrt;
62383-extern atomic_t fscache_n_relinquishes_retire;
62384+extern atomic_unchecked_t fscache_n_relinquishes;
62385+extern atomic_unchecked_t fscache_n_relinquishes_null;
62386+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
62387+extern atomic_unchecked_t fscache_n_relinquishes_retire;
62388
62389-extern atomic_t fscache_n_cookie_index;
62390-extern atomic_t fscache_n_cookie_data;
62391-extern atomic_t fscache_n_cookie_special;
62392+extern atomic_unchecked_t fscache_n_cookie_index;
62393+extern atomic_unchecked_t fscache_n_cookie_data;
62394+extern atomic_unchecked_t fscache_n_cookie_special;
62395
62396-extern atomic_t fscache_n_object_alloc;
62397-extern atomic_t fscache_n_object_no_alloc;
62398-extern atomic_t fscache_n_object_lookups;
62399-extern atomic_t fscache_n_object_lookups_negative;
62400-extern atomic_t fscache_n_object_lookups_positive;
62401-extern atomic_t fscache_n_object_lookups_timed_out;
62402-extern atomic_t fscache_n_object_created;
62403-extern atomic_t fscache_n_object_avail;
62404-extern atomic_t fscache_n_object_dead;
62405+extern atomic_unchecked_t fscache_n_object_alloc;
62406+extern atomic_unchecked_t fscache_n_object_no_alloc;
62407+extern atomic_unchecked_t fscache_n_object_lookups;
62408+extern atomic_unchecked_t fscache_n_object_lookups_negative;
62409+extern atomic_unchecked_t fscache_n_object_lookups_positive;
62410+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
62411+extern atomic_unchecked_t fscache_n_object_created;
62412+extern atomic_unchecked_t fscache_n_object_avail;
62413+extern atomic_unchecked_t fscache_n_object_dead;
62414
62415-extern atomic_t fscache_n_checkaux_none;
62416-extern atomic_t fscache_n_checkaux_okay;
62417-extern atomic_t fscache_n_checkaux_update;
62418-extern atomic_t fscache_n_checkaux_obsolete;
62419+extern atomic_unchecked_t fscache_n_checkaux_none;
62420+extern atomic_unchecked_t fscache_n_checkaux_okay;
62421+extern atomic_unchecked_t fscache_n_checkaux_update;
62422+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
62423
62424 extern atomic_t fscache_n_cop_alloc_object;
62425 extern atomic_t fscache_n_cop_lookup_object;
62426@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
62427 atomic_inc(stat);
62428 }
62429
62430+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
62431+{
62432+ atomic_inc_unchecked(stat);
62433+}
62434+
62435 static inline void fscache_stat_d(atomic_t *stat)
62436 {
62437 atomic_dec(stat);
62438@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
62439
62440 #define __fscache_stat(stat) (NULL)
62441 #define fscache_stat(stat) do {} while (0)
62442+#define fscache_stat_unchecked(stat) do {} while (0)
62443 #define fscache_stat_d(stat) do {} while (0)
62444 #endif
62445
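Editorial note: fscache_stat_unchecked() follows the file's existing pattern: a real atomic_inc_unchecked() when CONFIG_FSCACHE_STATS is set (the inline added above) and a do-nothing macro otherwise, so the many converted call sites in cookie.c, object.c, operation.c and page.c below need no conditional compilation.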
62446diff --git a/fs/fscache/object.c b/fs/fscache/object.c
62447index da032da..0076ce7 100644
62448--- a/fs/fscache/object.c
62449+++ b/fs/fscache/object.c
62450@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62451 _debug("LOOKUP \"%s\" in \"%s\"",
62452 cookie->def->name, object->cache->tag->name);
62453
62454- fscache_stat(&fscache_n_object_lookups);
62455+ fscache_stat_unchecked(&fscache_n_object_lookups);
62456 fscache_stat(&fscache_n_cop_lookup_object);
62457 ret = object->cache->ops->lookup_object(object);
62458 fscache_stat_d(&fscache_n_cop_lookup_object);
62459@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62460 if (ret == -ETIMEDOUT) {
62461 /* probably stuck behind another object, so move this one to
62462 * the back of the queue */
62463- fscache_stat(&fscache_n_object_lookups_timed_out);
62464+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
62465 _leave(" [timeout]");
62466 return NO_TRANSIT;
62467 }
62468@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
62469 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
62470
62471 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62472- fscache_stat(&fscache_n_object_lookups_negative);
62473+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
62474
62475 /* Allow write requests to begin stacking up and read requests to begin
62476 * returning ENODATA.
62477@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
62478 /* if we were still looking up, then we must have a positive lookup
62479 * result, in which case there may be data available */
62480 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62481- fscache_stat(&fscache_n_object_lookups_positive);
62482+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
62483
62484 /* We do (presumably) have data */
62485 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
62486@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
62487 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
62488 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
62489 } else {
62490- fscache_stat(&fscache_n_object_created);
62491+ fscache_stat_unchecked(&fscache_n_object_created);
62492 }
62493
62494 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
62495@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
62496 fscache_stat_d(&fscache_n_cop_lookup_complete);
62497
62498 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
62499- fscache_stat(&fscache_n_object_avail);
62500+ fscache_stat_unchecked(&fscache_n_object_avail);
62501
62502 _leave("");
62503 return transit_to(JUMPSTART_DEPS);
62504@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
62505
62506 /* this just shifts the object release to the work processor */
62507 fscache_put_object(object);
62508- fscache_stat(&fscache_n_object_dead);
62509+ fscache_stat_unchecked(&fscache_n_object_dead);
62510
62511 _leave("");
62512 return transit_to(OBJECT_DEAD);
62513@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62514 enum fscache_checkaux result;
62515
62516 if (!object->cookie->def->check_aux) {
62517- fscache_stat(&fscache_n_checkaux_none);
62518+ fscache_stat_unchecked(&fscache_n_checkaux_none);
62519 return FSCACHE_CHECKAUX_OKAY;
62520 }
62521
62522@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62523 switch (result) {
62524 /* entry okay as is */
62525 case FSCACHE_CHECKAUX_OKAY:
62526- fscache_stat(&fscache_n_checkaux_okay);
62527+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
62528 break;
62529
62530 /* entry requires update */
62531 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
62532- fscache_stat(&fscache_n_checkaux_update);
62533+ fscache_stat_unchecked(&fscache_n_checkaux_update);
62534 break;
62535
62536 /* entry requires deletion */
62537 case FSCACHE_CHECKAUX_OBSOLETE:
62538- fscache_stat(&fscache_n_checkaux_obsolete);
62539+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
62540 break;
62541
62542 default:
62543@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
62544 {
62545 const struct fscache_state *s;
62546
62547- fscache_stat(&fscache_n_invalidates_run);
62548+ fscache_stat_unchecked(&fscache_n_invalidates_run);
62549 fscache_stat(&fscache_n_cop_invalidate_object);
62550 s = _fscache_invalidate_object(object, event);
62551 fscache_stat_d(&fscache_n_cop_invalidate_object);
62552@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
62553 {
62554 _enter("{OBJ%x},%d", object->debug_id, event);
62555
62556- fscache_stat(&fscache_n_updates_run);
62557+ fscache_stat_unchecked(&fscache_n_updates_run);
62558 fscache_stat(&fscache_n_cop_update_object);
62559 object->cache->ops->update_object(object);
62560 fscache_stat_d(&fscache_n_cop_update_object);
62561diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
62562index e7b87a0..a85d47a 100644
62563--- a/fs/fscache/operation.c
62564+++ b/fs/fscache/operation.c
62565@@ -17,7 +17,7 @@
62566 #include <linux/slab.h>
62567 #include "internal.h"
62568
62569-atomic_t fscache_op_debug_id;
62570+atomic_unchecked_t fscache_op_debug_id;
62571 EXPORT_SYMBOL(fscache_op_debug_id);
62572
62573 /**
62574@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
62575 ASSERTCMP(atomic_read(&op->usage), >, 0);
62576 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
62577
62578- fscache_stat(&fscache_n_op_enqueue);
62579+ fscache_stat_unchecked(&fscache_n_op_enqueue);
62580 switch (op->flags & FSCACHE_OP_TYPE) {
62581 case FSCACHE_OP_ASYNC:
62582 _debug("queue async");
62583@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
62584 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
62585 if (op->processor)
62586 fscache_enqueue_operation(op);
62587- fscache_stat(&fscache_n_op_run);
62588+ fscache_stat_unchecked(&fscache_n_op_run);
62589 }
62590
62591 /*
62592@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62593 if (object->n_in_progress > 0) {
62594 atomic_inc(&op->usage);
62595 list_add_tail(&op->pend_link, &object->pending_ops);
62596- fscache_stat(&fscache_n_op_pend);
62597+ fscache_stat_unchecked(&fscache_n_op_pend);
62598 } else if (!list_empty(&object->pending_ops)) {
62599 atomic_inc(&op->usage);
62600 list_add_tail(&op->pend_link, &object->pending_ops);
62601- fscache_stat(&fscache_n_op_pend);
62602+ fscache_stat_unchecked(&fscache_n_op_pend);
62603 fscache_start_operations(object);
62604 } else {
62605 ASSERTCMP(object->n_in_progress, ==, 0);
62606@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62607 object->n_exclusive++; /* reads and writes must wait */
62608 atomic_inc(&op->usage);
62609 list_add_tail(&op->pend_link, &object->pending_ops);
62610- fscache_stat(&fscache_n_op_pend);
62611+ fscache_stat_unchecked(&fscache_n_op_pend);
62612 ret = 0;
62613 } else {
62614 /* If we're in any other state, there must have been an I/O
62615@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
62616 if (object->n_exclusive > 0) {
62617 atomic_inc(&op->usage);
62618 list_add_tail(&op->pend_link, &object->pending_ops);
62619- fscache_stat(&fscache_n_op_pend);
62620+ fscache_stat_unchecked(&fscache_n_op_pend);
62621 } else if (!list_empty(&object->pending_ops)) {
62622 atomic_inc(&op->usage);
62623 list_add_tail(&op->pend_link, &object->pending_ops);
62624- fscache_stat(&fscache_n_op_pend);
62625+ fscache_stat_unchecked(&fscache_n_op_pend);
62626 fscache_start_operations(object);
62627 } else {
62628 ASSERTCMP(object->n_exclusive, ==, 0);
62629@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
62630 object->n_ops++;
62631 atomic_inc(&op->usage);
62632 list_add_tail(&op->pend_link, &object->pending_ops);
62633- fscache_stat(&fscache_n_op_pend);
62634+ fscache_stat_unchecked(&fscache_n_op_pend);
62635 ret = 0;
62636 } else if (fscache_object_is_dying(object)) {
62637- fscache_stat(&fscache_n_op_rejected);
62638+ fscache_stat_unchecked(&fscache_n_op_rejected);
62639 op->state = FSCACHE_OP_ST_CANCELLED;
62640 ret = -ENOBUFS;
62641 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
62642@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
62643 ret = -EBUSY;
62644 if (op->state == FSCACHE_OP_ST_PENDING) {
62645 ASSERT(!list_empty(&op->pend_link));
62646- fscache_stat(&fscache_n_op_cancelled);
62647+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62648 list_del_init(&op->pend_link);
62649 if (do_cancel)
62650 do_cancel(op);
62651@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
62652 while (!list_empty(&object->pending_ops)) {
62653 op = list_entry(object->pending_ops.next,
62654 struct fscache_operation, pend_link);
62655- fscache_stat(&fscache_n_op_cancelled);
62656+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62657 list_del_init(&op->pend_link);
62658
62659 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
62660@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
62661 op->state, ==, FSCACHE_OP_ST_CANCELLED);
62662 op->state = FSCACHE_OP_ST_DEAD;
62663
62664- fscache_stat(&fscache_n_op_release);
62665+ fscache_stat_unchecked(&fscache_n_op_release);
62666
62667 if (op->release) {
62668 op->release(op);
62669@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
62670 * lock, and defer it otherwise */
62671 if (!spin_trylock(&object->lock)) {
62672 _debug("defer put");
62673- fscache_stat(&fscache_n_op_deferred_release);
62674+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
62675
62676 cache = object->cache;
62677 spin_lock(&cache->op_gc_list_lock);
62678@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
62679
62680 _debug("GC DEFERRED REL OBJ%x OP%x",
62681 object->debug_id, op->debug_id);
62682- fscache_stat(&fscache_n_op_gc);
62683+ fscache_stat_unchecked(&fscache_n_op_gc);
62684
62685 ASSERTCMP(atomic_read(&op->usage), ==, 0);
62686 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
62687diff --git a/fs/fscache/page.c b/fs/fscache/page.c
62688index de33b3f..8be4d29 100644
62689--- a/fs/fscache/page.c
62690+++ b/fs/fscache/page.c
62691@@ -74,7 +74,7 @@ try_again:
62692 val = radix_tree_lookup(&cookie->stores, page->index);
62693 if (!val) {
62694 rcu_read_unlock();
62695- fscache_stat(&fscache_n_store_vmscan_not_storing);
62696+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
62697 __fscache_uncache_page(cookie, page);
62698 return true;
62699 }
62700@@ -104,11 +104,11 @@ try_again:
62701 spin_unlock(&cookie->stores_lock);
62702
62703 if (xpage) {
62704- fscache_stat(&fscache_n_store_vmscan_cancelled);
62705- fscache_stat(&fscache_n_store_radix_deletes);
62706+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
62707+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62708 ASSERTCMP(xpage, ==, page);
62709 } else {
62710- fscache_stat(&fscache_n_store_vmscan_gone);
62711+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
62712 }
62713
62714 wake_up_bit(&cookie->flags, 0);
62715@@ -123,11 +123,11 @@ page_busy:
62716 * sleeping on memory allocation, so we may need to impose a timeout
62717 * too. */
62718 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
62719- fscache_stat(&fscache_n_store_vmscan_busy);
62720+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
62721 return false;
62722 }
62723
62724- fscache_stat(&fscache_n_store_vmscan_wait);
62725+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
62726 if (!release_page_wait_timeout(cookie, page))
62727 _debug("fscache writeout timeout page: %p{%lx}",
62728 page, page->index);
62729@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
62730 FSCACHE_COOKIE_STORING_TAG);
62731 if (!radix_tree_tag_get(&cookie->stores, page->index,
62732 FSCACHE_COOKIE_PENDING_TAG)) {
62733- fscache_stat(&fscache_n_store_radix_deletes);
62734+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62735 xpage = radix_tree_delete(&cookie->stores, page->index);
62736 }
62737 spin_unlock(&cookie->stores_lock);
62738@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
62739
62740 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
62741
62742- fscache_stat(&fscache_n_attr_changed_calls);
62743+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
62744
62745 if (fscache_object_is_active(object)) {
62746 fscache_stat(&fscache_n_cop_attr_changed);
62747@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62748
62749 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62750
62751- fscache_stat(&fscache_n_attr_changed);
62752+ fscache_stat_unchecked(&fscache_n_attr_changed);
62753
62754 op = kzalloc(sizeof(*op), GFP_KERNEL);
62755 if (!op) {
62756- fscache_stat(&fscache_n_attr_changed_nomem);
62757+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
62758 _leave(" = -ENOMEM");
62759 return -ENOMEM;
62760 }
62761@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62762 if (fscache_submit_exclusive_op(object, op) < 0)
62763 goto nobufs_dec;
62764 spin_unlock(&cookie->lock);
62765- fscache_stat(&fscache_n_attr_changed_ok);
62766+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
62767 fscache_put_operation(op);
62768 _leave(" = 0");
62769 return 0;
62770@@ -242,7 +242,7 @@ nobufs:
62771 kfree(op);
62772 if (wake_cookie)
62773 __fscache_wake_unused_cookie(cookie);
62774- fscache_stat(&fscache_n_attr_changed_nobufs);
62775+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
62776 _leave(" = %d", -ENOBUFS);
62777 return -ENOBUFS;
62778 }
62779@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
62780 /* allocate a retrieval operation and attempt to submit it */
62781 op = kzalloc(sizeof(*op), GFP_NOIO);
62782 if (!op) {
62783- fscache_stat(&fscache_n_retrievals_nomem);
62784+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62785 return NULL;
62786 }
62787
62788@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
62789 return 0;
62790 }
62791
62792- fscache_stat(&fscache_n_retrievals_wait);
62793+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
62794
62795 jif = jiffies;
62796 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
62797 TASK_INTERRUPTIBLE) != 0) {
62798- fscache_stat(&fscache_n_retrievals_intr);
62799+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62800 _leave(" = -ERESTARTSYS");
62801 return -ERESTARTSYS;
62802 }
62803@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
62804 */
62805 int fscache_wait_for_operation_activation(struct fscache_object *object,
62806 struct fscache_operation *op,
62807- atomic_t *stat_op_waits,
62808- atomic_t *stat_object_dead,
62809+ atomic_unchecked_t *stat_op_waits,
62810+ atomic_unchecked_t *stat_object_dead,
62811 void (*do_cancel)(struct fscache_operation *))
62812 {
62813 int ret;
62814@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62815
62816 _debug(">>> WT");
62817 if (stat_op_waits)
62818- fscache_stat(stat_op_waits);
62819+ fscache_stat_unchecked(stat_op_waits);
62820 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
62821 TASK_INTERRUPTIBLE) != 0) {
62822 ret = fscache_cancel_op(op, do_cancel);
62823@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62824 check_if_dead:
62825 if (op->state == FSCACHE_OP_ST_CANCELLED) {
62826 if (stat_object_dead)
62827- fscache_stat(stat_object_dead);
62828+ fscache_stat_unchecked(stat_object_dead);
62829 _leave(" = -ENOBUFS [cancelled]");
62830 return -ENOBUFS;
62831 }
62832@@ -381,7 +381,7 @@ check_if_dead:
62833 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
62834 fscache_cancel_op(op, do_cancel);
62835 if (stat_object_dead)
62836- fscache_stat(stat_object_dead);
62837+ fscache_stat_unchecked(stat_object_dead);
62838 return -ENOBUFS;
62839 }
62840 return 0;
62841@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62842
62843 _enter("%p,%p,,,", cookie, page);
62844
62845- fscache_stat(&fscache_n_retrievals);
62846+ fscache_stat_unchecked(&fscache_n_retrievals);
62847
62848 if (hlist_empty(&cookie->backing_objects))
62849 goto nobufs;
62850@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62851 goto nobufs_unlock_dec;
62852 spin_unlock(&cookie->lock);
62853
62854- fscache_stat(&fscache_n_retrieval_ops);
62855+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
62856
62857 /* pin the netfs read context in case we need to do the actual netfs
62858 * read because we've encountered a cache read failure */
62859@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62860
62861 error:
62862 if (ret == -ENOMEM)
62863- fscache_stat(&fscache_n_retrievals_nomem);
62864+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62865 else if (ret == -ERESTARTSYS)
62866- fscache_stat(&fscache_n_retrievals_intr);
62867+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62868 else if (ret == -ENODATA)
62869- fscache_stat(&fscache_n_retrievals_nodata);
62870+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
62871 else if (ret < 0)
62872- fscache_stat(&fscache_n_retrievals_nobufs);
62873+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62874 else
62875- fscache_stat(&fscache_n_retrievals_ok);
62876+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
62877
62878 fscache_put_retrieval(op);
62879 _leave(" = %d", ret);
62880@@ -505,7 +505,7 @@ nobufs_unlock:
62881 __fscache_wake_unused_cookie(cookie);
62882 kfree(op);
62883 nobufs:
62884- fscache_stat(&fscache_n_retrievals_nobufs);
62885+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62886 _leave(" = -ENOBUFS");
62887 return -ENOBUFS;
62888 }
62889@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62890
62891 _enter("%p,,%d,,,", cookie, *nr_pages);
62892
62893- fscache_stat(&fscache_n_retrievals);
62894+ fscache_stat_unchecked(&fscache_n_retrievals);
62895
62896 if (hlist_empty(&cookie->backing_objects))
62897 goto nobufs;
62898@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62899 goto nobufs_unlock_dec;
62900 spin_unlock(&cookie->lock);
62901
62902- fscache_stat(&fscache_n_retrieval_ops);
62903+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
62904
62905 /* pin the netfs read context in case we need to do the actual netfs
62906 * read because we've encountered a cache read failure */
62907@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62908
62909 error:
62910 if (ret == -ENOMEM)
62911- fscache_stat(&fscache_n_retrievals_nomem);
62912+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62913 else if (ret == -ERESTARTSYS)
62914- fscache_stat(&fscache_n_retrievals_intr);
62915+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62916 else if (ret == -ENODATA)
62917- fscache_stat(&fscache_n_retrievals_nodata);
62918+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
62919 else if (ret < 0)
62920- fscache_stat(&fscache_n_retrievals_nobufs);
62921+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62922 else
62923- fscache_stat(&fscache_n_retrievals_ok);
62924+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
62925
62926 fscache_put_retrieval(op);
62927 _leave(" = %d", ret);
62928@@ -636,7 +636,7 @@ nobufs_unlock:
62929 if (wake_cookie)
62930 __fscache_wake_unused_cookie(cookie);
62931 nobufs:
62932- fscache_stat(&fscache_n_retrievals_nobufs);
62933+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62934 _leave(" = -ENOBUFS");
62935 return -ENOBUFS;
62936 }
62937@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62938
62939 _enter("%p,%p,,,", cookie, page);
62940
62941- fscache_stat(&fscache_n_allocs);
62942+ fscache_stat_unchecked(&fscache_n_allocs);
62943
62944 if (hlist_empty(&cookie->backing_objects))
62945 goto nobufs;
62946@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62947 goto nobufs_unlock_dec;
62948 spin_unlock(&cookie->lock);
62949
62950- fscache_stat(&fscache_n_alloc_ops);
62951+ fscache_stat_unchecked(&fscache_n_alloc_ops);
62952
62953 ret = fscache_wait_for_operation_activation(
62954 object, &op->op,
62955@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62956
62957 error:
62958 if (ret == -ERESTARTSYS)
62959- fscache_stat(&fscache_n_allocs_intr);
62960+ fscache_stat_unchecked(&fscache_n_allocs_intr);
62961 else if (ret < 0)
62962- fscache_stat(&fscache_n_allocs_nobufs);
62963+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
62964 else
62965- fscache_stat(&fscache_n_allocs_ok);
62966+ fscache_stat_unchecked(&fscache_n_allocs_ok);
62967
62968 fscache_put_retrieval(op);
62969 _leave(" = %d", ret);
62970@@ -730,7 +730,7 @@ nobufs_unlock:
62971 if (wake_cookie)
62972 __fscache_wake_unused_cookie(cookie);
62973 nobufs:
62974- fscache_stat(&fscache_n_allocs_nobufs);
62975+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
62976 _leave(" = -ENOBUFS");
62977 return -ENOBUFS;
62978 }
62979@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62980
62981 spin_lock(&cookie->stores_lock);
62982
62983- fscache_stat(&fscache_n_store_calls);
62984+ fscache_stat_unchecked(&fscache_n_store_calls);
62985
62986 /* find a page to store */
62987 page = NULL;
62988@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62989 page = results[0];
62990 _debug("gang %d [%lx]", n, page->index);
62991 if (page->index > op->store_limit) {
62992- fscache_stat(&fscache_n_store_pages_over_limit);
62993+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
62994 goto superseded;
62995 }
62996
62997@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62998 spin_unlock(&cookie->stores_lock);
62999 spin_unlock(&object->lock);
63000
63001- fscache_stat(&fscache_n_store_pages);
63002+ fscache_stat_unchecked(&fscache_n_store_pages);
63003 fscache_stat(&fscache_n_cop_write_page);
63004 ret = object->cache->ops->write_page(op, page);
63005 fscache_stat_d(&fscache_n_cop_write_page);
63006@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63007 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63008 ASSERT(PageFsCache(page));
63009
63010- fscache_stat(&fscache_n_stores);
63011+ fscache_stat_unchecked(&fscache_n_stores);
63012
63013 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
63014 _leave(" = -ENOBUFS [invalidating]");
63015@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63016 spin_unlock(&cookie->stores_lock);
63017 spin_unlock(&object->lock);
63018
63019- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
63020+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63021 op->store_limit = object->store_limit;
63022
63023 __fscache_use_cookie(cookie);
63024@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63025
63026 spin_unlock(&cookie->lock);
63027 radix_tree_preload_end();
63028- fscache_stat(&fscache_n_store_ops);
63029- fscache_stat(&fscache_n_stores_ok);
63030+ fscache_stat_unchecked(&fscache_n_store_ops);
63031+ fscache_stat_unchecked(&fscache_n_stores_ok);
63032
63033 /* the work queue now carries its own ref on the object */
63034 fscache_put_operation(&op->op);
63035@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63036 return 0;
63037
63038 already_queued:
63039- fscache_stat(&fscache_n_stores_again);
63040+ fscache_stat_unchecked(&fscache_n_stores_again);
63041 already_pending:
63042 spin_unlock(&cookie->stores_lock);
63043 spin_unlock(&object->lock);
63044 spin_unlock(&cookie->lock);
63045 radix_tree_preload_end();
63046 kfree(op);
63047- fscache_stat(&fscache_n_stores_ok);
63048+ fscache_stat_unchecked(&fscache_n_stores_ok);
63049 _leave(" = 0");
63050 return 0;
63051
63052@@ -1039,14 +1039,14 @@ nobufs:
63053 kfree(op);
63054 if (wake_cookie)
63055 __fscache_wake_unused_cookie(cookie);
63056- fscache_stat(&fscache_n_stores_nobufs);
63057+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
63058 _leave(" = -ENOBUFS");
63059 return -ENOBUFS;
63060
63061 nomem_free:
63062 kfree(op);
63063 nomem:
63064- fscache_stat(&fscache_n_stores_oom);
63065+ fscache_stat_unchecked(&fscache_n_stores_oom);
63066 _leave(" = -ENOMEM");
63067 return -ENOMEM;
63068 }
63069@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
63070 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63071 ASSERTCMP(page, !=, NULL);
63072
63073- fscache_stat(&fscache_n_uncaches);
63074+ fscache_stat_unchecked(&fscache_n_uncaches);
63075
63076 /* cache withdrawal may beat us to it */
63077 if (!PageFsCache(page))
63078@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
63079 struct fscache_cookie *cookie = op->op.object->cookie;
63080
63081 #ifdef CONFIG_FSCACHE_STATS
63082- atomic_inc(&fscache_n_marks);
63083+ atomic_inc_unchecked(&fscache_n_marks);
63084 #endif
63085
63086 _debug("- mark %p{%lx}", page, page->index);
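Note which counters the hunks above leave alone: fscache_n_cop_* and the fscache_stat_d() decrements (visible around the write_page call) stay plain atomic_t. Those move in matched inc/dec pairs, so they behave like reference counts and keep overflow checking, while increment-only statistics and the wrapping fscache_op_debug_id source become _unchecked. A compressed sketch of that split, with plain integers standing in for both atomic types:

#include <stdio.h>

static int n_cop_write_page;	/* stays "checked": inc/dec in pairs  */
static unsigned n_store_pages;	/* "unchecked": increment-only stat   */
static unsigned op_debug_id;	/* "unchecked": IDs may wrap          */

static unsigned new_debug_id(void) { return ++op_debug_id; }

int main(void)
{
	n_cop_write_page++;	/* fscache_stat(&fscache_n_cop_write_page)   */
	n_store_pages++;	/* fscache_stat_unchecked(&..._store_pages)  */
	n_cop_write_page--;	/* fscache_stat_d() balances the increment   */
	printf("op id %u, cop calls in flight %d\n",
	       new_debug_id(), n_cop_write_page);
	return 0;
}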
63087diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
63088index 40d13c7..ddf52b9 100644
63089--- a/fs/fscache/stats.c
63090+++ b/fs/fscache/stats.c
63091@@ -18,99 +18,99 @@
63092 /*
63093 * operation counters
63094 */
63095-atomic_t fscache_n_op_pend;
63096-atomic_t fscache_n_op_run;
63097-atomic_t fscache_n_op_enqueue;
63098-atomic_t fscache_n_op_requeue;
63099-atomic_t fscache_n_op_deferred_release;
63100-atomic_t fscache_n_op_release;
63101-atomic_t fscache_n_op_gc;
63102-atomic_t fscache_n_op_cancelled;
63103-atomic_t fscache_n_op_rejected;
63104+atomic_unchecked_t fscache_n_op_pend;
63105+atomic_unchecked_t fscache_n_op_run;
63106+atomic_unchecked_t fscache_n_op_enqueue;
63107+atomic_unchecked_t fscache_n_op_requeue;
63108+atomic_unchecked_t fscache_n_op_deferred_release;
63109+atomic_unchecked_t fscache_n_op_release;
63110+atomic_unchecked_t fscache_n_op_gc;
63111+atomic_unchecked_t fscache_n_op_cancelled;
63112+atomic_unchecked_t fscache_n_op_rejected;
63113
63114-atomic_t fscache_n_attr_changed;
63115-atomic_t fscache_n_attr_changed_ok;
63116-atomic_t fscache_n_attr_changed_nobufs;
63117-atomic_t fscache_n_attr_changed_nomem;
63118-atomic_t fscache_n_attr_changed_calls;
63119+atomic_unchecked_t fscache_n_attr_changed;
63120+atomic_unchecked_t fscache_n_attr_changed_ok;
63121+atomic_unchecked_t fscache_n_attr_changed_nobufs;
63122+atomic_unchecked_t fscache_n_attr_changed_nomem;
63123+atomic_unchecked_t fscache_n_attr_changed_calls;
63124
63125-atomic_t fscache_n_allocs;
63126-atomic_t fscache_n_allocs_ok;
63127-atomic_t fscache_n_allocs_wait;
63128-atomic_t fscache_n_allocs_nobufs;
63129-atomic_t fscache_n_allocs_intr;
63130-atomic_t fscache_n_allocs_object_dead;
63131-atomic_t fscache_n_alloc_ops;
63132-atomic_t fscache_n_alloc_op_waits;
63133+atomic_unchecked_t fscache_n_allocs;
63134+atomic_unchecked_t fscache_n_allocs_ok;
63135+atomic_unchecked_t fscache_n_allocs_wait;
63136+atomic_unchecked_t fscache_n_allocs_nobufs;
63137+atomic_unchecked_t fscache_n_allocs_intr;
63138+atomic_unchecked_t fscache_n_allocs_object_dead;
63139+atomic_unchecked_t fscache_n_alloc_ops;
63140+atomic_unchecked_t fscache_n_alloc_op_waits;
63141
63142-atomic_t fscache_n_retrievals;
63143-atomic_t fscache_n_retrievals_ok;
63144-atomic_t fscache_n_retrievals_wait;
63145-atomic_t fscache_n_retrievals_nodata;
63146-atomic_t fscache_n_retrievals_nobufs;
63147-atomic_t fscache_n_retrievals_intr;
63148-atomic_t fscache_n_retrievals_nomem;
63149-atomic_t fscache_n_retrievals_object_dead;
63150-atomic_t fscache_n_retrieval_ops;
63151-atomic_t fscache_n_retrieval_op_waits;
63152+atomic_unchecked_t fscache_n_retrievals;
63153+atomic_unchecked_t fscache_n_retrievals_ok;
63154+atomic_unchecked_t fscache_n_retrievals_wait;
63155+atomic_unchecked_t fscache_n_retrievals_nodata;
63156+atomic_unchecked_t fscache_n_retrievals_nobufs;
63157+atomic_unchecked_t fscache_n_retrievals_intr;
63158+atomic_unchecked_t fscache_n_retrievals_nomem;
63159+atomic_unchecked_t fscache_n_retrievals_object_dead;
63160+atomic_unchecked_t fscache_n_retrieval_ops;
63161+atomic_unchecked_t fscache_n_retrieval_op_waits;
63162
63163-atomic_t fscache_n_stores;
63164-atomic_t fscache_n_stores_ok;
63165-atomic_t fscache_n_stores_again;
63166-atomic_t fscache_n_stores_nobufs;
63167-atomic_t fscache_n_stores_oom;
63168-atomic_t fscache_n_store_ops;
63169-atomic_t fscache_n_store_calls;
63170-atomic_t fscache_n_store_pages;
63171-atomic_t fscache_n_store_radix_deletes;
63172-atomic_t fscache_n_store_pages_over_limit;
63173+atomic_unchecked_t fscache_n_stores;
63174+atomic_unchecked_t fscache_n_stores_ok;
63175+atomic_unchecked_t fscache_n_stores_again;
63176+atomic_unchecked_t fscache_n_stores_nobufs;
63177+atomic_unchecked_t fscache_n_stores_oom;
63178+atomic_unchecked_t fscache_n_store_ops;
63179+atomic_unchecked_t fscache_n_store_calls;
63180+atomic_unchecked_t fscache_n_store_pages;
63181+atomic_unchecked_t fscache_n_store_radix_deletes;
63182+atomic_unchecked_t fscache_n_store_pages_over_limit;
63183
63184-atomic_t fscache_n_store_vmscan_not_storing;
63185-atomic_t fscache_n_store_vmscan_gone;
63186-atomic_t fscache_n_store_vmscan_busy;
63187-atomic_t fscache_n_store_vmscan_cancelled;
63188-atomic_t fscache_n_store_vmscan_wait;
63189+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63190+atomic_unchecked_t fscache_n_store_vmscan_gone;
63191+atomic_unchecked_t fscache_n_store_vmscan_busy;
63192+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63193+atomic_unchecked_t fscache_n_store_vmscan_wait;
63194
63195-atomic_t fscache_n_marks;
63196-atomic_t fscache_n_uncaches;
63197+atomic_unchecked_t fscache_n_marks;
63198+atomic_unchecked_t fscache_n_uncaches;
63199
63200-atomic_t fscache_n_acquires;
63201-atomic_t fscache_n_acquires_null;
63202-atomic_t fscache_n_acquires_no_cache;
63203-atomic_t fscache_n_acquires_ok;
63204-atomic_t fscache_n_acquires_nobufs;
63205-atomic_t fscache_n_acquires_oom;
63206+atomic_unchecked_t fscache_n_acquires;
63207+atomic_unchecked_t fscache_n_acquires_null;
63208+atomic_unchecked_t fscache_n_acquires_no_cache;
63209+atomic_unchecked_t fscache_n_acquires_ok;
63210+atomic_unchecked_t fscache_n_acquires_nobufs;
63211+atomic_unchecked_t fscache_n_acquires_oom;
63212
63213-atomic_t fscache_n_invalidates;
63214-atomic_t fscache_n_invalidates_run;
63215+atomic_unchecked_t fscache_n_invalidates;
63216+atomic_unchecked_t fscache_n_invalidates_run;
63217
63218-atomic_t fscache_n_updates;
63219-atomic_t fscache_n_updates_null;
63220-atomic_t fscache_n_updates_run;
63221+atomic_unchecked_t fscache_n_updates;
63222+atomic_unchecked_t fscache_n_updates_null;
63223+atomic_unchecked_t fscache_n_updates_run;
63224
63225-atomic_t fscache_n_relinquishes;
63226-atomic_t fscache_n_relinquishes_null;
63227-atomic_t fscache_n_relinquishes_waitcrt;
63228-atomic_t fscache_n_relinquishes_retire;
63229+atomic_unchecked_t fscache_n_relinquishes;
63230+atomic_unchecked_t fscache_n_relinquishes_null;
63231+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63232+atomic_unchecked_t fscache_n_relinquishes_retire;
63233
63234-atomic_t fscache_n_cookie_index;
63235-atomic_t fscache_n_cookie_data;
63236-atomic_t fscache_n_cookie_special;
63237+atomic_unchecked_t fscache_n_cookie_index;
63238+atomic_unchecked_t fscache_n_cookie_data;
63239+atomic_unchecked_t fscache_n_cookie_special;
63240
63241-atomic_t fscache_n_object_alloc;
63242-atomic_t fscache_n_object_no_alloc;
63243-atomic_t fscache_n_object_lookups;
63244-atomic_t fscache_n_object_lookups_negative;
63245-atomic_t fscache_n_object_lookups_positive;
63246-atomic_t fscache_n_object_lookups_timed_out;
63247-atomic_t fscache_n_object_created;
63248-atomic_t fscache_n_object_avail;
63249-atomic_t fscache_n_object_dead;
63250+atomic_unchecked_t fscache_n_object_alloc;
63251+atomic_unchecked_t fscache_n_object_no_alloc;
63252+atomic_unchecked_t fscache_n_object_lookups;
63253+atomic_unchecked_t fscache_n_object_lookups_negative;
63254+atomic_unchecked_t fscache_n_object_lookups_positive;
63255+atomic_unchecked_t fscache_n_object_lookups_timed_out;
63256+atomic_unchecked_t fscache_n_object_created;
63257+atomic_unchecked_t fscache_n_object_avail;
63258+atomic_unchecked_t fscache_n_object_dead;
63259
63260-atomic_t fscache_n_checkaux_none;
63261-atomic_t fscache_n_checkaux_okay;
63262-atomic_t fscache_n_checkaux_update;
63263-atomic_t fscache_n_checkaux_obsolete;
63264+atomic_unchecked_t fscache_n_checkaux_none;
63265+atomic_unchecked_t fscache_n_checkaux_okay;
63266+atomic_unchecked_t fscache_n_checkaux_update;
63267+atomic_unchecked_t fscache_n_checkaux_obsolete;
63268
63269 atomic_t fscache_n_cop_alloc_object;
63270 atomic_t fscache_n_cop_lookup_object;
63271@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
63272 seq_puts(m, "FS-Cache statistics\n");
63273
63274 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
63275- atomic_read(&fscache_n_cookie_index),
63276- atomic_read(&fscache_n_cookie_data),
63277- atomic_read(&fscache_n_cookie_special));
63278+ atomic_read_unchecked(&fscache_n_cookie_index),
63279+ atomic_read_unchecked(&fscache_n_cookie_data),
63280+ atomic_read_unchecked(&fscache_n_cookie_special));
63281
63282 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
63283- atomic_read(&fscache_n_object_alloc),
63284- atomic_read(&fscache_n_object_no_alloc),
63285- atomic_read(&fscache_n_object_avail),
63286- atomic_read(&fscache_n_object_dead));
63287+ atomic_read_unchecked(&fscache_n_object_alloc),
63288+ atomic_read_unchecked(&fscache_n_object_no_alloc),
63289+ atomic_read_unchecked(&fscache_n_object_avail),
63290+ atomic_read_unchecked(&fscache_n_object_dead));
63291 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
63292- atomic_read(&fscache_n_checkaux_none),
63293- atomic_read(&fscache_n_checkaux_okay),
63294- atomic_read(&fscache_n_checkaux_update),
63295- atomic_read(&fscache_n_checkaux_obsolete));
63296+ atomic_read_unchecked(&fscache_n_checkaux_none),
63297+ atomic_read_unchecked(&fscache_n_checkaux_okay),
63298+ atomic_read_unchecked(&fscache_n_checkaux_update),
63299+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
63300
63301 seq_printf(m, "Pages : mrk=%u unc=%u\n",
63302- atomic_read(&fscache_n_marks),
63303- atomic_read(&fscache_n_uncaches));
63304+ atomic_read_unchecked(&fscache_n_marks),
63305+ atomic_read_unchecked(&fscache_n_uncaches));
63306
63307 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
63308 " oom=%u\n",
63309- atomic_read(&fscache_n_acquires),
63310- atomic_read(&fscache_n_acquires_null),
63311- atomic_read(&fscache_n_acquires_no_cache),
63312- atomic_read(&fscache_n_acquires_ok),
63313- atomic_read(&fscache_n_acquires_nobufs),
63314- atomic_read(&fscache_n_acquires_oom));
63315+ atomic_read_unchecked(&fscache_n_acquires),
63316+ atomic_read_unchecked(&fscache_n_acquires_null),
63317+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
63318+ atomic_read_unchecked(&fscache_n_acquires_ok),
63319+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
63320+ atomic_read_unchecked(&fscache_n_acquires_oom));
63321
63322 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
63323- atomic_read(&fscache_n_object_lookups),
63324- atomic_read(&fscache_n_object_lookups_negative),
63325- atomic_read(&fscache_n_object_lookups_positive),
63326- atomic_read(&fscache_n_object_created),
63327- atomic_read(&fscache_n_object_lookups_timed_out));
63328+ atomic_read_unchecked(&fscache_n_object_lookups),
63329+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
63330+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
63331+ atomic_read_unchecked(&fscache_n_object_created),
63332+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
63333
63334 seq_printf(m, "Invals : n=%u run=%u\n",
63335- atomic_read(&fscache_n_invalidates),
63336- atomic_read(&fscache_n_invalidates_run));
63337+ atomic_read_unchecked(&fscache_n_invalidates),
63338+ atomic_read_unchecked(&fscache_n_invalidates_run));
63339
63340 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
63341- atomic_read(&fscache_n_updates),
63342- atomic_read(&fscache_n_updates_null),
63343- atomic_read(&fscache_n_updates_run));
63344+ atomic_read_unchecked(&fscache_n_updates),
63345+ atomic_read_unchecked(&fscache_n_updates_null),
63346+ atomic_read_unchecked(&fscache_n_updates_run));
63347
63348 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
63349- atomic_read(&fscache_n_relinquishes),
63350- atomic_read(&fscache_n_relinquishes_null),
63351- atomic_read(&fscache_n_relinquishes_waitcrt),
63352- atomic_read(&fscache_n_relinquishes_retire));
63353+ atomic_read_unchecked(&fscache_n_relinquishes),
63354+ atomic_read_unchecked(&fscache_n_relinquishes_null),
63355+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
63356+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
63357
63358 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
63359- atomic_read(&fscache_n_attr_changed),
63360- atomic_read(&fscache_n_attr_changed_ok),
63361- atomic_read(&fscache_n_attr_changed_nobufs),
63362- atomic_read(&fscache_n_attr_changed_nomem),
63363- atomic_read(&fscache_n_attr_changed_calls));
63364+ atomic_read_unchecked(&fscache_n_attr_changed),
63365+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
63366+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
63367+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
63368+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
63369
63370 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
63371- atomic_read(&fscache_n_allocs),
63372- atomic_read(&fscache_n_allocs_ok),
63373- atomic_read(&fscache_n_allocs_wait),
63374- atomic_read(&fscache_n_allocs_nobufs),
63375- atomic_read(&fscache_n_allocs_intr));
63376+ atomic_read_unchecked(&fscache_n_allocs),
63377+ atomic_read_unchecked(&fscache_n_allocs_ok),
63378+ atomic_read_unchecked(&fscache_n_allocs_wait),
63379+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
63380+ atomic_read_unchecked(&fscache_n_allocs_intr));
63381 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
63382- atomic_read(&fscache_n_alloc_ops),
63383- atomic_read(&fscache_n_alloc_op_waits),
63384- atomic_read(&fscache_n_allocs_object_dead));
63385+ atomic_read_unchecked(&fscache_n_alloc_ops),
63386+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
63387+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
63388
63389 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
63390 " int=%u oom=%u\n",
63391- atomic_read(&fscache_n_retrievals),
63392- atomic_read(&fscache_n_retrievals_ok),
63393- atomic_read(&fscache_n_retrievals_wait),
63394- atomic_read(&fscache_n_retrievals_nodata),
63395- atomic_read(&fscache_n_retrievals_nobufs),
63396- atomic_read(&fscache_n_retrievals_intr),
63397- atomic_read(&fscache_n_retrievals_nomem));
63398+ atomic_read_unchecked(&fscache_n_retrievals),
63399+ atomic_read_unchecked(&fscache_n_retrievals_ok),
63400+ atomic_read_unchecked(&fscache_n_retrievals_wait),
63401+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
63402+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
63403+ atomic_read_unchecked(&fscache_n_retrievals_intr),
63404+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
63405 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
63406- atomic_read(&fscache_n_retrieval_ops),
63407- atomic_read(&fscache_n_retrieval_op_waits),
63408- atomic_read(&fscache_n_retrievals_object_dead));
63409+ atomic_read_unchecked(&fscache_n_retrieval_ops),
63410+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
63411+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
63412
63413 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
63414- atomic_read(&fscache_n_stores),
63415- atomic_read(&fscache_n_stores_ok),
63416- atomic_read(&fscache_n_stores_again),
63417- atomic_read(&fscache_n_stores_nobufs),
63418- atomic_read(&fscache_n_stores_oom));
63419+ atomic_read_unchecked(&fscache_n_stores),
63420+ atomic_read_unchecked(&fscache_n_stores_ok),
63421+ atomic_read_unchecked(&fscache_n_stores_again),
63422+ atomic_read_unchecked(&fscache_n_stores_nobufs),
63423+ atomic_read_unchecked(&fscache_n_stores_oom));
63424 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
63425- atomic_read(&fscache_n_store_ops),
63426- atomic_read(&fscache_n_store_calls),
63427- atomic_read(&fscache_n_store_pages),
63428- atomic_read(&fscache_n_store_radix_deletes),
63429- atomic_read(&fscache_n_store_pages_over_limit));
63430+ atomic_read_unchecked(&fscache_n_store_ops),
63431+ atomic_read_unchecked(&fscache_n_store_calls),
63432+ atomic_read_unchecked(&fscache_n_store_pages),
63433+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
63434+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
63435
63436 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
63437- atomic_read(&fscache_n_store_vmscan_not_storing),
63438- atomic_read(&fscache_n_store_vmscan_gone),
63439- atomic_read(&fscache_n_store_vmscan_busy),
63440- atomic_read(&fscache_n_store_vmscan_cancelled),
63441- atomic_read(&fscache_n_store_vmscan_wait));
63442+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
63443+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
63444+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
63445+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
63446+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
63447
63448 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
63449- atomic_read(&fscache_n_op_pend),
63450- atomic_read(&fscache_n_op_run),
63451- atomic_read(&fscache_n_op_enqueue),
63452- atomic_read(&fscache_n_op_cancelled),
63453- atomic_read(&fscache_n_op_rejected));
63454+ atomic_read_unchecked(&fscache_n_op_pend),
63455+ atomic_read_unchecked(&fscache_n_op_run),
63456+ atomic_read_unchecked(&fscache_n_op_enqueue),
63457+ atomic_read_unchecked(&fscache_n_op_cancelled),
63458+ atomic_read_unchecked(&fscache_n_op_rejected));
63459 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
63460- atomic_read(&fscache_n_op_deferred_release),
63461- atomic_read(&fscache_n_op_release),
63462- atomic_read(&fscache_n_op_gc));
63463+ atomic_read_unchecked(&fscache_n_op_deferred_release),
63464+ atomic_read_unchecked(&fscache_n_op_release),
63465+ atomic_read_unchecked(&fscache_n_op_gc));
63466
63467 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
63468 atomic_read(&fscache_n_cop_alloc_object),
63469diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
63470index 28d0c7a..04816b7 100644
63471--- a/fs/fuse/cuse.c
63472+++ b/fs/fuse/cuse.c
63473@@ -611,10 +611,12 @@ static int __init cuse_init(void)
63474 INIT_LIST_HEAD(&cuse_conntbl[i]);
63475
63476 /* inherit and extend fuse_dev_operations */
63477- cuse_channel_fops = fuse_dev_operations;
63478- cuse_channel_fops.owner = THIS_MODULE;
63479- cuse_channel_fops.open = cuse_channel_open;
63480- cuse_channel_fops.release = cuse_channel_release;
63481+ pax_open_kernel();
63482+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
63483+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
63484+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
63485+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
63486+ pax_close_kernel();
63487
63488 cuse_class = class_create(THIS_MODULE, "cuse");
63489 if (IS_ERR(cuse_class))
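The cuse_init() rewrite above is the constify idiom: under the constify plugin, structures of function pointers such as cuse_channel_fops live in read-only memory, so the one-time init writes must briefly lift write protection via pax_open_kernel()/pax_close_kernel() (on x86 the kernel toggles CR0.WP), and the (void *) casts strip the const qualifier for the memcpy and member stores. A hedged userspace analogue, with mprotect() standing in for the kernel mechanism and all names illustrative:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct file_ops { int (*open)(void); };

static int generic_open(void) { return 0; }
static int cuse_open(void)    { return 1; }

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	struct file_ops *fops = mmap(NULL, (size_t)pg, PROT_READ | PROT_WRITE,
				     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (fops == MAP_FAILED)
		return 1;

	fops->open = generic_open;		/* initial contents      */
	mprotect(fops, (size_t)pg, PROT_READ);	/* now "constified"      */

	mprotect(fops, (size_t)pg, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
	fops->open = cuse_open;				    /* the one legal write */
	mprotect(fops, (size_t)pg, PROT_READ);		    /* pax_close_kernel() */

	printf("open() -> %d\n", fops->open());
	return 0;
}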
63490diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
63491index ed19a7d..91e9a4c 100644
63492--- a/fs/fuse/dev.c
63493+++ b/fs/fuse/dev.c
63494@@ -1394,7 +1394,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63495 ret = 0;
63496 pipe_lock(pipe);
63497
63498- if (!pipe->readers) {
63499+ if (!atomic_read(&pipe->readers)) {
63500 send_sig(SIGPIPE, current, 0);
63501 if (!ret)
63502 ret = -EPIPE;
63503@@ -1423,7 +1423,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63504 page_nr++;
63505 ret += buf->len;
63506
63507- if (pipe->files)
63508+ if (atomic_read(&pipe->files))
63509 do_wakeup = 1;
63510 }
63511
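The atomic_read() calls above depend on struct pipe_inode_info's readers and files fields being converted to atomic_t in include/linux/pipe_fs_i.h elsewhere in this patch, which makes the counters safe against concurrent openers and brings them under the same overflow checking. A C11-atomics sketch of the changed fields (the kernel type is atomic_t, not atomic_int):

#include <stdatomic.h>
#include <stdio.h>

struct pipe_model {
	atomic_int readers;	/* was a plain unsigned int */
	atomic_int files;	/* was a plain int          */
};

int main(void)
{
	struct pipe_model p;
	atomic_init(&p.readers, 0);
	atomic_init(&p.files, 0);

	atomic_fetch_add(&p.readers, 1);	/* a reader opens the pipe     */
	if (!atomic_load(&p.readers))		/* the splice-side check above */
		puts("no readers: would raise SIGPIPE");
	else
		printf("readers = %d\n", atomic_load(&p.readers));
	return 0;
}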
63512diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
63513index 08e7b1a..d91c6ee 100644
63514--- a/fs/fuse/dir.c
63515+++ b/fs/fuse/dir.c
63516@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
63517 return link;
63518 }
63519
63520-static void free_link(char *link)
63521+static void free_link(const char *link)
63522 {
63523 if (!IS_ERR(link))
63524 free_page((unsigned long) link);
63525diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
63526index fd62cae..3494dfa 100644
63527--- a/fs/hostfs/hostfs_kern.c
63528+++ b/fs/hostfs/hostfs_kern.c
63529@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
63530
63531 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
63532 {
63533- char *s = nd_get_link(nd);
63534+ const char *s = nd_get_link(nd);
63535 if (!IS_ERR(s))
63536 __putname(s);
63537 }
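fuse's free_link() and hostfs_put_link() above, like the kernfs and libfs callbacks below, gain const because the patch changes nd_set_link()/nd_get_link() in fs/namei.c (visible further down) to carry const char *. The const qualifier is then stripped exactly once, at the deallocation call. A self-contained model of the pattern:

#include <stdlib.h>
#include <string.h>

static const char *saved_name;	/* stands in for nd->saved_names[depth] */

static void nd_set_link_model(const char *p) { saved_name = p; }
static const char *nd_get_link_model(void)   { return saved_name; }

static void put_link_model(void)
{
	const char *s = nd_get_link_model();
	free((void *)s);	/* one explicit const-strip, at the free site */
}

int main(void)
{
	nd_set_link_model(strdup("/target/of/link"));
	put_link_model();
	return 0;
}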
63538diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
63539index 5eba47f..d353c22 100644
63540--- a/fs/hugetlbfs/inode.c
63541+++ b/fs/hugetlbfs/inode.c
63542@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63543 struct mm_struct *mm = current->mm;
63544 struct vm_area_struct *vma;
63545 struct hstate *h = hstate_file(file);
63546+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
63547 struct vm_unmapped_area_info info;
63548
63549 if (len & ~huge_page_mask(h))
63550@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63551 return addr;
63552 }
63553
63554+#ifdef CONFIG_PAX_RANDMMAP
63555+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
63556+#endif
63557+
63558 if (addr) {
63559 addr = ALIGN(addr, huge_page_size(h));
63560 vma = find_vma(mm, addr);
63561- if (TASK_SIZE - len >= addr &&
63562- (!vma || addr + len <= vma->vm_start))
63563+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
63564 return addr;
63565 }
63566
63567 info.flags = 0;
63568 info.length = len;
63569 info.low_limit = TASK_UNMAPPED_BASE;
63570+
63571+#ifdef CONFIG_PAX_RANDMMAP
63572+ if (mm->pax_flags & MF_PAX_RANDMMAP)
63573+ info.low_limit += mm->delta_mmap;
63574+#endif
63575+
63576 info.high_limit = TASK_SIZE;
63577 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
63578 info.align_offset = 0;
63579@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
63580 };
63581 MODULE_ALIAS_FS("hugetlbfs");
63582
63583-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63584+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63585
63586 static int can_do_hugetlb_shm(void)
63587 {
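The hugetlbfs hunks apply PAX_RANDMMAP to huge-page mappings: when the mm carries MF_PAX_RANDMMAP, the caller's address hint is ignored and the search floor gains a per-process random delta, while check_heap_stack_gap() replaces the open-coded VMA overlap test. A sketch of the base-selection logic, with constants invented for the example:

#include <stdio.h>

#define TASK_UNMAPPED_BASE	0x40000000UL
#define MF_PAX_RANDMMAP		0x01U

struct mm_model { unsigned pax_flags; unsigned long delta_mmap; };

static unsigned long pick_base(const struct mm_model *mm, unsigned long hint)
{
	unsigned long low = TASK_UNMAPPED_BASE;

	if (mm->pax_flags & MF_PAX_RANDMMAP)
		low += mm->delta_mmap;	/* randomized floor             */
	else if (hint)
		return hint;		/* hints honoured only when not
					 * randomizing, as in the hunk  */
	return low;
}

int main(void)
{
	struct mm_model mm = { MF_PAX_RANDMMAP, 0x123000UL };
	printf("search base = %#lx\n", pick_base(&mm, 0x50000000UL));
	return 0;
}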
63588diff --git a/fs/inode.c b/fs/inode.c
63589index aa149e7..46f1f65 100644
63590--- a/fs/inode.c
63591+++ b/fs/inode.c
63592@@ -842,16 +842,20 @@ unsigned int get_next_ino(void)
63593 unsigned int *p = &get_cpu_var(last_ino);
63594 unsigned int res = *p;
63595
63596+start:
63597+
63598 #ifdef CONFIG_SMP
63599 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
63600- static atomic_t shared_last_ino;
63601- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
63602+ static atomic_unchecked_t shared_last_ino;
63603+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
63604
63605 res = next - LAST_INO_BATCH;
63606 }
63607 #endif
63608
63609- *p = ++res;
63610+ if (unlikely(!++res))
63611+ goto start; /* never zero */
63612+ *p = res;
63613 put_cpu_var(last_ino);
63614 return res;
63615 }
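The get_next_ino() change pairs the _unchecked conversion with a wrap guard: the shared counter may legitimately overflow, but inode number 0 is reserved, so the increment retries across the wrap rather than handing out zero. A simplified model of the guard (the real function also batches per-CPU ranges under CONFIG_SMP):

#include <stdio.h>

static unsigned int next_ino(unsigned int *p)
{
	unsigned int res = *p;
start:
	if (++res == 0)		/* unsigned wrap is well defined */
		goto start;	/* never return inode number 0   */
	*p = res;
	return res;
}

int main(void)
{
	unsigned int last = 0xfffffffeu;
	printf("%u\n", next_ino(&last));	/* 4294967295            */
	printf("%u\n", next_ino(&last));	/* 1: zero is skipped    */
	return 0;
}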
63616diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
63617index 4a6cf28..d3a29d3 100644
63618--- a/fs/jffs2/erase.c
63619+++ b/fs/jffs2/erase.c
63620@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
63621 struct jffs2_unknown_node marker = {
63622 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
63623 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63624- .totlen = cpu_to_je32(c->cleanmarker_size)
63625+ .totlen = cpu_to_je32(c->cleanmarker_size),
63626+ .hdr_crc = cpu_to_je32(0)
63627 };
63628
63629 jffs2_prealloc_raw_node_refs(c, jeb, 1);
63630diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
63631index 09ed551..45684f8 100644
63632--- a/fs/jffs2/wbuf.c
63633+++ b/fs/jffs2/wbuf.c
63634@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
63635 {
63636 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
63637 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63638- .totlen = constant_cpu_to_je32(8)
63639+ .totlen = constant_cpu_to_je32(8),
63640+ .hdr_crc = constant_cpu_to_je32(0)
63641 };
63642
63643 /*
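Both jffs2 cleanmarker initializers above gain an explicit .hdr_crc = ...(0). C already zero-initializes omitted members of a designated initializer, so the bytes written to flash are unchanged; the plausible motive (an assumption, the patch states none) is to make the deliberately-zero CRC field explicit and to satisfy stricter initialization analysis in the hardened build. A check that the two spellings produce identical objects:

#include <assert.h>
#include <string.h>

struct node { unsigned short magic, nodetype; unsigned totlen, hdr_crc; };

/* static objects: members and padding are both zero-initialized */
static struct node a = { .magic = 0x1985, .totlen = 12 };
static struct node b = { .magic = 0x1985, .totlen = 12, .hdr_crc = 0 };

int main(void)
{
	assert(memcmp(&a, &b, sizeof a) == 0);	/* identical bytes either way */
	return 0;
}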
63644diff --git a/fs/jfs/super.c b/fs/jfs/super.c
63645index 16c3a95..e9cb75d 100644
63646--- a/fs/jfs/super.c
63647+++ b/fs/jfs/super.c
63648@@ -902,7 +902,7 @@ static int __init init_jfs_fs(void)
63649
63650 jfs_inode_cachep =
63651 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
63652- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
63653+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
63654 init_once);
63655 if (jfs_inode_cachep == NULL)
63656 return -ENOMEM;
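SLAB_USERCOPY marks the jfs_ip cache as a legitimate endpoint for user copies: under PAX_USERCOPY, copy_to_user()/copy_from_user() against slab memory is refused unless the backing cache carries this whitelist flag (jfs inodes presumably qualify because inline inode data is copied out to userspace; that rationale is an assumption). A toy model of the whitelist check, with an invented flag value:

#include <stdio.h>

#define SLAB_USERCOPY 0x1U	/* illustrative value only */

struct kmem_cache_model { const char *name; unsigned flags; };

static int check_usercopy(const struct kmem_cache_model *c)
{
	if (!(c->flags & SLAB_USERCOPY)) {
		fprintf(stderr, "usercopy: refusing copy from %s\n", c->name);
		return -1;	/* the kernel reports and kills instead */
	}
	return 0;
}

int main(void)
{
	struct kmem_cache_model jfs_ip = { "jfs_ip", SLAB_USERCOPY };
	struct kmem_cache_model other  = { "dentry", 0 };

	check_usercopy(&jfs_ip);	/* allowed                 */
	check_usercopy(&other);		/* refused: not whitelisted */
	return 0;
}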
63657diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
63658index 2d881b3..fe1ac77 100644
63659--- a/fs/kernfs/dir.c
63660+++ b/fs/kernfs/dir.c
63661@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
63662 *
63663 * Returns 31 bit hash of ns + name (so it fits in an off_t )
63664 */
63665-static unsigned int kernfs_name_hash(const char *name, const void *ns)
63666+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
63667 {
63668 unsigned long hash = init_name_hash();
63669 unsigned int len = strlen(name);
63670@@ -833,6 +833,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
63671 ret = scops->mkdir(parent, dentry->d_name.name, mode);
63672
63673 kernfs_put_active(parent);
63674+
63675+ if (!ret) {
63676+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
63677+ ret = PTR_ERR_OR_ZERO(dentry_ret);
63678+ }
63679+
63680 return ret;
63681 }
63682
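After a successful scops->mkdir() the dentry handed to kernfs_iop_mkdir() is still negative, since kernfs instantiates nodes lazily; the added kernfs_iop_lookup() call makes it positive immediately, which later grsecurity checks appear to rely on (an assumption), and PTR_ERR_OR_ZERO() folds the lookup outcome into the return code. A userspace model of that error-pointer idiom:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* model of the kernel's error-pointer convention */
static int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static long ptr_err_or_zero(const void *p)
{
	return is_err(p) ? (long)p : 0;
}

int main(void)
{
	void *good = &good;			/* an ordinary pointer    */
	void *bad  = (void *)(long)-ENOMEM;	/* an encoded error value */

	printf("%ld %ld\n", ptr_err_or_zero(good), ptr_err_or_zero(bad));
	return 0;
}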
63683diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
63684index ddc9f96..4e450ad 100644
63685--- a/fs/kernfs/file.c
63686+++ b/fs/kernfs/file.c
63687@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
63688
63689 struct kernfs_open_node {
63690 atomic_t refcnt;
63691- atomic_t event;
63692+ atomic_unchecked_t event;
63693 wait_queue_head_t poll;
63694 struct list_head files; /* goes through kernfs_open_file.list */
63695 };
63696@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
63697 {
63698 struct kernfs_open_file *of = sf->private;
63699
63700- of->event = atomic_read(&of->kn->attr.open->event);
63701+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
63702
63703 return of->kn->attr.ops->seq_show(sf, v);
63704 }
63705@@ -271,7 +271,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
63706 {
63707 struct kernfs_open_file *of = kernfs_of(file);
63708 const struct kernfs_ops *ops;
63709- size_t len;
63710+ ssize_t len;
63711 char *buf;
63712
63713 if (of->atomic_write_len) {
63714@@ -384,12 +384,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
63715 return ret;
63716 }
63717
63718-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63719- void *buf, int len, int write)
63720+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63721+ void *buf, size_t len, int write)
63722 {
63723 struct file *file = vma->vm_file;
63724 struct kernfs_open_file *of = kernfs_of(file);
63725- int ret;
63726+ ssize_t ret;
63727
63728 if (!of->vm_ops)
63729 return -EINVAL;
63730@@ -568,7 +568,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
63731 return -ENOMEM;
63732
63733 atomic_set(&new_on->refcnt, 0);
63734- atomic_set(&new_on->event, 1);
63735+ atomic_set_unchecked(&new_on->event, 1);
63736 init_waitqueue_head(&new_on->poll);
63737 INIT_LIST_HEAD(&new_on->files);
63738 goto retry;
63739@@ -792,7 +792,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
63740
63741 kernfs_put_active(kn);
63742
63743- if (of->event != atomic_read(&on->event))
63744+ if (of->event != atomic_read_unchecked(&on->event))
63745 goto trigger;
63746
63747 return DEFAULT_POLLMASK;
63748@@ -823,7 +823,7 @@ repeat:
63749
63750 on = kn->attr.open;
63751 if (on) {
63752- atomic_inc(&on->event);
63753+ atomic_inc_unchecked(&on->event);
63754 wake_up_interruptible(&on->poll);
63755 }
63756
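The kernfs_fop_write() len and the kernfs_vma_access() length and return types above go from size_t/int to ssize_t: a length variable that can be assigned a negative errno must be signed, or the error turns into an enormous byte count that sails past bounds checks. An illustration of the failure mode:

#include <stdio.h>
#include <sys/types.h>

/* stand-in for an op returning a byte count or a negative errno */
static long fake_copy(long want) { return want > 16 ? -22 /* -EINVAL */ : want; }

int main(void)
{
	size_t  ulen = (size_t)fake_copy(64);	/* -22 becomes a huge count */
	ssize_t slen = (ssize_t)fake_copy(64);	/* -22 stays negative       */

	printf("unsigned bound check passes? %s\n", ulen > 0 ? "yes (bug)" : "no");
	printf("signed   bound check passes? %s\n", slen > 0 ? "yes" : "no (caught)");
	return 0;
}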
63757diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
63758index 8a19889..4c3069a 100644
63759--- a/fs/kernfs/symlink.c
63760+++ b/fs/kernfs/symlink.c
63761@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
63762 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
63763 void *cookie)
63764 {
63765- char *page = nd_get_link(nd);
63766+ const char *page = nd_get_link(nd);
63767 if (!IS_ERR(page))
63768 free_page((unsigned long)page);
63769 }
63770diff --git a/fs/libfs.c b/fs/libfs.c
63771index 005843c..06c4191 100644
63772--- a/fs/libfs.c
63773+++ b/fs/libfs.c
63774@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63775
63776 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
63777 struct dentry *next = list_entry(p, struct dentry, d_child);
63778+ char d_name[sizeof(next->d_iname)];
63779+ const unsigned char *name;
63780+
63781 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
63782 if (!simple_positive(next)) {
63783 spin_unlock(&next->d_lock);
63784@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63785
63786 spin_unlock(&next->d_lock);
63787 spin_unlock(&dentry->d_lock);
63788- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
63789+ name = next->d_name.name;
63790+ if (name == next->d_iname) {
63791+ memcpy(d_name, name, next->d_name.len);
63792+ name = d_name;
63793+ }
63794+ if (!dir_emit(ctx, name, next->d_name.len,
63795 next->d_inode->i_ino, dt_type(next->d_inode)))
63796 return 0;
63797 spin_lock(&dentry->d_lock);
63798@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
63799 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
63800 void *cookie)
63801 {
63802- char *s = nd_get_link(nd);
63803+ const char *s = nd_get_link(nd);
63804 if (!IS_ERR(s))
63805 kfree(s);
63806 }
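The dcache_readdir() change snapshots short names before use: d_name.name may point into the dentry's inline d_iname array, and once d_lock is dropped a concurrent rename can rewrite that array in place, so the bytes are copied to a stack buffer while still stable. A model of the race and the fix (the NUL termination is only for printing here; the real code passes an explicit length to dir_emit()):

#include <stdio.h>
#include <string.h>

#define DNAME_INLINE_LEN 32

struct dentry_model {
	char d_iname[DNAME_INLINE_LEN];	/* inline storage for short names */
	const char *d_name;		/* may point at d_iname           */
	unsigned d_len;
};

int main(void)
{
	struct dentry_model d = { "before", NULL, 6 };
	char snap[DNAME_INLINE_LEN];

	d.d_name = d.d_iname;

	memcpy(snap, d.d_name, d.d_len);	/* taken while under d_lock */
	snap[d.d_len] = '\0';

	strcpy(d.d_iname, "after-rename");	/* concurrent d_move()      */
	printf("emitted: %s\n", snap);		/* still "before"           */
	return 0;
}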
63807diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
63808index acd3947..1f896e2 100644
63809--- a/fs/lockd/clntproc.c
63810+++ b/fs/lockd/clntproc.c
63811@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
63812 /*
63813 * Cookie counter for NLM requests
63814 */
63815-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
63816+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
63817
63818 void nlmclnt_next_cookie(struct nlm_cookie *c)
63819 {
63820- u32 cookie = atomic_inc_return(&nlm_cookie);
63821+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
63822
63823 memcpy(c->data, &cookie, 4);
63824 c->len=4;
63825diff --git a/fs/locks.c b/fs/locks.c
63826index 59e2f90..bd69071 100644
63827--- a/fs/locks.c
63828+++ b/fs/locks.c
63829@@ -2374,7 +2374,7 @@ void locks_remove_file(struct file *filp)
63830 locks_remove_posix(filp, filp);
63831
63832 if (filp->f_op->flock) {
63833- struct file_lock fl = {
63834+ struct file_lock flock = {
63835 .fl_owner = filp,
63836 .fl_pid = current->tgid,
63837 .fl_file = filp,
63838@@ -2382,9 +2382,9 @@ void locks_remove_file(struct file *filp)
63839 .fl_type = F_UNLCK,
63840 .fl_end = OFFSET_MAX,
63841 };
63842- filp->f_op->flock(filp, F_SETLKW, &fl);
63843- if (fl.fl_ops && fl.fl_ops->fl_release_private)
63844- fl.fl_ops->fl_release_private(&fl);
63845+ filp->f_op->flock(filp, F_SETLKW, &flock);
63846+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
63847+ flock.fl_ops->fl_release_private(&flock);
63848 }
63849
63850 spin_lock(&inode->i_lock);
63851diff --git a/fs/mount.h b/fs/mount.h
63852index 0ad6f76..a04c146 100644
63853--- a/fs/mount.h
63854+++ b/fs/mount.h
63855@@ -12,7 +12,7 @@ struct mnt_namespace {
63856 u64 seq; /* Sequence number to prevent loops */
63857 wait_queue_head_t poll;
63858 u64 event;
63859-};
63860+} __randomize_layout;
63861
63862 struct mnt_pcp {
63863 int mnt_count;
63864@@ -63,7 +63,7 @@ struct mount {
63865 int mnt_expiry_mark; /* true if marked for expiry */
63866 struct hlist_head mnt_pins;
63867 struct path mnt_ex_mountpoint;
63868-};
63869+} __randomize_layout;
63870
63871 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
63872
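__randomize_layout on struct mnt_namespace and struct mount feeds the RANDSTRUCT gcc plugin, which permutes member order per build so an exploit cannot rely on fixed field offsets inside these frequently targeted structures; code must reference members by name only. A sketch; the macro expands to the plugin attribute in-tree and to nothing here:

#include <stddef.h>
#include <stdio.h>

#define __randomize_layout	/* plugin attribute in the hardened tree */

struct mnt_ns_model {
	unsigned long long seq;
	unsigned long long event;
} __randomize_layout;

int main(void)
{
	/* With randomization active, nothing guarantees seq sits first. */
	printf("offsetof(seq) = %zu\n", offsetof(struct mnt_ns_model, seq));
	return 0;
}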
63873diff --git a/fs/namei.c b/fs/namei.c
63874index bc35b02..7ed1f1d 100644
63875--- a/fs/namei.c
63876+++ b/fs/namei.c
63877@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
63878 if (ret != -EACCES)
63879 return ret;
63880
63881+#ifdef CONFIG_GRKERNSEC
63882+ /* we'll block if we have to log due to a denied capability use */
63883+ if (mask & MAY_NOT_BLOCK)
63884+ return -ECHILD;
63885+#endif
63886+
63887 if (S_ISDIR(inode->i_mode)) {
63888 /* DACs are overridable for directories */
63889- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63890- return 0;
63891 if (!(mask & MAY_WRITE))
63892- if (capable_wrt_inode_uidgid(inode,
63893- CAP_DAC_READ_SEARCH))
63894+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
63895+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63896 return 0;
63897+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63898+ return 0;
63899 return -EACCES;
63900 }
63901 /*
63902+ * Searching includes executable on directories, else just read.
63903+ */
63904+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
63905+ if (mask == MAY_READ)
63906+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
63907+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63908+ return 0;
63909+
63910+ /*
63911 * Read/write DACs are always overridable.
63912 * Executable DACs are overridable when there is
63913 * at least one exec bit set.
63914@@ -350,14 +365,6 @@ int generic_permission(struct inode *inode, int mask)
63915 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63916 return 0;
63917
63918- /*
63919- * Searching includes executable on directories, else just read.
63920- */
63921- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
63922- if (mask == MAY_READ)
63923- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63924- return 0;
63925-
63926 return -EACCES;
63927 }
63928 EXPORT_SYMBOL(generic_permission);
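The generic_permission() reordering above probes CAP_DAC_READ_SEARCH, plus a non-logging CAP_DAC_OVERRIDE variant, before falling back to the audited CAP_DAC_OVERRIDE check, so routine read/search access by capable tasks no longer logs an override use; the CONFIG_GRKERNSEC hunk also bails out of RCU-walk with -ECHILD because emitting such a log may block. A sketch of the ordering, with stub predicates standing in for capable_wrt_inode_uidgid*():

#include <stdio.h>

enum cap { CAP_DAC_OVERRIDE, CAP_DAC_READ_SEARCH };

static int capable_nolog(enum cap c) { return c == CAP_DAC_OVERRIDE; } /* stub */
static int capable_logged(enum cap c)
{
	printf("audit: capability %d used\n", c);
	return 1;	/* stub: grant, but leave a log entry */
}

static int dir_permission(int want_write)
{
	if (!want_write &&
	    (capable_nolog(CAP_DAC_OVERRIDE) ||
	     capable_logged(CAP_DAC_READ_SEARCH)))
		return 0;			/* reads: no OVERRIDE log */
	if (capable_logged(CAP_DAC_OVERRIDE))	/* writes: still audited  */
		return 0;
	return -13;	/* -EACCES */
}

int main(void)
{
	return dir_permission(0);	/* grants silently in this model */
}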
63929@@ -497,7 +504,7 @@ struct nameidata {
63930 int last_type;
63931 unsigned depth;
63932 struct file *base;
63933- char *saved_names[MAX_NESTED_LINKS + 1];
63934+ const char *saved_names[MAX_NESTED_LINKS + 1];
63935 };
63936
63937 /*
63938@@ -708,13 +715,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
63939 nd->flags |= LOOKUP_JUMPED;
63940 }
63941
63942-void nd_set_link(struct nameidata *nd, char *path)
63943+void nd_set_link(struct nameidata *nd, const char *path)
63944 {
63945 nd->saved_names[nd->depth] = path;
63946 }
63947 EXPORT_SYMBOL(nd_set_link);
63948
63949-char *nd_get_link(struct nameidata *nd)
63950+const char *nd_get_link(const struct nameidata *nd)
63951 {
63952 return nd->saved_names[nd->depth];
63953 }
63954@@ -849,7 +856,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
63955 {
63956 struct dentry *dentry = link->dentry;
63957 int error;
63958- char *s;
63959+ const char *s;
63960
63961 BUG_ON(nd->flags & LOOKUP_RCU);
63962
63963@@ -870,6 +877,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
63964 if (error)
63965 goto out_put_nd_path;
63966
63967+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
63968+ dentry->d_inode, dentry, nd->path.mnt)) {
63969+ error = -EACCES;
63970+ goto out_put_nd_path;
63971+ }
63972+
63973 nd->last_type = LAST_BIND;
63974 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
63975 error = PTR_ERR(*p);
63976@@ -1633,6 +1646,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
63977 if (res)
63978 break;
63979 res = walk_component(nd, path, LOOKUP_FOLLOW);
63980+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
63981+ res = -EACCES;
63982 put_link(nd, &link, cookie);
63983 } while (res > 0);
63984
63985@@ -1705,7 +1720,7 @@ EXPORT_SYMBOL(full_name_hash);
63986 static inline u64 hash_name(const char *name)
63987 {
63988 unsigned long a, b, adata, bdata, mask, hash, len;
63989- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
63990+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
63991
63992 hash = a = 0;
63993 len = -sizeof(unsigned long);
63994@@ -2000,6 +2015,8 @@ static int path_lookupat(int dfd, const char *name,
63995 if (err)
63996 break;
63997 err = lookup_last(nd, &path);
63998+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
63999+ err = -EACCES;
64000 put_link(nd, &link, cookie);
64001 }
64002 }
64003@@ -2007,6 +2024,13 @@ static int path_lookupat(int dfd, const char *name,
64004 if (!err)
64005 err = complete_walk(nd);
64006
64007+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
64008+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64009+ path_put(&nd->path);
64010+ err = -ENOENT;
64011+ }
64012+ }
64013+
64014 if (!err && nd->flags & LOOKUP_DIRECTORY) {
64015 if (!d_can_lookup(nd->path.dentry)) {
64016 path_put(&nd->path);
64017@@ -2028,8 +2052,15 @@ static int filename_lookup(int dfd, struct filename *name,
64018 retval = path_lookupat(dfd, name->name,
64019 flags | LOOKUP_REVAL, nd);
64020
64021- if (likely(!retval))
64022+ if (likely(!retval)) {
64023 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
64024+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
64025+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
64026+ path_put(&nd->path);
64027+ return -ENOENT;
64028+ }
64029+ }
64030+ }
64031 return retval;
64032 }
64033
64034@@ -2595,6 +2626,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
64035 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
64036 return -EPERM;
64037
64038+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
64039+ return -EPERM;
64040+ if (gr_handle_rawio(inode))
64041+ return -EPERM;
64042+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
64043+ return -EACCES;
64044+
64045 return 0;
64046 }
64047
64048@@ -2826,7 +2864,7 @@ looked_up:
64049 * cleared otherwise prior to returning.
64050 */
64051 static int lookup_open(struct nameidata *nd, struct path *path,
64052- struct file *file,
64053+ struct path *link, struct file *file,
64054 const struct open_flags *op,
64055 bool got_write, int *opened)
64056 {
64057@@ -2861,6 +2899,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64058 /* Negative dentry, just create the file */
64059 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
64060 umode_t mode = op->mode;
64061+
64062+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
64063+ error = -EACCES;
64064+ goto out_dput;
64065+ }
64066+
64067+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
64068+ error = -EACCES;
64069+ goto out_dput;
64070+ }
64071+
64072 if (!IS_POSIXACL(dir->d_inode))
64073 mode &= ~current_umask();
64074 /*
64075@@ -2882,6 +2931,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64076 nd->flags & LOOKUP_EXCL);
64077 if (error)
64078 goto out_dput;
64079+ else
64080+ gr_handle_create(dentry, nd->path.mnt);
64081 }
64082 out_no_open:
64083 path->dentry = dentry;
64084@@ -2896,7 +2947,7 @@ out_dput:
64085 /*
64086 * Handle the last step of open()
64087 */
64088-static int do_last(struct nameidata *nd, struct path *path,
64089+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
64090 struct file *file, const struct open_flags *op,
64091 int *opened, struct filename *name)
64092 {
64093@@ -2946,6 +2997,15 @@ static int do_last(struct nameidata *nd, struct path *path,
64094 if (error)
64095 return error;
64096
64097+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
64098+ error = -ENOENT;
64099+ goto out;
64100+ }
64101+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64102+ error = -EACCES;
64103+ goto out;
64104+ }
64105+
64106 audit_inode(name, dir, LOOKUP_PARENT);
64107 error = -EISDIR;
64108 /* trailing slashes? */
64109@@ -2965,7 +3025,7 @@ retry_lookup:
64110 */
64111 }
64112 mutex_lock(&dir->d_inode->i_mutex);
64113- error = lookup_open(nd, path, file, op, got_write, opened);
64114+ error = lookup_open(nd, path, link, file, op, got_write, opened);
64115 mutex_unlock(&dir->d_inode->i_mutex);
64116
64117 if (error <= 0) {
64118@@ -2989,11 +3049,28 @@ retry_lookup:
64119 goto finish_open_created;
64120 }
64121
64122+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
64123+ error = -ENOENT;
64124+ goto exit_dput;
64125+ }
64126+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
64127+ error = -EACCES;
64128+ goto exit_dput;
64129+ }
64130+
64131 /*
64132 * create/update audit record if it already exists.
64133 */
64134- if (d_is_positive(path->dentry))
64135+ if (d_is_positive(path->dentry)) {
64136+ /* only check if O_CREAT is specified, all other checks need to go
64137+ into may_open */
64138+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
64139+ error = -EACCES;
64140+ goto exit_dput;
64141+ }
64142+
64143 audit_inode(name, path->dentry, 0);
64144+ }
64145
64146 /*
64147 * If atomic_open() acquired write access it is dropped now due to
64148@@ -3034,6 +3111,11 @@ finish_lookup:
64149 }
64150 }
64151 BUG_ON(inode != path->dentry->d_inode);
64152+ /* if we're resolving a symlink to another symlink */
64153+ if (link && gr_handle_symlink_owner(link, inode)) {
64154+ error = -EACCES;
64155+ goto out;
64156+ }
64157 return 1;
64158 }
64159
64160@@ -3053,7 +3135,18 @@ finish_open:
64161 path_put(&save_parent);
64162 return error;
64163 }
64164+
64165+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64166+ error = -ENOENT;
64167+ goto out;
64168+ }
64169+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64170+ error = -EACCES;
64171+ goto out;
64172+ }
64173+
64174 audit_inode(name, nd->path.dentry, 0);
64175+
64176 error = -EISDIR;
64177 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
64178 goto out;
64179@@ -3214,7 +3307,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64180 if (unlikely(error))
64181 goto out;
64182
64183- error = do_last(nd, &path, file, op, &opened, pathname);
64184+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
64185 while (unlikely(error > 0)) { /* trailing symlink */
64186 struct path link = path;
64187 void *cookie;
64188@@ -3232,7 +3325,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64189 error = follow_link(&link, nd, &cookie);
64190 if (unlikely(error))
64191 break;
64192- error = do_last(nd, &path, file, op, &opened, pathname);
64193+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
64194 put_link(nd, &link, cookie);
64195 }
64196 out:
64197@@ -3329,9 +3422,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
64198 goto unlock;
64199
64200 error = -EEXIST;
64201- if (d_is_positive(dentry))
64202+ if (d_is_positive(dentry)) {
64203+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
64204+ error = -ENOENT;
64205 goto fail;
64206-
64207+ }
64208 /*
64209 * Special case - lookup gave negative, but... we had foo/bar/
64210 * From the vfs_mknod() POV we just have a negative dentry -
64211@@ -3383,6 +3478,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
64212 }
64213 EXPORT_SYMBOL(user_path_create);
64214
64215+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
64216+{
64217+ struct filename *tmp = getname(pathname);
64218+ struct dentry *res;
64219+ if (IS_ERR(tmp))
64220+ return ERR_CAST(tmp);
64221+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
64222+ if (IS_ERR(res))
64223+ putname(tmp);
64224+ else
64225+ *to = tmp;
64226+ return res;
64227+}
64228+
64229 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
64230 {
64231 int error = may_create(dir, dentry);
64232@@ -3446,6 +3555,17 @@ retry:
64233
64234 if (!IS_POSIXACL(path.dentry->d_inode))
64235 mode &= ~current_umask();
64236+
64237+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
64238+ error = -EPERM;
64239+ goto out;
64240+ }
64241+
64242+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
64243+ error = -EACCES;
64244+ goto out;
64245+ }
64246+
64247 error = security_path_mknod(&path, dentry, mode, dev);
64248 if (error)
64249 goto out;
64250@@ -3461,6 +3581,8 @@ retry:
64251 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
64252 break;
64253 }
64254+ if (!error)
64255+ gr_handle_create(dentry, path.mnt);
64256 out:
64257 done_path_create(&path, dentry);
64258 if (retry_estale(error, lookup_flags)) {
64259@@ -3515,9 +3637,16 @@ retry:
64260
64261 if (!IS_POSIXACL(path.dentry->d_inode))
64262 mode &= ~current_umask();
64263+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
64264+ error = -EACCES;
64265+ goto out;
64266+ }
64267 error = security_path_mkdir(&path, dentry, mode);
64268 if (!error)
64269 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
64270+ if (!error)
64271+ gr_handle_create(dentry, path.mnt);
64272+out:
64273 done_path_create(&path, dentry);
64274 if (retry_estale(error, lookup_flags)) {
64275 lookup_flags |= LOOKUP_REVAL;
64276@@ -3601,6 +3730,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
64277 struct filename *name;
64278 struct dentry *dentry;
64279 struct nameidata nd;
64280+ u64 saved_ino = 0;
64281+ dev_t saved_dev = 0;
64282 unsigned int lookup_flags = 0;
64283 retry:
64284 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64285@@ -3633,10 +3764,21 @@ retry:
64286 error = -ENOENT;
64287 goto exit3;
64288 }
64289+
64290+ saved_ino = gr_get_ino_from_dentry(dentry);
64291+ saved_dev = gr_get_dev_from_dentry(dentry);
64292+
64293+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
64294+ error = -EACCES;
64295+ goto exit3;
64296+ }
64297+
64298 error = security_path_rmdir(&nd.path, dentry);
64299 if (error)
64300 goto exit3;
64301 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
64302+ if (!error && (saved_dev || saved_ino))
64303+ gr_handle_delete(saved_ino, saved_dev);
64304 exit3:
64305 dput(dentry);
64306 exit2:
64307@@ -3729,6 +3871,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
64308 struct nameidata nd;
64309 struct inode *inode = NULL;
64310 struct inode *delegated_inode = NULL;
64311+ u64 saved_ino = 0;
64312+ dev_t saved_dev = 0;
64313 unsigned int lookup_flags = 0;
64314 retry:
64315 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64316@@ -3755,10 +3899,22 @@ retry_deleg:
64317 if (d_is_negative(dentry))
64318 goto slashes;
64319 ihold(inode);
64320+
64321+ if (inode->i_nlink <= 1) {
64322+ saved_ino = gr_get_ino_from_dentry(dentry);
64323+ saved_dev = gr_get_dev_from_dentry(dentry);
64324+ }
64325+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
64326+ error = -EACCES;
64327+ goto exit2;
64328+ }
64329+
64330 error = security_path_unlink(&nd.path, dentry);
64331 if (error)
64332 goto exit2;
64333 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
64334+ if (!error && (saved_ino || saved_dev))
64335+ gr_handle_delete(saved_ino, saved_dev);
64336 exit2:
64337 dput(dentry);
64338 }
64339@@ -3847,9 +4003,17 @@ retry:
64340 if (IS_ERR(dentry))
64341 goto out_putname;
64342
64343+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
64344+ error = -EACCES;
64345+ goto out;
64346+ }
64347+
64348 error = security_path_symlink(&path, dentry, from->name);
64349 if (!error)
64350 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
64351+ if (!error)
64352+ gr_handle_create(dentry, path.mnt);
64353+out:
64354 done_path_create(&path, dentry);
64355 if (retry_estale(error, lookup_flags)) {
64356 lookup_flags |= LOOKUP_REVAL;
64357@@ -3953,6 +4117,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
64358 struct dentry *new_dentry;
64359 struct path old_path, new_path;
64360 struct inode *delegated_inode = NULL;
64361+ struct filename *to = NULL;
64362 int how = 0;
64363 int error;
64364
64365@@ -3976,7 +4141,7 @@ retry:
64366 if (error)
64367 return error;
64368
64369- new_dentry = user_path_create(newdfd, newname, &new_path,
64370+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
64371 (how & LOOKUP_REVAL));
64372 error = PTR_ERR(new_dentry);
64373 if (IS_ERR(new_dentry))
64374@@ -3988,11 +4153,28 @@ retry:
64375 error = may_linkat(&old_path);
64376 if (unlikely(error))
64377 goto out_dput;
64378+
64379+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
64380+ old_path.dentry->d_inode,
64381+ old_path.dentry->d_inode->i_mode, to)) {
64382+ error = -EACCES;
64383+ goto out_dput;
64384+ }
64385+
64386+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
64387+ old_path.dentry, old_path.mnt, to)) {
64388+ error = -EACCES;
64389+ goto out_dput;
64390+ }
64391+
64392 error = security_path_link(old_path.dentry, &new_path, new_dentry);
64393 if (error)
64394 goto out_dput;
64395 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
64396+ if (!error)
64397+ gr_handle_create(new_dentry, new_path.mnt);
64398 out_dput:
64399+ putname(to);
64400 done_path_create(&new_path, new_dentry);
64401 if (delegated_inode) {
64402 error = break_deleg_wait(&delegated_inode);
64403@@ -4308,6 +4490,20 @@ retry_deleg:
64404 if (new_dentry == trap)
64405 goto exit5;
64406
64407+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
64408+ /* use EXDEV error to cause 'mv' to switch to an alternative
64409+ * method for usability
64410+ */
64411+ error = -EXDEV;
64412+ goto exit5;
64413+ }
64414+
64415+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
64416+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
64417+ to, flags);
64418+ if (error)
64419+ goto exit5;
64420+
64421 error = security_path_rename(&oldnd.path, old_dentry,
64422 &newnd.path, new_dentry, flags);
64423 if (error)
64424@@ -4315,6 +4511,9 @@ retry_deleg:
64425 error = vfs_rename(old_dir->d_inode, old_dentry,
64426 new_dir->d_inode, new_dentry,
64427 &delegated_inode, flags);
64428+ if (!error)
64429+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
64430+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
64431 exit5:
64432 dput(new_dentry);
64433 exit4:
64434@@ -4371,14 +4570,24 @@ EXPORT_SYMBOL(vfs_whiteout);
64435
64436 int readlink_copy(char __user *buffer, int buflen, const char *link)
64437 {
64438+ char tmpbuf[64];
64439+ const char *newlink;
64440 int len = PTR_ERR(link);
64441+
64442 if (IS_ERR(link))
64443 goto out;
64444
64445 len = strlen(link);
64446 if (len > (unsigned) buflen)
64447 len = buflen;
64448- if (copy_to_user(buffer, link, len))
64449+
64450+ if (len < sizeof(tmpbuf)) {
64451+ memcpy(tmpbuf, link, len);
64452+ newlink = tmpbuf;
64453+ } else
64454+ newlink = link;
64455+
64456+ if (copy_to_user(buffer, newlink, len))
64457 len = -EFAULT;
64458 out:
64459 return len;
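
The readlink_copy() hunk above stages link targets shorter than 64 bytes in an on-stack buffer before handing them to copy_to_user(). The likely motivation (an assumption, not stated in the patch) is PaX USERCOPY-style object checking: copying straight from a slab- or inode-embedded string to userland can trip heap-object bounds checks, while a plain stack buffer cannot. A minimal userspace sketch of the same bounce-buffer shape, with memcpy() standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

#define BOUNCE_MAX 64

/* Stand-in for copy_to_user(); returns 0 on success like the real one. */
static int fake_copy_to_user(char *dst, const char *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static long bounce_copy(char *user_buf, size_t buflen, const char *link)
{
	char tmpbuf[BOUNCE_MAX];
	const char *src = link;
	size_t len = strlen(link);

	if (len > buflen)
		len = buflen;

	/* Short targets are staged on the stack so the copy source is a
	 * plain local buffer rather than a heap/slab-resident object. */
	if (len < sizeof(tmpbuf)) {
		memcpy(tmpbuf, link, len);
		src = tmpbuf;
	}

	return fake_copy_to_user(user_buf, src, len) ? -1 : (long)len;
}

int main(void)
{
	char out[128];
	long n = bounce_copy(out, sizeof(out), "/usr/bin/python3");

	printf("copied %ld bytes: %.*s\n", n, (int)n, out);
	return 0;
}
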
64460diff --git a/fs/namespace.c b/fs/namespace.c
64461index cd1e968..e64ff16 100644
64462--- a/fs/namespace.c
64463+++ b/fs/namespace.c
64464@@ -1448,6 +1448,9 @@ static int do_umount(struct mount *mnt, int flags)
64465 if (!(sb->s_flags & MS_RDONLY))
64466 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
64467 up_write(&sb->s_umount);
64468+
64469+ gr_log_remount(mnt->mnt_devname, retval);
64470+
64471 return retval;
64472 }
64473
64474@@ -1470,6 +1473,9 @@ static int do_umount(struct mount *mnt, int flags)
64475 }
64476 unlock_mount_hash();
64477 namespace_unlock();
64478+
64479+ gr_log_unmount(mnt->mnt_devname, retval);
64480+
64481 return retval;
64482 }
64483
64484@@ -1520,7 +1526,7 @@ static inline bool may_mount(void)
64485 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
64486 */
64487
64488-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
64489+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
64490 {
64491 struct path path;
64492 struct mount *mnt;
64493@@ -1565,7 +1571,7 @@ out:
64494 /*
64495 * The 2.0 compatible umount. No flags.
64496 */
64497-SYSCALL_DEFINE1(oldumount, char __user *, name)
64498+SYSCALL_DEFINE1(oldumount, const char __user *, name)
64499 {
64500 return sys_umount(name, 0);
64501 }
64502@@ -2631,6 +2637,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64503 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
64504 MS_STRICTATIME);
64505
64506+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
64507+ retval = -EPERM;
64508+ goto dput_out;
64509+ }
64510+
64511+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
64512+ retval = -EPERM;
64513+ goto dput_out;
64514+ }
64515+
64516 if (flags & MS_REMOUNT)
64517 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
64518 data_page);
64519@@ -2644,7 +2660,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64520 retval = do_new_mount(&path, type_page, flags, mnt_flags,
64521 dev_name, data_page);
64522 dput_out:
64523+ gr_log_mount(dev_name, &path, retval);
64524+
64525 path_put(&path);
64526+
64527 return retval;
64528 }
64529
64530@@ -2662,7 +2681,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
64531 * number incrementing at 10Ghz will take 12,427 years to wrap which
64532 * is effectively never, so we can ignore the possibility.
64533 */
64534-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
64535+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
64536
64537 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64538 {
64539@@ -2678,7 +2697,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64540 return ERR_PTR(ret);
64541 }
64542 new_ns->ns.ops = &mntns_operations;
64543- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
64544+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
64545 atomic_set(&new_ns->count, 1);
64546 new_ns->root = NULL;
64547 INIT_LIST_HEAD(&new_ns->list);
64548@@ -2688,7 +2707,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64549 return new_ns;
64550 }
64551
64552-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64553+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64554 struct user_namespace *user_ns, struct fs_struct *new_fs)
64555 {
64556 struct mnt_namespace *new_ns;
64557@@ -2809,8 +2828,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
64558 }
64559 EXPORT_SYMBOL(mount_subtree);
64560
64561-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
64562- char __user *, type, unsigned long, flags, void __user *, data)
64563+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
64564+ const char __user *, type, unsigned long, flags, void __user *, data)
64565 {
64566 int ret;
64567 char *kernel_type;
64568@@ -2916,6 +2935,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
64569 if (error)
64570 goto out2;
64571
64572+ if (gr_handle_chroot_pivot()) {
64573+ error = -EPERM;
64574+ goto out2;
64575+ }
64576+
64577 get_fs_root(current->fs, &root);
64578 old_mp = lock_mount(&old);
64579 error = PTR_ERR(old_mp);
64580@@ -3190,7 +3214,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
64581 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
64582 return -EPERM;
64583
64584- if (fs->users != 1)
64585+ if (atomic_read(&fs->users) != 1)
64586 return -EINVAL;
64587
64588 get_mnt_ns(mnt_ns);
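
The mnt_ns_seq hunks convert the namespace sequence counter to the atomic64_unchecked_t variant. In grsecurity/PaX the *_unchecked types opt a counter out of REFCOUNT overflow detection, which is reasonable here because, as the retained comment notes, a 64-bit counter incrementing even at 10 GHz takes millennia to wrap. A C11 userspace analogue of the allocation pattern (the overflow-checking machinery itself is kernel-side and not modeled here):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t mnt_ns_seq = 1;

/* Equivalent of atomic64_add_return_unchecked(1, &mnt_ns_seq):
 * fetch_add returns the old value, so add 1 to mimic add_return. */
static uint64_t next_ns_seq(void)
{
	return atomic_fetch_add(&mnt_ns_seq, 1) + 1;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("namespace seq %llu\n",
		       (unsigned long long)next_ns_seq());
	return 0;
}
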
64589diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
64590index 02f8d09..a5c25d1 100644
64591--- a/fs/nfs/callback_xdr.c
64592+++ b/fs/nfs/callback_xdr.c
64593@@ -51,7 +51,7 @@ struct callback_op {
64594 callback_decode_arg_t decode_args;
64595 callback_encode_res_t encode_res;
64596 long res_maxsize;
64597-};
64598+} __do_const;
64599
64600 static struct callback_op callback_ops[];
64601
64602diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
64603index 2211f6b..30d0950 100644
64604--- a/fs/nfs/inode.c
64605+++ b/fs/nfs/inode.c
64606@@ -1234,16 +1234,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
64607 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
64608 }
64609
64610-static atomic_long_t nfs_attr_generation_counter;
64611+static atomic_long_unchecked_t nfs_attr_generation_counter;
64612
64613 static unsigned long nfs_read_attr_generation_counter(void)
64614 {
64615- return atomic_long_read(&nfs_attr_generation_counter);
64616+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
64617 }
64618
64619 unsigned long nfs_inc_attr_generation_counter(void)
64620 {
64621- return atomic_long_inc_return(&nfs_attr_generation_counter);
64622+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
64623 }
64624
64625 void nfs_fattr_init(struct nfs_fattr *fattr)
64626diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
64627index ac71d13..a2e590a 100644
64628--- a/fs/nfsd/nfs4proc.c
64629+++ b/fs/nfsd/nfs4proc.c
64630@@ -1237,7 +1237,7 @@ struct nfsd4_operation {
64631 nfsd4op_rsize op_rsize_bop;
64632 stateid_getter op_get_currentstateid;
64633 stateid_setter op_set_currentstateid;
64634-};
64635+} __do_const;
64636
64637 static struct nfsd4_operation nfsd4_ops[];
64638
64639diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
64640index 15f7b73..00e230b 100644
64641--- a/fs/nfsd/nfs4xdr.c
64642+++ b/fs/nfsd/nfs4xdr.c
64643@@ -1560,7 +1560,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
64644
64645 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
64646
64647-static nfsd4_dec nfsd4_dec_ops[] = {
64648+static const nfsd4_dec nfsd4_dec_ops[] = {
64649 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
64650 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
64651 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
64652diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
64653index 83a9694..6b7f928 100644
64654--- a/fs/nfsd/nfscache.c
64655+++ b/fs/nfsd/nfscache.c
64656@@ -537,7 +537,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64657 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
64658 u32 hash;
64659 struct nfsd_drc_bucket *b;
64660- int len;
64661+ long len;
64662 size_t bufsize = 0;
64663
64664 if (!rp)
64665@@ -546,11 +546,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64666 hash = nfsd_cache_hash(rp->c_xid);
64667 b = &drc_hashtbl[hash];
64668
64669- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
64670- len >>= 2;
64671+ if (statp) {
64672+ len = (char*)statp - (char*)resv->iov_base;
64673+ len = resv->iov_len - len;
64674+ len >>= 2;
64675+ }
64676
64677 /* Don't cache excessive amounts of data and XDR failures */
64678- if (!statp || len > (256 >> 2)) {
64679+ if (!statp || len > (256 >> 2) || len < 0) {
64680 nfsd_reply_cache_free(b, rp);
64681 return;
64682 }
64683@@ -558,7 +561,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64684 switch (cachetype) {
64685 case RC_REPLSTAT:
64686 if (len != 1)
64687- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
64688+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
64689 rp->c_replstat = *statp;
64690 break;
64691 case RC_REPLBUFF:
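
The nfsd_cache_update() hunk widens len to long and adds a len < 0 test. The old code computed the reply length by pointer subtraction and only rejected len > (256 >> 2); a negative result (statp landing outside the cached iovec) is not greater than 64, so it slipped through and a bogus length reached the cache. A small userspace illustration, with hypothetical buffer names standing in for the rqstp/iovec plumbing:

#include <stdio.h>

static int cacheable(long len)
{
	/* patched form: reject oversized and negative lengths */
	return !(len > (256 >> 2) || len < 0);
}

int main(void)
{
	char buf[256];
	char *iov_base = buf;
	long iov_len = 16;           /* bytes actually in the reply    */
	char *statp = buf + 32;      /* status pointer past the end    */
	long len = (iov_len - (statp - iov_base)) / 4;   /* -4 */

	printf("len = %ld\n", len);
	printf("old check (len > 64) rejects: %s\n",
	       len > (256 >> 2) ? "yes" : "no");   /* no: slips through */
	printf("new check rejects: %s\n",
	       cacheable(len) ? "no" : "yes");     /* yes */
	return 0;
}
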
64692diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
64693index 5685c67..73029ef 100644
64694--- a/fs/nfsd/vfs.c
64695+++ b/fs/nfsd/vfs.c
64696@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
64697
64698 oldfs = get_fs();
64699 set_fs(KERNEL_DS);
64700- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
64701+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
64702 set_fs(oldfs);
64703 return nfsd_finish_read(file, count, host_err);
64704 }
64705@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
64706
64707 /* Write the data. */
64708 oldfs = get_fs(); set_fs(KERNEL_DS);
64709- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
64710+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
64711 set_fs(oldfs);
64712 if (host_err < 0)
64713 goto out_nfserr;
64714@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
64715 */
64716
64717 oldfs = get_fs(); set_fs(KERNEL_DS);
64718- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
64719+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
64720 set_fs(oldfs);
64721
64722 if (host_err < 0)
64723diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
64724index 52ccd34..7a6b202 100644
64725--- a/fs/nls/nls_base.c
64726+++ b/fs/nls/nls_base.c
64727@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
64728
64729 int __register_nls(struct nls_table *nls, struct module *owner)
64730 {
64731- struct nls_table ** tmp = &tables;
64732+ struct nls_table *tmp = tables;
64733
64734 if (nls->next)
64735 return -EBUSY;
64736
64737- nls->owner = owner;
64738+ pax_open_kernel();
64739+ *(void **)&nls->owner = owner;
64740+ pax_close_kernel();
64741 spin_lock(&nls_lock);
64742- while (*tmp) {
64743- if (nls == *tmp) {
64744+ while (tmp) {
64745+ if (nls == tmp) {
64746 spin_unlock(&nls_lock);
64747 return -EBUSY;
64748 }
64749- tmp = &(*tmp)->next;
64750+ tmp = tmp->next;
64751 }
64752- nls->next = tables;
64753+ pax_open_kernel();
64754+ *(struct nls_table **)&nls->next = tables;
64755+ pax_close_kernel();
64756 tables = nls;
64757 spin_unlock(&nls_lock);
64758 return 0;
64759@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
64760
64761 int unregister_nls(struct nls_table * nls)
64762 {
64763- struct nls_table ** tmp = &tables;
64764+ struct nls_table * const * tmp = &tables;
64765
64766 spin_lock(&nls_lock);
64767 while (*tmp) {
64768 if (nls == *tmp) {
64769- *tmp = nls->next;
64770+ pax_open_kernel();
64771+ *(struct nls_table **)tmp = nls->next;
64772+ pax_close_kernel();
64773 spin_unlock(&nls_lock);
64774 return 0;
64775 }
64776@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
64777 return -EINVAL;
64778 }
64779
64780-static struct nls_table *find_nls(char *charset)
64781+static struct nls_table *find_nls(const char *charset)
64782 {
64783 struct nls_table *nls;
64784 spin_lock(&nls_lock);
64785@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
64786 return nls;
64787 }
64788
64789-struct nls_table *load_nls(char *charset)
64790+struct nls_table *load_nls(const char *charset)
64791 {
64792 return try_then_request_module(find_nls(charset), "nls_%s", charset);
64793 }
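
The __register_nls() hunk writes the owner and next fields through a pax_open_kernel()/pax_close_kernel() bracket; under PaX CONSTIFY, structures full of function pointers are made read-only, and these helpers briefly lift write protection for the one legitimate store. A userspace analogue of the same open/patch/close shape using mprotect() (an illustration of the concept only, not the PaX mechanism):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;

	strcpy(page, "cp437");
	mprotect(page, pagesz, PROT_READ);          /* "constify" the table */

	mprotect(page, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
	strcpy(page, "cp932");                          /* patch the field    */
	mprotect(page, pagesz, PROT_READ);              /* pax_close_kernel() */

	printf("table now reads: %s\n", page);
	munmap(page, pagesz);
	return 0;
}
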
64794diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
64795index 162b3f1..6076a7c 100644
64796--- a/fs/nls/nls_euc-jp.c
64797+++ b/fs/nls/nls_euc-jp.c
64798@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
64799 p_nls = load_nls("cp932");
64800
64801 if (p_nls) {
64802- table.charset2upper = p_nls->charset2upper;
64803- table.charset2lower = p_nls->charset2lower;
64804+ pax_open_kernel();
64805+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64806+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64807+ pax_close_kernel();
64808 return register_nls(&table);
64809 }
64810
64811diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
64812index a80a741..7b96e1b 100644
64813--- a/fs/nls/nls_koi8-ru.c
64814+++ b/fs/nls/nls_koi8-ru.c
64815@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
64816 p_nls = load_nls("koi8-u");
64817
64818 if (p_nls) {
64819- table.charset2upper = p_nls->charset2upper;
64820- table.charset2lower = p_nls->charset2lower;
64821+ pax_open_kernel();
64822+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64823+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64824+ pax_close_kernel();
64825 return register_nls(&table);
64826 }
64827
64828diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
64829index bff8567..83281c6 100644
64830--- a/fs/notify/fanotify/fanotify_user.c
64831+++ b/fs/notify/fanotify/fanotify_user.c
64832@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
64833
64834 fd = fanotify_event_metadata.fd;
64835 ret = -EFAULT;
64836- if (copy_to_user(buf, &fanotify_event_metadata,
64837- fanotify_event_metadata.event_len))
64838+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
64839+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
64840 goto out_close_fd;
64841
64842 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
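
The copy_event_to_user() hunk refuses to copy when the event's own event_len field claims more bytes than the on-stack metadata structure actually holds, closing an infoleak if event_len is ever corrupted or miscomputed. The pattern, sketched in userspace with an illustrative struct (the field layout here is hypothetical, not the real fanotify ABI):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct event_metadata {
	uint32_t event_len;	/* claims how many bytes to copy out */
	uint32_t fd;
	uint64_t mask;
};

static int copy_event(char *dst, const struct event_metadata *ev)
{
	/* guard: a corrupted event_len must not read past *ev */
	if (ev->event_len > sizeof(*ev))
		return -1;	/* -EFAULT in the patch */
	memcpy(dst, ev, ev->event_len);
	return 0;
}

int main(void)
{
	struct event_metadata ev = { sizeof(ev), 3, 0x1 };
	char buf[64];

	printf("sane copy: %d\n", copy_event(buf, &ev));
	ev.event_len = 4096;	/* would leak adjacent stack memory */
	printf("oversized copy rejected: %d\n", copy_event(buf, &ev));
	return 0;
}
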
64843diff --git a/fs/notify/notification.c b/fs/notify/notification.c
64844index a95d8e0..a91a5fd 100644
64845--- a/fs/notify/notification.c
64846+++ b/fs/notify/notification.c
64847@@ -48,7 +48,7 @@
64848 #include <linux/fsnotify_backend.h>
64849 #include "fsnotify.h"
64850
64851-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64852+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64853
64854 /**
64855 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
64856@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64857 */
64858 u32 fsnotify_get_cookie(void)
64859 {
64860- return atomic_inc_return(&fsnotify_sync_cookie);
64861+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
64862 }
64863 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
64864
64865diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
64866index 9e38daf..5727cae 100644
64867--- a/fs/ntfs/dir.c
64868+++ b/fs/ntfs/dir.c
64869@@ -1310,7 +1310,7 @@ find_next_index_buffer:
64870 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
64871 ~(s64)(ndir->itype.index.block_size - 1)));
64872 /* Bounds checks. */
64873- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
64874+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
64875 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
64876 "inode 0x%lx or driver bug.", vdir->i_ino);
64877 goto err_out;
64878diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
64879index 643faa4..ef9027e 100644
64880--- a/fs/ntfs/file.c
64881+++ b/fs/ntfs/file.c
64882@@ -1280,7 +1280,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
64883 char *addr;
64884 size_t total = 0;
64885 unsigned len;
64886- int left;
64887+ unsigned left;
64888
64889 do {
64890 len = PAGE_CACHE_SIZE - ofs;
64891diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
64892index 9e1e112..241a52a 100644
64893--- a/fs/ntfs/super.c
64894+++ b/fs/ntfs/super.c
64895@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64896 if (!silent)
64897 ntfs_error(sb, "Primary boot sector is invalid.");
64898 } else if (!silent)
64899- ntfs_error(sb, read_err_str, "primary");
64900+ ntfs_error(sb, read_err_str, "%s", "primary");
64901 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
64902 if (bh_primary)
64903 brelse(bh_primary);
64904@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64905 goto hotfix_primary_boot_sector;
64906 brelse(bh_backup);
64907 } else if (!silent)
64908- ntfs_error(sb, read_err_str, "backup");
64909+ ntfs_error(sb, read_err_str, "%s", "backup");
64910 /* Try to read NT3.51- backup boot sector. */
64911 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
64912 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
64913@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64914 "sector.");
64915 brelse(bh_backup);
64916 } else if (!silent)
64917- ntfs_error(sb, read_err_str, "backup");
64918+ ntfs_error(sb, read_err_str, "%s", "backup");
64919 /* We failed. Cleanup and return. */
64920 if (bh_primary)
64921 brelse(bh_primary);
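
The ntfs_error() hunks route the variable string through an explicit "%s" conversion instead of passing it in the format position, the standard defense when a format-checking compiler or plugin cannot prove a non-literal format is safe. The classic pitfall this shape avoids:

#include <stdio.h>

int main(void)
{
	const char *which = "backup %x %x";   /* imagine hostile '%' here */

	/*
	 * Unsafe shape (do not do this): the string becomes the format,
	 * so its '%x' directives would read garbage off the stack:
	 *
	 *     printf(which);
	 */

	printf("%s\n", which);   /* safe: the string is only ever data */
	return 0;
}
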
64922diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
64923index 0440134..d52c93a 100644
64924--- a/fs/ocfs2/localalloc.c
64925+++ b/fs/ocfs2/localalloc.c
64926@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
64927 goto bail;
64928 }
64929
64930- atomic_inc(&osb->alloc_stats.moves);
64931+ atomic_inc_unchecked(&osb->alloc_stats.moves);
64932
64933 bail:
64934 if (handle)
64935diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
64936index 7d6b7d0..5fb529a 100644
64937--- a/fs/ocfs2/ocfs2.h
64938+++ b/fs/ocfs2/ocfs2.h
64939@@ -242,11 +242,11 @@ enum ocfs2_vol_state
64940
64941 struct ocfs2_alloc_stats
64942 {
64943- atomic_t moves;
64944- atomic_t local_data;
64945- atomic_t bitmap_data;
64946- atomic_t bg_allocs;
64947- atomic_t bg_extends;
64948+ atomic_unchecked_t moves;
64949+ atomic_unchecked_t local_data;
64950+ atomic_unchecked_t bitmap_data;
64951+ atomic_unchecked_t bg_allocs;
64952+ atomic_unchecked_t bg_extends;
64953 };
64954
64955 enum ocfs2_local_alloc_state
64956diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
64957index 0cb889a..6a26b24 100644
64958--- a/fs/ocfs2/suballoc.c
64959+++ b/fs/ocfs2/suballoc.c
64960@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
64961 mlog_errno(status);
64962 goto bail;
64963 }
64964- atomic_inc(&osb->alloc_stats.bg_extends);
64965+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
64966
64967 /* You should never ask for this much metadata */
64968 BUG_ON(bits_wanted >
64969@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
64970 mlog_errno(status);
64971 goto bail;
64972 }
64973- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64974+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64975
64976 *suballoc_loc = res.sr_bg_blkno;
64977 *suballoc_bit_start = res.sr_bit_offset;
64978@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
64979 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
64980 res->sr_bits);
64981
64982- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64983+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64984
64985 BUG_ON(res->sr_bits != 1);
64986
64987@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
64988 mlog_errno(status);
64989 goto bail;
64990 }
64991- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64992+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64993
64994 BUG_ON(res.sr_bits != 1);
64995
64996@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
64997 cluster_start,
64998 num_clusters);
64999 if (!status)
65000- atomic_inc(&osb->alloc_stats.local_data);
65001+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
65002 } else {
65003 if (min_clusters > (osb->bitmap_cpg - 1)) {
65004 /* The only paths asking for contiguousness
65005@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65006 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
65007 res.sr_bg_blkno,
65008 res.sr_bit_offset);
65009- atomic_inc(&osb->alloc_stats.bitmap_data);
65010+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
65011 *num_clusters = res.sr_bits;
65012 }
65013 }
65014diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
65015index 8372317..ec86e79 100644
65016--- a/fs/ocfs2/super.c
65017+++ b/fs/ocfs2/super.c
65018@@ -306,11 +306,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
65019 "%10s => GlobalAllocs: %d LocalAllocs: %d "
65020 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
65021 "Stats",
65022- atomic_read(&osb->alloc_stats.bitmap_data),
65023- atomic_read(&osb->alloc_stats.local_data),
65024- atomic_read(&osb->alloc_stats.bg_allocs),
65025- atomic_read(&osb->alloc_stats.moves),
65026- atomic_read(&osb->alloc_stats.bg_extends));
65027+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
65028+ atomic_read_unchecked(&osb->alloc_stats.local_data),
65029+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
65030+ atomic_read_unchecked(&osb->alloc_stats.moves),
65031+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
65032
65033 out += snprintf(buf + out, len - out,
65034 "%10s => State: %u Descriptor: %llu Size: %u bits "
65035@@ -2113,11 +2113,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
65036
65037 mutex_init(&osb->system_file_mutex);
65038
65039- atomic_set(&osb->alloc_stats.moves, 0);
65040- atomic_set(&osb->alloc_stats.local_data, 0);
65041- atomic_set(&osb->alloc_stats.bitmap_data, 0);
65042- atomic_set(&osb->alloc_stats.bg_allocs, 0);
65043- atomic_set(&osb->alloc_stats.bg_extends, 0);
65044+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
65045+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
65046+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
65047+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
65048+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
65049
65050 /* Copy the blockcheck stats from the superblock probe */
65051 osb->osb_ecc_stats = *stats;
65052diff --git a/fs/open.c b/fs/open.c
65053index 813be03..781941d 100644
65054--- a/fs/open.c
65055+++ b/fs/open.c
65056@@ -32,6 +32,8 @@
65057 #include <linux/dnotify.h>
65058 #include <linux/compat.h>
65059
65060+#define CREATE_TRACE_POINTS
65061+#include <trace/events/fs.h>
65062 #include "internal.h"
65063
65064 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
65065@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
65066 error = locks_verify_truncate(inode, NULL, length);
65067 if (!error)
65068 error = security_path_truncate(path);
65069+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
65070+ error = -EACCES;
65071 if (!error)
65072 error = do_truncate(path->dentry, length, 0, NULL);
65073
65074@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
65075 error = locks_verify_truncate(inode, f.file, length);
65076 if (!error)
65077 error = security_path_truncate(&f.file->f_path);
65078+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
65079+ error = -EACCES;
65080 if (!error)
65081 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
65082 sb_end_write(inode->i_sb);
65083@@ -392,6 +398,9 @@ retry:
65084 if (__mnt_is_readonly(path.mnt))
65085 res = -EROFS;
65086
65087+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
65088+ res = -EACCES;
65089+
65090 out_path_release:
65091 path_put(&path);
65092 if (retry_estale(res, lookup_flags)) {
65093@@ -423,6 +432,8 @@ retry:
65094 if (error)
65095 goto dput_and_out;
65096
65097+ gr_log_chdir(path.dentry, path.mnt);
65098+
65099 set_fs_pwd(current->fs, &path);
65100
65101 dput_and_out:
65102@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
65103 goto out_putf;
65104
65105 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
65106+
65107+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
65108+ error = -EPERM;
65109+
65110+ if (!error)
65111+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
65112+
65113 if (!error)
65114 set_fs_pwd(current->fs, &f.file->f_path);
65115 out_putf:
65116@@ -481,7 +499,13 @@ retry:
65117 if (error)
65118 goto dput_and_out;
65119
65120+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
65121+ goto dput_and_out;
65122+
65123 set_fs_root(current->fs, &path);
65124+
65125+ gr_handle_chroot_chdir(&path);
65126+
65127 error = 0;
65128 dput_and_out:
65129 path_put(&path);
65130@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
65131 return error;
65132 retry_deleg:
65133 mutex_lock(&inode->i_mutex);
65134+
65135+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
65136+ error = -EACCES;
65137+ goto out_unlock;
65138+ }
65139+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
65140+ error = -EACCES;
65141+ goto out_unlock;
65142+ }
65143+
65144 error = security_path_chmod(path, mode);
65145 if (error)
65146 goto out_unlock;
65147@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
65148 uid = make_kuid(current_user_ns(), user);
65149 gid = make_kgid(current_user_ns(), group);
65150
65151+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
65152+ return -EACCES;
65153+
65154 newattrs.ia_valid = ATTR_CTIME;
65155 if (user != (uid_t) -1) {
65156 if (!uid_valid(uid))
65157@@ -1014,6 +1051,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
65158 } else {
65159 fsnotify_open(f);
65160 fd_install(fd, f);
65161+ trace_do_sys_open(tmp->name, flags, mode);
65162 }
65163 }
65164 putname(tmp);
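
The fs/open.c hunks all follow the same stacking discipline: the added gr_acl_* or gr_handle_* hook runs only after the existing DAC/LSM checks, and can only turn an allowed operation into -EACCES or -EPERM, never the reverse. In miniature (hook names mirror the patch, the bodies are stand-ins):

#include <stdio.h>

static int security_path_truncate(void) { return 0; }   /* 0 == allowed */
static int gr_acl_handle_truncate(void) { return 0; }   /* 0 == denied  */

static int truncate_checks(void)
{
	int error = security_path_truncate();
	if (!error && !gr_acl_handle_truncate())
		error = -13;   /* -EACCES: the added hook only tightens */
	return error;
}

int main(void)
{
	printf("truncate_checks() = %d (expect -13, i.e. -EACCES)\n",
	       truncate_checks());
	return 0;
}
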
65165diff --git a/fs/pipe.c b/fs/pipe.c
65166index 21981e5..3d5f55c 100644
65167--- a/fs/pipe.c
65168+++ b/fs/pipe.c
65169@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
65170
65171 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
65172 {
65173- if (pipe->files)
65174+ if (atomic_read(&pipe->files))
65175 mutex_lock_nested(&pipe->mutex, subclass);
65176 }
65177
65178@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
65179
65180 void pipe_unlock(struct pipe_inode_info *pipe)
65181 {
65182- if (pipe->files)
65183+ if (atomic_read(&pipe->files))
65184 mutex_unlock(&pipe->mutex);
65185 }
65186 EXPORT_SYMBOL(pipe_unlock);
65187@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
65188 }
65189 if (bufs) /* More to do? */
65190 continue;
65191- if (!pipe->writers)
65192+ if (!atomic_read(&pipe->writers))
65193 break;
65194- if (!pipe->waiting_writers) {
65195+ if (!atomic_read(&pipe->waiting_writers)) {
65196 /* syscall merging: Usually we must not sleep
65197 * if O_NONBLOCK is set, or if we got some data.
65198 * But if a writer sleeps in kernel space, then
65199@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65200
65201 __pipe_lock(pipe);
65202
65203- if (!pipe->readers) {
65204+ if (!atomic_read(&pipe->readers)) {
65205 send_sig(SIGPIPE, current, 0);
65206 ret = -EPIPE;
65207 goto out;
65208@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65209 for (;;) {
65210 int bufs;
65211
65212- if (!pipe->readers) {
65213+ if (!atomic_read(&pipe->readers)) {
65214 send_sig(SIGPIPE, current, 0);
65215 if (!ret)
65216 ret = -EPIPE;
65217@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65218 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65219 do_wakeup = 0;
65220 }
65221- pipe->waiting_writers++;
65222+ atomic_inc(&pipe->waiting_writers);
65223 pipe_wait(pipe);
65224- pipe->waiting_writers--;
65225+ atomic_dec(&pipe->waiting_writers);
65226 }
65227 out:
65228 __pipe_unlock(pipe);
65229@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65230 mask = 0;
65231 if (filp->f_mode & FMODE_READ) {
65232 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
65233- if (!pipe->writers && filp->f_version != pipe->w_counter)
65234+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
65235 mask |= POLLHUP;
65236 }
65237
65238@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65239 * Most Unices do not set POLLERR for FIFOs but on Linux they
65240 * behave exactly like pipes for poll().
65241 */
65242- if (!pipe->readers)
65243+ if (!atomic_read(&pipe->readers))
65244 mask |= POLLERR;
65245 }
65246
65247@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
65248 int kill = 0;
65249
65250 spin_lock(&inode->i_lock);
65251- if (!--pipe->files) {
65252+ if (atomic_dec_and_test(&pipe->files)) {
65253 inode->i_pipe = NULL;
65254 kill = 1;
65255 }
65256@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
65257
65258 __pipe_lock(pipe);
65259 if (file->f_mode & FMODE_READ)
65260- pipe->readers--;
65261+ atomic_dec(&pipe->readers);
65262 if (file->f_mode & FMODE_WRITE)
65263- pipe->writers--;
65264+ atomic_dec(&pipe->writers);
65265
65266- if (pipe->readers || pipe->writers) {
65267+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
65268 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
65269 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65270 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
65271@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
65272 kfree(pipe);
65273 }
65274
65275-static struct vfsmount *pipe_mnt __read_mostly;
65276+struct vfsmount *pipe_mnt __read_mostly;
65277
65278 /*
65279 * pipefs_dname() is called from d_path().
65280@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
65281 goto fail_iput;
65282
65283 inode->i_pipe = pipe;
65284- pipe->files = 2;
65285- pipe->readers = pipe->writers = 1;
65286+ atomic_set(&pipe->files, 2);
65287+ atomic_set(&pipe->readers, 1);
65288+ atomic_set(&pipe->writers, 1);
65289 inode->i_fop = &pipefifo_fops;
65290
65291 /*
65292@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
65293 spin_lock(&inode->i_lock);
65294 if (inode->i_pipe) {
65295 pipe = inode->i_pipe;
65296- pipe->files++;
65297+ atomic_inc(&pipe->files);
65298 spin_unlock(&inode->i_lock);
65299 } else {
65300 spin_unlock(&inode->i_lock);
65301 pipe = alloc_pipe_info();
65302 if (!pipe)
65303 return -ENOMEM;
65304- pipe->files = 1;
65305+ atomic_set(&pipe->files, 1);
65306 spin_lock(&inode->i_lock);
65307 if (unlikely(inode->i_pipe)) {
65308- inode->i_pipe->files++;
65309+ atomic_inc(&inode->i_pipe->files);
65310 spin_unlock(&inode->i_lock);
65311 free_pipe_info(pipe);
65312 pipe = inode->i_pipe;
65313@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
65314 * opened, even when there is no process writing the FIFO.
65315 */
65316 pipe->r_counter++;
65317- if (pipe->readers++ == 0)
65318+ if (atomic_inc_return(&pipe->readers) == 1)
65319 wake_up_partner(pipe);
65320
65321- if (!is_pipe && !pipe->writers) {
65322+ if (!is_pipe && !atomic_read(&pipe->writers)) {
65323 if ((filp->f_flags & O_NONBLOCK)) {
65324 /* suppress POLLHUP until we have
65325 * seen a writer */
65326@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
65327 * errno=ENXIO when there is no process reading the FIFO.
65328 */
65329 ret = -ENXIO;
65330- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
65331+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
65332 goto err;
65333
65334 pipe->w_counter++;
65335- if (!pipe->writers++)
65336+ if (atomic_inc_return(&pipe->writers) == 1)
65337 wake_up_partner(pipe);
65338
65339- if (!is_pipe && !pipe->readers) {
65340+ if (!is_pipe && !atomic_read(&pipe->readers)) {
65341 if (wait_for_partner(pipe, &pipe->r_counter))
65342 goto err_wr;
65343 }
65344@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
65345 * the process can at least talk to itself.
65346 */
65347
65348- pipe->readers++;
65349- pipe->writers++;
65350+ atomic_inc(&pipe->readers);
65351+ atomic_inc(&pipe->writers);
65352 pipe->r_counter++;
65353 pipe->w_counter++;
65354- if (pipe->readers == 1 || pipe->writers == 1)
65355+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
65356 wake_up_partner(pipe);
65357 break;
65358
65359@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
65360 return 0;
65361
65362 err_rd:
65363- if (!--pipe->readers)
65364+ if (atomic_dec_and_test(&pipe->readers))
65365 wake_up_interruptible(&pipe->wait);
65366 ret = -ERESTARTSYS;
65367 goto err;
65368
65369 err_wr:
65370- if (!--pipe->writers)
65371+ if (atomic_dec_and_test(&pipe->writers))
65372 wake_up_interruptible(&pipe->wait);
65373 ret = -ERESTARTSYS;
65374 goto err;
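
The fs/pipe.c hunks convert the readers/writers/files counters to atomic_t and rewrite the increment-and-test idioms: pipe->readers++ == 0 becomes atomic_inc_return(&pipe->readers) == 1, and !--pipe->writers becomes atomic_dec_and_test(&pipe->writers). The equivalences, expressed with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int readers;
static atomic_int writers;

/* was: if (pipe->readers++ == 0) wake_up_partner(pipe); */
static bool first_reader(void)
{
	return atomic_fetch_add(&readers, 1) + 1 == 1;
}

/* was: if (!--pipe->writers) wake_up_interruptible(...); */
static bool last_writer(void)
{
	return atomic_fetch_sub(&writers, 1) - 1 == 0;
}

int main(void)
{
	atomic_store(&writers, 2);
	printf("first reader? %d\n", first_reader());   /* 1 */
	printf("last writer?  %d\n", last_writer());    /* 0 */
	printf("last writer?  %d\n", last_writer());    /* 1 */
	return 0;
}
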
65375diff --git a/fs/posix_acl.c b/fs/posix_acl.c
65376index 0855f77..6787d50 100644
65377--- a/fs/posix_acl.c
65378+++ b/fs/posix_acl.c
65379@@ -20,6 +20,7 @@
65380 #include <linux/xattr.h>
65381 #include <linux/export.h>
65382 #include <linux/user_namespace.h>
65383+#include <linux/grsecurity.h>
65384
65385 struct posix_acl **acl_by_type(struct inode *inode, int type)
65386 {
65387@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
65388 }
65389 }
65390 if (mode_p)
65391- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65392+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65393 return not_equiv;
65394 }
65395 EXPORT_SYMBOL(posix_acl_equiv_mode);
65396@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
65397 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
65398 }
65399
65400- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65401+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65402 return not_equiv;
65403 }
65404
65405@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
65406 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
65407 int err = -ENOMEM;
65408 if (clone) {
65409+ *mode_p &= ~gr_acl_umask();
65410+
65411 err = posix_acl_create_masq(clone, mode_p);
65412 if (err < 0) {
65413 posix_acl_release(clone);
65414@@ -659,11 +662,12 @@ struct posix_acl *
65415 posix_acl_from_xattr(struct user_namespace *user_ns,
65416 const void *value, size_t size)
65417 {
65418- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
65419- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
65420+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
65421+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
65422 int count;
65423 struct posix_acl *acl;
65424 struct posix_acl_entry *acl_e;
65425+ umode_t umask = gr_acl_umask();
65426
65427 if (!value)
65428 return NULL;
65429@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65430
65431 switch(acl_e->e_tag) {
65432 case ACL_USER_OBJ:
65433+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65434+ break;
65435 case ACL_GROUP_OBJ:
65436 case ACL_MASK:
65437+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65438+ break;
65439 case ACL_OTHER:
65440+ acl_e->e_perm &= ~(umask & S_IRWXO);
65441 break;
65442
65443 case ACL_USER:
65444+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65445 acl_e->e_uid =
65446 make_kuid(user_ns,
65447 le32_to_cpu(entry->e_id));
65448@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65449 goto fail;
65450 break;
65451 case ACL_GROUP:
65452+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65453 acl_e->e_gid =
65454 make_kgid(user_ns,
65455 le32_to_cpu(entry->e_id));
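
The posix_acl_from_xattr() hunk applies gr_acl_umask() per entry class. Since each ACL entry keeps its rwx bits in the low three bits of e_perm, the matching slice of the umask must be shifted down first: user bits by 6, group bits by 3, other bits not at all. A worked example with umask 027:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	unsigned umask = 027;   /* deny group w, other rwx */
	unsigned e_perm = 07;   /* entry grants rwx */

	unsigned user  = e_perm & ~((umask & S_IRWXU) >> 6); /* 7 & ~0 = 7 */
	unsigned group = e_perm & ~((umask & S_IRWXG) >> 3); /* 7 & ~2 = 5 */
	unsigned other = e_perm & ~(umask & S_IRWXO);        /* 7 & ~7 = 0 */

	printf("user %o  group %o  other %o\n", user, group, other);
	return 0;
}
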
65456diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
65457index 2183fcf..3c32a98 100644
65458--- a/fs/proc/Kconfig
65459+++ b/fs/proc/Kconfig
65460@@ -30,7 +30,7 @@ config PROC_FS
65461
65462 config PROC_KCORE
65463 bool "/proc/kcore support" if !ARM
65464- depends on PROC_FS && MMU
65465+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
65466 help
65467 Provides a virtual ELF core file of the live kernel. This can
65468 be read with gdb and other ELF tools. No modifications can be
65469@@ -38,8 +38,8 @@ config PROC_KCORE
65470
65471 config PROC_VMCORE
65472 bool "/proc/vmcore support"
65473- depends on PROC_FS && CRASH_DUMP
65474- default y
65475+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
65476+ default n
65477 help
65478 Exports the dump image of crashed kernel in ELF format.
65479
65480@@ -63,8 +63,8 @@ config PROC_SYSCTL
65481 limited in memory.
65482
65483 config PROC_PAGE_MONITOR
65484- default y
65485- depends on PROC_FS && MMU
65486+ default n
65487+ depends on PROC_FS && MMU && !GRKERNSEC
65488 bool "Enable /proc page monitoring" if EXPERT
65489 help
65490 Various /proc files exist to monitor process memory utilization:
65491diff --git a/fs/proc/array.c b/fs/proc/array.c
65492index bd117d0..e6872d7 100644
65493--- a/fs/proc/array.c
65494+++ b/fs/proc/array.c
65495@@ -60,6 +60,7 @@
65496 #include <linux/tty.h>
65497 #include <linux/string.h>
65498 #include <linux/mman.h>
65499+#include <linux/grsecurity.h>
65500 #include <linux/proc_fs.h>
65501 #include <linux/ioport.h>
65502 #include <linux/uaccess.h>
65503@@ -344,6 +345,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
65504 seq_putc(m, '\n');
65505 }
65506
65507+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65508+static inline void task_pax(struct seq_file *m, struct task_struct *p)
65509+{
65510+ if (p->mm)
65511+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
65512+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
65513+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
65514+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
65515+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
65516+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
65517+ else
65518+ seq_printf(m, "PaX:\t-----\n");
65519+}
65520+#endif
65521+
65522 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65523 struct pid *pid, struct task_struct *task)
65524 {
65525@@ -362,9 +378,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65526 task_cpus_allowed(m, task);
65527 cpuset_task_status_allowed(m, task);
65528 task_context_switch_counts(m, task);
65529+
65530+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65531+ task_pax(m, task);
65532+#endif
65533+
65534+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
65535+ task_grsec_rbac(m, task);
65536+#endif
65537+
65538 return 0;
65539 }
65540
65541+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65542+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65543+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65544+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65545+#endif
65546+
65547 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65548 struct pid *pid, struct task_struct *task, int whole)
65549 {
65550@@ -386,6 +417,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65551 char tcomm[sizeof(task->comm)];
65552 unsigned long flags;
65553
65554+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65555+ if (current->exec_id != m->exec_id) {
65556+ gr_log_badprocpid("stat");
65557+ return 0;
65558+ }
65559+#endif
65560+
65561 state = *get_task_state(task);
65562 vsize = eip = esp = 0;
65563 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65564@@ -456,6 +494,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65565 gtime = task_gtime(task);
65566 }
65567
65568+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65569+ if (PAX_RAND_FLAGS(mm)) {
65570+ eip = 0;
65571+ esp = 0;
65572+ wchan = 0;
65573+ }
65574+#endif
65575+#ifdef CONFIG_GRKERNSEC_HIDESYM
65576+ wchan = 0;
65577+ eip = 0;
65578+ esp = 0;
65579+#endif
65580+
65581 /* scale priority and nice values from timeslices to -20..20 */
65582 /* to make it look like a "normal" Unix priority/nice value */
65583 priority = task_prio(task);
65584@@ -487,9 +538,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65585 seq_put_decimal_ull(m, ' ', vsize);
65586 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
65587 seq_put_decimal_ull(m, ' ', rsslim);
65588+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65589+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
65590+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
65591+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
65592+#else
65593 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
65594 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
65595 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
65596+#endif
65597 seq_put_decimal_ull(m, ' ', esp);
65598 seq_put_decimal_ull(m, ' ', eip);
65599 /* The signal information here is obsolete.
65600@@ -511,7 +568,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65601 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
65602 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
65603
65604- if (mm && permitted) {
65605+ if (mm && permitted
65606+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65607+ && !PAX_RAND_FLAGS(mm)
65608+#endif
65609+ ) {
65610 seq_put_decimal_ull(m, ' ', mm->start_data);
65611 seq_put_decimal_ull(m, ' ', mm->end_data);
65612 seq_put_decimal_ull(m, ' ', mm->start_brk);
65613@@ -549,8 +610,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65614 struct pid *pid, struct task_struct *task)
65615 {
65616 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
65617- struct mm_struct *mm = get_task_mm(task);
65618+ struct mm_struct *mm;
65619
65620+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65621+ if (current->exec_id != m->exec_id) {
65622+ gr_log_badprocpid("statm");
65623+ return 0;
65624+ }
65625+#endif
65626+ mm = get_task_mm(task);
65627 if (mm) {
65628 size = task_statm(mm, &shared, &text, &data, &resident);
65629 mmput(mm);
65630@@ -573,6 +641,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65631 return 0;
65632 }
65633
65634+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65635+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
65636+{
65637+ unsigned long flags;
65638+ u32 curr_ip = 0;
65639+
65640+ if (lock_task_sighand(task, &flags)) {
65641+ curr_ip = task->signal->curr_ip;
65642+ unlock_task_sighand(task, &flags);
65643+ }
65644+ return seq_printf(m, "%pI4\n", &curr_ip);
65645+}
65646+#endif
65647+
65648 #ifdef CONFIG_CHECKPOINT_RESTORE
65649 static struct pid *
65650 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
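
The do_task_stat() hunks sanitize eip, esp, wchan, and the code/stack boundaries before they are formatted, whenever GRKERNSEC_HIDESYM is set or the target mm carries the PAX_RAND_FLAGS randomization flags, so /proc/<pid>/stat stops leaking randomized layout addresses. The decide-once-then-substitute shape, sketched in userspace (the hide predicate is a stand-in for the patch's tests):

#include <stdbool.h>
#include <stdio.h>

struct task_view {
	unsigned long eip, esp, start_stack;
};

static void emit_stat(const struct task_view *t, bool hide)
{
	/* substitute constants before anything reaches the formatter */
	unsigned long eip = hide ? 0 : t->eip;
	unsigned long esp = hide ? 0 : t->esp;
	unsigned long stk = hide ? 0 : t->start_stack;

	printf("eip %lu esp %lu start_stack %lu\n", eip, esp, stk);
}

int main(void)
{
	struct task_view t = { 0x7f00deadUL, 0x7f00beefUL, 0x7ffffff0UL };

	emit_stat(&t, false);  /* privileged / same-user view */
	emit_stat(&t, true);   /* sanitized view: all zeros   */
	return 0;
}
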
65651diff --git a/fs/proc/base.c b/fs/proc/base.c
65652index 3f3d7ae..68de109 100644
65653--- a/fs/proc/base.c
65654+++ b/fs/proc/base.c
65655@@ -113,6 +113,14 @@ struct pid_entry {
65656 union proc_op op;
65657 };
65658
65659+struct getdents_callback {
65660+ struct linux_dirent __user * current_dir;
65661+ struct linux_dirent __user * previous;
65662+ struct file * file;
65663+ int count;
65664+ int error;
65665+};
65666+
65667 #define NOD(NAME, MODE, IOP, FOP, OP) { \
65668 .name = (NAME), \
65669 .len = sizeof(NAME) - 1, \
65670@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
65671 return 0;
65672 }
65673
65674+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65675+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65676+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65677+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65678+#endif
65679+
65680 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65681 struct pid *pid, struct task_struct *task)
65682 {
65683 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
65684 if (mm && !IS_ERR(mm)) {
65685 unsigned int nwords = 0;
65686+
65687+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65688+ /* allow if we're currently ptracing this task */
65689+ if (PAX_RAND_FLAGS(mm) &&
65690+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
65691+ mmput(mm);
65692+ return 0;
65693+ }
65694+#endif
65695+
65696 do {
65697 nwords += 2;
65698 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
65699@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65700 }
65701
65702
65703-#ifdef CONFIG_KALLSYMS
65704+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65705 /*
65706 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
65707 * Returns the resolved symbol. If that fails, simply return the address.
65708@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
65709 mutex_unlock(&task->signal->cred_guard_mutex);
65710 }
65711
65712-#ifdef CONFIG_STACKTRACE
65713+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65714
65715 #define MAX_STACK_TRACE_DEPTH 64
65716
65717@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
65718 return 0;
65719 }
65720
65721-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65722+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65723 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65724 struct pid *pid, struct task_struct *task)
65725 {
65726@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65727 /************************************************************************/
65728
65729 /* permission checks */
65730-static int proc_fd_access_allowed(struct inode *inode)
65731+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
65732 {
65733 struct task_struct *task;
65734 int allowed = 0;
65735@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
65736 */
65737 task = get_proc_task(inode);
65738 if (task) {
65739- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65740+ if (log)
65741+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65742+ else
65743+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65744 put_task_struct(task);
65745 }
65746 return allowed;
65747@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
65748 struct task_struct *task,
65749 int hide_pid_min)
65750 {
65751+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65752+ return false;
65753+
65754+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65755+ rcu_read_lock();
65756+ {
65757+ const struct cred *tmpcred = current_cred();
65758+ const struct cred *cred = __task_cred(task);
65759+
65760+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
65761+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65762+ || in_group_p(grsec_proc_gid)
65763+#endif
65764+ ) {
65765+ rcu_read_unlock();
65766+ return true;
65767+ }
65768+ }
65769+ rcu_read_unlock();
65770+
65771+ if (!pid->hide_pid)
65772+ return false;
65773+#endif
65774+
65775 if (pid->hide_pid < hide_pid_min)
65776 return true;
65777 if (in_group_p(pid->pid_gid))
65778 return true;
65779+
65780 return ptrace_may_access(task, PTRACE_MODE_READ);
65781 }
65782
65783@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
65784 put_task_struct(task);
65785
65786 if (!has_perms) {
65787+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65788+ {
65789+#else
65790 if (pid->hide_pid == 2) {
65791+#endif
65792 /*
65793 * Let's make getdents(), stat(), and open()
65794 * consistent with each other. If a process
65795@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
65796
65797 if (task) {
65798 mm = mm_access(task, mode);
65799+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
65800+ mmput(mm);
65801+ mm = ERR_PTR(-EPERM);
65802+ }
65803 put_task_struct(task);
65804
65805 if (!IS_ERR_OR_NULL(mm)) {
65806@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
65807 return PTR_ERR(mm);
65808
65809 file->private_data = mm;
65810+
65811+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65812+ file->f_version = current->exec_id;
65813+#endif
65814+
65815 return 0;
65816 }
65817
65818@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
65819 ssize_t copied;
65820 char *page;
65821
65822+#ifdef CONFIG_GRKERNSEC
65823+ if (write)
65824+ return -EPERM;
65825+#endif
65826+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65827+ if (file->f_version != current->exec_id) {
65828+ gr_log_badprocpid("mem");
65829+ return 0;
65830+ }
65831+#endif
65832+
65833 if (!mm)
65834 return 0;
65835
65836@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
65837 goto free;
65838
65839 while (count > 0) {
65840- int this_len = min_t(int, count, PAGE_SIZE);
65841+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
65842
65843 if (write && copy_from_user(page, buf, this_len)) {
65844 copied = -EFAULT;
65845@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
65846 if (!mm)
65847 return 0;
65848
65849+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65850+ if (file->f_version != current->exec_id) {
65851+ gr_log_badprocpid("environ");
65852+ return 0;
65853+ }
65854+#endif
65855+
65856 page = (char *)__get_free_page(GFP_TEMPORARY);
65857 if (!page)
65858 return -ENOMEM;
65859@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
65860 goto free;
65861 while (count > 0) {
65862 size_t this_len, max_len;
65863- int retval;
65864+ ssize_t retval;
65865
65866 if (src >= (mm->env_end - mm->env_start))
65867 break;
65868@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
65869 int error = -EACCES;
65870
65871 /* Are we allowed to snoop on the tasks file descriptors? */
65872- if (!proc_fd_access_allowed(inode))
65873+ if (!proc_fd_access_allowed(inode, 0))
65874 goto out;
65875
65876 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
65877@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
65878 struct path path;
65879
65880 /* Are we allowed to snoop on the tasks file descriptors? */
65881- if (!proc_fd_access_allowed(inode))
65882- goto out;
65883+ /* Logging this access is needed for grsec learning mode to work with
65884+ Chromium, but we don't want to flood the logs from 'ps', which does a
65885+ readlink on /proc/fd/2 of every task it lists, nor do we want 'ps' to
65886+ learn CAP_SYS_PTRACE, since that capability isn't necessary for its
65887+ basic functionality */
65888+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
65889+ if (!proc_fd_access_allowed(inode,0))
65890+ goto out;
65891+ } else {
65892+ if (!proc_fd_access_allowed(inode,1))
65893+ goto out;
65894+ }
65895
65896 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
65897 if (error)
65898@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
65899 rcu_read_lock();
65900 cred = __task_cred(task);
65901 inode->i_uid = cred->euid;
65902+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65903+ inode->i_gid = grsec_proc_gid;
65904+#else
65905 inode->i_gid = cred->egid;
65906+#endif
65907 rcu_read_unlock();
65908 }
65909 security_task_to_inode(task, inode);
65910@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
65911 return -ENOENT;
65912 }
65913 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
65914+#ifdef CONFIG_GRKERNSEC_PROC_USER
65915+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
65916+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65917+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
65918+#endif
65919 task_dumpable(task)) {
65920 cred = __task_cred(task);
65921 stat->uid = cred->euid;
65922+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65923+ stat->gid = grsec_proc_gid;
65924+#else
65925 stat->gid = cred->egid;
65926+#endif
65927 }
65928 }
65929 rcu_read_unlock();
65930@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
65931
65932 if (task) {
65933 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
65934+#ifdef CONFIG_GRKERNSEC_PROC_USER
65935+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
65936+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65937+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
65938+#endif
65939 task_dumpable(task)) {
65940 rcu_read_lock();
65941 cred = __task_cred(task);
65942 inode->i_uid = cred->euid;
65943+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65944+ inode->i_gid = grsec_proc_gid;
65945+#else
65946 inode->i_gid = cred->egid;
65947+#endif
65948 rcu_read_unlock();
65949 } else {
65950 inode->i_uid = GLOBAL_ROOT_UID;
65951@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
65952 if (!task)
65953 goto out_no_task;
65954
65955+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65956+ goto out;
65957+
65958 /*
65959 * Yes, it does not scale. And it should not. Don't add
65960 * new entries into /proc/<tgid>/ without very good reasons.
65961@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
65962 if (!task)
65963 return -ENOENT;
65964
65965+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65966+ goto out;
65967+
65968 if (!dir_emit_dots(file, ctx))
65969 goto out;
65970
65971@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
65972 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
65973 #endif
65974 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
65975-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65976+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65977 ONE("syscall", S_IRUSR, proc_pid_syscall),
65978 #endif
65979 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
65980@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
65981 #ifdef CONFIG_SECURITY
65982 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
65983 #endif
65984-#ifdef CONFIG_KALLSYMS
65985+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65986 ONE("wchan", S_IRUGO, proc_pid_wchan),
65987 #endif
65988-#ifdef CONFIG_STACKTRACE
65989+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65990 ONE("stack", S_IRUSR, proc_pid_stack),
65991 #endif
65992 #ifdef CONFIG_SCHEDSTATS
65993@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
65994 #ifdef CONFIG_HARDWALL
65995 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
65996 #endif
65997+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65998+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
65999+#endif
66000 #ifdef CONFIG_USER_NS
66001 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
66002 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
66003@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
66004 if (!inode)
66005 goto out;
66006
66007+#ifdef CONFIG_GRKERNSEC_PROC_USER
66008+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
66009+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66010+ inode->i_gid = grsec_proc_gid;
66011+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
66012+#else
66013 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
66014+#endif
66015 inode->i_op = &proc_tgid_base_inode_operations;
66016 inode->i_fop = &proc_tgid_base_operations;
66017 inode->i_flags|=S_IMMUTABLE;
66018@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
66019 if (!task)
66020 goto out;
66021
66022+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66023+ goto out_put_task;
66024+
66025 result = proc_pid_instantiate(dir, dentry, task, NULL);
66026+out_put_task:
66027 put_task_struct(task);
66028 out:
66029 return ERR_PTR(result);
66030@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
66031 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
66032 #endif
66033 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66034-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66035+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66036 ONE("syscall", S_IRUSR, proc_pid_syscall),
66037 #endif
66038 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66039@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
66040 #ifdef CONFIG_SECURITY
66041 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66042 #endif
66043-#ifdef CONFIG_KALLSYMS
66044+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66045 ONE("wchan", S_IRUGO, proc_pid_wchan),
66046 #endif
66047-#ifdef CONFIG_STACKTRACE
66048+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66049 ONE("stack", S_IRUSR, proc_pid_stack),
66050 #endif
66051 #ifdef CONFIG_SCHEDSTATS
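
The fs/proc/base.c hunks above follow three themes: ptrace-gated files gain an audited and an unaudited variant so routine readlink traffic from tools like 'ps' neither floods the logs nor teaches learning mode a spurious CAP_SYS_PTRACE; per-pid directories are hidden from chrooted or policy-hidden viewers; and mem-style files are stamped with the opener's exec_id. A minimal sketch of the audited/unaudited dispatch, assuming only ptrace_may_access() and the PTRACE_MODE_* flags from <linux/ptrace.h>:

#include <linux/ptrace.h>
#include <linux/sched.h>

/* Sketch of the log/no-log access check added above. */
static int fd_access_allowed(struct task_struct *task, unsigned int log)
{
	unsigned int mode = PTRACE_MODE_READ;

	if (!log)
		mode |= PTRACE_MODE_NOAUDIT;	/* deny quietly: no LSM audit record */

	return ptrace_may_access(task, mode);
}
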
66052diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
66053index cbd82df..c0407d2 100644
66054--- a/fs/proc/cmdline.c
66055+++ b/fs/proc/cmdline.c
66056@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
66057
66058 static int __init proc_cmdline_init(void)
66059 {
66060+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66061+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
66062+#else
66063 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
66064+#endif
66065 return 0;
66066 }
66067 fs_initcall(proc_cmdline_init);
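
This #ifdef dispatch between proc_create() and the grsec-restricted proc_create_grsec() repeats verbatim for /proc/devices and /proc/interrupts below. A hypothetical wrapper that would fold the pattern into one place — proc_create_grsec() itself comes from the grsecurity side of this patch, and its prototype here is an assumption based on how it is called:

/* Hypothetical helper; not part of the patch itself. */
static inline struct proc_dir_entry *
proc_create_maybe_grsec(const char *name, umode_t mode,
			struct proc_dir_entry *parent,
			const struct file_operations *fops)
{
#ifdef CONFIG_GRKERNSEC_PROC_ADD
	return proc_create_grsec(name, mode, parent, fops);
#else
	return proc_create(name, mode, parent, fops);
#endif
}
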
66068diff --git a/fs/proc/devices.c b/fs/proc/devices.c
66069index 50493ed..248166b 100644
66070--- a/fs/proc/devices.c
66071+++ b/fs/proc/devices.c
66072@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
66073
66074 static int __init proc_devices_init(void)
66075 {
66076+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66077+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
66078+#else
66079 proc_create("devices", 0, NULL, &proc_devinfo_operations);
66080+#endif
66081 return 0;
66082 }
66083 fs_initcall(proc_devices_init);
66084diff --git a/fs/proc/fd.c b/fs/proc/fd.c
66085index 8e5ad83..1f07a8c 100644
66086--- a/fs/proc/fd.c
66087+++ b/fs/proc/fd.c
66088@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
66089 if (!task)
66090 return -ENOENT;
66091
66092- files = get_files_struct(task);
66093+ if (!gr_acl_handle_procpidmem(task))
66094+ files = get_files_struct(task);
66095 put_task_struct(task);
66096
66097 if (files) {
66098@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
66099 */
66100 int proc_fd_permission(struct inode *inode, int mask)
66101 {
66102+ struct task_struct *task;
66103 int rv = generic_permission(inode, mask);
66104- if (rv == 0)
66105- return 0;
66106+
66107 if (task_tgid(current) == proc_pid(inode))
66108 rv = 0;
66109+
66110+ task = get_proc_task(inode);
66111+ if (task == NULL)
66112+ return rv;
66113+
66114+ if (gr_acl_handle_procpidmem(task))
66115+ rv = -EACCES;
66116+
66117+ put_task_struct(task);
66118+
66119 return rv;
66120 }
66121
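
The reworked proc_fd_permission() above orders its checks deliberately: the generic mode-bit check runs first, a task is then always allowed at its own fd directory, and the grsec ACL gets the final veto. A condensed sketch of that ordering; gr_acl_handle_procpidmem() is the grsecurity hook (non-zero means deny):

/* Sketch of the permission ordering used above. */
int fd_permission(struct inode *inode, int mask)
{
	struct task_struct *task;
	int rv = generic_permission(inode, mask);	/* normal DAC check */

	if (task_tgid(current) == proc_pid(inode))
		rv = 0;				/* a task may always inspect its own fds */

	task = get_proc_task(inode);
	if (task) {
		if (gr_acl_handle_procpidmem(task))
			rv = -EACCES;		/* grsec ACL veto overrides both */
		put_task_struct(task);
	}
	return rv;
}
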
66122diff --git a/fs/proc/generic.c b/fs/proc/generic.c
66123index 7fea132..2923577 100644
66124--- a/fs/proc/generic.c
66125+++ b/fs/proc/generic.c
66126@@ -23,6 +23,7 @@
66127 #include <linux/bitops.h>
66128 #include <linux/spinlock.h>
66129 #include <linux/completion.h>
66130+#include <linux/grsecurity.h>
66131 #include <asm/uaccess.h>
66132
66133 #include "internal.h"
66134@@ -265,6 +266,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
66135 return proc_lookup_de(PDE(dir), dir, dentry);
66136 }
66137
66138+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
66139+ unsigned int flags)
66140+{
66141+ if (gr_proc_is_restricted())
66142+ return ERR_PTR(-EACCES);
66143+
66144+ return proc_lookup_de(PDE(dir), dir, dentry);
66145+}
66146+
66147 /*
66148 * This returns non-zero if at EOF, so that the /proc
66149 * root directory can use this and check if it should
66150@@ -322,6 +332,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
66151 return proc_readdir_de(PDE(inode), file, ctx);
66152 }
66153
66154+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
66155+{
66156+ struct inode *inode = file_inode(file);
66157+
66158+ if (gr_proc_is_restricted())
66159+ return -EACCES;
66160+
66161+ return proc_readdir_de(PDE(inode), file, ctx);
66162+}
66163+
66164 /*
66165 * These are the generic /proc directory operations. They
66166 * use the in-memory "struct proc_dir_entry" tree to parse
66167@@ -333,6 +353,12 @@ static const struct file_operations proc_dir_operations = {
66168 .iterate = proc_readdir,
66169 };
66170
66171+static const struct file_operations proc_dir_restricted_operations = {
66172+ .llseek = generic_file_llseek,
66173+ .read = generic_read_dir,
66174+ .iterate = proc_readdir_restrict,
66175+};
66176+
66177 /*
66178 * proc directories can do almost nothing..
66179 */
66180@@ -342,6 +368,12 @@ static const struct inode_operations proc_dir_inode_operations = {
66181 .setattr = proc_notify_change,
66182 };
66183
66184+static const struct inode_operations proc_dir_restricted_inode_operations = {
66185+ .lookup = proc_lookup_restrict,
66186+ .getattr = proc_getattr,
66187+ .setattr = proc_notify_change,
66188+};
66189+
66190 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
66191 {
66192 int ret;
66193@@ -351,8 +383,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
66194 return ret;
66195
66196 if (S_ISDIR(dp->mode)) {
66197- dp->proc_fops = &proc_dir_operations;
66198- dp->proc_iops = &proc_dir_inode_operations;
66199+ if (dp->restricted) {
66200+ dp->proc_fops = &proc_dir_restricted_operations;
66201+ dp->proc_iops = &proc_dir_restricted_inode_operations;
66202+ } else {
66203+ dp->proc_fops = &proc_dir_operations;
66204+ dp->proc_iops = &proc_dir_inode_operations;
66205+ }
66206 dir->nlink++;
66207 } else if (S_ISLNK(dp->mode)) {
66208 dp->proc_iops = &proc_link_inode_operations;
66209@@ -465,6 +502,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
66210 }
66211 EXPORT_SYMBOL_GPL(proc_mkdir_data);
66212
66213+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
66214+ struct proc_dir_entry *parent, void *data)
66215+{
66216+ struct proc_dir_entry *ent;
66217+
66218+ if (mode == 0)
66219+ mode = S_IRUGO | S_IXUGO;
66220+
66221+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
66222+ if (ent) {
66223+ ent->data = data;
66224+ ent->restricted = 1;
66225+ if (proc_register(parent, ent) < 0) {
66226+ kfree(ent);
66227+ ent = NULL;
66228+ }
66229+ }
66230+ return ent;
66231+}
66232+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
66233+
66234 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
66235 struct proc_dir_entry *parent)
66236 {
66237@@ -479,6 +537,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
66238 }
66239 EXPORT_SYMBOL(proc_mkdir);
66240
66241+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
66242+ struct proc_dir_entry *parent)
66243+{
66244+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
66245+}
66246+EXPORT_SYMBOL(proc_mkdir_restrict);
66247+
66248 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
66249 struct proc_dir_entry *parent,
66250 const struct file_operations *proc_fops,
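
With the restricted lookup/readdir operations above, any proc_dir_entry created with restricted == 1 answers -EACCES to callers that gr_proc_is_restricted() flags, before a single entry is exposed. Caller-side usage might look like the following sketch; "my_proto" and my_fops are illustrative names only:

/* Sketch: a restricted directory with one entry under it. */
static int __init my_proto_proc_init(void)
{
	struct proc_dir_entry *dir;

	dir = proc_mkdir_restrict("my_proto", NULL);
	if (!dir)
		return -ENOMEM;

	if (!proc_create_data("stats", S_IRUGO, dir, &my_fops, NULL)) {
		remove_proc_entry("my_proto", NULL);
		return -ENOMEM;
	}
	return 0;
}
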
66251diff --git a/fs/proc/inode.c b/fs/proc/inode.c
66252index 8420a2f..7b98f00 100644
66253--- a/fs/proc/inode.c
66254+++ b/fs/proc/inode.c
66255@@ -23,11 +23,17 @@
66256 #include <linux/slab.h>
66257 #include <linux/mount.h>
66258 #include <linux/magic.h>
66259+#include <linux/grsecurity.h>
66260
66261 #include <asm/uaccess.h>
66262
66263 #include "internal.h"
66264
66265+#ifdef CONFIG_PROC_SYSCTL
66266+extern const struct inode_operations proc_sys_inode_operations;
66267+extern const struct inode_operations proc_sys_dir_operations;
66268+#endif
66269+
66270 static void proc_evict_inode(struct inode *inode)
66271 {
66272 struct proc_dir_entry *de;
66273@@ -48,6 +54,13 @@ static void proc_evict_inode(struct inode *inode)
66274 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
66275 sysctl_head_put(head);
66276 }
66277+
66278+#ifdef CONFIG_PROC_SYSCTL
66279+ if (inode->i_op == &proc_sys_inode_operations ||
66280+ inode->i_op == &proc_sys_dir_operations)
66281+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
66282+#endif
66283+
66284 }
66285
66286 static struct kmem_cache * proc_inode_cachep;
66287@@ -405,7 +418,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
66288 if (de->mode) {
66289 inode->i_mode = de->mode;
66290 inode->i_uid = de->uid;
66291+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66292+ inode->i_gid = grsec_proc_gid;
66293+#else
66294 inode->i_gid = de->gid;
66295+#endif
66296 }
66297 if (de->size)
66298 inode->i_size = de->size;
66299diff --git a/fs/proc/internal.h b/fs/proc/internal.h
66300index 6fcdba5..d08b8f1 100644
66301--- a/fs/proc/internal.h
66302+++ b/fs/proc/internal.h
66303@@ -47,9 +47,10 @@ struct proc_dir_entry {
66304 struct completion *pde_unload_completion;
66305 struct list_head pde_openers; /* who did ->open, but not ->release */
66306 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
66307+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
66308 u8 namelen;
66309 char name[];
66310-};
66311+} __randomize_layout;
66312
66313 union proc_op {
66314 int (*proc_get_link)(struct dentry *, struct path *);
66315@@ -67,7 +68,7 @@ struct proc_inode {
66316 struct ctl_table *sysctl_entry;
66317 const struct proc_ns_operations *ns_ops;
66318 struct inode vfs_inode;
66319-};
66320+} __randomize_layout;
66321
66322 /*
66323 * General functions
66324@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
66325 struct pid *, struct task_struct *);
66326 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
66327 struct pid *, struct task_struct *);
66328+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66329+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
66330+ struct pid *, struct task_struct *);
66331+#endif
66332
66333 /*
66334 * base.c
66335@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
66336 * generic.c
66337 */
66338 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
66339+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
66340 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
66341 struct dentry *);
66342 extern int proc_readdir(struct file *, struct dir_context *);
66343+extern int proc_readdir_restrict(struct file *, struct dir_context *);
66344 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
66345
66346 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
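
The internal.h hunk tags struct proc_dir_entry and struct proc_inode with __randomize_layout, opting them into the structure-layout randomization plugin this patch carries. When the plugin is disabled, such a marker is typically stubbed to nothing so annotated code still builds — the fallback below is an assumption consistent with that convention, not a quote from the patch:

#ifndef __randomize_layout
#define __randomize_layout	/* plugin absent: annotation is a no-op */
#endif

struct example {
	unsigned long a;
	void *b;
	char name[16];
} __randomize_layout;		/* field order may be shuffled at build time */
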
66347diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
66348index a352d57..cb94a5c 100644
66349--- a/fs/proc/interrupts.c
66350+++ b/fs/proc/interrupts.c
66351@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
66352
66353 static int __init proc_interrupts_init(void)
66354 {
66355+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66356+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
66357+#else
66358 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
66359+#endif
66360 return 0;
66361 }
66362 fs_initcall(proc_interrupts_init);
66363diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
66364index 91a4e64..cb007c0 100644
66365--- a/fs/proc/kcore.c
66366+++ b/fs/proc/kcore.c
66367@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66368 * the addresses in the elf_phdr on our list.
66369 */
66370 start = kc_offset_to_vaddr(*fpos - elf_buflen);
66371- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
66372+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
66373+ if (tsz > buflen)
66374 tsz = buflen;
66375-
66376+
66377 while (buflen) {
66378 struct kcore_list *m;
66379
66380@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66381 kfree(elf_buf);
66382 } else {
66383 if (kern_addr_valid(start)) {
66384- unsigned long n;
66385+ char *elf_buf;
66386+ mm_segment_t oldfs;
66387
66388- n = copy_to_user(buffer, (char *)start, tsz);
66389- /*
66390- * We cannot distinguish between fault on source
66391- * and fault on destination. When this happens
66392- * we clear too and hope it will trigger the
66393- * EFAULT again.
66394- */
66395- if (n) {
66396- if (clear_user(buffer + tsz - n,
66397- n))
66398+ elf_buf = kmalloc(tsz, GFP_KERNEL);
66399+ if (!elf_buf)
66400+ return -ENOMEM;
66401+ oldfs = get_fs();
66402+ set_fs(KERNEL_DS);
66403+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
66404+ set_fs(oldfs);
66405+ if (copy_to_user(buffer, elf_buf, tsz)) {
66406+ kfree(elf_buf);
66407 return -EFAULT;
66408+ }
66409 }
66410+ set_fs(oldfs);
66411+ kfree(elf_buf);
66412 } else {
66413 if (clear_user(buffer, tsz))
66414 return -EFAULT;
66415@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66416
66417 static int open_kcore(struct inode *inode, struct file *filp)
66418 {
66419+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66420+ return -EPERM;
66421+#endif
66422 if (!capable(CAP_SYS_RAWIO))
66423 return -EPERM;
66424 if (kcore_need_update)
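
The read_kcore() rewrite above replaces a direct copy_to_user() from a kernel virtual address with a bounce buffer: the data is pulled into a kmalloc'd buffer under set_fs(KERNEL_DS), then copied out, so a fault on the kernel source is no longer conflated with a fault on the user destination. A condensed sketch of the pattern, assuming the pre-4.x set_fs() API this 3.19 tree still provides:

/* Sketch of the bounce-buffer copy used above. */
static ssize_t copy_kernel_range(char __user *dst, unsigned long src, size_t len)
{
	mm_segment_t oldfs;
	char *bounce;
	ssize_t ret = 0;

	bounce = kmalloc(len, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	oldfs = get_fs();
	set_fs(KERNEL_DS);	/* let __copy_from_user() read a kernel address */
	if (!__copy_from_user(bounce, (const void __user *)src, len)) {
		set_fs(oldfs);
		if (copy_to_user(dst, bounce, len))
			ret = -EFAULT;	/* the fault was on the user destination */
	} else {
		set_fs(oldfs);	/* unreadable source: silently copy nothing, as above */
	}
	kfree(bounce);
	return ret;
}
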
66425diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
66426index d3ebf2e..6ad42d1 100644
66427--- a/fs/proc/meminfo.c
66428+++ b/fs/proc/meminfo.c
66429@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
66430 vmi.used >> 10,
66431 vmi.largest_chunk >> 10
66432 #ifdef CONFIG_MEMORY_FAILURE
66433- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66434+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66435 #endif
66436 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
66437 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
66438diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
66439index d4a3574..b421ce9 100644
66440--- a/fs/proc/nommu.c
66441+++ b/fs/proc/nommu.c
66442@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
66443
66444 if (file) {
66445 seq_pad(m, ' ');
66446- seq_path(m, &file->f_path, "");
66447+ seq_path(m, &file->f_path, "\n\\");
66448 }
66449
66450 seq_putc(m, '\n');
66451diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
66452index 1bde894..22ac7eb 100644
66453--- a/fs/proc/proc_net.c
66454+++ b/fs/proc/proc_net.c
66455@@ -23,9 +23,27 @@
66456 #include <linux/nsproxy.h>
66457 #include <net/net_namespace.h>
66458 #include <linux/seq_file.h>
66459+#include <linux/grsecurity.h>
66460
66461 #include "internal.h"
66462
66463+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66464+static struct seq_operations *ipv6_seq_ops_addr;
66465+
66466+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
66467+{
66468+ ipv6_seq_ops_addr = addr;
66469+}
66470+
66471+void unregister_ipv6_seq_ops_addr(void)
66472+{
66473+ ipv6_seq_ops_addr = NULL;
66474+}
66475+
66476+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
66477+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
66478+#endif
66479+
66480 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
66481 {
66482 return pde->parent->data;
66483@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
66484 return maybe_get_net(PDE_NET(PDE(inode)));
66485 }
66486
66487+extern const struct seq_operations dev_seq_ops;
66488+
66489 int seq_open_net(struct inode *ino, struct file *f,
66490 const struct seq_operations *ops, int size)
66491 {
66492@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
66493
66494 BUG_ON(size < sizeof(*p));
66495
66496+ /* in restricted mode, only permit /proc/net/dev (and the registered IPv6 address ops) */
66497+ if (
66498+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66499+ ops != ipv6_seq_ops_addr &&
66500+#endif
66501+ ops != &dev_seq_ops && gr_proc_is_restricted())
66502+ return -EACCES;
66503+
66504 net = get_proc_net(ino);
66505 if (net == NULL)
66506 return -ENXIO;
66507@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
66508 int err;
66509 struct net *net;
66510
66511+ if (gr_proc_is_restricted())
66512+ return -EACCES;
66513+
66514 err = -ENXIO;
66515 net = get_proc_net(inode);
66516 if (net == NULL)
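
seq_open_net() above now refuses restricted callers unless the seq_operations belong to /proc/net/dev or to the dynamically registered IPv6 address table; the register/unregister shim exists because ipv6 can be built as a module, so proc_net.c cannot reference its ops table at link time. A sketch of the module-side hookup — the name if6_seq_ops is assumed from net/ipv6/addrconf.c, and the cast assumes this patch relaxes its const-ness elsewhere:

/* Sketch: the ipv6 module registering its address ops with the hook above. */
static int __init my_ipv6_proc_hook_init(void)
{
	register_ipv6_seq_ops_addr((struct seq_operations *)&if6_seq_ops);
	return 0;
}

static void __exit my_ipv6_proc_hook_exit(void)
{
	unregister_ipv6_seq_ops_addr();	/* drop the stale pointer before unload */
}
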
66517diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
66518index f92d5dd..26398ac 100644
66519--- a/fs/proc/proc_sysctl.c
66520+++ b/fs/proc/proc_sysctl.c
66521@@ -11,13 +11,21 @@
66522 #include <linux/namei.h>
66523 #include <linux/mm.h>
66524 #include <linux/module.h>
66525+#include <linux/nsproxy.h>
66526+#ifdef CONFIG_GRKERNSEC
66527+#include <net/net_namespace.h>
66528+#endif
66529 #include "internal.h"
66530
66531+extern int gr_handle_chroot_sysctl(const int op);
66532+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66533+ const int op);
66534+
66535 static const struct dentry_operations proc_sys_dentry_operations;
66536 static const struct file_operations proc_sys_file_operations;
66537-static const struct inode_operations proc_sys_inode_operations;
66538+const struct inode_operations proc_sys_inode_operations;
66539 static const struct file_operations proc_sys_dir_file_operations;
66540-static const struct inode_operations proc_sys_dir_operations;
66541+const struct inode_operations proc_sys_dir_operations;
66542
66543 void proc_sys_poll_notify(struct ctl_table_poll *poll)
66544 {
66545@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
66546
66547 err = NULL;
66548 d_set_d_op(dentry, &proc_sys_dentry_operations);
66549+
66550+ gr_handle_proc_create(dentry, inode);
66551+
66552 d_add(dentry, inode);
66553
66554 out:
66555@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66556 struct inode *inode = file_inode(filp);
66557 struct ctl_table_header *head = grab_header(inode);
66558 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
66559+ int op = write ? MAY_WRITE : MAY_READ;
66560 ssize_t error;
66561 size_t res;
66562
66563@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66564 * and won't be until we finish.
66565 */
66566 error = -EPERM;
66567- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
66568+ if (sysctl_perm(head, table, op))
66569 goto out;
66570
66571 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
66572@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66573 if (!table->proc_handler)
66574 goto out;
66575
66576+#ifdef CONFIG_GRKERNSEC
66577+ error = -EPERM;
66578+ if (gr_handle_chroot_sysctl(op))
66579+ goto out;
66580+ dget(filp->f_path.dentry);
66581+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
66582+ dput(filp->f_path.dentry);
66583+ goto out;
66584+ }
66585+ dput(filp->f_path.dentry);
66586+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
66587+ goto out;
66588+ if (write) {
66589+ if (current->nsproxy->net_ns != table->extra2) {
66590+ if (!capable(CAP_SYS_ADMIN))
66591+ goto out;
66592+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
66593+ goto out;
66594+ }
66595+#endif
66596+
66597 /* careful: calling conventions are nasty here */
66598 res = count;
66599 error = table->proc_handler(table, write, buf, &res, ppos);
66600@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
66601 return false;
66602 } else {
66603 d_set_d_op(child, &proc_sys_dentry_operations);
66604+
66605+ gr_handle_proc_create(child, inode);
66606+
66607 d_add(child, inode);
66608 }
66609 } else {
66610@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
66611 if ((*pos)++ < ctx->pos)
66612 return true;
66613
66614+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
66615+ return 0;
66616+
66617 if (unlikely(S_ISLNK(table->mode)))
66618 res = proc_sys_link_fill_cache(file, ctx, head, table);
66619 else
66620@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
66621 if (IS_ERR(head))
66622 return PTR_ERR(head);
66623
66624+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
66625+ return -ENOENT;
66626+
66627 generic_fillattr(inode, stat);
66628 if (table)
66629 stat->mode = (stat->mode & S_IFMT) | table->mode;
66630@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
66631 .llseek = generic_file_llseek,
66632 };
66633
66634-static const struct inode_operations proc_sys_inode_operations = {
66635+const struct inode_operations proc_sys_inode_operations = {
66636 .permission = proc_sys_permission,
66637 .setattr = proc_sys_setattr,
66638 .getattr = proc_sys_getattr,
66639 };
66640
66641-static const struct inode_operations proc_sys_dir_operations = {
66642+const struct inode_operations proc_sys_dir_operations = {
66643 .lookup = proc_sys_lookup,
66644 .permission = proc_sys_permission,
66645 .setattr = proc_sys_setattr,
66646@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
66647 static struct ctl_dir *new_dir(struct ctl_table_set *set,
66648 const char *name, int namelen)
66649 {
66650- struct ctl_table *table;
66651+ ctl_table_no_const *table;
66652 struct ctl_dir *new;
66653 struct ctl_node *node;
66654 char *new_name;
66655@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
66656 return NULL;
66657
66658 node = (struct ctl_node *)(new + 1);
66659- table = (struct ctl_table *)(node + 1);
66660+ table = (ctl_table_no_const *)(node + 1);
66661 new_name = (char *)(table + 2);
66662 memcpy(new_name, name, namelen);
66663 new_name[namelen] = '\0';
66664@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
66665 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
66666 struct ctl_table_root *link_root)
66667 {
66668- struct ctl_table *link_table, *entry, *link;
66669+ ctl_table_no_const *link_table, *link;
66670+ struct ctl_table *entry;
66671 struct ctl_table_header *links;
66672 struct ctl_node *node;
66673 char *link_name;
66674@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
66675 return NULL;
66676
66677 node = (struct ctl_node *)(links + 1);
66678- link_table = (struct ctl_table *)(node + nr_entries);
66679+ link_table = (ctl_table_no_const *)(node + nr_entries);
66680 link_name = (char *)&link_table[nr_entries + 1];
66681
66682 for (link = link_table, entry = table; entry->procname; link++, entry++) {
66683@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66684 struct ctl_table_header ***subheader, struct ctl_table_set *set,
66685 struct ctl_table *table)
66686 {
66687- struct ctl_table *ctl_table_arg = NULL;
66688- struct ctl_table *entry, *files;
66689+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
66690+ struct ctl_table *entry;
66691 int nr_files = 0;
66692 int nr_dirs = 0;
66693 int err = -ENOMEM;
66694@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66695 nr_files++;
66696 }
66697
66698- files = table;
66699 /* If there are mixed files and directories we need a new table */
66700 if (nr_dirs && nr_files) {
66701- struct ctl_table *new;
66702+ ctl_table_no_const *new;
66703 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
66704 GFP_KERNEL);
66705 if (!files)
66706@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66707 /* Register everything except a directory full of subdirectories */
66708 if (nr_files || !nr_dirs) {
66709 struct ctl_table_header *header;
66710- header = __register_sysctl_table(set, path, files);
66711+ header = __register_sysctl_table(set, path, files ? files : table);
66712 if (!header) {
66713 kfree(ctl_table_arg);
66714 goto out;
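
proc_sys_call_handler() above layers four gates in front of the handler: the chroot sysctl policy, the per-name modification policy, the grsec open ACL, and finally a capability rule demanding CAP_SYS_ADMIN for writes touching a net namespace other than the caller's own (CAP_NET_ADMIN suffices inside it). A compressed sketch of that last rule; table->extra2 carrying the owning struct net * is how this patch arranges net sysctls elsewhere, assumed here:

/* Sketch of the namespace-aware write gate above. */
static int may_write_net_sysctl(struct ctl_table *table)
{
	struct net *owner = table->extra2;	/* assumed: set at registration */

	if (current->nsproxy->net_ns != owner)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;	/* foreign netns */

	return ns_capable(owner->user_ns, CAP_NET_ADMIN) ? 0 : -EPERM;
}
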
66715diff --git a/fs/proc/root.c b/fs/proc/root.c
66716index e74ac9f..35e89f4 100644
66717--- a/fs/proc/root.c
66718+++ b/fs/proc/root.c
66719@@ -188,7 +188,15 @@ void __init proc_root_init(void)
66720 proc_mkdir("openprom", NULL);
66721 #endif
66722 proc_tty_init();
66723+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66724+#ifdef CONFIG_GRKERNSEC_PROC_USER
66725+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
66726+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66727+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
66728+#endif
66729+#else
66730 proc_mkdir("bus", NULL);
66731+#endif
66732 proc_sys_init();
66733 }
66734
66735diff --git a/fs/proc/stat.c b/fs/proc/stat.c
66736index 510413eb..34d9a8c 100644
66737--- a/fs/proc/stat.c
66738+++ b/fs/proc/stat.c
66739@@ -11,6 +11,7 @@
66740 #include <linux/irqnr.h>
66741 #include <linux/cputime.h>
66742 #include <linux/tick.h>
66743+#include <linux/grsecurity.h>
66744
66745 #ifndef arch_irq_stat_cpu
66746 #define arch_irq_stat_cpu(cpu) 0
66747@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
66748 u64 sum_softirq = 0;
66749 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
66750 struct timespec boottime;
66751+ int unrestricted = 1;
66752+
66753+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66754+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66755+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
66756+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66757+ && !in_group_p(grsec_proc_gid)
66758+#endif
66759+ )
66760+ unrestricted = 0;
66761+#endif
66762+#endif
66763
66764 user = nice = system = idle = iowait =
66765 irq = softirq = steal = 0;
66766@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
66767 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66768 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66769 idle += get_idle_time(i);
66770- iowait += get_iowait_time(i);
66771- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66772- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66773- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66774- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66775- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66776- sum += kstat_cpu_irqs_sum(i);
66777- sum += arch_irq_stat_cpu(i);
66778+ if (unrestricted) {
66779+ iowait += get_iowait_time(i);
66780+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66781+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66782+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66783+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66784+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66785+ sum += kstat_cpu_irqs_sum(i);
66786+ sum += arch_irq_stat_cpu(i);
66787+ for (j = 0; j < NR_SOFTIRQS; j++) {
66788+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66789
66790- for (j = 0; j < NR_SOFTIRQS; j++) {
66791- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66792-
66793- per_softirq_sums[j] += softirq_stat;
66794- sum_softirq += softirq_stat;
66795+ per_softirq_sums[j] += softirq_stat;
66796+ sum_softirq += softirq_stat;
66797+ }
66798 }
66799 }
66800- sum += arch_irq_stat();
66801+ if (unrestricted)
66802+ sum += arch_irq_stat();
66803
66804 seq_puts(p, "cpu ");
66805 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66806@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
66807 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66808 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66809 idle = get_idle_time(i);
66810- iowait = get_iowait_time(i);
66811- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66812- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66813- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66814- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66815- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66816+ if (unrestricted) {
66817+ iowait = get_iowait_time(i);
66818+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66819+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66820+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66821+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66822+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66823+ }
66824 seq_printf(p, "cpu%d", i);
66825 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66826 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
66827@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
66828
66829 /* sum again ? it could be updated? */
66830 for_each_irq_nr(j)
66831- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
66832+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
66833
66834 seq_printf(p,
66835 "\nctxt %llu\n"
66836@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
66837 "processes %lu\n"
66838 "procs_running %lu\n"
66839 "procs_blocked %lu\n",
66840- nr_context_switches(),
66841+ unrestricted ? nr_context_switches() : 0ULL,
66842 (unsigned long)jif,
66843- total_forks,
66844- nr_running(),
66845- nr_iowait());
66846+ unrestricted ? total_forks : 0UL,
66847+ unrestricted ? nr_running() : 0UL,
66848+ unrestricted ? nr_iowait() : 0UL);
66849
66850 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
66851
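
Rather than hiding /proc/stat outright, show_stat() above computes an unrestricted flag once and reports the sensitive counters (irq totals, context switches, fork counts) as zero for everyone else, so tools like top keep parsing successfully. A sketch of the gate under both config options (the patch additionally wraps it all in CONFIG_GRKERNSEC_PROC_ADD); grsec_proc_gid is the group this patch introduces:

/* Sketch of the /proc/stat gate: root always passes; with the USERGROUP
 * option, membership in grsec_proc_gid also passes. */
static int proc_stat_unrestricted(void)
{
#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
	if (uid_eq(current_uid(), GLOBAL_ROOT_UID))
		return 1;
#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
	if (in_group_p(grsec_proc_gid))
		return 1;
#endif
	return 0;
#else
	return 1;
#endif
}
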
66852diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
66853index 88f9b83..314064c 100644
66854--- a/fs/proc/task_mmu.c
66855+++ b/fs/proc/task_mmu.c
66856@@ -13,12 +13,19 @@
66857 #include <linux/swap.h>
66858 #include <linux/swapops.h>
66859 #include <linux/mmu_notifier.h>
66860+#include <linux/grsecurity.h>
66861
66862 #include <asm/elf.h>
66863 #include <asm/uaccess.h>
66864 #include <asm/tlbflush.h>
66865 #include "internal.h"
66866
66867+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66868+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66869+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66870+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66871+#endif
66872+
66873 void task_mem(struct seq_file *m, struct mm_struct *mm)
66874 {
66875 unsigned long data, text, lib, swap;
66876@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66877 "VmExe:\t%8lu kB\n"
66878 "VmLib:\t%8lu kB\n"
66879 "VmPTE:\t%8lu kB\n"
66880- "VmSwap:\t%8lu kB\n",
66881- hiwater_vm << (PAGE_SHIFT-10),
66882+ "VmSwap:\t%8lu kB\n"
66883+
66884+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66885+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
66886+#endif
66887+
66888+ ,hiwater_vm << (PAGE_SHIFT-10),
66889 total_vm << (PAGE_SHIFT-10),
66890 mm->locked_vm << (PAGE_SHIFT-10),
66891 mm->pinned_vm << (PAGE_SHIFT-10),
66892@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66893 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
66894 (PTRS_PER_PTE * sizeof(pte_t) *
66895 atomic_long_read(&mm->nr_ptes)) >> 10,
66896- swap << (PAGE_SHIFT-10));
66897+ swap << (PAGE_SHIFT-10)
66898+
66899+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66900+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66901+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
66902+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
66903+#else
66904+ , mm->context.user_cs_base
66905+ , mm->context.user_cs_limit
66906+#endif
66907+#endif
66908+
66909+ );
66910 }
66911
66912 unsigned long task_vsize(struct mm_struct *mm)
66913@@ -282,13 +306,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66914 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
66915 }
66916
66917- /* We don't show the stack guard page in /proc/maps */
66918+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66919+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
66920+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
66921+#else
66922 start = vma->vm_start;
66923- if (stack_guard_page_start(vma, start))
66924- start += PAGE_SIZE;
66925 end = vma->vm_end;
66926- if (stack_guard_page_end(vma, end))
66927- end -= PAGE_SIZE;
66928+#endif
66929
66930 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
66931 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
66932@@ -298,7 +322,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66933 flags & VM_WRITE ? 'w' : '-',
66934 flags & VM_EXEC ? 'x' : '-',
66935 flags & VM_MAYSHARE ? 's' : 'p',
66936+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66937+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
66938+#else
66939 pgoff,
66940+#endif
66941 MAJOR(dev), MINOR(dev), ino);
66942
66943 /*
66944@@ -307,7 +335,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66945 */
66946 if (file) {
66947 seq_pad(m, ' ');
66948- seq_path(m, &file->f_path, "\n");
66949+ seq_path(m, &file->f_path, "\n\\");
66950 goto done;
66951 }
66952
66953@@ -338,8 +366,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66954 * Thread stack in /proc/PID/task/TID/maps or
66955 * the main process stack.
66956 */
66957- if (!is_pid || (vma->vm_start <= mm->start_stack &&
66958- vma->vm_end >= mm->start_stack)) {
66959+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
66960+ (vma->vm_start <= mm->start_stack &&
66961+ vma->vm_end >= mm->start_stack)) {
66962 name = "[stack]";
66963 } else {
66964 /* Thread stack in /proc/PID/maps */
66965@@ -359,6 +388,12 @@ done:
66966
66967 static int show_map(struct seq_file *m, void *v, int is_pid)
66968 {
66969+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66970+ if (current->exec_id != m->exec_id) {
66971+ gr_log_badprocpid("maps");
66972+ return 0;
66973+ }
66974+#endif
66975 show_map_vma(m, v, is_pid);
66976 m_cache_vma(m, v);
66977 return 0;
66978@@ -629,12 +664,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
66979 .private = &mss,
66980 };
66981
66982+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66983+ if (current->exec_id != m->exec_id) {
66984+ gr_log_badprocpid("smaps");
66985+ return 0;
66986+ }
66987+#endif
66988 memset(&mss, 0, sizeof mss);
66989- mss.vma = vma;
66990- /* mmap_sem is held in m_start */
66991- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
66992- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
66993-
66994+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66995+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
66996+#endif
66997+ mss.vma = vma;
66998+ /* mmap_sem is held in m_start */
66999+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
67000+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
67001+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67002+ }
67003+#endif
67004 show_map_vma(m, vma, is_pid);
67005
67006 seq_printf(m,
67007@@ -652,7 +698,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67008 "KernelPageSize: %8lu kB\n"
67009 "MMUPageSize: %8lu kB\n"
67010 "Locked: %8lu kB\n",
67011+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67012+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
67013+#else
67014 (vma->vm_end - vma->vm_start) >> 10,
67015+#endif
67016 mss.resident >> 10,
67017 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
67018 mss.shared_clean >> 10,
67019@@ -1486,6 +1536,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67020 char buffer[64];
67021 int nid;
67022
67023+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67024+ if (current->exec_id != m->exec_id) {
67025+ gr_log_badprocpid("numa_maps");
67026+ return 0;
67027+ }
67028+#endif
67029+
67030 if (!mm)
67031 return 0;
67032
67033@@ -1507,11 +1564,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67034 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
67035 }
67036
67037+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67038+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
67039+#else
67040 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
67041+#endif
67042
67043 if (file) {
67044 seq_puts(m, " file=");
67045- seq_path(m, &file->f_path, "\n\t= ");
67046+ seq_path(m, &file->f_path, "\n\t\\= ");
67047 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
67048 seq_puts(m, " heap");
67049 } else {
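
Two defenses recur through task_mmu.c above: PAX_RAND_FLAGS() blanks mapping addresses when a randomized foreign mm is being inspected, and the exec_id comparison invalidates a maps/smaps/numa_maps descriptor that survived an exec, so a pre-exec fd cannot read the post-exec layout. The check as a reusable guard — gr_log_badprocpid() and the exec_id fields on task_struct and seq_file are additions made elsewhere in this patch:

/* Sketch of the stale-descriptor guard used in show_map()/show_smap() above. */
static int seq_exec_id_stale(struct seq_file *m, const char *resource)
{
#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
	if (current->exec_id != m->exec_id) {
		gr_log_badprocpid(resource);	/* reader's exec_id no longer matches the opener's */
		return 1;			/* caller emits nothing */
	}
#endif
	return 0;
}
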
67050diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
67051index 599ec2e..f1413ae 100644
67052--- a/fs/proc/task_nommu.c
67053+++ b/fs/proc/task_nommu.c
67054@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67055 else
67056 bytes += kobjsize(mm);
67057
67058- if (current->fs && current->fs->users > 1)
67059+ if (current->fs && atomic_read(&current->fs->users) > 1)
67060 sbytes += kobjsize(current->fs);
67061 else
67062 bytes += kobjsize(current->fs);
67063@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
67064
67065 if (file) {
67066 seq_pad(m, ' ');
67067- seq_path(m, &file->f_path, "");
67068+ seq_path(m, &file->f_path, "\n\\");
67069 } else if (mm) {
67070 pid_t tid = pid_of_stack(priv, vma, is_pid);
67071
67072diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
67073index a90d6d35..d08047c 100644
67074--- a/fs/proc/vmcore.c
67075+++ b/fs/proc/vmcore.c
67076@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
67077 nr_bytes = count;
67078
67079 /* If pfn is not ram, return zeros for sparse dump files */
67080- if (pfn_is_ram(pfn) == 0)
67081- memset(buf, 0, nr_bytes);
67082- else {
67083+ if (pfn_is_ram(pfn) == 0) {
67084+ if (userbuf) {
67085+ if (clear_user((char __force_user *)buf, nr_bytes))
67086+ return -EFAULT;
67087+ } else
67088+ memset(buf, 0, nr_bytes);
67089+ } else {
67090 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
67091 offset, userbuf);
67092 if (tmp < 0)
67093@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
67094 static int copy_to(void *target, void *src, size_t size, int userbuf)
67095 {
67096 if (userbuf) {
67097- if (copy_to_user((char __user *) target, src, size))
67098+ if (copy_to_user((char __force_user *) target, src, size))
67099 return -EFAULT;
67100 } else {
67101 memcpy(target, src, size);
67102@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67103 if (*fpos < m->offset + m->size) {
67104 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
67105 start = m->paddr + *fpos - m->offset;
67106- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
67107+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
67108 if (tmp < 0)
67109 return tmp;
67110 buflen -= tsz;
67111@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67112 static ssize_t read_vmcore(struct file *file, char __user *buffer,
67113 size_t buflen, loff_t *fpos)
67114 {
67115- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
67116+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
67117 }
67118
67119 /*
67120diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
67121index d3fb2b6..43a8140 100644
67122--- a/fs/qnx6/qnx6.h
67123+++ b/fs/qnx6/qnx6.h
67124@@ -74,7 +74,7 @@ enum {
67125 BYTESEX_BE,
67126 };
67127
67128-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67129+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67130 {
67131 if (sbi->s_bytesex == BYTESEX_LE)
67132 return le64_to_cpu((__force __le64)n);
67133@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
67134 return (__force __fs64)cpu_to_be64(n);
67135 }
67136
67137-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67138+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67139 {
67140 if (sbi->s_bytesex == BYTESEX_LE)
67141 return le32_to_cpu((__force __le32)n);
67142diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
67143index bb2869f..d34ada8 100644
67144--- a/fs/quota/netlink.c
67145+++ b/fs/quota/netlink.c
67146@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
67147 void quota_send_warning(struct kqid qid, dev_t dev,
67148 const char warntype)
67149 {
67150- static atomic_t seq;
67151+ static atomic_unchecked_t seq;
67152 struct sk_buff *skb;
67153 void *msg_head;
67154 int ret;
67155@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
67156 "VFS: Not enough memory to send quota warning.\n");
67157 return;
67158 }
67159- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
67160+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
67161 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
67162 if (!msg_head) {
67163 printk(KERN_ERR
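
The quota change above is one instance of a pattern applied throughout this patch: counters whose wraparound is harmless — here a netlink sequence number — move to atomic_unchecked_t so the PaX REFCOUNT overflow detector does not trap on them, while genuine reference counts keep the checked type. A sketch of the distinction; with the feature disabled, atomic_unchecked_t is normally just an alias for atomic_t:

static atomic_t obj_refs = ATOMIC_INIT(1);		/* overflow would be a bug: trap it */
static atomic_unchecked_t msg_seq = ATOMIC_INIT(0);	/* wraparound is fine: don't trap */

static u32 next_seq(void)
{
	return atomic_add_return_unchecked(1, &msg_seq);	/* unchecked increment */
}
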
67164diff --git a/fs/read_write.c b/fs/read_write.c
67165index c0805c93..d39f2eb 100644
67166--- a/fs/read_write.c
67167+++ b/fs/read_write.c
67168@@ -507,7 +507,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
67169
67170 old_fs = get_fs();
67171 set_fs(get_ds());
67172- p = (__force const char __user *)buf;
67173+ p = (const char __force_user *)buf;
67174 if (count > MAX_RW_COUNT)
67175 count = MAX_RW_COUNT;
67176 if (file->f_op->write)
67177diff --git a/fs/readdir.c b/fs/readdir.c
67178index ced6791..936687b 100644
67179--- a/fs/readdir.c
67180+++ b/fs/readdir.c
67181@@ -18,6 +18,7 @@
67182 #include <linux/security.h>
67183 #include <linux/syscalls.h>
67184 #include <linux/unistd.h>
67185+#include <linux/namei.h>
67186
67187 #include <asm/uaccess.h>
67188
67189@@ -71,6 +72,7 @@ struct old_linux_dirent {
67190 struct readdir_callback {
67191 struct dir_context ctx;
67192 struct old_linux_dirent __user * dirent;
67193+ struct file * file;
67194 int result;
67195 };
67196
67197@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
67198 buf->result = -EOVERFLOW;
67199 return -EOVERFLOW;
67200 }
67201+
67202+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67203+ return 0;
67204+
67205 buf->result++;
67206 dirent = buf->dirent;
67207 if (!access_ok(VERIFY_WRITE, dirent,
67208@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
67209 if (!f.file)
67210 return -EBADF;
67211
67212+ buf.file = f.file;
67213 error = iterate_dir(f.file, &buf.ctx);
67214 if (buf.result)
67215 error = buf.result;
67216@@ -145,6 +152,7 @@ struct getdents_callback {
67217 struct dir_context ctx;
67218 struct linux_dirent __user * current_dir;
67219 struct linux_dirent __user * previous;
67220+ struct file * file;
67221 int count;
67222 int error;
67223 };
67224@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
67225 buf->error = -EOVERFLOW;
67226 return -EOVERFLOW;
67227 }
67228+
67229+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67230+ return 0;
67231+
67232 dirent = buf->previous;
67233 if (dirent) {
67234 if (__put_user(offset, &dirent->d_off))
67235@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
67236 if (!f.file)
67237 return -EBADF;
67238
67239+ buf.file = f.file;
67240 error = iterate_dir(f.file, &buf.ctx);
67241 if (error >= 0)
67242 error = buf.error;
67243@@ -230,6 +243,7 @@ struct getdents_callback64 {
67244 struct dir_context ctx;
67245 struct linux_dirent64 __user * current_dir;
67246 struct linux_dirent64 __user * previous;
67247+ struct file *file;
67248 int count;
67249 int error;
67250 };
67251@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
67252 buf->error = -EINVAL; /* only used if we fail.. */
67253 if (reclen > buf->count)
67254 return -EINVAL;
67255+
67256+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67257+ return 0;
67258+
67259 dirent = buf->previous;
67260 if (dirent) {
67261 if (__put_user(offset, &dirent->d_off))
67262@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
67263 if (!f.file)
67264 return -EBADF;
67265
67266+ buf.file = f.file;
67267 error = iterate_dir(f.file, &buf.ctx);
67268 if (error >= 0)
67269 error = buf.error;
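
Each readdir callback above gains a struct file pointer so gr_acl_handle_filldir() can evaluate the ACL against the directory actually being listed, silently skipping entries the policy hides (returning 0 keeps iteration going rather than aborting it). A sketch of the context threading the three getdents flavors use:

/* Sketch: threading the open directory into a dir_context callback. */
struct filtered_readdir {
	struct dir_context ctx;		/* must stay first: callbacks receive &ctx */
	struct file *file;		/* the directory being listed */
};

static int filtered_fill(struct dir_context *ctx, const char *name, int namlen,
			 loff_t offset, u64 ino, unsigned int d_type)
{
	struct filtered_readdir *buf =
		container_of(ctx, struct filtered_readdir, ctx);

	if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
		return 0;		/* hide this entry, keep iterating */

	/* ... emit the entry to the user buffer here ... */
	return 0;
}
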
67270diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
67271index 9c02d96..6562c10 100644
67272--- a/fs/reiserfs/do_balan.c
67273+++ b/fs/reiserfs/do_balan.c
67274@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
67275 return;
67276 }
67277
67278- atomic_inc(&fs_generation(tb->tb_sb));
67279+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
67280 do_balance_starts(tb);
67281
67282 /*
67283diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
67284index aca73dd..e3c558d 100644
67285--- a/fs/reiserfs/item_ops.c
67286+++ b/fs/reiserfs/item_ops.c
67287@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
67288 }
67289
67290 static struct item_operations errcatch_ops = {
67291- errcatch_bytes_number,
67292- errcatch_decrement_key,
67293- errcatch_is_left_mergeable,
67294- errcatch_print_item,
67295- errcatch_check_item,
67296+ .bytes_number = errcatch_bytes_number,
67297+ .decrement_key = errcatch_decrement_key,
67298+ .is_left_mergeable = errcatch_is_left_mergeable,
67299+ .print_item = errcatch_print_item,
67300+ .check_item = errcatch_check_item,
67301
67302- errcatch_create_vi,
67303- errcatch_check_left,
67304- errcatch_check_right,
67305- errcatch_part_size,
67306- errcatch_unit_num,
67307- errcatch_print_vi
67308+ .create_vi = errcatch_create_vi,
67309+ .check_left = errcatch_check_left,
67310+ .check_right = errcatch_check_right,
67311+ .part_size = errcatch_part_size,
67312+ .unit_num = errcatch_unit_num,
67313+ .print_vi = errcatch_print_vi
67314 };
67315
67316 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
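
The errcatch_ops conversion above swaps positional struct initialization for C99 designated initializers. That matters once structures can be layout-randomized (see the __randomize_layout tags earlier in this patch): positional initializers bind values to whatever field happens to sit in each slot, while designated ones bind by name and survive reordering. A minimal self-contained illustration:

/* Why designated initializers survive field reordering. */
struct ops {
	int (*open)(void);
	int (*close)(void);	/* imagine the plugin swapping these two fields */
};

static int my_open(void)  { return 0; }
static int my_close(void) { return 0; }

static struct ops good = {
	.open  = my_open,	/* bound by name: immune to reordering */
	.close = my_close,
};
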
67317diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
67318index 621b9f3..af527fd 100644
67319--- a/fs/reiserfs/procfs.c
67320+++ b/fs/reiserfs/procfs.c
67321@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
67322 "SMALL_TAILS " : "NO_TAILS ",
67323 replay_only(sb) ? "REPLAY_ONLY " : "",
67324 convert_reiserfs(sb) ? "CONV " : "",
67325- atomic_read(&r->s_generation_counter),
67326+ atomic_read_unchecked(&r->s_generation_counter),
67327 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
67328 SF(s_do_balance), SF(s_unneeded_left_neighbor),
67329 SF(s_good_search_by_key_reada), SF(s_bmaps),
67330diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
67331index bb79cdd..fcf49ef 100644
67332--- a/fs/reiserfs/reiserfs.h
67333+++ b/fs/reiserfs/reiserfs.h
67334@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
67335 /* Comment? -Hans */
67336 wait_queue_head_t s_wait;
67337 /* increased by one every time the tree gets re-balanced */
67338- atomic_t s_generation_counter;
67339+ atomic_unchecked_t s_generation_counter;
67340
67341 /* File system properties. Currently holds on-disk FS format */
67342 unsigned long s_properties;
67343@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67344 #define REISERFS_USER_MEM 1 /* user memory mode */
67345
67346 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67347-#define get_generation(s) atomic_read (&fs_generation(s))
67348+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67349 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67350 #define __fs_changed(gen,s) (gen != get_generation (s))
67351 #define fs_changed(gen,s) \
67352diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
67353index 71fbbe3..eff29ba 100644
67354--- a/fs/reiserfs/super.c
67355+++ b/fs/reiserfs/super.c
67356@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
67357 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
67358 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
67359 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
67360+#ifdef CONFIG_REISERFS_FS_XATTR
67361+ /* turn on user xattrs by default */
67362+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
67363+#endif
67364 /* no preallocation minimum, be smart in reiserfs_file_write instead */
67365 sbi->s_alloc_options.preallocmin = 0;
67366 /* Preallocate by 16 blocks (17-1) at once */
67367diff --git a/fs/select.c b/fs/select.c
67368index 467bb1c..cf9d65a 100644
67369--- a/fs/select.c
67370+++ b/fs/select.c
67371@@ -20,6 +20,7 @@
67372 #include <linux/export.h>
67373 #include <linux/slab.h>
67374 #include <linux/poll.h>
67375+#include <linux/security.h>
67376 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
67377 #include <linux/file.h>
67378 #include <linux/fdtable.h>
67379@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
67380 struct poll_list *walk = head;
67381 unsigned long todo = nfds;
67382
67383+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
67384 if (nfds > rlimit(RLIMIT_NOFILE))
67385 return -EINVAL;
67386
67387diff --git a/fs/seq_file.c b/fs/seq_file.c
67388index dbf3a59..daf023f 100644
67389--- a/fs/seq_file.c
67390+++ b/fs/seq_file.c
67391@@ -12,6 +12,8 @@
67392 #include <linux/slab.h>
67393 #include <linux/cred.h>
67394 #include <linux/mm.h>
67395+#include <linux/sched.h>
67396+#include <linux/grsecurity.h>
67397
67398 #include <asm/uaccess.h>
67399 #include <asm/page.h>
67400@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
67401
67402 static void *seq_buf_alloc(unsigned long size)
67403 {
67404- void *buf;
67405-
67406- /*
67407- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
67408- * it's better to fall back to vmalloc() than to kill things.
67409- */
67410- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
67411- if (!buf && size > PAGE_SIZE)
67412- buf = vmalloc(size);
67413- return buf;
67414+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
67415 }
67416
67417 /**
67418@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
67419 #ifdef CONFIG_USER_NS
67420 p->user_ns = file->f_cred->user_ns;
67421 #endif
67422+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67423+ p->exec_id = current->exec_id;
67424+#endif
67425
67426 /*
67427 * Wrappers around seq_open(e.g. swaps_open) need to be
67428@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
67429 }
67430 EXPORT_SYMBOL(seq_open);
67431
67432+
67433+int seq_open_restrict(struct file *file, const struct seq_operations *op)
67434+{
67435+ if (gr_proc_is_restricted())
67436+ return -EACCES;
67437+
67438+ return seq_open(file, op);
67439+}
67440+EXPORT_SYMBOL(seq_open_restrict);
67441+
67442 static int traverse(struct seq_file *m, loff_t offset)
67443 {
67444 loff_t pos = 0, index;
67445@@ -158,7 +164,7 @@ Eoverflow:
67446 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
67447 {
67448 struct seq_file *m = file->private_data;
67449- size_t copied = 0;
67450+ ssize_t copied = 0;
67451 loff_t pos;
67452 size_t n;
67453 void *p;
67454@@ -589,7 +595,7 @@ static void single_stop(struct seq_file *p, void *v)
67455 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
67456 void *data)
67457 {
67458- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
67459+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
67460 int res = -ENOMEM;
67461
67462 if (op) {
67463@@ -625,6 +631,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
67464 }
67465 EXPORT_SYMBOL(single_open_size);
67466
67467+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
67468+ void *data)
67469+{
67470+ if (gr_proc_is_restricted())
67471+ return -EACCES;
67472+
67473+ return single_open(file, show, data);
67474+}
67475+EXPORT_SYMBOL(single_open_restrict);
67476+
67477+
67478 int single_release(struct inode *inode, struct file *file)
67479 {
67480 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
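The seq_file hunks export new seq_open_restrict()/single_open_restrict() wrappers that return -EACCES when gr_proc_is_restricted() denies the caller, alongside a hardened seq_buf_alloc() and an exec_id stamp used by GRKERNSEC_PROC_MEMMAP. A hedged sketch of how a proc entry could adopt the restricted open on a grsecurity 3.19 kernel (the *_restrict symbols exist only with this patch; the "grsec_demo" name is invented):

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int demo_show(struct seq_file *m, void *v)
    {
        seq_puts(m, "hello\n");
        return 0;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
        /* Deny restricted tasks up front instead of filtering output. */
        return single_open_restrict(file, demo_show, NULL);
    }

    static const struct file_operations demo_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };

    static int __init demo_init(void)
    {
        return proc_create("grsec_demo", 0444, NULL, &demo_fops) ? 0 : -ENOMEM;
    }
    static void __exit demo_exit(void) { remove_proc_entry("grsec_demo", NULL); }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");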
67481diff --git a/fs/splice.c b/fs/splice.c
67482index 75c6058..770d40c 100644
67483--- a/fs/splice.c
67484+++ b/fs/splice.c
67485@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67486 pipe_lock(pipe);
67487
67488 for (;;) {
67489- if (!pipe->readers) {
67490+ if (!atomic_read(&pipe->readers)) {
67491 send_sig(SIGPIPE, current, 0);
67492 if (!ret)
67493 ret = -EPIPE;
67494@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67495 page_nr++;
67496 ret += buf->len;
67497
67498- if (pipe->files)
67499+ if (atomic_read(&pipe->files))
67500 do_wakeup = 1;
67501
67502 if (!--spd->nr_pages)
67503@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67504 do_wakeup = 0;
67505 }
67506
67507- pipe->waiting_writers++;
67508+ atomic_inc(&pipe->waiting_writers);
67509 pipe_wait(pipe);
67510- pipe->waiting_writers--;
67511+ atomic_dec(&pipe->waiting_writers);
67512 }
67513
67514 pipe_unlock(pipe);
67515@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
67516 old_fs = get_fs();
67517 set_fs(get_ds());
67518 /* The cast to a user pointer is valid due to the set_fs() */
67519- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
67520+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
67521 set_fs(old_fs);
67522
67523 return res;
67524@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
67525 old_fs = get_fs();
67526 set_fs(get_ds());
67527 /* The cast to a user pointer is valid due to the set_fs() */
67528- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
67529+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
67530 set_fs(old_fs);
67531
67532 return res;
67533@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
67534 goto err;
67535
67536 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
67537- vec[i].iov_base = (void __user *) page_address(page);
67538+ vec[i].iov_base = (void __force_user *) page_address(page);
67539 vec[i].iov_len = this_len;
67540 spd.pages[i] = page;
67541 spd.nr_pages++;
67542@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67543 ops->release(pipe, buf);
67544 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67545 pipe->nrbufs--;
67546- if (pipe->files)
67547+ if (atomic_read(&pipe->files))
67548 sd->need_wakeup = true;
67549 }
67550
67551@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67552 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
67553 {
67554 while (!pipe->nrbufs) {
67555- if (!pipe->writers)
67556+ if (!atomic_read(&pipe->writers))
67557 return 0;
67558
67559- if (!pipe->waiting_writers && sd->num_spliced)
67560+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
67561 return 0;
67562
67563 if (sd->flags & SPLICE_F_NONBLOCK)
67564@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
67565 ops->release(pipe, buf);
67566 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67567 pipe->nrbufs--;
67568- if (pipe->files)
67569+ if (atomic_read(&pipe->files))
67570 sd.need_wakeup = true;
67571 } else {
67572 buf->offset += ret;
67573@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
67574 * out of the pipe right after the splice_to_pipe(). So set
67575 * PIPE_READERS appropriately.
67576 */
67577- pipe->readers = 1;
67578+ atomic_set(&pipe->readers, 1);
67579
67580 current->splice_pipe = pipe;
67581 }
67582@@ -1497,6 +1497,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
67583
67584 partial[buffers].offset = off;
67585 partial[buffers].len = plen;
67586+ partial[buffers].private = 0;
67587
67588 off = 0;
67589 len -= plen;
67590@@ -1733,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67591 ret = -ERESTARTSYS;
67592 break;
67593 }
67594- if (!pipe->writers)
67595+ if (!atomic_read(&pipe->writers))
67596 break;
67597- if (!pipe->waiting_writers) {
67598+ if (!atomic_read(&pipe->waiting_writers)) {
67599 if (flags & SPLICE_F_NONBLOCK) {
67600 ret = -EAGAIN;
67601 break;
67602@@ -1767,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67603 pipe_lock(pipe);
67604
67605 while (pipe->nrbufs >= pipe->buffers) {
67606- if (!pipe->readers) {
67607+ if (!atomic_read(&pipe->readers)) {
67608 send_sig(SIGPIPE, current, 0);
67609 ret = -EPIPE;
67610 break;
67611@@ -1780,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67612 ret = -ERESTARTSYS;
67613 break;
67614 }
67615- pipe->waiting_writers++;
67616+ atomic_inc(&pipe->waiting_writers);
67617 pipe_wait(pipe);
67618- pipe->waiting_writers--;
67619+ atomic_dec(&pipe->waiting_writers);
67620 }
67621
67622 pipe_unlock(pipe);
67623@@ -1818,14 +1819,14 @@ retry:
67624 pipe_double_lock(ipipe, opipe);
67625
67626 do {
67627- if (!opipe->readers) {
67628+ if (!atomic_read(&opipe->readers)) {
67629 send_sig(SIGPIPE, current, 0);
67630 if (!ret)
67631 ret = -EPIPE;
67632 break;
67633 }
67634
67635- if (!ipipe->nrbufs && !ipipe->writers)
67636+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
67637 break;
67638
67639 /*
67640@@ -1922,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67641 pipe_double_lock(ipipe, opipe);
67642
67643 do {
67644- if (!opipe->readers) {
67645+ if (!atomic_read(&opipe->readers)) {
67646 send_sig(SIGPIPE, current, 0);
67647 if (!ret)
67648 ret = -EPIPE;
67649@@ -1967,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67650 * return EAGAIN if we have the potential of some data in the
67651 * future, otherwise just return 0
67652 */
67653- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
67654+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
67655 ret = -EAGAIN;
67656
67657 pipe_unlock(ipipe);
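All of the fs/splice.c changes above are one pattern: pipe->readers, writers, waiting_writers, and files become atomic_t elsewhere in the patch, so every bare read or ++/-- here is converted to atomic_read()/atomic_inc()/atomic_dec(). The equivalent shape in portable C11, purely illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    struct pipe_counts {
        atomic_int readers;
        atomic_int waiting_writers;
    };

    static void writer_wait(struct pipe_counts *p) {
        atomic_fetch_add(&p->waiting_writers, 1); /* was: pipe->waiting_writers++ */
        /* ... pipe_wait(): block until a reader drains the pipe ... */
        atomic_fetch_sub(&p->waiting_writers, 1); /* was: pipe->waiting_writers-- */
    }

    int main(void) {
        struct pipe_counts p;
        atomic_init(&p.readers, 1);
        atomic_init(&p.waiting_writers, 0);
        writer_wait(&p);
        if (atomic_load(&p.readers) == 0)         /* was: if (!pipe->readers) */
            puts("no readers: raise SIGPIPE");
        else
            puts("reader present");
        return 0;
    }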
67658diff --git a/fs/stat.c b/fs/stat.c
67659index ae0c3ce..9ee641c 100644
67660--- a/fs/stat.c
67661+++ b/fs/stat.c
67662@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
67663 stat->gid = inode->i_gid;
67664 stat->rdev = inode->i_rdev;
67665 stat->size = i_size_read(inode);
67666- stat->atime = inode->i_atime;
67667- stat->mtime = inode->i_mtime;
67668+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67669+ stat->atime = inode->i_ctime;
67670+ stat->mtime = inode->i_ctime;
67671+ } else {
67672+ stat->atime = inode->i_atime;
67673+ stat->mtime = inode->i_mtime;
67674+ }
67675 stat->ctime = inode->i_ctime;
67676 stat->blksize = (1 << inode->i_blkbits);
67677 stat->blocks = inode->i_blocks;
67678@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
67679 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
67680 {
67681 struct inode *inode = path->dentry->d_inode;
67682+ int retval;
67683
67684- if (inode->i_op->getattr)
67685- return inode->i_op->getattr(path->mnt, path->dentry, stat);
67686+ if (inode->i_op->getattr) {
67687+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
67688+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67689+ stat->atime = stat->ctime;
67690+ stat->mtime = stat->ctime;
67691+ }
67692+ return retval;
67693+ }
67694
67695 generic_fillattr(inode, stat);
67696 return 0;
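The fs/stat.c hunks implement GRKERNSEC_DEVICE_SIDECHANNEL (see its Kconfig entry later in this patch): for callers without CAP_MKNOD, inodes matching is_sidechannel_device() report atime and mtime equal to ctime, both in generic_fillattr() and after a filesystem's own ->getattr(). The effect is visible from ordinary userspace:

    #include <stdio.h>
    #include <sys/stat.h>

    /* Probe the timestamps an unprivileged stat() sees on a shared
     * character device. With the feature enabled, atime and mtime
     * below match ctime for callers lacking CAP_MKNOD. */
    int main(void) {
        struct stat st;
        if (stat("/dev/ptmx", &st) != 0) { perror("stat"); return 1; }
        printf("atime: %ld\nmtime: %ld\nctime: %ld\n",
               (long)st.st_atime, (long)st.st_mtime, (long)st.st_ctime);
        return 0;
    }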
67697diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
67698index 0b45ff4..847de5b 100644
67699--- a/fs/sysfs/dir.c
67700+++ b/fs/sysfs/dir.c
67701@@ -41,9 +41,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
67702 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67703 {
67704 struct kernfs_node *parent, *kn;
67705+ const char *name;
67706+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
67707+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67708+ const char *parent_name;
67709+#endif
67710
67711 BUG_ON(!kobj);
67712
67713+ name = kobject_name(kobj);
67714+
67715 if (kobj->parent)
67716 parent = kobj->parent->sd;
67717 else
67718@@ -52,11 +59,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67719 if (!parent)
67720 return -ENOENT;
67721
67722- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
67723- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
67724+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67725+ parent_name = parent->name;
67726+ mode = S_IRWXU;
67727+
67728+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
67729+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
67730+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
67731+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
67732+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
67733+#endif
67734+
67735+ kn = kernfs_create_dir_ns(parent, name,
67736+ mode, kobj, ns);
67737 if (IS_ERR(kn)) {
67738 if (PTR_ERR(kn) == -EEXIST)
67739- sysfs_warn_dup(parent, kobject_name(kobj));
67740+ sysfs_warn_dup(parent, name);
67741 return PTR_ERR(kn);
67742 }
67743
67744diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
67745index 69d4889..a810bd4 100644
67746--- a/fs/sysv/sysv.h
67747+++ b/fs/sysv/sysv.h
67748@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
67749 #endif
67750 }
67751
67752-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67753+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67754 {
67755 if (sbi->s_bytesex == BYTESEX_PDP)
67756 return PDP_swab((__force __u32)n);
67757diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
67758index fb08b0c..65fcc7e 100644
67759--- a/fs/ubifs/io.c
67760+++ b/fs/ubifs/io.c
67761@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
67762 return err;
67763 }
67764
67765-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67766+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67767 {
67768 int err;
67769
67770diff --git a/fs/udf/misc.c b/fs/udf/misc.c
67771index c175b4d..8f36a16 100644
67772--- a/fs/udf/misc.c
67773+++ b/fs/udf/misc.c
67774@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
67775
67776 u8 udf_tag_checksum(const struct tag *t)
67777 {
67778- u8 *data = (u8 *)t;
67779+ const u8 *data = (const u8 *)t;
67780 u8 checksum = 0;
67781 int i;
67782 for (i = 0; i < sizeof(struct tag); ++i)
67783diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
67784index 8d974c4..b82f6ec 100644
67785--- a/fs/ufs/swab.h
67786+++ b/fs/ufs/swab.h
67787@@ -22,7 +22,7 @@ enum {
67788 BYTESEX_BE
67789 };
67790
67791-static inline u64
67792+static inline u64 __intentional_overflow(-1)
67793 fs64_to_cpu(struct super_block *sbp, __fs64 n)
67794 {
67795 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
67796@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
67797 return (__force __fs64)cpu_to_be64(n);
67798 }
67799
67800-static inline u32
67801+static inline u32 __intentional_overflow(-1)
67802 fs32_to_cpu(struct super_block *sbp, __fs32 n)
67803 {
67804 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
67805diff --git a/fs/utimes.c b/fs/utimes.c
67806index aa138d6..5f3a811 100644
67807--- a/fs/utimes.c
67808+++ b/fs/utimes.c
67809@@ -1,6 +1,7 @@
67810 #include <linux/compiler.h>
67811 #include <linux/file.h>
67812 #include <linux/fs.h>
67813+#include <linux/security.h>
67814 #include <linux/linkage.h>
67815 #include <linux/mount.h>
67816 #include <linux/namei.h>
67817@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
67818 }
67819 }
67820 retry_deleg:
67821+
67822+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
67823+ error = -EACCES;
67824+ goto mnt_drop_write_and_out;
67825+ }
67826+
67827 mutex_lock(&inode->i_mutex);
67828 error = notify_change(path->dentry, &newattrs, &delegated_inode);
67829 mutex_unlock(&inode->i_mutex);
67830diff --git a/fs/xattr.c b/fs/xattr.c
67831index 4ef6985..a6cd6567 100644
67832--- a/fs/xattr.c
67833+++ b/fs/xattr.c
67834@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
67835 return rc;
67836 }
67837
67838+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
67839+ssize_t
67840+pax_getxattr(struct dentry *dentry, void *value, size_t size)
67841+{
67842+ struct inode *inode = dentry->d_inode;
67843+ ssize_t error;
67844+
67845+ error = inode_permission(inode, MAY_EXEC);
67846+ if (error)
67847+ return error;
67848+
67849+ if (inode->i_op->getxattr)
67850+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
67851+ else
67852+ error = -EOPNOTSUPP;
67853+
67854+ return error;
67855+}
67856+EXPORT_SYMBOL(pax_getxattr);
67857+#endif
67858+
67859 ssize_t
67860 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
67861 {
67862@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
67863 * Extended attribute SET operations
67864 */
67865 static long
67866-setxattr(struct dentry *d, const char __user *name, const void __user *value,
67867+setxattr(struct path *path, const char __user *name, const void __user *value,
67868 size_t size, int flags)
67869 {
67870 int error;
67871@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
67872 posix_acl_fix_xattr_from_user(kvalue, size);
67873 }
67874
67875- error = vfs_setxattr(d, kname, kvalue, size, flags);
67876+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
67877+ error = -EACCES;
67878+ goto out;
67879+ }
67880+
67881+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
67882 out:
67883 if (vvalue)
67884 vfree(vvalue);
67885@@ -376,7 +402,7 @@ retry:
67886 return error;
67887 error = mnt_want_write(path.mnt);
67888 if (!error) {
67889- error = setxattr(path.dentry, name, value, size, flags);
67890+ error = setxattr(&path, name, value, size, flags);
67891 mnt_drop_write(path.mnt);
67892 }
67893 path_put(&path);
67894@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
67895 audit_file(f.file);
67896 error = mnt_want_write_file(f.file);
67897 if (!error) {
67898- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
67899+ error = setxattr(&f.file->f_path, name, value, size, flags);
67900 mnt_drop_write_file(f.file);
67901 }
67902 fdput(f);
67903@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
67904 * Extended attribute REMOVE operations
67905 */
67906 static long
67907-removexattr(struct dentry *d, const char __user *name)
67908+removexattr(struct path *path, const char __user *name)
67909 {
67910 int error;
67911 char kname[XATTR_NAME_MAX + 1];
67912@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
67913 if (error < 0)
67914 return error;
67915
67916- return vfs_removexattr(d, kname);
67917+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
67918+ return -EACCES;
67919+
67920+ return vfs_removexattr(path->dentry, kname);
67921 }
67922
67923 static int path_removexattr(const char __user *pathname,
67924@@ -623,7 +652,7 @@ retry:
67925 return error;
67926 error = mnt_want_write(path.mnt);
67927 if (!error) {
67928- error = removexattr(path.dentry, name);
67929+ error = removexattr(&path, name);
67930 mnt_drop_write(path.mnt);
67931 }
67932 path_put(&path);
67933@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
67934 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
67935 {
67936 struct fd f = fdget(fd);
67937+ struct path *path;
67938 int error = -EBADF;
67939
67940 if (!f.file)
67941 return error;
67942+ path = &f.file->f_path;
67943 audit_file(f.file);
67944 error = mnt_want_write_file(f.file);
67945 if (!error) {
67946- error = removexattr(f.file->f_path.dentry, name);
67947+ error = removexattr(path, name);
67948 mnt_drop_write_file(f.file);
67949 }
67950 fdput(f);
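The fs/xattr.c hunks rethread setxattr()/removexattr() to take a struct path rather than a bare dentry, so the gr_acl_handle_setxattr()/gr_acl_handle_removexattr() hooks can evaluate policy against both the dentry and the vfsmount; a denial surfaces to userspace as EACCES. A trivial probe (the file and attribute names are arbitrary):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(void) {
        const char *val = "1";
        if (setxattr("testfile", "user.demo", val, strlen(val), 0) != 0)
            printf("setxattr: %s\n", strerror(errno)); /* EACCES if policy denies */
        else
            puts("xattr set");
        return 0;
    }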
67951diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
67952index 4e20fe7..6d1a55a 100644
67953--- a/fs/xfs/libxfs/xfs_bmap.c
67954+++ b/fs/xfs/libxfs/xfs_bmap.c
67955@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
67956
67957 #else
67958 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
67959-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
67960+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
67961 #endif /* DEBUG */
67962
67963 /*
67964diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
67965index 098cd78..724d3f8 100644
67966--- a/fs/xfs/xfs_dir2_readdir.c
67967+++ b/fs/xfs/xfs_dir2_readdir.c
67968@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
67969 ino = dp->d_ops->sf_get_ino(sfp, sfep);
67970 filetype = dp->d_ops->sf_get_ftype(sfep);
67971 ctx->pos = off & 0x7fffffff;
67972- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
67973+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
67974+ char name[sfep->namelen];
67975+ memcpy(name, sfep->name, sfep->namelen);
67976+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
67977+ return 0;
67978+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
67979 xfs_dir3_get_dtype(dp->i_mount, filetype)))
67980 return 0;
67981 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
67982diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
67983index a183198..6b52f52 100644
67984--- a/fs/xfs/xfs_ioctl.c
67985+++ b/fs/xfs/xfs_ioctl.c
67986@@ -119,7 +119,7 @@ xfs_find_handle(
67987 }
67988
67989 error = -EFAULT;
67990- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
67991+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
67992 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
67993 goto out_put;
67994
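The xfs_find_handle() hunk refuses the copy_to_user() when hsize exceeds sizeof(handle), so an oversized handle length can never read past the on-stack object. The guard-before-copy shape in plain C (illustrative, not the kernel helper):

    #include <stdio.h>
    #include <string.h>

    /* Reject oversized requests instead of reading past the source
     * object, the same check the hunk adds before copy_to_user(). */
    static int copy_out(void *dst, size_t req, const void *src, size_t src_size) {
        if (req > src_size)
            return -1;                 /* would leak adjacent memory */
        memcpy(dst, src, req);
        return 0;
    }

    int main(void) {
        char handle[16] = "opaque-handle";
        char out[64];
        printf("oversized: %d\n", copy_out(out, sizeof(out), handle, sizeof(handle)));
        printf("exact:     %d\n", copy_out(out, sizeof(handle), handle, sizeof(handle)));
        return 0;
    }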
67995diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
67996index c31d2c2..6ec8f62 100644
67997--- a/fs/xfs/xfs_linux.h
67998+++ b/fs/xfs/xfs_linux.h
67999@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
68000 * of the compiler which do not like us using do_div in the middle
68001 * of large functions.
68002 */
68003-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
68004+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
68005 {
68006 __u32 mod;
68007
68008@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
68009 return 0;
68010 }
68011 #else
68012-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
68013+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
68014 {
68015 __u32 mod;
68016
68017diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
68018new file mode 100644
68019index 0000000..31f8fe4
68020--- /dev/null
68021+++ b/grsecurity/Kconfig
68022@@ -0,0 +1,1182 @@
68023+#
68024+# grsecurity configuration
68025+#
68026+menu "Memory Protections"
68027+depends on GRKERNSEC
68028+
68029+config GRKERNSEC_KMEM
68030+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
68031+ default y if GRKERNSEC_CONFIG_AUTO
68032+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
68033+ help
68034+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
68035+ be written to or read from to modify or leak the contents of the running
68036+ kernel. /dev/port will also not be allowed to be opened, writing to
68037+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
68038+ If you have module support disabled, enabling this will close up several
68039+ ways that are currently used to insert malicious code into the running
68040+ kernel.
68041+
68042+ Even with this feature enabled, we still highly recommend that
68043+ you use the RBAC system, as it is still possible for an attacker to
68044+ modify the running kernel through other more obscure methods.
68045+
68046+ It is highly recommended that you say Y here if you meet all the
68047+ conditions above.
68048+
68049+config GRKERNSEC_VM86
68050+ bool "Restrict VM86 mode"
68051+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68052+ depends on X86_32
68053+
68054+ help
68055+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
68056+ make use of a special execution mode on 32bit x86 processors called
68057+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
68058+ video cards and will still work with this option enabled. The purpose
68059+ of the option is to prevent exploitation of emulation errors in
68060+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
68061+ Nearly all users should be able to enable this option.
68062+
68063+config GRKERNSEC_IO
68064+ bool "Disable privileged I/O"
68065+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68066+ depends on X86
68067+ select RTC_CLASS
68068+ select RTC_INTF_DEV
68069+ select RTC_DRV_CMOS
68070+
68071+ help
68072+ If you say Y here, all ioperm and iopl calls will return an error.
68073+ Ioperm and iopl can be used to modify the running kernel.
68074+ Unfortunately, some programs need this access to operate properly,
68075+ the most notable of which are XFree86 and hwclock. The hwclock case can
68076+ be remedied by having RTC support in the kernel, so real-time
68077+ clock support is enabled if this option is enabled, to ensure
68078+ that hwclock operates correctly. If hwclock still does not work,
68079+ either update udev or symlink /dev/rtc to /dev/rtc0.
68080+
68081+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
68082+ you may not be able to boot into a graphical environment with this
68083+ option enabled. In this case, you should use the RBAC system instead.
68084+
68085+config GRKERNSEC_BPF_HARDEN
68086+ bool "Harden BPF interpreter"
68087+ default y if GRKERNSEC_CONFIG_AUTO
68088+ help
68089+ Unlike previous versions of grsecurity that hardened both the BPF
68090+ interpreted code against corruption at rest as well as the JIT code
68091+ against JIT-spray attacks and attacker-controlled immediate values
68092+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
68093+ and will ensure the interpreted code is read-only at rest. This feature
68094+ may be removed at a later time when eBPF stabilizes to entirely revert
68095+ back to the more secure pre-3.16 BPF interpreter/JIT.
68096+
68097+ If you're using KERNEXEC, it's recommended that you enable this option
68098+ to supplement the hardening of the kernel.
68099+
68100+config GRKERNSEC_PERF_HARDEN
68101+ bool "Disable unprivileged PERF_EVENTS usage by default"
68102+ default y if GRKERNSEC_CONFIG_AUTO
68103+ depends on PERF_EVENTS
68104+ help
68105+ If you say Y here, the range of acceptable values for the
68106+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
68107+ default to a new value: 3. When the sysctl is set to this value, no
68108+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
68109+
68110+ Though PERF_EVENTS can be used legitimately for performance monitoring
68111+ and low-level application profiling, it is forced on regardless of
68112+ configuration, has been at fault for several vulnerabilities, and
68113+ creates new opportunities for side channels and other information leaks.
68114+
68115+ This feature puts PERF_EVENTS into a secure default state and permits
68116+ the administrator to change out of it temporarily if unprivileged
68117+ application profiling is needed.
68118+
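The sysctl named in the help text above is the mainline /proc/sys/kernel/perf_event_paranoid knob; the value 3 is only accepted on kernels built with this option. Returning to the hardened default at runtime is a one-line write (root required):

    #include <stdio.h>

    int main(void) {
        FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "w");
        if (!f) { perror("fopen"); return 1; }
        fputs("3\n", f);               /* 3 = no unprivileged PERF_EVENTS */
        return fclose(f) == 0 ? 0 : 1;
    }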
68119+config GRKERNSEC_RAND_THREADSTACK
68120+ bool "Insert random gaps between thread stacks"
68121+ default y if GRKERNSEC_CONFIG_AUTO
68122+ depends on PAX_RANDMMAP && !PPC
68123+ help
68124+ If you say Y here, a random-sized gap will be enforced between allocated
68125+ thread stacks. Glibc's NPTL and other threading libraries that
68126+ pass MAP_STACK to the kernel for thread stack allocation are supported.
68127+ The implementation currently provides 8 bits of entropy for the gap.
68128+
68129+ Many distributions do not compile threaded remote services with the
68130+ -fstack-check argument to GCC, causing the variable-sized stack-based
68131+ allocator, alloca(), to not probe the stack on allocation. This
68132+ permits an unbounded alloca() to skip over any guard page and potentially
68133+ modify another thread's stack reliably. An enforced random gap
68134+ reduces the reliability of such an attack and increases the chance
68135+ that such a read/write to another thread's stack instead lands in
68136+ an unmapped area, causing a crash and triggering grsecurity's
68137+ anti-bruteforcing logic.
68138+
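To make the "8 bits of entropy" above concrete: a gap of 0-255 pages is chosen per thread-stack mapping, so an attacker probing an adjacent stack must guess among 256 placements. A userspace illustration of the sizing only (the real gap is inserted by the kernel's mmap code when it sees MAP_STACK; this is not kernel API):

    #include <stdio.h>
    #include <sys/random.h>

    int main(void) {
        unsigned char r;
        if (getrandom(&r, 1, 0) != 1) { perror("getrandom"); return 1; }
        unsigned long gap = (unsigned long)r << 12;  /* 0-255 pages of 4 KiB */
        printf("gap before this thread stack: %lu bytes\n", gap);
        return 0;
    }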
68139+config GRKERNSEC_PROC_MEMMAP
68140+ bool "Harden ASLR against information leaks and entropy reduction"
68141+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
68142+ depends on PAX_NOEXEC || PAX_ASLR
68143+ help
68144+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
68145+ give no information about the addresses of its mappings if
68146+ PaX features that rely on random addresses are enabled on the task.
68147+ In addition to sanitizing this information and disabling other
68148+ dangerous sources of information, this option causes reads of sensitive
68149+ /proc/<pid> entries to be rejected where the file descriptor was opened in
68150+ a different task than the one performing the read. Such attempts are logged.
68151+ This option also limits argv/env strings for suid/sgid binaries
68152+ to 512KB to prevent a complete exhaustion of the stack entropy provided
68153+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
68154+ binaries to prevent alternative mmap layouts from being abused.
68155+
68156+ If you use PaX it is essential that you say Y here as it closes up
68157+ several holes that make full ASLR useless locally.
68158+
68159+
68160+config GRKERNSEC_KSTACKOVERFLOW
68161+ bool "Prevent kernel stack overflows"
68162+ default y if GRKERNSEC_CONFIG_AUTO
68163+ depends on !IA64 && 64BIT
68164+ help
68165+ If you say Y here, the kernel's process stacks will be allocated
68166+ with vmalloc instead of the kernel's default allocator. This
68167+ introduces guard pages that in combination with the alloca checking
68168+ of the STACKLEAK feature prevents all forms of kernel process stack
68169+ overflow abuse. Note that this is different from kernel stack
68170+ buffer overflows.
68171+
68172+config GRKERNSEC_BRUTE
68173+ bool "Deter exploit bruteforcing"
68174+ default y if GRKERNSEC_CONFIG_AUTO
68175+ help
68176+ If you say Y here, attempts to bruteforce exploits against forking
68177+ daemons such as apache or sshd, as well as against suid/sgid binaries
68178+ will be deterred. When a child of a forking daemon is killed by PaX
68179+ or crashes due to an illegal instruction or other suspicious signal,
68180+ the parent process will be delayed 30 seconds upon every subsequent
68181+ fork until the administrator is able to assess the situation and
68182+ restart the daemon.
68183+ In the suid/sgid case, the attempt is logged, the user has all their
68184+ existing instances of the suid/sgid binary terminated and will
68185+ be unable to execute any suid/sgid binaries for 15 minutes.
68186+
68187+ It is recommended that you also enable signal logging in the auditing
68188+ section so that logs are generated when a process triggers a suspicious
68189+ signal.
68190+ If the sysctl option is enabled, a sysctl option with name
68191+ "deter_bruteforce" is created.
68192+
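A userspace analogy of the fork-delay deterrence described above: a supervisor that notices a child killed by a signal and waits 30 seconds before respawning, roughly what the kernel enforces on the forking parent itself (the daemon path is hypothetical):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    int main(void) {
        for (;;) {
            pid_t pid = fork();
            if (pid < 0) return 1;
            if (pid == 0) {
                execl("/usr/sbin/mydaemon", "mydaemon", (char *)NULL); /* hypothetical */
                _exit(127);
            }
            int status;
            if (waitpid(pid, &status, 0) < 0) return 1;
            if (WIFSIGNALED(status)) {
                fprintf(stderr, "child killed by signal %d; delaying respawn\n",
                        WTERMSIG(status));
                sleep(30);             /* mirrors the 30-second fork delay */
            }
        }
    }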
68193+config GRKERNSEC_MODHARDEN
68194+ bool "Harden module auto-loading"
68195+ default y if GRKERNSEC_CONFIG_AUTO
68196+ depends on MODULES
68197+ help
68198+ If you say Y here, module auto-loading in response to use of some
68199+ feature implemented by an unloaded module will be restricted to
68200+ root users. Enabling this option helps defend against attacks
68201+ by unprivileged users who abuse the auto-loading behavior to
68202+ cause a vulnerable module to load that is then exploited.
68203+
68204+ If this option prevents a legitimate use of auto-loading for a
68205+ non-root user, the administrator can execute modprobe manually
68206+ with the exact name of the module mentioned in the alert log.
68207+ Alternatively, the administrator can add the module to the list
68208+ of modules loaded at boot by modifying init scripts.
68209+
68210+ Modification of init scripts will most likely be needed on
68211+ Ubuntu servers with encrypted home directory support enabled,
68212+ as the first non-root user logging in will cause the ecb(aes),
68213+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
68214+
68215+config GRKERNSEC_HIDESYM
68216+ bool "Hide kernel symbols"
68217+ default y if GRKERNSEC_CONFIG_AUTO
68218+ select PAX_USERCOPY_SLABS
68219+ help
68220+ If you say Y here, getting information on loaded modules, and
68221+ displaying all kernel symbols through a syscall will be restricted
68222+ to users with CAP_SYS_MODULE. For software compatibility reasons,
68223+ /proc/kallsyms will be restricted to the root user. The RBAC
68224+ system can hide that entry even from root.
68225+
68226+ This option also prevents leaking of kernel addresses through
68227+ several /proc entries.
68228+
68229+ Note that this option is only effective provided the following
68230+ conditions are met:
68231+ 1) The kernel using grsecurity is not precompiled by some distribution
68232+ 2) You have also enabled GRKERNSEC_DMESG
68233+ 3) You are using the RBAC system and hiding other files such as your
68234+ kernel image and System.map. Alternatively, enabling this option
68235+ causes the permissions on /boot, /lib/modules, and the kernel
68236+ source directory to change at compile time to prevent
68237+ reading by non-root users.
68238+ If the above conditions are met, this option will aid in providing a
68239+ useful protection against local kernel exploitation of overflows
68240+ and arbitrary read/write vulnerabilities.
68241+
68242+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
68243+ in addition to this feature.
68244+
68245+config GRKERNSEC_RANDSTRUCT
68246+ bool "Randomize layout of sensitive kernel structures"
68247+ default y if GRKERNSEC_CONFIG_AUTO
68248+ select GRKERNSEC_HIDESYM
68249+ select MODVERSIONS if MODULES
68250+ help
68251+ If you say Y here, the layouts of a number of sensitive kernel
68252+ structures (task, fs, cred, etc) and all structures composed entirely
68253+ of function pointers (aka "ops" structs) will be randomized at compile-time.
68254+ This can force exploits targeting these structure types to additionally
68255+ require an infoleak vulnerability.
68256+
68257+ Enabling this feature will introduce some performance impact, slightly
68258+ increase memory usage, and prevent the use of forensic tools like
68259+ Volatility against the system (unless the kernel source tree, which
68260+ contains the randomization seed, is left on the system after installation).
68261+
68262+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
68263+ It remains after a make clean to allow for external modules to be compiled
68264+ with the existing seed and will be removed by a make mrproper or
68265+ make distclean.
68266+
68267+ Note that the implementation requires gcc 4.6.4 or newer. You may need
68268+ to install the supporting headers explicitly in addition to the normal
68269+ gcc package.
68270+
68271+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
68272+ bool "Use cacheline-aware structure randomization"
68273+ depends on GRKERNSEC_RANDSTRUCT
68274+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
68275+ help
68276+ If you say Y here, the RANDSTRUCT randomization will make a best effort
68277+ at restricting randomization to cacheline-sized groups of elements. It
68278+ will further not randomize bitfields in structures. This reduces the
68279+ performance hit of RANDSTRUCT at the cost of weakened randomization.
68280+
68281+config GRKERNSEC_KERN_LOCKOUT
68282+ bool "Active kernel exploit response"
68283+ default y if GRKERNSEC_CONFIG_AUTO
68284+ depends on X86 || ARM || PPC || SPARC
68285+ help
68286+ If you say Y here, when a PaX alert is triggered due to suspicious
68287+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
68288+ or an OOPS occurs due to bad memory accesses, instead of just
68289+ terminating the offending process (and potentially allowing
68290+ a subsequent exploit from the same user), we will take one of two
68291+ actions:
68292+ If the user was root, we will panic the system
68293+ If the user was non-root, we will log the attempt, terminate
68294+ all processes owned by the user, then prevent them from creating
68295+ any new processes until the system is restarted
68296+ This deters repeated kernel exploitation/bruteforcing attempts
68297+ and is useful for later forensics.
68298+
68299+config GRKERNSEC_OLD_ARM_USERLAND
68300+ bool "Old ARM userland compatibility"
68301+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
68302+ help
68303+ If you say Y here, stubs of executable code to perform such operations
68304+ as "compare-exchange" will be placed at fixed locations in the ARM vector
68305+ table. This is unfortunately needed for old ARM userland meant to run
68306+ across a wide range of processors. Without this option enabled,
68307+ the get_tls and data memory barrier stubs will be emulated by the kernel,
68308+ which is enough for Linaro userlands or other userlands designed for v6
68309+ and newer ARM CPUs. It's recommended that you try without this option enabled
68310+ first, and only enable it if your userland does not boot (it will likely fail
68311+ at init time).
68312+
68313+endmenu
68314+menu "Role Based Access Control Options"
68315+depends on GRKERNSEC
68316+
68317+config GRKERNSEC_RBAC_DEBUG
68318+ bool
68319+
68320+config GRKERNSEC_NO_RBAC
68321+ bool "Disable RBAC system"
68322+ help
68323+ If you say Y here, the /dev/grsec device will be removed from the kernel,
68324+ preventing the RBAC system from being enabled. You should only say Y
68325+ here if you have no intention of using the RBAC system, so as to prevent
68326+ an attacker with root access from misusing the RBAC system to hide files
68327+ and processes when loadable module support and /dev/[k]mem have been
68328+ locked down.
68329+
68330+config GRKERNSEC_ACL_HIDEKERN
68331+ bool "Hide kernel processes"
68332+ help
68333+ If you say Y here, all kernel threads will be hidden to all
68334+ processes but those whose subject has the "view hidden processes"
68335+ flag.
68336+
68337+config GRKERNSEC_ACL_MAXTRIES
68338+ int "Maximum tries before password lockout"
68339+ default 3
68340+ help
68341+ This option enforces the maximum number of times a user can attempt
68342+ to authorize themselves with the grsecurity RBAC system before being
68343+ denied the ability to attempt authorization again for a specified time.
68344+ The lower the number, the harder it will be to brute-force a password.
68345+
68346+config GRKERNSEC_ACL_TIMEOUT
68347+ int "Time to wait after max password tries, in seconds"
68348+ default 30
68349+ help
68350+ This option specifies the time the user must wait after attempting to
68351+ authorize to the RBAC system with the maximum number of invalid
68352+ passwords. The higher the number, the harder it will be to brute-force
68353+ a password.
68354+
68355+endmenu
68356+menu "Filesystem Protections"
68357+depends on GRKERNSEC
68358+
68359+config GRKERNSEC_PROC
68360+ bool "Proc restrictions"
68361+ default y if GRKERNSEC_CONFIG_AUTO
68362+ help
68363+ If you say Y here, the permissions of the /proc filesystem
68364+ will be altered to enhance system security and privacy. You MUST
68365+ choose either a user only restriction or a user and group restriction.
68366+ Depending upon the option you choose, you can either restrict all users
68367+ to see only the processes they themselves run, or designate a special
68368+ group that can view all processes and files normally restricted to
68369+ root. NOTE: If you're running identd or
68370+ ntpd as a non-root user, you will have to run it as the group you
68371+ specify here.
68372+
68373+config GRKERNSEC_PROC_USER
68374+ bool "Restrict /proc to user only"
68375+ depends on GRKERNSEC_PROC
68376+ help
68377+ If you say Y here, non-root users will only be able to view their own
68378+ processes, and will be restricted from viewing network-related
68379+ information and kernel symbol and module information.
68380+
68381+config GRKERNSEC_PROC_USERGROUP
68382+ bool "Allow special group"
68383+ default y if GRKERNSEC_CONFIG_AUTO
68384+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
68385+ help
68386+ If you say Y here, you will be able to select a group that will be
68387+ able to view all processes and network-related information. If you've
68388+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
68389+ remain hidden. This option is useful if you want to run identd as
68390+ a non-root user. The group you select may also be chosen at boot time
68391+ via "grsec_proc_gid=" on the kernel commandline.
68392+
68393+config GRKERNSEC_PROC_GID
68394+ int "GID for special group"
68395+ depends on GRKERNSEC_PROC_USERGROUP
68396+ default 1001
68397+
68398+config GRKERNSEC_PROC_ADD
68399+ bool "Additional restrictions"
68400+ default y if GRKERNSEC_CONFIG_AUTO
68401+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
68402+ help
68403+ If you say Y here, additional restrictions will be placed on
68404+ /proc that keep normal users from viewing device information and
68405+ slabinfo information that could be useful for exploits.
68406+
68407+config GRKERNSEC_LINK
68408+ bool "Linking restrictions"
68409+ default y if GRKERNSEC_CONFIG_AUTO
68410+ help
68411+ If you say Y here, /tmp race exploits will be prevented, since users
68412+ will no longer be able to follow symlinks owned by other users in
68413+ world-writable +t directories (e.g. /tmp), unless the owner of the
68414+ symlink is the owner of the directory. Users will also not be
68415+ able to hardlink to files they do not own. If the sysctl option is
68416+ enabled, a sysctl option with name "linking_restrictions" is created.
68417+
68418+config GRKERNSEC_SYMLINKOWN
68419+ bool "Kernel-enforced SymlinksIfOwnerMatch"
68420+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68421+ help
68422+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
68423+ that prevents it from being used as a security feature. As Apache
68424+ verifies the symlink by performing a stat() against the target of
68425+ the symlink before it is followed, an attacker can setup a symlink
68426+ to point to a same-owned file, then replace the symlink with one
68427+ that targets another user's file just after Apache "validates" the
68428+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
68429+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
68430+ will be in place for the group you specify. If the sysctl option
68431+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
68432+ created.
68433+
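The TOCTOU window the help text above describes is the classic stat-then-open pattern; everything "validated" at step 1 can change before step 2. A minimal demonstration of the racy check itself (the path is illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/stat.h>

    int main(void) {
        struct stat st;
        const char *link = "/srv/www/page.html"; /* possibly a symlink */

        if (stat(link, &st) != 0) return 1;      /* step 1: follows the symlink */
        if (st.st_uid != getuid())               /* looks same-owned... */
            return 1;
        /* ...race window: the symlink can be re-pointed here... */
        int fd = open(link, O_RDONLY);           /* step 2: may open another user's file */
        if (fd < 0) return 1;
        puts("opened (possibly not the file that was checked)");
        close(fd);
        return 0;
    }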
68434+config GRKERNSEC_SYMLINKOWN_GID
68435+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
68436+ depends on GRKERNSEC_SYMLINKOWN
68437+ default 1006
68438+ help
68439+ Setting this GID determines what group kernel-enforced
68440+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
68441+ is enabled, a sysctl option with name "symlinkown_gid" is created.
68442+
68443+config GRKERNSEC_FIFO
68444+ bool "FIFO restrictions"
68445+ default y if GRKERNSEC_CONFIG_AUTO
68446+ help
68447+ If you say Y here, users will not be able to write to FIFOs they don't
68448+ own in world-writable +t directories (e.g. /tmp), unless the owner of
68449+ the FIFO is also the owner of the directory it's held in. If the sysctl
68450+ option is enabled, a sysctl option with name "fifo_restrictions" is
68451+ created.
68452+
68453+config GRKERNSEC_SYSFS_RESTRICT
68454+ bool "Sysfs/debugfs restriction"
68455+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68456+ depends on SYSFS
68457+ help
68458+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
68459+ any filesystem normally mounted under it (e.g. debugfs) will be
68460+ mostly accessible only by root. These filesystems generally provide access
68461+ to hardware and debug information that isn't appropriate for unprivileged
68462+ users of the system. Sysfs and debugfs have also become a large source
68463+ of new vulnerabilities, ranging from infoleaks to local compromise.
68464+ There has been very little oversight with an eye toward security involved
68465+ in adding new exporters of information to these filesystems, so their
68466+ use is discouraged.
68467+ For reasons of compatibility, a few directories have been whitelisted
68468+ for access by non-root users:
68469+ /sys/fs/selinux
68470+ /sys/fs/fuse
68471+ /sys/devices/system/cpu
68472+
68473+config GRKERNSEC_ROFS
68474+ bool "Runtime read-only mount protection"
68475+ depends on SYSCTL
68476+ help
68477+ If you say Y here, a sysctl option with name "romount_protect" will
68478+ be created. By setting this option to 1 at runtime, filesystems
68479+ will be protected in the following ways:
68480+ * No new writable mounts will be allowed
68481+ * Existing read-only mounts won't be able to be remounted read/write
68482+ * Write operations will be denied on all block devices
68483+ This option acts independently of grsec_lock: once it is set to 1,
68484+ it cannot be turned off. Therefore, please be mindful of the resulting
68485+ behavior if this option is enabled in an init script on a read-only
68486+ filesystem.
68487+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
68488+ and GRKERNSEC_IO should be enabled and module loading disabled via
68489+ config or at runtime.
68490+ This feature is mainly intended for secure embedded systems.
68491+
68492+
68493+config GRKERNSEC_DEVICE_SIDECHANNEL
68494+ bool "Eliminate stat/notify-based device sidechannels"
68495+ default y if GRKERNSEC_CONFIG_AUTO
68496+ help
68497+ If you say Y here, timing analyses on block or character
68498+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
68499+ will be thwarted for unprivileged users. If a process without
68500+ CAP_MKNOD stats such a device, the last access and last modify times
68501+ will match the device's create time. No access or modify events
68502+ will be triggered through inotify/dnotify/fanotify for such devices.
68503+ This feature will prevent attacks that could, at a minimum,
68504+ allow an attacker to determine the length of the administrator's password.
68505+
68506+config GRKERNSEC_CHROOT
68507+ bool "Chroot jail restrictions"
68508+ default y if GRKERNSEC_CONFIG_AUTO
68509+ help
68510+ If you say Y here, you will be able to choose several options that will
68511+ make breaking out of a chrooted jail much more difficult. If you
68512+ encounter no software incompatibilities with the following options, it
68513+ is recommended that you enable each one.
68514+
68515+ Note that the chroot restrictions are not intended to apply to "chroots"
68516+ to directories that are simple bind mounts of the global root filesystem.
68517+ For several other reasons, a user shouldn't expect any significant
68518+ security by performing such a chroot.
68519+
68520+config GRKERNSEC_CHROOT_MOUNT
68521+ bool "Deny mounts"
68522+ default y if GRKERNSEC_CONFIG_AUTO
68523+ depends on GRKERNSEC_CHROOT
68524+ help
68525+ If you say Y here, processes inside a chroot will not be able to
68526+ mount or remount filesystems. If the sysctl option is enabled, a
68527+ sysctl option with name "chroot_deny_mount" is created.
68528+
68529+config GRKERNSEC_CHROOT_DOUBLE
68530+ bool "Deny double-chroots"
68531+ default y if GRKERNSEC_CONFIG_AUTO
68532+ depends on GRKERNSEC_CHROOT
68533+ help
68534+ If you say Y here, processes inside a chroot will not be able to chroot
68535+ again outside the chroot. This is a widely used method of breaking
68536+ out of a chroot jail and should not be allowed. If the sysctl
68537+ option is enabled, a sysctl option with name
68538+ "chroot_deny_chroot" is created.
68539+
68540+config GRKERNSEC_CHROOT_PIVOT
68541+ bool "Deny pivot_root in chroot"
68542+ default y if GRKERNSEC_CONFIG_AUTO
68543+ depends on GRKERNSEC_CHROOT
68544+ help
68545+ If you say Y here, processes inside a chroot will not be able to use
68546+ a function called pivot_root() that was introduced in Linux 2.3.41. It
68547+ works similar to chroot in that it changes the root filesystem. This
68548+ function could be misused in a chrooted process to attempt to break out
68549+ of the chroot, and therefore should not be allowed. If the sysctl
68550+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
68551+ created.
68552+
68553+config GRKERNSEC_CHROOT_CHDIR
68554+ bool "Enforce chdir(\"/\") on all chroots"
68555+ default y if GRKERNSEC_CONFIG_AUTO
68556+ depends on GRKERNSEC_CHROOT
68557+ help
68558+ If you say Y here, the current working directory of all newly-chrooted
68559+ applications will be set to the root directory of the chroot.
68560+ The man page on chroot(2) states:
68561+ Note that this call does not change the current working
68562+ directory, so that `.' can be outside the tree rooted at
68563+ `/'. In particular, the super-user can escape from a
68564+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
68565+
68566+ It is recommended that you say Y here, since it's not known to break
68567+ any software. If the sysctl option is enabled, a sysctl option with
68568+ name "chroot_enforce_chdir" is created.
68569+
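The discipline this option enforces can also be done voluntarily: always chdir() to the new root immediately after chroot(), so "." never remains outside the jail. A minimal sketch (requires CAP_SYS_CHROOT; the jail path is illustrative):

    #include <stdio.h>
    #include <unistd.h>

    int main(void) {
        if (chroot("/srv/jail") != 0) { perror("chroot"); return 1; }
        if (chdir("/") != 0)          { perror("chdir");  return 1; }
        puts("inside the jail with cwd at its root");
        return 0;
    }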
68570+config GRKERNSEC_CHROOT_CHMOD
68571+ bool "Deny (f)chmod +s"
68572+ default y if GRKERNSEC_CONFIG_AUTO
68573+ depends on GRKERNSEC_CHROOT
68574+ help
68575+ If you say Y here, processes inside a chroot will not be able to chmod
68576+ or fchmod files to make them have suid or sgid bits. This protects
68577+ against another published method of breaking a chroot. If the sysctl
68578+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
68579+ created.
68580+
68581+config GRKERNSEC_CHROOT_FCHDIR
68582+ bool "Deny fchdir and fhandle out of chroot"
68583+ default y if GRKERNSEC_CONFIG_AUTO
68584+ depends on GRKERNSEC_CHROOT
68585+ help
68586+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
68587+ to a file descriptor of the chrooting process that points to a directory
68588+ outside the filesystem will be stopped. Additionally, this option prevents
68589+ use of the recently-created syscall for opening files by a guessable "file
68590+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
68591+ with name "chroot_deny_fchdir" is created.
68592+
68593+config GRKERNSEC_CHROOT_MKNOD
68594+ bool "Deny mknod"
68595+ default y if GRKERNSEC_CONFIG_AUTO
68596+ depends on GRKERNSEC_CHROOT
68597+ help
68598+ If you say Y here, processes inside a chroot will not be allowed to
68599+ mknod. The problem with using mknod inside a chroot is that it
68600+ would allow an attacker to create a device entry that is the same
68601+ as one on the physical root of your system, which could range from
68602+ anything from the console device to a device for your harddrive (which
68603+ they could then use to wipe the drive or steal data). It is recommended
68604+ that you say Y here, unless you run into software incompatibilities.
68605+ If the sysctl option is enabled, a sysctl option with name
68606+ "chroot_deny_mknod" is created.
68607+
68608+config GRKERNSEC_CHROOT_SHMAT
68609+ bool "Deny shmat() out of chroot"
68610+ default y if GRKERNSEC_CONFIG_AUTO
68611+ depends on GRKERNSEC_CHROOT
68612+ help
68613+ If you say Y here, processes inside a chroot will not be able to attach
68614+ to shared memory segments that were created outside of the chroot jail.
68615+ It is recommended that you say Y here. If the sysctl option is enabled,
68616+ a sysctl option with name "chroot_deny_shmat" is created.
68617+
68618+config GRKERNSEC_CHROOT_UNIX
68619+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
68620+ default y if GRKERNSEC_CONFIG_AUTO
68621+ depends on GRKERNSEC_CHROOT
68622+ help
68623+ If you say Y here, processes inside a chroot will not be able to
68624+ connect to abstract (meaning not belonging to a filesystem) Unix
68625+ domain sockets that were bound outside of a chroot. It is recommended
68626+ that you say Y here. If the sysctl option is enabled, a sysctl option
68627+ with name "chroot_deny_unix" is created.
68628+
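"Abstract" here means a unix socket whose name lives in the kernel's abstract namespace (sun_path starts with a NUL byte), so there is no filesystem node for the chroot to contain, which is exactly why it needs its own restriction. What binding one looks like:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <sys/un.h>

    int main(void) {
        int fd = socket(AF_UNIX, SOCK_STREAM, 0);
        if (fd < 0) { perror("socket"); return 1; }

        struct sockaddr_un sa;
        memset(&sa, 0, sizeof(sa));
        sa.sun_family = AF_UNIX;                  /* sun_path[0] stays '\0': abstract */
        memcpy(sa.sun_path + 1, "grsec-demo", 10);

        socklen_t len = offsetof(struct sockaddr_un, sun_path) + 1 + 10;
        if (bind(fd, (struct sockaddr *)&sa, len) != 0) perror("bind");
        else puts("bound abstract socket @grsec-demo");
        close(fd);
        return 0;
    }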
68629+config GRKERNSEC_CHROOT_FINDTASK
68630+ bool "Protect outside processes"
68631+ default y if GRKERNSEC_CONFIG_AUTO
68632+ depends on GRKERNSEC_CHROOT
68633+ help
68634+ If you say Y here, processes inside a chroot will not be able to
68635+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
68636+ getsid, or view any process outside of the chroot. If the sysctl
68637+ option is enabled, a sysctl option with name "chroot_findtask" is
68638+ created.
68639+
68640+config GRKERNSEC_CHROOT_NICE
68641+ bool "Restrict priority changes"
68642+ default y if GRKERNSEC_CONFIG_AUTO
68643+ depends on GRKERNSEC_CHROOT
68644+ help
68645+ If you say Y here, processes inside a chroot will not be able to raise
68646+ the priority of processes in the chroot, or alter the priority of
68647+ processes outside the chroot. This provides more security than simply
68648+ removing CAP_SYS_NICE from the process' capability set. If the
68649+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
68650+ is created.
68651+
68652+config GRKERNSEC_CHROOT_SYSCTL
68653+ bool "Deny sysctl writes"
68654+ default y if GRKERNSEC_CONFIG_AUTO
68655+ depends on GRKERNSEC_CHROOT
68656+ help
68657+ If you say Y here, an attacker in a chroot will not be able to
68658+ write to sysctl entries, either by sysctl(2) or through a /proc
68659+ interface. It is strongly recommended that you say Y here. If the
68660+ sysctl option is enabled, a sysctl option with name
68661+ "chroot_deny_sysctl" is created.
68662+
68663+config GRKERNSEC_CHROOT_RENAME
68664+ bool "Deny bad renames"
68665+ default y if GRKERNSEC_CONFIG_AUTO
68666+ depends on GRKERNSEC_CHROOT
68667+ help
68668+ If you say Y here, an attacker in a chroot will not be able to
68669+ abuse the ability to create double chroots to break out of the
68670+ chroot by exploiting a race condition between a rename of a directory
68671+ within a chroot against an open of a symlink with relative path
68672+ components. This feature will likewise prevent an accomplice outside
68673+ a chroot from enabling a user inside the chroot to break out and make
68674+ use of their credentials on the global filesystem. Enabling this
68675+ feature is essential to prevent root users from breaking out of a
68676+ chroot. If the sysctl option is enabled, a sysctl option with name
68677+ "chroot_deny_bad_rename" is created.
68678+
68679+config GRKERNSEC_CHROOT_CAPS
68680+ bool "Capability restrictions"
68681+ default y if GRKERNSEC_CONFIG_AUTO
68682+ depends on GRKERNSEC_CHROOT
68683+ help
68684+ If you say Y here, the capabilities on all processes within a
68685+ chroot jail will be lowered to stop module insertion, raw i/o,
68686+ system and net admin tasks, rebooting the system, modifying immutable
68687+ files, modifying IPC owned by another, and changing the system time.
68688+ This is left an option because it can break some apps. Disable this
68689+ if your chrooted apps are having problems performing those kinds of
68690+ tasks. If the sysctl option is enabled, a sysctl option with
68691+ name "chroot_caps" is created.
68692+
68693+config GRKERNSEC_CHROOT_INITRD
68694+ bool "Exempt initrd tasks from restrictions"
68695+ default y if GRKERNSEC_CONFIG_AUTO
68696+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
68697+ help
68698+ If you say Y here, tasks started prior to init will be exempted from
68699+ grsecurity's chroot restrictions. This option is mainly meant to
68700+ resolve Plymouth's performing privileged operations unnecessarily
68701+ in a chroot.
68702+
68703+endmenu
68704+menu "Kernel Auditing"
68705+depends on GRKERNSEC
68706+
68707+config GRKERNSEC_AUDIT_GROUP
68708+ bool "Single group for auditing"
68709+ help
68710+ If you say Y here, the exec and chdir logging features will only operate
68711+ on a group you specify. This option is recommended if you only want to
68712+ watch certain users instead of having a large amount of logs from the
68713+ entire system. If the sysctl option is enabled, a sysctl option with
68714+ name "audit_group" is created.
68715+
68716+config GRKERNSEC_AUDIT_GID
68717+ int "GID for auditing"
68718+ depends on GRKERNSEC_AUDIT_GROUP
68719+ default 1007
68720+
68721+config GRKERNSEC_EXECLOG
68722+ bool "Exec logging"
68723+ help
68724+ If you say Y here, all execve() calls will be logged (since the
68725+ other exec*() calls are frontends to execve(), all execution
68726+ will be logged). Useful for shell-servers that like to keep track
68727+ of their users. If the sysctl option is enabled, a sysctl option with
68728+ name "exec_logging" is created.
68729+ WARNING: This option when enabled will produce a LOT of logs, especially
68730+ on an active system.
68731+
68732+config GRKERNSEC_RESLOG
68733+ bool "Resource logging"
68734+ default y if GRKERNSEC_CONFIG_AUTO
68735+ help
68736+ If you say Y here, all attempts to overstep resource limits will
68737+ be logged with the resource name, the requested size, and the current
68738+ limit. It is highly recommended that you say Y here. If the sysctl
68739+ option is enabled, a sysctl option with name "resource_logging" is
68740+ created. If the RBAC system is enabled, the sysctl value is ignored.
68741+
68742+config GRKERNSEC_CHROOT_EXECLOG
68743+ bool "Log execs within chroot"
68744+ help
68745+ If you say Y here, all executions inside a chroot jail will be logged
68746+ to syslog. This can cause a large amount of logs if certain
68747+ applications (e.g. djb's daemontools) are installed on the system, and
68748+ is therefore left as an option. If the sysctl option is enabled, a
68749+ sysctl option with name "chroot_execlog" is created.
68750+
68751+config GRKERNSEC_AUDIT_PTRACE
68752+ bool "Ptrace logging"
68753+ help
68754+ If you say Y here, all attempts to attach to a process via ptrace
68755+ will be logged. If the sysctl option is enabled, a sysctl option
68756+ with name "audit_ptrace" is created.
68757+
68758+config GRKERNSEC_AUDIT_CHDIR
68759+ bool "Chdir logging"
68760+ help
68761+ If you say Y here, all chdir() calls will be logged. If the sysctl
68762+ option is enabled, a sysctl option with name "audit_chdir" is created.
68763+
68764+config GRKERNSEC_AUDIT_MOUNT
68765+ bool "(Un)Mount logging"
68766+ help
68767+ If you say Y here, all mounts and unmounts will be logged. If the
68768+ sysctl option is enabled, a sysctl option with name "audit_mount" is
68769+ created.
68770+
68771+config GRKERNSEC_SIGNAL
68772+ bool "Signal logging"
68773+ default y if GRKERNSEC_CONFIG_AUTO
68774+ help
68775+ If you say Y here, certain important signals will be logged, such as
68776+	  SIGSEGV, which as a result will inform you when an error in a program
68777+	  has occurred, which in some cases could indicate an exploit attempt.
68778+ If the sysctl option is enabled, a sysctl option with name
68779+ "signal_logging" is created.
68780+
68781+config GRKERNSEC_FORKFAIL
68782+ bool "Fork failure logging"
68783+ help
68784+ If you say Y here, all failed fork() attempts will be logged.
68785+ This could suggest a fork bomb, or someone attempting to overstep
68786+ their process limit. If the sysctl option is enabled, a sysctl option
68787+ with name "forkfail_logging" is created.
68788+
68789+config GRKERNSEC_TIME
68790+ bool "Time change logging"
68791+ default y if GRKERNSEC_CONFIG_AUTO
68792+ help
68793+ If you say Y here, any changes of the system clock will be logged.
68794+ If the sysctl option is enabled, a sysctl option with name
68795+ "timechange_logging" is created.
68796+
68797+config GRKERNSEC_PROC_IPADDR
68798+ bool "/proc/<pid>/ipaddr support"
68799+ default y if GRKERNSEC_CONFIG_AUTO
68800+ help
68801+ If you say Y here, a new entry will be added to each /proc/<pid>
68802+ directory that contains the IP address of the person using the task.
68803+ The IP is carried across local TCP and AF_UNIX stream sockets.
68804+ This information can be useful for IDS/IPSes to perform remote response
68805+ to a local attack. The entry is readable by only the owner of the
68806+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
68807+ the RBAC system), and thus does not create privacy concerns.
68808+
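
For consumers such as an IDS, reading the entry is straightforward; a minimal
userspace sketch (the PID and the exact formatting of the file's contents are
assumptions, only the /proc/<pid>/ipaddr path comes from the help text above):

    /* ipaddr_read.c - print the recorded source IP of an example task */
    #include <stdio.h>

    int main(void)
    {
        char ip[64];
        FILE *f = fopen("/proc/1234/ipaddr", "r");  /* 1234: hypothetical PID */

        if (f && fgets(ip, sizeof(ip), f))
            printf("task originated from %s\n", ip);
        if (f)
            fclose(f);
        return 0;
    }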
68809+config GRKERNSEC_RWXMAP_LOG
68810+	bool "Denied RWX mmap/mprotect logging"
68811+ default y if GRKERNSEC_CONFIG_AUTO
68812+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
68813+ help
68814+ If you say Y here, calls to mmap() and mprotect() with explicit
68815+ usage of PROT_WRITE and PROT_EXEC together will be logged when
68816+ denied by the PAX_MPROTECT feature. This feature will also
68817+ log other problematic scenarios that can occur when PAX_MPROTECT
68818+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
68819+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
68820+ is created.
68821+
68822+endmenu
68823+
68824+menu "Executable Protections"
68825+depends on GRKERNSEC
68826+
68827+config GRKERNSEC_DMESG
68828+ bool "Dmesg(8) restriction"
68829+ default y if GRKERNSEC_CONFIG_AUTO
68830+ help
68831+ If you say Y here, non-root users will not be able to use dmesg(8)
68832+ to view the contents of the kernel's circular log buffer.
68833+ The kernel's log buffer often contains kernel addresses and other
68834+ identifying information useful to an attacker in fingerprinting a
68835+ system for a targeted exploit.
68836+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
68837+ created.
68838+
68839+config GRKERNSEC_HARDEN_PTRACE
68840+ bool "Deter ptrace-based process snooping"
68841+ default y if GRKERNSEC_CONFIG_AUTO
68842+ help
68843+ If you say Y here, TTY sniffers and other malicious monitoring
68844+ programs implemented through ptrace will be defeated. If you
68845+ have been using the RBAC system, this option has already been
68846+ enabled for several years for all users, with the ability to make
68847+ fine-grained exceptions.
68848+
68849+ This option only affects the ability of non-root users to ptrace
68850+	  processes that are not a descendant of the ptracing process.
68851+ This means that strace ./binary and gdb ./binary will still work,
68852+ but attaching to arbitrary processes will not. If the sysctl
68853+ option is enabled, a sysctl option with name "harden_ptrace" is
68854+ created.
68855+
68856+config GRKERNSEC_PTRACE_READEXEC
68857+ bool "Require read access to ptrace sensitive binaries"
68858+ default y if GRKERNSEC_CONFIG_AUTO
68859+ help
68860+ If you say Y here, unprivileged users will not be able to ptrace unreadable
68861+ binaries. This option is useful in environments that
68862+ remove the read bits (e.g. file mode 4711) from suid binaries to
68863+	  prevent infoleaking of their contents. This option adds consistency
68864+	  to the use of that file mode, as the binary's contents could otherwise
68865+	  be read out by running it under ptrace without privileges.
68866+
68867+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
68868+ is created.
68869+
68870+config GRKERNSEC_SETXID
68871+ bool "Enforce consistent multithreaded privileges"
68872+ default y if GRKERNSEC_CONFIG_AUTO
68873+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
68874+ help
68875+ If you say Y here, a change from a root uid to a non-root uid
68876+ in a multithreaded application will cause the resulting uids,
68877+ gids, supplementary groups, and capabilities in that thread
68878+ to be propagated to the other threads of the process. In most
68879+ cases this is unnecessary, as glibc will emulate this behavior
68880+ on behalf of the application. Other libcs do not act in the
68881+ same way, allowing the other threads of the process to continue
68882+ running with root privileges. If the sysctl option is enabled,
68883+ a sysctl option with name "consistent_setxid" is created.
68884+
68885+config GRKERNSEC_HARDEN_IPC
68886+ bool "Disallow access to overly-permissive IPC objects"
68887+ default y if GRKERNSEC_CONFIG_AUTO
68888+ depends on SYSVIPC
68889+ help
68890+ If you say Y here, access to overly-permissive IPC objects (shared
68891+ memory, message queues, and semaphores) will be denied for processes
68892+	  matching either of the following criteria, in addition to normal permission checks:
68893+ 1) If the IPC object is world-accessible and the euid doesn't match
68894+ that of the creator or current uid for the IPC object
68895+ 2) If the IPC object is group-accessible and the egid doesn't
68896+ match that of the creator or current gid for the IPC object
68897+ It's a common error to grant too much permission to these objects,
68898+ with impact ranging from denial of service and information leaking to
68899+ privilege escalation. This feature was developed in response to
68900+ research by Tim Brown:
68901+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
68902+ who found hundreds of such insecure usages. Processes with
68903+ CAP_IPC_OWNER are still permitted to access these IPC objects.
68904+ If the sysctl option is enabled, a sysctl option with name
68905+ "harden_ipc" is created.
68906+
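
A minimal sketch of the two criteria above as a predicate over struct
kern_ipc_perm; this restates the help text and is not the patch's actual
implementation (which lives in grsec_ipc.c):

    /* Sketch: nonzero when an IPC object is "overly permissive" for the
     * caller, per the two criteria listed in the help text above. */
    static int ipc_denied_by_harden_ipc(const struct kern_ipc_perm *p,
                                        kuid_t euid, kgid_t egid)
    {
        /* 1) world-accessible and euid matches neither creator nor owner */
        if ((p->mode & S_IRWXO) && !uid_eq(euid, p->cuid) && !uid_eq(euid, p->uid))
            return 1;
        /* 2) group-accessible and egid matches neither creator nor owner */
        if ((p->mode & S_IRWXG) && !gid_eq(egid, p->cgid) && !gid_eq(egid, p->gid))
            return 1;
        return 0;   /* CAP_IPC_OWNER bypass would be handled by the caller */
    }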
68907+config GRKERNSEC_TPE
68908+ bool "Trusted Path Execution (TPE)"
68909+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68910+ help
68911+ If you say Y here, you will be able to choose a gid to add to the
68912+ supplementary groups of users you want to mark as "untrusted."
68913+ These users will not be able to execute any files that are not in
68914+ root-owned directories writable only by root. If the sysctl option
68915+ is enabled, a sysctl option with name "tpe" is created.
68916+
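
The core test the help text describes reduces to a property of the containing
directory; a minimal sketch (illustrative only -- the real checks live in
grsec_tpe.c and also handle the invert/all variants below):

    /* Sketch: an untrusted user may execute a file only if its directory
     * is owned by root and writable by no one but root. */
    static int tpe_dir_is_trusted(const struct inode *dir)
    {
        return uid_eq(dir->i_uid, GLOBAL_ROOT_UID) &&
               !(dir->i_mode & (S_IWGRP | S_IWOTH));
    }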
68917+config GRKERNSEC_TPE_ALL
68918+ bool "Partially restrict all non-root users"
68919+ depends on GRKERNSEC_TPE
68920+ help
68921+ If you say Y here, all non-root users will be covered under
68922+ a weaker TPE restriction. This is separate from, and in addition to,
68923+ the main TPE options that you have selected elsewhere. Thus, if a
68924+ "trusted" GID is chosen, this restriction applies to even that GID.
68925+ Under this restriction, all non-root users will only be allowed to
68926+ execute files in directories they own that are not group or
68927+ world-writable, or in directories owned by root and writable only by
68928+ root. If the sysctl option is enabled, a sysctl option with name
68929+ "tpe_restrict_all" is created.
68930+
68931+config GRKERNSEC_TPE_INVERT
68932+ bool "Invert GID option"
68933+ depends on GRKERNSEC_TPE
68934+ help
68935+ If you say Y here, the group you specify in the TPE configuration will
68936+ decide what group TPE restrictions will be *disabled* for. This
68937+ option is useful if you want TPE restrictions to be applied to most
68938+ users on the system. If the sysctl option is enabled, a sysctl option
68939+ with name "tpe_invert" is created. Unlike other sysctl options, this
68940+ entry will default to on for backward-compatibility.
68941+
68942+config GRKERNSEC_TPE_GID
68943+ int
68944+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
68945+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
68946+
68947+config GRKERNSEC_TPE_UNTRUSTED_GID
68948+ int "GID for TPE-untrusted users"
68949+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
68950+ default 1005
68951+ help
68952+ Setting this GID determines what group TPE restrictions will be
68953+ *enabled* for. If the sysctl option is enabled, a sysctl option
68954+ with name "tpe_gid" is created.
68955+
68956+config GRKERNSEC_TPE_TRUSTED_GID
68957+ int "GID for TPE-trusted users"
68958+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
68959+ default 1005
68960+ help
68961+ Setting this GID determines what group TPE restrictions will be
68962+ *disabled* for. If the sysctl option is enabled, a sysctl option
68963+ with name "tpe_gid" is created.
68964+
68965+endmenu
68966+menu "Network Protections"
68967+depends on GRKERNSEC
68968+
68969+config GRKERNSEC_BLACKHOLE
68970+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
68971+ default y if GRKERNSEC_CONFIG_AUTO
68972+ depends on NET
68973+ help
68974+ If you say Y here, neither TCP resets nor ICMP
68975+ destination-unreachable packets will be sent in response to packets
68976+ sent to ports for which no associated listening process exists.
68977+ It will also prevent the sending of ICMP protocol unreachable packets
68978+ in response to packets with unknown protocols.
68979+ This feature supports both IPV4 and IPV6 and exempts the
68980+ loopback interface from blackholing. Enabling this feature
68981+ makes a host more resilient to DoS attacks and reduces network
68982+ visibility against scanners.
68983+
68984+ The blackhole feature as-implemented is equivalent to the FreeBSD
68985+ blackhole feature, as it prevents RST responses to all packets, not
68986+ just SYNs. Under most application behavior this causes no
68987+ problems, but applications (like haproxy) may not close certain
68988+ connections in a way that cleanly terminates them on the remote
68989+ end, leaving the remote host in LAST_ACK state. Because of this
68990+ side-effect and to prevent intentional LAST_ACK DoSes, this
68991+ feature also adds automatic mitigation against such attacks.
68992+ The mitigation drastically reduces the amount of time a socket
68993+ can spend in LAST_ACK state. If you're using haproxy and not
68994+ all servers it connects to have this option enabled, consider
68995+ disabling this feature on the haproxy host.
68996+
68997+ If the sysctl option is enabled, two sysctl options with names
68998+ "ip_blackhole" and "lastack_retries" will be created.
68999+ While "ip_blackhole" takes the standard zero/non-zero on/off
69000+ toggle, "lastack_retries" uses the same kinds of values as
69001+ "tcp_retries1" and "tcp_retries2". The default value of 4
69002+ prevents a socket from lasting more than 45 seconds in LAST_ACK
69003+ state.
69004+
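
The 45 second figure follows from exponential retransmit backoff: assuming a
3 second initial FIN retransmit timeout (an assumption here, matching older
Linux defaults), 4 retries spend 3 + 6 + 12 + 24 = 45 seconds. As a sketch:

    /* Illustration: total time a socket can sit in LAST_ACK given
     * lastack_retries, assuming a 3s initial RTO that doubles per retry. */
    static unsigned int lastack_lifetime_secs(unsigned int retries)
    {
        unsigned int total = 0, rto = 3;

        while (retries--) {
            total += rto;
            rto <<= 1;
        }
        return total;   /* retries == 4 -> 3+6+12+24 = 45 */
    }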
69005+config GRKERNSEC_NO_SIMULT_CONNECT
69006+ bool "Disable TCP Simultaneous Connect"
69007+ default y if GRKERNSEC_CONFIG_AUTO
69008+ depends on NET
69009+ help
69010+ If you say Y here, a feature by Willy Tarreau will be enabled that
69011+ removes a weakness in Linux's strict implementation of TCP that
69012+ allows two clients to connect to each other without either entering
69013+ a listening state. The weakness allows an attacker to easily prevent
69014+ a client from connecting to a known server provided the source port
69015+ for the connection is guessed correctly.
69016+
69017+ As the weakness could be used to prevent an antivirus or IPS from
69018+ fetching updates, or prevent an SSL gateway from fetching a CRL,
69019+ it should be eliminated by enabling this option. Though Linux is
69020+	  one of the few operating systems supporting simultaneous connect, it
69021+ has no legitimate use in practice and is rarely supported by firewalls.
69022+
69023+config GRKERNSEC_SOCKET
69024+ bool "Socket restrictions"
69025+ depends on NET
69026+ help
69027+ If you say Y here, you will be able to choose from several options.
69028+ If you assign a GID on your system and add it to the supplementary
69029+ groups of users you want to restrict socket access to, this patch
69030+ will perform up to three things, based on the option(s) you choose.
69031+
69032+config GRKERNSEC_SOCKET_ALL
69033+ bool "Deny any sockets to group"
69034+ depends on GRKERNSEC_SOCKET
69035+ help
69036+	  If you say Y here, you will be able to choose a GID whose users will
69037+ be unable to connect to other hosts from your machine or run server
69038+ applications from your machine. If the sysctl option is enabled, a
69039+ sysctl option with name "socket_all" is created.
69040+
69041+config GRKERNSEC_SOCKET_ALL_GID
69042+ int "GID to deny all sockets for"
69043+ depends on GRKERNSEC_SOCKET_ALL
69044+ default 1004
69045+ help
69046+ Here you can choose the GID to disable socket access for. Remember to
69047+ add the users you want socket access disabled for to the GID
69048+ specified here. If the sysctl option is enabled, a sysctl option
69049+ with name "socket_all_gid" is created.
69050+
69051+config GRKERNSEC_SOCKET_CLIENT
69052+ bool "Deny client sockets to group"
69053+ depends on GRKERNSEC_SOCKET
69054+ help
69055+	  If you say Y here, you will be able to choose a GID whose users will
69056+ be unable to connect to other hosts from your machine, but will be
69057+ able to run servers. If this option is enabled, all users in the group
69058+ you specify will have to use passive mode when initiating ftp transfers
69059+ from the shell on your machine. If the sysctl option is enabled, a
69060+ sysctl option with name "socket_client" is created.
69061+
69062+config GRKERNSEC_SOCKET_CLIENT_GID
69063+ int "GID to deny client sockets for"
69064+ depends on GRKERNSEC_SOCKET_CLIENT
69065+ default 1003
69066+ help
69067+ Here you can choose the GID to disable client socket access for.
69068+ Remember to add the users you want client socket access disabled for to
69069+ the GID specified here. If the sysctl option is enabled, a sysctl
69070+ option with name "socket_client_gid" is created.
69071+
69072+config GRKERNSEC_SOCKET_SERVER
69073+ bool "Deny server sockets to group"
69074+ depends on GRKERNSEC_SOCKET
69075+ help
69076+	  If you say Y here, you will be able to choose a GID whose users will
69077+ be unable to run server applications from your machine. If the sysctl
69078+ option is enabled, a sysctl option with name "socket_server" is created.
69079+
69080+config GRKERNSEC_SOCKET_SERVER_GID
69081+ int "GID to deny server sockets for"
69082+ depends on GRKERNSEC_SOCKET_SERVER
69083+ default 1002
69084+ help
69085+ Here you can choose the GID to disable server socket access for.
69086+ Remember to add the users you want server socket access disabled for to
69087+ the GID specified here. If the sysctl option is enabled, a sysctl
69088+ option with name "socket_server_gid" is created.
69089+
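
All three deny options hinge on supplementary-group membership of the calling
task; a minimal sketch of the shape of such a check (in_group_p() is a real
kernel helper, the rest is illustrative and not the patch's code):

    /* Sketch: deny socket creation when the caller belongs to the
     * configured "deny" group (socket_all_gid and friends). */
    static int gr_socket_denied_sketch(kgid_t deny_gid)
    {
        if (in_group_p(deny_gid))
            return -EACCES;
        return 0;
    }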
69090+endmenu
69091+
69092+menu "Physical Protections"
69093+depends on GRKERNSEC
69094+
69095+config GRKERNSEC_DENYUSB
69096+ bool "Deny new USB connections after toggle"
69097+ default y if GRKERNSEC_CONFIG_AUTO
69098+ depends on SYSCTL && USB_SUPPORT
69099+ help
69100+ If you say Y here, a new sysctl option with name "deny_new_usb"
69101+ will be created. Setting its value to 1 will prevent any new
69102+ USB devices from being recognized by the OS. Any attempted USB
69103+ device insertion will be logged. This option is intended to be
69104+ used against custom USB devices designed to exploit vulnerabilities
69105+ in various USB device drivers.
69106+
69107+ For greatest effectiveness, this sysctl should be set after any
69108+ relevant init scripts. This option is safe to enable in distros
69109+ as each user can choose whether or not to toggle the sysctl.
69110+
69111+config GRKERNSEC_DENYUSB_FORCE
69112+ bool "Reject all USB devices not connected at boot"
69113+ select USB
69114+ depends on GRKERNSEC_DENYUSB
69115+ help
69116+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
69117+ that doesn't involve a sysctl entry. This option should only be
69118+ enabled if you're sure you want to deny all new USB connections
69119+ at runtime and don't want to modify init scripts. This should not
69120+ be enabled by distros. It forces the core USB code to be built
69121+ into the kernel image so that all devices connected at boot time
69122+ can be recognized and new USB device connections can be prevented
69123+ prior to init running.
69124+
69125+endmenu
69126+
69127+menu "Sysctl Support"
69128+depends on GRKERNSEC && SYSCTL
69129+
69130+config GRKERNSEC_SYSCTL
69131+ bool "Sysctl support"
69132+ default y if GRKERNSEC_CONFIG_AUTO
69133+ help
69134+ If you say Y here, you will be able to change the options that
69135+ grsecurity runs with at bootup, without having to recompile your
69136+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
69137+ to enable (1) or disable (0) various features. All the sysctl entries
69138+ are mutable until the "grsec_lock" entry is set to a non-zero value.
69139+ All features enabled in the kernel configuration are disabled at boot
69140+ if you do not say Y to the "Turn on features by default" option.
69141+ All options should be set at startup, and the grsec_lock entry should
69142+ be set to a non-zero value after all the options are set.
69143+ *THIS IS EXTREMELY IMPORTANT*
69144+
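
In practice that means an init script (or equivalent) sets each entry under
/proc/sys/kernel/grsecurity and then writes grsec_lock last; a minimal
userspace sketch (the specific feature toggled is just an example):

    /* grsec_sysctl_lock.c - toggle a feature, then lock all grsec sysctls */
    #include <stdio.h>

    static void grsec_set(const char *name, const char *val)
    {
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
        f = fopen(path, "w");
        if (f) {
            fputs(val, f);
            fclose(f);
        }
    }

    int main(void)
    {
        grsec_set("exec_logging", "1");  /* example toggle */
        grsec_set("grsec_lock", "1");    /* must come last: freezes all entries */
        return 0;
    }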
69145+config GRKERNSEC_SYSCTL_DISTRO
69146+ bool "Extra sysctl support for distro makers (READ HELP)"
69147+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
69148+ help
69149+ If you say Y here, additional sysctl options will be created
69150+ for features that affect processes running as root. Therefore,
69151+ it is critical when using this option that the grsec_lock entry be
69152+	  enabled after boot. Only distros that ship prebuilt kernel packages
69153+	  with this option enabled and that can ensure grsec_lock is set after
69154+	  boot should use this option.
69155+ *Failure to set grsec_lock after boot makes all grsec features
69156+ this option covers useless*
69157+
69158+ Currently this option creates the following sysctl entries:
69159+ "Disable Privileged I/O": "disable_priv_io"
69160+
69161+config GRKERNSEC_SYSCTL_ON
69162+ bool "Turn on features by default"
69163+ default y if GRKERNSEC_CONFIG_AUTO
69164+ depends on GRKERNSEC_SYSCTL
69165+ help
69166+ If you say Y here, instead of having all features enabled in the
69167+ kernel configuration disabled at boot time, the features will be
69168+ enabled at boot time. It is recommended you say Y here unless
69169+ there is some reason you would want all sysctl-tunable features to
69170+ be disabled by default. As mentioned elsewhere, it is important
69171+ to enable the grsec_lock entry once you have finished modifying
69172+ the sysctl entries.
69173+
69174+endmenu
69175+menu "Logging Options"
69176+depends on GRKERNSEC
69177+
69178+config GRKERNSEC_FLOODTIME
69179+ int "Seconds in between log messages (minimum)"
69180+ default 10
69181+ help
69182+ This option allows you to enforce the number of seconds between
69183+ grsecurity log messages. The default should be suitable for most
69184+	  people; however, if you do change it, pick a value small enough
69185+ to allow informative logs to be produced, but large enough to
69186+ prevent flooding.
69187+
69188+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
69189+ any rate limiting on grsecurity log messages.
69190+
69191+config GRKERNSEC_FLOODBURST
69192+ int "Number of messages in a burst (maximum)"
69193+ default 6
69194+ help
69195+ This option allows you to choose the maximum number of messages allowed
69196+ within the flood time interval you chose in a separate option. The
69197+	  default should be suitable for most people; however, if you find that
69198+ many of your logs are being interpreted as flooding, you may want to
69199+ raise this value.
69200+
69201+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
69202+ any rate limiting on grsecurity log messages.
69203+
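
Together the two knobs describe a simple fixed-window limiter: at most
FLOODBURST messages per FLOODTIME seconds, with 0/0 disabling limiting
entirely. A sketch of those semantics (not the patch's actual logging code):

    /* Sketch: allow a log message if fewer than `burst` messages have been
     * emitted in the current `floodtime`-second window; 0/0 disables limiting. */
    static int grsec_log_allowed(time_t now, time_t floodtime, unsigned int burst)
    {
        static time_t window_start;
        static unsigned int count;

        if (floodtime == 0 && burst == 0)
            return 1;
        if (now - window_start >= floodtime) {
            window_start = now;  /* open a new window */
            count = 0;
        }
        return ++count <= burst;
    }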
69204+endmenu
69205diff --git a/grsecurity/Makefile b/grsecurity/Makefile
69206new file mode 100644
69207index 0000000..30ababb
69208--- /dev/null
69209+++ b/grsecurity/Makefile
69210@@ -0,0 +1,54 @@
69211+# grsecurity - access control and security hardening for Linux
69212+# All code in this directory and various hooks located throughout the Linux kernel are
69213+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
69214+# http://www.grsecurity.net spender@grsecurity.net
69215+#
69216+# This program is free software; you can redistribute it and/or
69217+# modify it under the terms of the GNU General Public License version 2
69218+# as published by the Free Software Foundation.
69219+#
69220+# This program is distributed in the hope that it will be useful,
69221+# but WITHOUT ANY WARRANTY; without even the implied warranty of
69222+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69223+# GNU General Public License for more details.
69224+#
69225+# You should have received a copy of the GNU General Public License
69226+# along with this program; if not, write to the Free Software
69227+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
69228+
69229+KBUILD_CFLAGS += -Werror
69230+
69231+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
69232+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
69233+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
69234+ grsec_usb.o grsec_ipc.o grsec_proc.o
69235+
69236+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
69237+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
69238+ gracl_learn.o grsec_log.o gracl_policy.o
69239+ifdef CONFIG_COMPAT
69240+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
69241+endif
69242+
69243+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
69244+
69245+ifdef CONFIG_NET
69246+obj-y += grsec_sock.o
69247+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
69248+endif
69249+
69250+ifndef CONFIG_GRKERNSEC
69251+obj-y += grsec_disabled.o
69252+endif
69253+
69254+ifdef CONFIG_GRKERNSEC_HIDESYM
69255+extra-y := grsec_hidesym.o
69256+$(obj)/grsec_hidesym.o:
69257+ @-chmod -f 500 /boot
69258+ @-chmod -f 500 /lib/modules
69259+ @-chmod -f 500 /lib64/modules
69260+ @-chmod -f 500 /lib32/modules
69261+ @-chmod -f 700 .
69262+ @-chmod -f 700 $(objtree)
69263+ @echo ' grsec: protected kernel image paths'
69264+endif
69265diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
69266new file mode 100644
69267index 0000000..6c1e154
69268--- /dev/null
69269+++ b/grsecurity/gracl.c
69270@@ -0,0 +1,2749 @@
69271+#include <linux/kernel.h>
69272+#include <linux/module.h>
69273+#include <linux/sched.h>
69274+#include <linux/mm.h>
69275+#include <linux/file.h>
69276+#include <linux/fs.h>
69277+#include <linux/namei.h>
69278+#include <linux/mount.h>
69279+#include <linux/tty.h>
69280+#include <linux/proc_fs.h>
69281+#include <linux/lglock.h>
69282+#include <linux/slab.h>
69283+#include <linux/vmalloc.h>
69284+#include <linux/types.h>
69285+#include <linux/sysctl.h>
69286+#include <linux/netdevice.h>
69287+#include <linux/ptrace.h>
69288+#include <linux/gracl.h>
69289+#include <linux/gralloc.h>
69290+#include <linux/security.h>
69291+#include <linux/grinternal.h>
69292+#include <linux/pid_namespace.h>
69293+#include <linux/stop_machine.h>
69294+#include <linux/fdtable.h>
69295+#include <linux/percpu.h>
69296+#include <linux/lglock.h>
69297+#include <linux/hugetlb.h>
69298+#include <linux/posix-timers.h>
69299+#include <linux/prefetch.h>
69300+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69301+#include <linux/magic.h>
69302+#include <linux/pagemap.h>
69303+#include "../fs/btrfs/async-thread.h"
69304+#include "../fs/btrfs/ctree.h"
69305+#include "../fs/btrfs/btrfs_inode.h"
69306+#endif
69307+#include "../fs/mount.h"
69308+
69309+#include <asm/uaccess.h>
69310+#include <asm/errno.h>
69311+#include <asm/mman.h>
69312+
69313+#define FOR_EACH_ROLE_START(role) \
69314+ role = running_polstate.role_list; \
69315+ while (role) {
69316+
69317+#define FOR_EACH_ROLE_END(role) \
69318+ role = role->prev; \
69319+ }
69320+
69321+extern struct path gr_real_root;
69322+
69323+static struct gr_policy_state running_polstate;
69324+struct gr_policy_state *polstate = &running_polstate;
69325+extern struct gr_alloc_state *current_alloc_state;
69326+
69327+extern char *gr_shared_page[4];
69328+DEFINE_RWLOCK(gr_inode_lock);
69329+
69330+static unsigned int gr_status __read_only = GR_STATUS_INIT;
69331+
69332+#ifdef CONFIG_NET
69333+extern struct vfsmount *sock_mnt;
69334+#endif
69335+
69336+extern struct vfsmount *pipe_mnt;
69337+extern struct vfsmount *shm_mnt;
69338+
69339+#ifdef CONFIG_HUGETLBFS
69340+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
69341+#endif
69342+
69343+extern u16 acl_sp_role_value;
69344+extern struct acl_object_label *fakefs_obj_rw;
69345+extern struct acl_object_label *fakefs_obj_rwx;
69346+
69347+int gr_acl_is_enabled(void)
69348+{
69349+ return (gr_status & GR_READY);
69350+}
69351+
69352+void gr_enable_rbac_system(void)
69353+{
69354+ pax_open_kernel();
69355+ gr_status |= GR_READY;
69356+ pax_close_kernel();
69357+}
69358+
69359+int gr_rbac_disable(void *unused)
69360+{
69361+ pax_open_kernel();
69362+ gr_status &= ~GR_READY;
69363+ pax_close_kernel();
69364+
69365+ return 0;
69366+}
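
One note on the pair above, since the pattern recurs throughout the patch:

    /* Editorial note: gr_status is declared __read_only above, so enabling
     * or disabling RBAC must bracket the write with pax_open_kernel() /
     * pax_close_kernel(), the PaX/KERNEXEC idiom for briefly making
     * otherwise read-only kernel data writable. */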
69367+
69368+static inline dev_t __get_dev(const struct dentry *dentry)
69369+{
69370+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69371+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69372+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
69373+ else
69374+#endif
69375+ return dentry->d_sb->s_dev;
69376+}
69377+
69378+static inline u64 __get_ino(const struct dentry *dentry)
69379+{
69380+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69381+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69382+ return btrfs_ino(dentry->d_inode);
69383+ else
69384+#endif
69385+ return dentry->d_inode->i_ino;
69386+}
69387+
69388+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69389+{
69390+ return __get_dev(dentry);
69391+}
69392+
69393+u64 gr_get_ino_from_dentry(struct dentry *dentry)
69394+{
69395+ return __get_ino(dentry);
69396+}
69397+
69398+static char gr_task_roletype_to_char(struct task_struct *task)
69399+{
69400+ switch (task->role->roletype &
69401+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
69402+ GR_ROLE_SPECIAL)) {
69403+ case GR_ROLE_DEFAULT:
69404+ return 'D';
69405+ case GR_ROLE_USER:
69406+ return 'U';
69407+ case GR_ROLE_GROUP:
69408+ return 'G';
69409+ case GR_ROLE_SPECIAL:
69410+ return 'S';
69411+ }
69412+
69413+ return 'X';
69414+}
69415+
69416+char gr_roletype_to_char(void)
69417+{
69418+ return gr_task_roletype_to_char(current);
69419+}
69420+
69421+__inline__ int
69422+gr_acl_tpe_check(void)
69423+{
69424+ if (unlikely(!(gr_status & GR_READY)))
69425+ return 0;
69426+ if (current->role->roletype & GR_ROLE_TPE)
69427+ return 1;
69428+ else
69429+ return 0;
69430+}
69431+
69432+int
69433+gr_handle_rawio(const struct inode *inode)
69434+{
69435+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69436+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
69437+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
69438+ !capable(CAP_SYS_RAWIO))
69439+ return 1;
69440+#endif
69441+ return 0;
69442+}
69443+
69444+int
69445+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
69446+{
69447+ if (likely(lena != lenb))
69448+ return 0;
69449+
69450+ return !memcmp(a, b, lena);
69451+}
69452+
69453+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
69454+{
69455+ *buflen -= namelen;
69456+ if (*buflen < 0)
69457+ return -ENAMETOOLONG;
69458+ *buffer -= namelen;
69459+ memcpy(*buffer, str, namelen);
69460+ return 0;
69461+}
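
prepend() builds paths right to left: the cursor starts at the end of the
buffer and each call copies its argument in front of what is already there.
A worked illustration (annotation only, not patch code):

    /* Building "/usr/bin" into a 16-byte buffer, leaf first:
     *   initial cursor:      buf+16, buflen 16
     *   prepend("\0", 1):    [...............\0]
     *   prepend("bin", 3):   [............bin\0]
     *   prepend("/", 1):     [.........../bin\0]
     *   prepend("usr", 3):   [......../usr/bin\0]
     *   prepend("/", 1):     [......./usr/bin\0]
     * The caller uses the final cursor value, not buf itself; running out
     * of room returns -ENAMETOOLONG before moving the cursor. */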
69462+
69463+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
69464+{
69465+ return prepend(buffer, buflen, name->name, name->len);
69466+}
69467+
69468+static int prepend_path(const struct path *path, struct path *root,
69469+ char **buffer, int *buflen)
69470+{
69471+ struct dentry *dentry = path->dentry;
69472+ struct vfsmount *vfsmnt = path->mnt;
69473+ struct mount *mnt = real_mount(vfsmnt);
69474+ bool slash = false;
69475+ int error = 0;
69476+
69477+ while (dentry != root->dentry || vfsmnt != root->mnt) {
69478+ struct dentry * parent;
69479+
69480+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
69481+ /* Global root? */
69482+ if (!mnt_has_parent(mnt)) {
69483+ goto out;
69484+ }
69485+ dentry = mnt->mnt_mountpoint;
69486+ mnt = mnt->mnt_parent;
69487+ vfsmnt = &mnt->mnt;
69488+ continue;
69489+ }
69490+ parent = dentry->d_parent;
69491+ prefetch(parent);
69492+ spin_lock(&dentry->d_lock);
69493+ error = prepend_name(buffer, buflen, &dentry->d_name);
69494+ spin_unlock(&dentry->d_lock);
69495+ if (!error)
69496+ error = prepend(buffer, buflen, "/", 1);
69497+ if (error)
69498+ break;
69499+
69500+ slash = true;
69501+ dentry = parent;
69502+ }
69503+
69504+out:
69505+ if (!error && !slash)
69506+ error = prepend(buffer, buflen, "/", 1);
69507+
69508+ return error;
69509+}
69510+
69511+/* this must be called with mount_lock and rename_lock held */
69512+
69513+static char *__our_d_path(const struct path *path, struct path *root,
69514+ char *buf, int buflen)
69515+{
69516+ char *res = buf + buflen;
69517+ int error;
69518+
69519+ prepend(&res, &buflen, "\0", 1);
69520+ error = prepend_path(path, root, &res, &buflen);
69521+ if (error)
69522+ return ERR_PTR(error);
69523+
69524+ return res;
69525+}
69526+
69527+static char *
69528+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
69529+{
69530+ char *retval;
69531+
69532+ retval = __our_d_path(path, root, buf, buflen);
69533+ if (unlikely(IS_ERR(retval)))
69534+ retval = strcpy(buf, "<path too long>");
69535+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
69536+ retval[1] = '\0';
69537+
69538+ return retval;
69539+}
69540+
69541+static char *
69542+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69543+ char *buf, int buflen)
69544+{
69545+ struct path path;
69546+ char *res;
69547+
69548+ path.dentry = (struct dentry *)dentry;
69549+ path.mnt = (struct vfsmount *)vfsmnt;
69550+
69551+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
69552+ by the RBAC system */
69553+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
69554+
69555+ return res;
69556+}
69557+
69558+static char *
69559+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69560+ char *buf, int buflen)
69561+{
69562+ char *res;
69563+ struct path path;
69564+ struct path root;
69565+ struct task_struct *reaper = init_pid_ns.child_reaper;
69566+
69567+ path.dentry = (struct dentry *)dentry;
69568+ path.mnt = (struct vfsmount *)vfsmnt;
69569+
69570+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
69571+ get_fs_root(reaper->fs, &root);
69572+
69573+ read_seqlock_excl(&mount_lock);
69574+ write_seqlock(&rename_lock);
69575+ res = gen_full_path(&path, &root, buf, buflen);
69576+ write_sequnlock(&rename_lock);
69577+ read_sequnlock_excl(&mount_lock);
69578+
69579+ path_put(&root);
69580+ return res;
69581+}
69582+
69583+char *
69584+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69585+{
69586+ char *ret;
69587+ read_seqlock_excl(&mount_lock);
69588+ write_seqlock(&rename_lock);
69589+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69590+ PAGE_SIZE);
69591+ write_sequnlock(&rename_lock);
69592+ read_sequnlock_excl(&mount_lock);
69593+ return ret;
69594+}
69595+
69596+static char *
69597+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69598+{
69599+ char *ret;
69600+ char *buf;
69601+ int buflen;
69602+
69603+ read_seqlock_excl(&mount_lock);
69604+ write_seqlock(&rename_lock);
69605+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
69606+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
69607+ buflen = (int)(ret - buf);
69608+ if (buflen >= 5)
69609+ prepend(&ret, &buflen, "/proc", 5);
69610+ else
69611+ ret = strcpy(buf, "<path too long>");
69612+ write_sequnlock(&rename_lock);
69613+ read_sequnlock_excl(&mount_lock);
69614+ return ret;
69615+}
69616+
69617+char *
69618+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
69619+{
69620+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69621+ PAGE_SIZE);
69622+}
69623+
69624+char *
69625+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
69626+{
69627+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
69628+ PAGE_SIZE);
69629+}
69630+
69631+char *
69632+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
69633+{
69634+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
69635+ PAGE_SIZE);
69636+}
69637+
69638+char *
69639+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
69640+{
69641+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
69642+ PAGE_SIZE);
69643+}
69644+
69645+char *
69646+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
69647+{
69648+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
69649+ PAGE_SIZE);
69650+}
69651+
69652+__inline__ __u32
69653+to_gr_audit(const __u32 reqmode)
69654+{
69655+ /* masks off auditable permission flags, then shifts them to create
69656+ auditing flags, and adds the special case of append auditing if
69657+ we're requesting write */
69658+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
69659+}
69660+
69661+struct acl_role_label *
69662+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
69663+ const gid_t gid)
69664+{
69665+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
69666+ struct acl_role_label *match;
69667+ struct role_allowed_ip *ipp;
69668+ unsigned int x;
69669+ u32 curr_ip = task->signal->saved_ip;
69670+
69671+ match = state->acl_role_set.r_hash[index];
69672+
69673+ while (match) {
69674+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
69675+ for (x = 0; x < match->domain_child_num; x++) {
69676+ if (match->domain_children[x] == uid)
69677+ goto found;
69678+ }
69679+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
69680+ break;
69681+ match = match->next;
69682+ }
69683+found:
69684+ if (match == NULL) {
69685+ try_group:
69686+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
69687+ match = state->acl_role_set.r_hash[index];
69688+
69689+ while (match) {
69690+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
69691+ for (x = 0; x < match->domain_child_num; x++) {
69692+ if (match->domain_children[x] == gid)
69693+ goto found2;
69694+ }
69695+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
69696+ break;
69697+ match = match->next;
69698+ }
69699+found2:
69700+ if (match == NULL)
69701+ match = state->default_role;
69702+ if (match->allowed_ips == NULL)
69703+ return match;
69704+ else {
69705+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69706+ if (likely
69707+ ((ntohl(curr_ip) & ipp->netmask) ==
69708+ (ntohl(ipp->addr) & ipp->netmask)))
69709+ return match;
69710+ }
69711+ match = state->default_role;
69712+ }
69713+ } else if (match->allowed_ips == NULL) {
69714+ return match;
69715+ } else {
69716+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69717+ if (likely
69718+ ((ntohl(curr_ip) & ipp->netmask) ==
69719+ (ntohl(ipp->addr) & ipp->netmask)))
69720+ return match;
69721+ }
69722+ goto try_group;
69723+ }
69724+
69725+ return match;
69726+}
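
The control flow above is easy to misread, so a summary annotation:

    /* Lookup order implemented above: user role/domain first, then group
     * role/domain, then the default role.  A role carrying an allowed_ips
     * list only matches when the task's saved source IP falls within one of
     * its netmasks; a user role rejected on IP grounds falls through to the
     * group lookup (the "goto try_group" path), and a group role rejected
     * on IP grounds falls back to the default role. */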
69727+
69728+static struct acl_role_label *
69729+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
69730+ const gid_t gid)
69731+{
69732+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
69733+}
69734+
69735+struct acl_subject_label *
69736+lookup_acl_subj_label(const u64 ino, const dev_t dev,
69737+ const struct acl_role_label *role)
69738+{
69739+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69740+ struct acl_subject_label *match;
69741+
69742+ match = role->subj_hash[index];
69743+
69744+ while (match && (match->inode != ino || match->device != dev ||
69745+ (match->mode & GR_DELETED))) {
69746+ match = match->next;
69747+ }
69748+
69749+ if (match && !(match->mode & GR_DELETED))
69750+ return match;
69751+ else
69752+ return NULL;
69753+}
69754+
69755+struct acl_subject_label *
69756+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
69757+ const struct acl_role_label *role)
69758+{
69759+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69760+ struct acl_subject_label *match;
69761+
69762+ match = role->subj_hash[index];
69763+
69764+ while (match && (match->inode != ino || match->device != dev ||
69765+ !(match->mode & GR_DELETED))) {
69766+ match = match->next;
69767+ }
69768+
69769+ if (match && (match->mode & GR_DELETED))
69770+ return match;
69771+ else
69772+ return NULL;
69773+}
69774+
69775+static struct acl_object_label *
69776+lookup_acl_obj_label(const u64 ino, const dev_t dev,
69777+ const struct acl_subject_label *subj)
69778+{
69779+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69780+ struct acl_object_label *match;
69781+
69782+ match = subj->obj_hash[index];
69783+
69784+ while (match && (match->inode != ino || match->device != dev ||
69785+ (match->mode & GR_DELETED))) {
69786+ match = match->next;
69787+ }
69788+
69789+ if (match && !(match->mode & GR_DELETED))
69790+ return match;
69791+ else
69792+ return NULL;
69793+}
69794+
69795+static struct acl_object_label *
69796+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
69797+ const struct acl_subject_label *subj)
69798+{
69799+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69800+ struct acl_object_label *match;
69801+
69802+ match = subj->obj_hash[index];
69803+
69804+ while (match && (match->inode != ino || match->device != dev ||
69805+ !(match->mode & GR_DELETED))) {
69806+ match = match->next;
69807+ }
69808+
69809+ if (match && (match->mode & GR_DELETED))
69810+ return match;
69811+
69812+ match = subj->obj_hash[index];
69813+
69814+ while (match && (match->inode != ino || match->device != dev ||
69815+ (match->mode & GR_DELETED))) {
69816+ match = match->next;
69817+ }
69818+
69819+ if (match && !(match->mode & GR_DELETED))
69820+ return match;
69821+ else
69822+ return NULL;
69823+}
69824+
69825+struct name_entry *
69826+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
69827+{
69828+ unsigned int len = strlen(name);
69829+ unsigned int key = full_name_hash(name, len);
69830+ unsigned int index = key % state->name_set.n_size;
69831+ struct name_entry *match;
69832+
69833+ match = state->name_set.n_hash[index];
69834+
69835+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
69836+ match = match->next;
69837+
69838+ return match;
69839+}
69840+
69841+static struct name_entry *
69842+lookup_name_entry(const char *name)
69843+{
69844+ return __lookup_name_entry(&running_polstate, name);
69845+}
69846+
69847+static struct name_entry *
69848+lookup_name_entry_create(const char *name)
69849+{
69850+ unsigned int len = strlen(name);
69851+ unsigned int key = full_name_hash(name, len);
69852+ unsigned int index = key % running_polstate.name_set.n_size;
69853+ struct name_entry *match;
69854+
69855+ match = running_polstate.name_set.n_hash[index];
69856+
69857+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
69858+ !match->deleted))
69859+ match = match->next;
69860+
69861+ if (match && match->deleted)
69862+ return match;
69863+
69864+ match = running_polstate.name_set.n_hash[index];
69865+
69866+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
69867+ match->deleted))
69868+ match = match->next;
69869+
69870+ if (match && !match->deleted)
69871+ return match;
69872+ else
69873+ return NULL;
69874+}
69875+
69876+static struct inodev_entry *
69877+lookup_inodev_entry(const u64 ino, const dev_t dev)
69878+{
69879+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
69880+ struct inodev_entry *match;
69881+
69882+ match = running_polstate.inodev_set.i_hash[index];
69883+
69884+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
69885+ match = match->next;
69886+
69887+ return match;
69888+}
69889+
69890+void
69891+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
69892+{
69893+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
69894+ state->inodev_set.i_size);
69895+ struct inodev_entry **curr;
69896+
69897+ entry->prev = NULL;
69898+
69899+ curr = &state->inodev_set.i_hash[index];
69900+ if (*curr != NULL)
69901+ (*curr)->prev = entry;
69902+
69903+ entry->next = *curr;
69904+ *curr = entry;
69905+
69906+ return;
69907+}
69908+
69909+static void
69910+insert_inodev_entry(struct inodev_entry *entry)
69911+{
69912+ __insert_inodev_entry(&running_polstate, entry);
69913+}
69914+
69915+void
69916+insert_acl_obj_label(struct acl_object_label *obj,
69917+ struct acl_subject_label *subj)
69918+{
69919+ unsigned int index =
69920+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
69921+ struct acl_object_label **curr;
69922+
69923+ obj->prev = NULL;
69924+
69925+ curr = &subj->obj_hash[index];
69926+ if (*curr != NULL)
69927+ (*curr)->prev = obj;
69928+
69929+ obj->next = *curr;
69930+ *curr = obj;
69931+
69932+ return;
69933+}
69934+
69935+void
69936+insert_acl_subj_label(struct acl_subject_label *obj,
69937+ struct acl_role_label *role)
69938+{
69939+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
69940+ struct acl_subject_label **curr;
69941+
69942+ obj->prev = NULL;
69943+
69944+ curr = &role->subj_hash[index];
69945+ if (*curr != NULL)
69946+ (*curr)->prev = obj;
69947+
69948+ obj->next = *curr;
69949+ *curr = obj;
69950+
69951+ return;
69952+}
69953+
69954+/* derived from glibc fnmatch(); 0: match, 1: no match */
69955+
69956+static int
69957+glob_match(const char *p, const char *n)
69958+{
69959+ char c;
69960+
69961+ while ((c = *p++) != '\0') {
69962+ switch (c) {
69963+ case '?':
69964+ if (*n == '\0')
69965+ return 1;
69966+ else if (*n == '/')
69967+ return 1;
69968+ break;
69969+ case '\\':
69970+ if (*n != c)
69971+ return 1;
69972+ break;
69973+ case '*':
69974+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
69975+ if (*n == '/')
69976+ return 1;
69977+ else if (c == '?') {
69978+ if (*n == '\0')
69979+ return 1;
69980+ else
69981+ ++n;
69982+ }
69983+ }
69984+ if (c == '\0') {
69985+ return 0;
69986+ } else {
69987+ const char *endp;
69988+
69989+ if ((endp = strchr(n, '/')) == NULL)
69990+ endp = n + strlen(n);
69991+
69992+ if (c == '[') {
69993+ for (--p; n < endp; ++n)
69994+ if (!glob_match(p, n))
69995+ return 0;
69996+ } else if (c == '/') {
69997+ while (*n != '\0' && *n != '/')
69998+ ++n;
69999+ if (*n == '/' && !glob_match(p, n + 1))
70000+ return 0;
70001+ } else {
70002+ for (--p; n < endp; ++n)
70003+ if (*n == c && !glob_match(p, n))
70004+ return 0;
70005+ }
70006+
70007+ return 1;
70008+ }
70009+ case '[':
70010+ {
70011+ int not;
70012+ char cold;
70013+
70014+ if (*n == '\0' || *n == '/')
70015+ return 1;
70016+
70017+ not = (*p == '!' || *p == '^');
70018+ if (not)
70019+ ++p;
70020+
70021+ c = *p++;
70022+ for (;;) {
70023+ unsigned char fn = (unsigned char)*n;
70024+
70025+ if (c == '\0')
70026+ return 1;
70027+ else {
70028+ if (c == fn)
70029+ goto matched;
70030+ cold = c;
70031+ c = *p++;
70032+
70033+ if (c == '-' && *p != ']') {
70034+ unsigned char cend = *p++;
70035+
70036+ if (cend == '\0')
70037+ return 1;
70038+
70039+ if (cold <= fn && fn <= cend)
70040+ goto matched;
70041+
70042+ c = *p++;
70043+ }
70044+ }
70045+
70046+ if (c == ']')
70047+ break;
70048+ }
70049+ if (!not)
70050+ return 1;
70051+ break;
70052+ matched:
70053+ while (c != ']') {
70054+ if (c == '\0')
70055+ return 1;
70056+
70057+ c = *p++;
70058+ }
70059+ if (not)
70060+ return 1;
70061+ }
70062+ break;
70063+ default:
70064+ if (c != *n)
70065+ return 1;
70066+ }
70067+
70068+ ++n;
70069+ }
70070+
70071+ if (*n == '\0')
70072+ return 0;
70073+
70074+ if (*n == '/')
70075+ return 0;
70076+
70077+ return 1;
70078+}
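
A few illustrative expectations for glob_match() (0 means match, per the
comment above); note that '?' and bracket classes never match '/':

    /* Annotation -- example outcomes:
     *   glob_match("/dev/tty?", "/dev/tty1")        -> 0  (match)
     *   glob_match("/dev/tty?", "/dev/tty")         -> 1  ('?' needs a char)
     *   glob_match("/home/*/bin", "/home/user/bin") -> 0  ('*' spans one component)
     *   glob_match("/dev/tty[0-3]", "/dev/tty9")    -> 1  (outside the class)
     */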
70079+
70080+static struct acl_object_label *
70081+chk_glob_label(struct acl_object_label *globbed,
70082+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
70083+{
70084+ struct acl_object_label *tmp;
70085+
70086+ if (*path == NULL)
70087+ *path = gr_to_filename_nolock(dentry, mnt);
70088+
70089+ tmp = globbed;
70090+
70091+ while (tmp) {
70092+ if (!glob_match(tmp->filename, *path))
70093+ return tmp;
70094+ tmp = tmp->next;
70095+ }
70096+
70097+ return NULL;
70098+}
70099+
70100+static struct acl_object_label *
70101+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70102+ const u64 curr_ino, const dev_t curr_dev,
70103+ const struct acl_subject_label *subj, char **path, const int checkglob)
70104+{
70105+ struct acl_subject_label *tmpsubj;
70106+ struct acl_object_label *retval;
70107+ struct acl_object_label *retval2;
70108+
70109+ tmpsubj = (struct acl_subject_label *) subj;
70110+ read_lock(&gr_inode_lock);
70111+ do {
70112+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
70113+ if (retval) {
70114+ if (checkglob && retval->globbed) {
70115+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
70116+ if (retval2)
70117+ retval = retval2;
70118+ }
70119+ break;
70120+ }
70121+ } while ((tmpsubj = tmpsubj->parent_subject));
70122+ read_unlock(&gr_inode_lock);
70123+
70124+ return retval;
70125+}
70126+
70127+static __inline__ struct acl_object_label *
70128+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70129+ struct dentry *curr_dentry,
70130+ const struct acl_subject_label *subj, char **path, const int checkglob)
70131+{
70132+ int newglob = checkglob;
70133+ u64 inode;
70134+ dev_t device;
70135+
70136+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
70137+ as we don't want a / * rule to match instead of the / object
70138+ don't do this for create lookups that call this function though, since they're looking up
70139+ on the parent and thus need globbing checks on all paths
70140+ */
70141+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
70142+ newglob = GR_NO_GLOB;
70143+
70144+ spin_lock(&curr_dentry->d_lock);
70145+ inode = __get_ino(curr_dentry);
70146+ device = __get_dev(curr_dentry);
70147+ spin_unlock(&curr_dentry->d_lock);
70148+
70149+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
70150+}
70151+
70152+#ifdef CONFIG_HUGETLBFS
70153+static inline bool
70154+is_hugetlbfs_mnt(const struct vfsmount *mnt)
70155+{
70156+ int i;
70157+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
70158+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
70159+ return true;
70160+ }
70161+
70162+ return false;
70163+}
70164+#endif
70165+
70166+static struct acl_object_label *
70167+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70168+ const struct acl_subject_label *subj, char *path, const int checkglob)
70169+{
70170+ struct dentry *dentry = (struct dentry *) l_dentry;
70171+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70172+ struct mount *real_mnt = real_mount(mnt);
70173+ struct acl_object_label *retval;
70174+ struct dentry *parent;
70175+
70176+ read_seqlock_excl(&mount_lock);
70177+ write_seqlock(&rename_lock);
70178+
70179+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
70180+#ifdef CONFIG_NET
70181+ mnt == sock_mnt ||
70182+#endif
70183+#ifdef CONFIG_HUGETLBFS
70184+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
70185+#endif
70186+ /* ignore Eric Biederman */
70187+ IS_PRIVATE(l_dentry->d_inode))) {
70188+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
70189+ goto out;
70190+ }
70191+
70192+ for (;;) {
70193+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70194+ break;
70195+
70196+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70197+ if (!mnt_has_parent(real_mnt))
70198+ break;
70199+
70200+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70201+ if (retval != NULL)
70202+ goto out;
70203+
70204+ dentry = real_mnt->mnt_mountpoint;
70205+ real_mnt = real_mnt->mnt_parent;
70206+ mnt = &real_mnt->mnt;
70207+ continue;
70208+ }
70209+
70210+ parent = dentry->d_parent;
70211+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70212+ if (retval != NULL)
70213+ goto out;
70214+
70215+ dentry = parent;
70216+ }
70217+
70218+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70219+
70220+ /* gr_real_root is pinned so we don't have to hold a reference */
70221+ if (retval == NULL)
70222+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
70223+out:
70224+ write_sequnlock(&rename_lock);
70225+ read_sequnlock_excl(&mount_lock);
70226+
70227+ BUG_ON(retval == NULL);
70228+
70229+ return retval;
70230+}
70231+
70232+static __inline__ struct acl_object_label *
70233+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70234+ const struct acl_subject_label *subj)
70235+{
70236+ char *path = NULL;
70237+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
70238+}
70239+
70240+static __inline__ struct acl_object_label *
70241+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70242+ const struct acl_subject_label *subj)
70243+{
70244+ char *path = NULL;
70245+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
70246+}
70247+
70248+static __inline__ struct acl_object_label *
70249+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70250+ const struct acl_subject_label *subj, char *path)
70251+{
70252+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
70253+}
70254+
70255+struct acl_subject_label *
70256+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70257+ const struct acl_role_label *role)
70258+{
70259+ struct dentry *dentry = (struct dentry *) l_dentry;
70260+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70261+ struct mount *real_mnt = real_mount(mnt);
70262+ struct acl_subject_label *retval;
70263+ struct dentry *parent;
70264+
70265+ read_seqlock_excl(&mount_lock);
70266+ write_seqlock(&rename_lock);
70267+
70268+ for (;;) {
70269+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70270+ break;
70271+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70272+ if (!mnt_has_parent(real_mnt))
70273+ break;
70274+
70275+ spin_lock(&dentry->d_lock);
70276+ read_lock(&gr_inode_lock);
70277+ retval =
70278+ lookup_acl_subj_label(__get_ino(dentry),
70279+ __get_dev(dentry), role);
70280+ read_unlock(&gr_inode_lock);
70281+ spin_unlock(&dentry->d_lock);
70282+ if (retval != NULL)
70283+ goto out;
70284+
70285+ dentry = real_mnt->mnt_mountpoint;
70286+ real_mnt = real_mnt->mnt_parent;
70287+ mnt = &real_mnt->mnt;
70288+ continue;
70289+ }
70290+
70291+ spin_lock(&dentry->d_lock);
70292+ read_lock(&gr_inode_lock);
70293+ retval = lookup_acl_subj_label(__get_ino(dentry),
70294+ __get_dev(dentry), role);
70295+ read_unlock(&gr_inode_lock);
70296+ parent = dentry->d_parent;
70297+ spin_unlock(&dentry->d_lock);
70298+
70299+ if (retval != NULL)
70300+ goto out;
70301+
70302+ dentry = parent;
70303+ }
70304+
70305+ spin_lock(&dentry->d_lock);
70306+ read_lock(&gr_inode_lock);
70307+ retval = lookup_acl_subj_label(__get_ino(dentry),
70308+ __get_dev(dentry), role);
70309+ read_unlock(&gr_inode_lock);
70310+ spin_unlock(&dentry->d_lock);
70311+
70312+ if (unlikely(retval == NULL)) {
70313+ /* gr_real_root is pinned, we don't need to hold a reference */
70314+ read_lock(&gr_inode_lock);
70315+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
70316+ __get_dev(gr_real_root.dentry), role);
70317+ read_unlock(&gr_inode_lock);
70318+ }
70319+out:
70320+ write_sequnlock(&rename_lock);
70321+ read_sequnlock_excl(&mount_lock);
70322+
70323+ BUG_ON(retval == NULL);
70324+
70325+ return retval;
70326+}
70327+
70328+void
70329+assign_special_role(const char *rolename)
70330+{
70331+ struct acl_object_label *obj;
70332+ struct acl_role_label *r;
70333+ struct acl_role_label *assigned = NULL;
70334+ struct task_struct *tsk;
70335+ struct file *filp;
70336+
70337+ FOR_EACH_ROLE_START(r)
70338+ if (!strcmp(rolename, r->rolename) &&
70339+ (r->roletype & GR_ROLE_SPECIAL)) {
70340+ assigned = r;
70341+ break;
70342+ }
70343+ FOR_EACH_ROLE_END(r)
70344+
70345+ if (!assigned)
70346+ return;
70347+
70348+ read_lock(&tasklist_lock);
70349+ read_lock(&grsec_exec_file_lock);
70350+
70351+ tsk = current->real_parent;
70352+ if (tsk == NULL)
70353+ goto out_unlock;
70354+
70355+ filp = tsk->exec_file;
70356+ if (filp == NULL)
70357+ goto out_unlock;
70358+
70359+ tsk->is_writable = 0;
70360+ tsk->inherited = 0;
70361+
70362+ tsk->acl_sp_role = 1;
70363+ tsk->acl_role_id = ++acl_sp_role_value;
70364+ tsk->role = assigned;
70365+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
70366+
70367+ /* ignore additional mmap checks for processes that are writable
70368+ by the default ACL */
70369+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70370+ if (unlikely(obj->mode & GR_WRITE))
70371+ tsk->is_writable = 1;
70372+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
70373+ if (unlikely(obj->mode & GR_WRITE))
70374+ tsk->is_writable = 1;
70375+
70376+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70377+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
70378+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
70379+#endif
70380+
70381+out_unlock:
70382+ read_unlock(&grsec_exec_file_lock);
70383+ read_unlock(&tasklist_lock);
70384+ return;
70385+}
70386+
70387+
70388+static void
70389+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
70390+{
70391+ struct task_struct *task = current;
70392+ const struct cred *cred = current_cred();
70393+
70394+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
70395+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70396+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70397+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
70398+
70399+ return;
70400+}
70401+
70402+static void
70403+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
70404+{
70405+ struct task_struct *task = current;
70406+ const struct cred *cred = current_cred();
70407+
70408+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70409+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70410+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70411+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
70412+
70413+ return;
70414+}
70415+
70416+static void
70417+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
70418+{
70419+ struct task_struct *task = current;
70420+ const struct cred *cred = current_cred();
70421+
70422+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70423+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70424+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70425+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
70426+
70427+ return;
70428+}
70429+
70430+static void
70431+gr_set_proc_res(struct task_struct *task)
70432+{
70433+ struct acl_subject_label *proc;
70434+ unsigned short i;
70435+
70436+ proc = task->acl;
70437+
70438+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
70439+ return;
70440+
70441+ for (i = 0; i < RLIM_NLIMITS; i++) {
70442+ unsigned long rlim_cur, rlim_max;
70443+
70444+ if (!(proc->resmask & (1U << i)))
70445+ continue;
70446+
70447+ rlim_cur = proc->res[i].rlim_cur;
70448+ rlim_max = proc->res[i].rlim_max;
70449+
70450+ if (i == RLIMIT_NOFILE) {
70451+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
70452+ if (rlim_cur > saved_sysctl_nr_open)
70453+ rlim_cur = saved_sysctl_nr_open;
70454+ if (rlim_max > saved_sysctl_nr_open)
70455+ rlim_max = saved_sysctl_nr_open;
70456+ }
70457+
70458+ task->signal->rlim[i].rlim_cur = rlim_cur;
70459+ task->signal->rlim[i].rlim_max = rlim_max;
70460+
70461+ if (i == RLIMIT_CPU)
70462+ update_rlimit_cpu(task, rlim_cur);
70463+ }
70464+
70465+ return;
70466+}
70467+
70468+/* both of the below must be called with
70469+	rcu_read_lock();
70470+	read_lock(&tasklist_lock);
70471+	read_lock(&grsec_exec_file_lock);
70472+   held, except in the case of gr_set_role_label() (for __gr_get_subject_for_task)
70473+*/
70474+
70475+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
70476+{
70477+ char *tmpname;
70478+ struct acl_subject_label *tmpsubj;
70479+ struct file *filp;
70480+ struct name_entry *nmatch;
70481+
70482+ filp = task->exec_file;
70483+ if (filp == NULL)
70484+ return NULL;
70485+
70486+	/* the following applies the correct subject to
70487+	   binaries that were already running when the
70488+	   RBAC system was enabled and that have since
70489+	   been replaced or deleted
70490+	   -----
70491+	   when the RBAC system starts, the inode/dev
70492+	   from exec_file will be one the RBAC system
70493+	   is unaware of; it only knows the inode/dev
70494+	   of the file currently on disk, or its
70495+	   absence.
70496+	*/
70497+
70498+ if (filename)
70499+ nmatch = __lookup_name_entry(state, filename);
70500+ else {
70501+ preempt_disable();
70502+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
70503+
70504+ nmatch = __lookup_name_entry(state, tmpname);
70505+ preempt_enable();
70506+ }
70507+ tmpsubj = NULL;
70508+ if (nmatch) {
70509+ if (nmatch->deleted)
70510+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
70511+ else
70512+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
70513+ }
70514+ /* this also works for the reload case -- if we don't match a potentially inherited subject
70515+ then we fall back to a normal lookup based on the binary's ino/dev
70516+ */
70517+ if (tmpsubj == NULL && fallback)
70518+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
70519+
70520+ return tmpsubj;
70521+}
70522+
70523+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
70524+{
70525+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
70526+}
70527+
70528+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
70529+{
70530+ struct acl_object_label *obj;
70531+ struct file *filp;
70532+
70533+ filp = task->exec_file;
70534+
70535+ task->acl = subj;
70536+ task->is_writable = 0;
70537+ /* ignore additional mmap checks for processes that are writable
70538+ by the default ACL */
70539+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
70540+ if (unlikely(obj->mode & GR_WRITE))
70541+ task->is_writable = 1;
70542+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
70543+ if (unlikely(obj->mode & GR_WRITE))
70544+ task->is_writable = 1;
70545+
70546+ gr_set_proc_res(task);
70547+
70548+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70549+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
70550+#endif
70551+}
70552+
70553+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
70554+{
70555+ __gr_apply_subject_to_task(&running_polstate, task, subj);
70556+}
70557+
70558+__u32
70559+gr_search_file(const struct dentry * dentry, const __u32 mode,
70560+ const struct vfsmount * mnt)
70561+{
70562+ __u32 retval = mode;
70563+ struct acl_subject_label *curracl;
70564+ struct acl_object_label *currobj;
70565+
70566+ if (unlikely(!(gr_status & GR_READY)))
70567+ return (mode & ~GR_AUDITS);
70568+
70569+ curracl = current->acl;
70570+
70571+ currobj = chk_obj_label(dentry, mnt, curracl);
70572+ retval = currobj->mode & mode;
70573+
70574+ /* if we're opening a specified transfer file for writing
70575+ (e.g. /dev/initctl), then transfer our role to init
70576+ */
70577+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
70578+ current->role->roletype & GR_ROLE_PERSIST)) {
70579+ struct task_struct *task = init_pid_ns.child_reaper;
70580+
70581+ if (task->role != current->role) {
70582+ struct acl_subject_label *subj;
70583+
70584+ task->acl_sp_role = 0;
70585+ task->acl_role_id = current->acl_role_id;
70586+ task->role = current->role;
70587+ rcu_read_lock();
70588+ read_lock(&grsec_exec_file_lock);
70589+ subj = gr_get_subject_for_task(task, NULL, 1);
70590+ gr_apply_subject_to_task(task, subj);
70591+ read_unlock(&grsec_exec_file_lock);
70592+ rcu_read_unlock();
70593+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
70594+ }
70595+ }
70596+
70597+	if (unlikely((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
70598+		     !(mode & GR_NOPTRACE) &&
70599+		     (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
70600+ __u32 new_mode = mode;
70601+
70602+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70603+
70604+ retval = new_mode;
70605+
70606+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
70607+ new_mode |= GR_INHERIT;
70608+
70609+ if (!(mode & GR_NOLEARN))
70610+ gr_log_learn(dentry, mnt, new_mode);
70611+ }
70612+
70613+ return retval;
70614+}
70615+
70616+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
70617+ const struct dentry *parent,
70618+ const struct vfsmount *mnt)
70619+{
70620+ struct name_entry *match;
70621+ struct acl_object_label *matchpo;
70622+ struct acl_subject_label *curracl;
70623+ char *path;
70624+
70625+ if (unlikely(!(gr_status & GR_READY)))
70626+ return NULL;
70627+
70628+ preempt_disable();
70629+ path = gr_to_filename_rbac(new_dentry, mnt);
70630+ match = lookup_name_entry_create(path);
70631+
70632+ curracl = current->acl;
70633+
70634+ if (match) {
70635+ read_lock(&gr_inode_lock);
70636+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
70637+ read_unlock(&gr_inode_lock);
70638+
70639+ if (matchpo) {
70640+ preempt_enable();
70641+ return matchpo;
70642+ }
70643+ }
70644+
70645+ // lookup parent
70646+
70647+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
70648+
70649+ preempt_enable();
70650+ return matchpo;
70651+}
70652+
70653+__u32
70654+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
70655+ const struct vfsmount * mnt, const __u32 mode)
70656+{
70657+ struct acl_object_label *matchpo;
70658+ __u32 retval;
70659+
70660+ if (unlikely(!(gr_status & GR_READY)))
70661+ return (mode & ~GR_AUDITS);
70662+
70663+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
70664+
70665+ retval = matchpo->mode & mode;
70666+
70667+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
70668+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
70669+ __u32 new_mode = mode;
70670+
70671+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70672+
70673+ gr_log_learn(new_dentry, mnt, new_mode);
70674+ return new_mode;
70675+ }
70676+
70677+ return retval;
70678+}
70679+
70680+__u32
70681+gr_check_link(const struct dentry * new_dentry,
70682+ const struct dentry * parent_dentry,
70683+ const struct vfsmount * parent_mnt,
70684+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
70685+{
70686+ struct acl_object_label *obj;
70687+ __u32 oldmode, newmode;
70688+ __u32 needmode;
70689+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
70690+ GR_DELETE | GR_INHERIT;
70691+
70692+ if (unlikely(!(gr_status & GR_READY)))
70693+ return (GR_CREATE | GR_LINK);
70694+
70695+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
70696+ oldmode = obj->mode;
70697+
70698+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
70699+ newmode = obj->mode;
70700+
70701+ needmode = newmode & checkmodes;
70702+
70703+ // old name for hardlink must have at least the permissions of the new name
70704+ if ((oldmode & needmode) != needmode)
70705+ goto bad;
70706+
70707+ // if old name had restrictions/auditing, make sure the new name does as well
70708+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
70709+
70710+ // don't allow hardlinking of suid/sgid/fcapped files without permission
70711+ if (is_privileged_binary(old_dentry))
70712+ needmode |= GR_SETID;
70713+
70714+ if ((newmode & needmode) != needmode)
70715+ goto bad;
70716+
70717+ // enforce minimum permissions
70718+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
70719+ return newmode;
70720+bad:
70721+ needmode = oldmode;
70722+ if (is_privileged_binary(old_dentry))
70723+ needmode |= GR_SETID;
70724+
70725+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
70726+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
70727+ return (GR_CREATE | GR_LINK);
70728+ } else if (newmode & GR_SUPPRESS)
70729+ return GR_SUPPRESS;
70730+ else
70731+ return 0;
70732+}
70733+
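/* Editorial note, not part of the original patch: the hardlink checks in
 * gr_check_link() above are subset tests on mode bitmasks; the idiom
 * (x & need) != need asks whether any bit of need is missing from x.
 * A minimal standalone illustration follows; the flag values are
 * hypothetical, not grsecurity's.
 */
#define F_READ  0x1U
#define F_WRITE 0x2U

/* nonzero iff every bit set in need is also set in have */
static int has_all_bits(unsigned int have, unsigned int need)
{
	return (have & need) == need;
}
/* has_all_bits(F_READ | F_WRITE, F_READ) == 1,
   has_all_bits(F_READ, F_READ | F_WRITE) == 0 */
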
70734+int
70735+gr_check_hidden_task(const struct task_struct *task)
70736+{
70737+ if (unlikely(!(gr_status & GR_READY)))
70738+ return 0;
70739+
70740+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
70741+ return 1;
70742+
70743+ return 0;
70744+}
70745+
70746+int
70747+gr_check_protected_task(const struct task_struct *task)
70748+{
70749+ if (unlikely(!(gr_status & GR_READY) || !task))
70750+ return 0;
70751+
70752+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70753+ task->acl != current->acl)
70754+ return 1;
70755+
70756+ return 0;
70757+}
70758+
70759+int
70760+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
70761+{
70762+ struct task_struct *p;
70763+ int ret = 0;
70764+
70765+ if (unlikely(!(gr_status & GR_READY) || !pid))
70766+ return ret;
70767+
70768+ read_lock(&tasklist_lock);
70769+ do_each_pid_task(pid, type, p) {
70770+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70771+ p->acl != current->acl) {
70772+ ret = 1;
70773+ goto out;
70774+ }
70775+ } while_each_pid_task(pid, type, p);
70776+out:
70777+ read_unlock(&tasklist_lock);
70778+
70779+ return ret;
70780+}
70781+
70782+void
70783+gr_copy_label(struct task_struct *tsk)
70784+{
70785+ struct task_struct *p = current;
70786+
70787+ tsk->inherited = p->inherited;
70788+ tsk->acl_sp_role = 0;
70789+ tsk->acl_role_id = p->acl_role_id;
70790+ tsk->acl = p->acl;
70791+ tsk->role = p->role;
70792+ tsk->signal->used_accept = 0;
70793+ tsk->signal->curr_ip = p->signal->curr_ip;
70794+ tsk->signal->saved_ip = p->signal->saved_ip;
70795+ if (p->exec_file)
70796+ get_file(p->exec_file);
70797+ tsk->exec_file = p->exec_file;
70798+ tsk->is_writable = p->is_writable;
70799+ if (unlikely(p->signal->used_accept)) {
70800+ p->signal->curr_ip = 0;
70801+ p->signal->saved_ip = 0;
70802+ }
70803+
70804+ return;
70805+}
70806+
70807+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
70808+
70809+int
70810+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
70811+{
70812+ unsigned int i;
70813+ __u16 num;
70814+ uid_t *uidlist;
70815+ uid_t curuid;
70816+ int realok = 0;
70817+ int effectiveok = 0;
70818+ int fsok = 0;
70819+ uid_t globalreal, globaleffective, globalfs;
70820+
70821+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
70822+ struct user_struct *user;
70823+
70824+ if (!uid_valid(real))
70825+ goto skipit;
70826+
70827+ /* find user based on global namespace */
70828+
70829+ globalreal = GR_GLOBAL_UID(real);
70830+
70831+ user = find_user(make_kuid(&init_user_ns, globalreal));
70832+ if (user == NULL)
70833+ goto skipit;
70834+
70835+ if (gr_process_kernel_setuid_ban(user)) {
70836+ /* for find_user */
70837+ free_uid(user);
70838+ return 1;
70839+ }
70840+
70841+ /* for find_user */
70842+ free_uid(user);
70843+
70844+skipit:
70845+#endif
70846+
70847+ if (unlikely(!(gr_status & GR_READY)))
70848+ return 0;
70849+
70850+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
70851+ gr_log_learn_uid_change(real, effective, fs);
70852+
70853+ num = current->acl->user_trans_num;
70854+ uidlist = current->acl->user_transitions;
70855+
70856+ if (uidlist == NULL)
70857+ return 0;
70858+
70859+ if (!uid_valid(real)) {
70860+ realok = 1;
70861+ globalreal = (uid_t)-1;
70862+ } else {
70863+ globalreal = GR_GLOBAL_UID(real);
70864+ }
70865+ if (!uid_valid(effective)) {
70866+ effectiveok = 1;
70867+ globaleffective = (uid_t)-1;
70868+ } else {
70869+ globaleffective = GR_GLOBAL_UID(effective);
70870+ }
70871+ if (!uid_valid(fs)) {
70872+ fsok = 1;
70873+ globalfs = (uid_t)-1;
70874+ } else {
70875+ globalfs = GR_GLOBAL_UID(fs);
70876+ }
70877+
70878+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
70879+ for (i = 0; i < num; i++) {
70880+ curuid = uidlist[i];
70881+ if (globalreal == curuid)
70882+ realok = 1;
70883+ if (globaleffective == curuid)
70884+ effectiveok = 1;
70885+ if (globalfs == curuid)
70886+ fsok = 1;
70887+ }
70888+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
70889+ for (i = 0; i < num; i++) {
70890+ curuid = uidlist[i];
70891+ if (globalreal == curuid)
70892+ break;
70893+ if (globaleffective == curuid)
70894+ break;
70895+ if (globalfs == curuid)
70896+ break;
70897+ }
70898+ /* not in deny list */
70899+ if (i == num) {
70900+ realok = 1;
70901+ effectiveok = 1;
70902+ fsok = 1;
70903+ }
70904+ }
70905+
70906+ if (realok && effectiveok && fsok)
70907+ return 0;
70908+ else {
70909+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
70910+ return 1;
70911+ }
70912+}
70913+
70914+int
70915+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
70916+{
70917+ unsigned int i;
70918+ __u16 num;
70919+ gid_t *gidlist;
70920+ gid_t curgid;
70921+ int realok = 0;
70922+ int effectiveok = 0;
70923+ int fsok = 0;
70924+ gid_t globalreal, globaleffective, globalfs;
70925+
70926+ if (unlikely(!(gr_status & GR_READY)))
70927+ return 0;
70928+
70929+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
70930+ gr_log_learn_gid_change(real, effective, fs);
70931+
70932+ num = current->acl->group_trans_num;
70933+ gidlist = current->acl->group_transitions;
70934+
70935+ if (gidlist == NULL)
70936+ return 0;
70937+
70938+ if (!gid_valid(real)) {
70939+ realok = 1;
70940+ globalreal = (gid_t)-1;
70941+ } else {
70942+ globalreal = GR_GLOBAL_GID(real);
70943+ }
70944+ if (!gid_valid(effective)) {
70945+ effectiveok = 1;
70946+ globaleffective = (gid_t)-1;
70947+ } else {
70948+ globaleffective = GR_GLOBAL_GID(effective);
70949+ }
70950+ if (!gid_valid(fs)) {
70951+ fsok = 1;
70952+ globalfs = (gid_t)-1;
70953+ } else {
70954+ globalfs = GR_GLOBAL_GID(fs);
70955+ }
70956+
70957+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
70958+ for (i = 0; i < num; i++) {
70959+ curgid = gidlist[i];
70960+ if (globalreal == curgid)
70961+ realok = 1;
70962+ if (globaleffective == curgid)
70963+ effectiveok = 1;
70964+ if (globalfs == curgid)
70965+ fsok = 1;
70966+ }
70967+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
70968+ for (i = 0; i < num; i++) {
70969+ curgid = gidlist[i];
70970+ if (globalreal == curgid)
70971+ break;
70972+ if (globaleffective == curgid)
70973+ break;
70974+ if (globalfs == curgid)
70975+ break;
70976+ }
70977+ /* not in deny list */
70978+ if (i == num) {
70979+ realok = 1;
70980+ effectiveok = 1;
70981+ fsok = 1;
70982+ }
70983+ }
70984+
70985+ if (realok && effectiveok && fsok)
70986+ return 0;
70987+ else {
70988+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
70989+ return 1;
70990+ }
70991+}
70992+
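/* Editorial note, not part of the original patch: a condensed sketch of the
 * GR_ID_ALLOW/GR_ID_DENY branches in gr_check_user_change() and
 * gr_check_group_change() above.  With an ALLOW list every requested id must
 * appear in the list; with a DENY list no requested id may appear.  Types and
 * names here are illustrative, not grsecurity's.
 */
enum trans_type { TRANS_ALLOW, TRANS_DENY };

static int transition_permitted(enum trans_type type,
				const unsigned int *list, unsigned int num,
				const unsigned int *wanted, unsigned int nwanted)
{
	unsigned int i, j;

	for (j = 0; j < nwanted; j++) {
		int found = 0;

		for (i = 0; i < num; i++)
			if (list[i] == wanted[j])
				found = 1;
		if (type == TRANS_ALLOW && !found)
			return 0;	/* id not whitelisted */
		if (type == TRANS_DENY && found)
			return 0;	/* id blacklisted */
	}
	return 1;
}
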
70993+extern int gr_acl_is_capable(const int cap);
70994+
70995+void
70996+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
70997+{
70998+ struct acl_role_label *role = task->role;
70999+ struct acl_role_label *origrole = role;
71000+ struct acl_subject_label *subj = NULL;
71001+ struct acl_object_label *obj;
71002+ struct file *filp;
71003+ uid_t uid;
71004+ gid_t gid;
71005+
71006+ if (unlikely(!(gr_status & GR_READY)))
71007+ return;
71008+
71009+ uid = GR_GLOBAL_UID(kuid);
71010+ gid = GR_GLOBAL_GID(kgid);
71011+
71012+ filp = task->exec_file;
71013+
71014+ /* kernel process, we'll give them the kernel role */
71015+ if (unlikely(!filp)) {
71016+ task->role = running_polstate.kernel_role;
71017+ task->acl = running_polstate.kernel_role->root_label;
71018+ return;
71019+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
71020+ /* save the current ip at time of role lookup so that the proper
71021+ IP will be learned for role_allowed_ip */
71022+ task->signal->saved_ip = task->signal->curr_ip;
71023+ role = lookup_acl_role_label(task, uid, gid);
71024+ }
71025+
71026+ /* don't change the role if we're not a privileged process */
71027+ if (role && task->role != role &&
71028+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
71029+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
71030+ return;
71031+
71032+ task->role = role;
71033+
71034+ if (task->inherited) {
71035+ /* if we reached our subject through inheritance, then first see
71036+ if there's a subject of the same name in the new role that has
71037+ an object that would result in the same inherited subject
71038+ */
71039+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
71040+ if (subj) {
71041+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
71042+ if (!(obj->mode & GR_INHERIT))
71043+ subj = NULL;
71044+ }
71045+
71046+ }
71047+ if (subj == NULL) {
71048+ /* otherwise:
71049+ perform subject lookup in possibly new role
71050+ we can use this result below in the case where role == task->role
71051+ */
71052+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
71053+ }
71054+
71055+	/* if we changed uid/gid but ended up in the same role
71056+	   and are using inheritance, don't lose the inherited
71057+	   subject: a current subject other than what a normal
71058+	   lookup would produce means we arrived via inheritance,
71059+	   so keep it
71060+	*/
71061+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
71062+ (subj == task->acl)))
71063+ task->acl = subj;
71064+
71065+ /* leave task->inherited unaffected */
71066+
71067+ task->is_writable = 0;
71068+
71069+ /* ignore additional mmap checks for processes that are writable
71070+ by the default ACL */
71071+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71072+ if (unlikely(obj->mode & GR_WRITE))
71073+ task->is_writable = 1;
71074+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71075+ if (unlikely(obj->mode & GR_WRITE))
71076+ task->is_writable = 1;
71077+
71078+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71079+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71080+#endif
71081+
71082+ gr_set_proc_res(task);
71083+
71084+ return;
71085+}
71086+
71087+int
71088+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
71089+ const int unsafe_flags)
71090+{
71091+ struct task_struct *task = current;
71092+ struct acl_subject_label *newacl;
71093+ struct acl_object_label *obj;
71094+ __u32 retmode;
71095+
71096+ if (unlikely(!(gr_status & GR_READY)))
71097+ return 0;
71098+
71099+ newacl = chk_subj_label(dentry, mnt, task->role);
71100+
71101+	/* special handling for the case where we did an strace -f -p <pid>
71102+	   from an admin role and the pid then did an exec
71103+	*/
71104+ rcu_read_lock();
71105+ read_lock(&tasklist_lock);
71106+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
71107+ (task->parent->acl->mode & GR_POVERRIDE))) {
71108+ read_unlock(&tasklist_lock);
71109+ rcu_read_unlock();
71110+ goto skip_check;
71111+ }
71112+ read_unlock(&tasklist_lock);
71113+ rcu_read_unlock();
71114+
71115+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
71116+ !(task->role->roletype & GR_ROLE_GOD) &&
71117+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
71118+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71119+ if (unsafe_flags & LSM_UNSAFE_SHARE)
71120+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
71121+ else
71122+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
71123+ return -EACCES;
71124+ }
71125+
71126+skip_check:
71127+
71128+ obj = chk_obj_label(dentry, mnt, task->acl);
71129+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
71130+
71131+ if (!(task->acl->mode & GR_INHERITLEARN) &&
71132+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
71133+ if (obj->nested)
71134+ task->acl = obj->nested;
71135+ else
71136+ task->acl = newacl;
71137+ task->inherited = 0;
71138+ } else {
71139+ task->inherited = 1;
71140+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
71141+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
71142+ }
71143+
71144+ task->is_writable = 0;
71145+
71146+ /* ignore additional mmap checks for processes that are writable
71147+ by the default ACL */
71148+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
71149+ if (unlikely(obj->mode & GR_WRITE))
71150+ task->is_writable = 1;
71151+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
71152+ if (unlikely(obj->mode & GR_WRITE))
71153+ task->is_writable = 1;
71154+
71155+ gr_set_proc_res(task);
71156+
71157+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71158+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71159+#endif
71160+ return 0;
71161+}
71162+
71163+/* always called with valid inodev ptr */
71164+static void
71165+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
71166+{
71167+ struct acl_object_label *matchpo;
71168+ struct acl_subject_label *matchps;
71169+ struct acl_subject_label *subj;
71170+ struct acl_role_label *role;
71171+ unsigned int x;
71172+
71173+ FOR_EACH_ROLE_START(role)
71174+ FOR_EACH_SUBJECT_START(role, subj, x)
71175+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71176+ matchpo->mode |= GR_DELETED;
71177+ FOR_EACH_SUBJECT_END(subj,x)
71178+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71179+ /* nested subjects aren't in the role's subj_hash table */
71180+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71181+ matchpo->mode |= GR_DELETED;
71182+ FOR_EACH_NESTED_SUBJECT_END(subj)
71183+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
71184+ matchps->mode |= GR_DELETED;
71185+ FOR_EACH_ROLE_END(role)
71186+
71187+ inodev->nentry->deleted = 1;
71188+
71189+ return;
71190+}
71191+
71192+void
71193+gr_handle_delete(const u64 ino, const dev_t dev)
71194+{
71195+ struct inodev_entry *inodev;
71196+
71197+ if (unlikely(!(gr_status & GR_READY)))
71198+ return;
71199+
71200+ write_lock(&gr_inode_lock);
71201+ inodev = lookup_inodev_entry(ino, dev);
71202+ if (inodev != NULL)
71203+ do_handle_delete(inodev, ino, dev);
71204+ write_unlock(&gr_inode_lock);
71205+
71206+ return;
71207+}
71208+
71209+static void
71210+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
71211+ const u64 newinode, const dev_t newdevice,
71212+ struct acl_subject_label *subj)
71213+{
71214+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
71215+ struct acl_object_label *match;
71216+
71217+ match = subj->obj_hash[index];
71218+
71219+ while (match && (match->inode != oldinode ||
71220+ match->device != olddevice ||
71221+ !(match->mode & GR_DELETED)))
71222+ match = match->next;
71223+
71224+ if (match && (match->inode == oldinode)
71225+ && (match->device == olddevice)
71226+ && (match->mode & GR_DELETED)) {
71227+ if (match->prev == NULL) {
71228+ subj->obj_hash[index] = match->next;
71229+ if (match->next != NULL)
71230+ match->next->prev = NULL;
71231+ } else {
71232+ match->prev->next = match->next;
71233+ if (match->next != NULL)
71234+ match->next->prev = match->prev;
71235+ }
71236+ match->prev = NULL;
71237+ match->next = NULL;
71238+ match->inode = newinode;
71239+ match->device = newdevice;
71240+ match->mode &= ~GR_DELETED;
71241+
71242+ insert_acl_obj_label(match, subj);
71243+ }
71244+
71245+ return;
71246+}
71247+
71248+static void
71249+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
71250+ const u64 newinode, const dev_t newdevice,
71251+ struct acl_role_label *role)
71252+{
71253+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
71254+ struct acl_subject_label *match;
71255+
71256+ match = role->subj_hash[index];
71257+
71258+ while (match && (match->inode != oldinode ||
71259+ match->device != olddevice ||
71260+ !(match->mode & GR_DELETED)))
71261+ match = match->next;
71262+
71263+ if (match && (match->inode == oldinode)
71264+ && (match->device == olddevice)
71265+ && (match->mode & GR_DELETED)) {
71266+ if (match->prev == NULL) {
71267+ role->subj_hash[index] = match->next;
71268+ if (match->next != NULL)
71269+ match->next->prev = NULL;
71270+ } else {
71271+ match->prev->next = match->next;
71272+ if (match->next != NULL)
71273+ match->next->prev = match->prev;
71274+ }
71275+ match->prev = NULL;
71276+ match->next = NULL;
71277+ match->inode = newinode;
71278+ match->device = newdevice;
71279+ match->mode &= ~GR_DELETED;
71280+
71281+ insert_acl_subj_label(match, role);
71282+ }
71283+
71284+ return;
71285+}
71286+
71287+static void
71288+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
71289+ const u64 newinode, const dev_t newdevice)
71290+{
71291+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
71292+ struct inodev_entry *match;
71293+
71294+ match = running_polstate.inodev_set.i_hash[index];
71295+
71296+ while (match && (match->nentry->inode != oldinode ||
71297+ match->nentry->device != olddevice || !match->nentry->deleted))
71298+ match = match->next;
71299+
71300+ if (match && (match->nentry->inode == oldinode)
71301+ && (match->nentry->device == olddevice) &&
71302+ match->nentry->deleted) {
71303+ if (match->prev == NULL) {
71304+ running_polstate.inodev_set.i_hash[index] = match->next;
71305+ if (match->next != NULL)
71306+ match->next->prev = NULL;
71307+ } else {
71308+ match->prev->next = match->next;
71309+ if (match->next != NULL)
71310+ match->next->prev = match->prev;
71311+ }
71312+ match->prev = NULL;
71313+ match->next = NULL;
71314+ match->nentry->inode = newinode;
71315+ match->nentry->device = newdevice;
71316+ match->nentry->deleted = 0;
71317+
71318+ insert_inodev_entry(match);
71319+ }
71320+
71321+ return;
71322+}
71323+
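/* Editorial note, not part of the original patch: update_acl_obj_label(),
 * update_acl_subj_label() and update_inodev_entry() above all follow the same
 * rehash-on-recreate pattern: unlink a deleted entry from its doubly linked
 * hash chain, rewrite its inode/device key, and reinsert it under the new
 * hash.  The unlink step in isolation (simplified, hypothetical node type):
 */
struct chain_node {
	struct chain_node *prev, *next;
};

static void chain_unlink(struct chain_node **bucket, struct chain_node *n)
{
	if (n->prev == NULL) {
		*bucket = n->next;		/* n was the bucket head */
		if (n->next != NULL)
			n->next->prev = NULL;
	} else {
		n->prev->next = n->next;
		if (n->next != NULL)
			n->next->prev = n->prev;
	}
	n->prev = NULL;
	n->next = NULL;
}
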
71324+static void
71325+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
71326+{
71327+ struct acl_subject_label *subj;
71328+ struct acl_role_label *role;
71329+ unsigned int x;
71330+
71331+ FOR_EACH_ROLE_START(role)
71332+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
71333+
71334+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71335+ if ((subj->inode == ino) && (subj->device == dev)) {
71336+ subj->inode = ino;
71337+ subj->device = dev;
71338+ }
71339+ /* nested subjects aren't in the role's subj_hash table */
71340+ update_acl_obj_label(matchn->inode, matchn->device,
71341+ ino, dev, subj);
71342+ FOR_EACH_NESTED_SUBJECT_END(subj)
71343+ FOR_EACH_SUBJECT_START(role, subj, x)
71344+ update_acl_obj_label(matchn->inode, matchn->device,
71345+ ino, dev, subj);
71346+ FOR_EACH_SUBJECT_END(subj,x)
71347+ FOR_EACH_ROLE_END(role)
71348+
71349+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
71350+
71351+ return;
71352+}
71353+
71354+static void
71355+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
71356+ const struct vfsmount *mnt)
71357+{
71358+ u64 ino = __get_ino(dentry);
71359+ dev_t dev = __get_dev(dentry);
71360+
71361+ __do_handle_create(matchn, ino, dev);
71362+
71363+ return;
71364+}
71365+
71366+void
71367+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
71368+{
71369+ struct name_entry *matchn;
71370+
71371+ if (unlikely(!(gr_status & GR_READY)))
71372+ return;
71373+
71374+ preempt_disable();
71375+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
71376+
71377+ if (unlikely((unsigned long)matchn)) {
71378+ write_lock(&gr_inode_lock);
71379+ do_handle_create(matchn, dentry, mnt);
71380+ write_unlock(&gr_inode_lock);
71381+ }
71382+ preempt_enable();
71383+
71384+ return;
71385+}
71386+
71387+void
71388+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
71389+{
71390+ struct name_entry *matchn;
71391+
71392+ if (unlikely(!(gr_status & GR_READY)))
71393+ return;
71394+
71395+ preempt_disable();
71396+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
71397+
71398+ if (unlikely((unsigned long)matchn)) {
71399+ write_lock(&gr_inode_lock);
71400+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
71401+ write_unlock(&gr_inode_lock);
71402+ }
71403+ preempt_enable();
71404+
71405+ return;
71406+}
71407+
71408+void
71409+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
71410+ struct dentry *old_dentry,
71411+ struct dentry *new_dentry,
71412+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
71413+{
71414+ struct name_entry *matchn;
71415+ struct name_entry *matchn2 = NULL;
71416+ struct inodev_entry *inodev;
71417+ struct inode *inode = new_dentry->d_inode;
71418+ u64 old_ino = __get_ino(old_dentry);
71419+ dev_t old_dev = __get_dev(old_dentry);
71420+ unsigned int exchange = flags & RENAME_EXCHANGE;
71421+
71422+ /* vfs_rename swaps the name and parent link for old_dentry and
71423+ new_dentry
71424+ at this point, old_dentry has the new name, parent link, and inode
71425+ for the renamed file
71426+ if a file is being replaced by a rename, new_dentry has the inode
71427+ and name for the replaced file
71428+ */
71429+
71430+ if (unlikely(!(gr_status & GR_READY)))
71431+ return;
71432+
71433+ preempt_disable();
71434+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
71435+
71436+ /* exchange cases:
71437+ a filename exists for the source, but not dest
71438+ do a recreate on source
71439+ a filename exists for the dest, but not source
71440+ do a recreate on dest
71441+ a filename exists for both source and dest
71442+ delete source and dest, then create source and dest
71443+ a filename exists for neither source nor dest
71444+ no updates needed
71445+
71446+ the name entry lookups get us the old inode/dev associated with
71447+ each name, so do the deletes first (if possible) so that when
71448+ we do the create, we pick up on the right entries
71449+ */
71450+
71451+ if (exchange)
71452+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
71453+
71454+ /* we wouldn't have to check d_inode if it weren't for
71455+ NFS silly-renaming
71456+ */
71457+
71458+ write_lock(&gr_inode_lock);
71459+ if (unlikely((replace || exchange) && inode)) {
71460+ u64 new_ino = __get_ino(new_dentry);
71461+ dev_t new_dev = __get_dev(new_dentry);
71462+
71463+ inodev = lookup_inodev_entry(new_ino, new_dev);
71464+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
71465+ do_handle_delete(inodev, new_ino, new_dev);
71466+ }
71467+
71468+ inodev = lookup_inodev_entry(old_ino, old_dev);
71469+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
71470+ do_handle_delete(inodev, old_ino, old_dev);
71471+
71472+ if (unlikely(matchn != NULL))
71473+ do_handle_create(matchn, old_dentry, mnt);
71474+
71475+ if (unlikely(matchn2 != NULL))
71476+ do_handle_create(matchn2, new_dentry, mnt);
71477+
71478+ write_unlock(&gr_inode_lock);
71479+ preempt_enable();
71480+
71481+ return;
71482+}
71483+
71484+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
71485+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
71486+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
71487+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
71488+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
71489+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
71490+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
71491+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
71492+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
71493+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
71494+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
71495+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
71496+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
71497+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
71498+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
71499+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
71500+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
71501+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
71502+};
71503+
71504+void
71505+gr_learn_resource(const struct task_struct *task,
71506+ const int res, const unsigned long wanted, const int gt)
71507+{
71508+ struct acl_subject_label *acl;
71509+ const struct cred *cred;
71510+
71511+ if (unlikely((gr_status & GR_READY) &&
71512+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
71513+ goto skip_reslog;
71514+
71515+ gr_log_resource(task, res, wanted, gt);
71516+skip_reslog:
71517+
71518+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
71519+ return;
71520+
71521+ acl = task->acl;
71522+
71523+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
71524+ !(acl->resmask & (1U << (unsigned short) res))))
71525+ return;
71526+
71527+ if (wanted >= acl->res[res].rlim_cur) {
71528+ unsigned long res_add;
71529+
71530+ res_add = wanted + res_learn_bumps[res];
71531+
71532+ acl->res[res].rlim_cur = res_add;
71533+
71534+ if (wanted > acl->res[res].rlim_max)
71535+ acl->res[res].rlim_max = res_add;
71536+
71537+ /* only log the subject filename, since resource logging is supported for
71538+ single-subject learning only */
71539+ rcu_read_lock();
71540+ cred = __task_cred(task);
71541+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
71542+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
71543+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
71544+ "", (unsigned long) res, &task->signal->saved_ip);
71545+ rcu_read_unlock();
71546+ }
71547+
71548+ return;
71549+}
71550+EXPORT_SYMBOL_GPL(gr_learn_resource);
71551+#endif
71552+
71553+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
71554+void
71555+pax_set_initial_flags(struct linux_binprm *bprm)
71556+{
71557+ struct task_struct *task = current;
71558+ struct acl_subject_label *proc;
71559+ unsigned long flags;
71560+
71561+ if (unlikely(!(gr_status & GR_READY)))
71562+ return;
71563+
71564+ flags = pax_get_flags(task);
71565+
71566+ proc = task->acl;
71567+
71568+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
71569+ flags &= ~MF_PAX_PAGEEXEC;
71570+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
71571+ flags &= ~MF_PAX_SEGMEXEC;
71572+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
71573+ flags &= ~MF_PAX_RANDMMAP;
71574+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
71575+ flags &= ~MF_PAX_EMUTRAMP;
71576+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
71577+ flags &= ~MF_PAX_MPROTECT;
71578+
71579+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
71580+ flags |= MF_PAX_PAGEEXEC;
71581+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
71582+ flags |= MF_PAX_SEGMEXEC;
71583+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
71584+ flags |= MF_PAX_RANDMMAP;
71585+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
71586+ flags |= MF_PAX_EMUTRAMP;
71587+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
71588+ flags |= MF_PAX_MPROTECT;
71589+
71590+ pax_set_flags(task, flags);
71591+
71592+ return;
71593+}
71594+#endif
71595+
71596+int
71597+gr_handle_proc_ptrace(struct task_struct *task)
71598+{
71599+ struct file *filp;
71600+ struct task_struct *tmp = task;
71601+ struct task_struct *curtemp = current;
71602+ __u32 retmode;
71603+
71604+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71605+ if (unlikely(!(gr_status & GR_READY)))
71606+ return 0;
71607+#endif
71608+
71609+ read_lock(&tasklist_lock);
71610+ read_lock(&grsec_exec_file_lock);
71611+ filp = task->exec_file;
71612+
71613+ while (task_pid_nr(tmp) > 0) {
71614+ if (tmp == curtemp)
71615+ break;
71616+ tmp = tmp->real_parent;
71617+ }
71618+
71619+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71620+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
71621+ read_unlock(&grsec_exec_file_lock);
71622+ read_unlock(&tasklist_lock);
71623+ return 1;
71624+ }
71625+
71626+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71627+ if (!(gr_status & GR_READY)) {
71628+ read_unlock(&grsec_exec_file_lock);
71629+ read_unlock(&tasklist_lock);
71630+ return 0;
71631+ }
71632+#endif
71633+
71634+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
71635+ read_unlock(&grsec_exec_file_lock);
71636+ read_unlock(&tasklist_lock);
71637+
71638+ if (retmode & GR_NOPTRACE)
71639+ return 1;
71640+
71641+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
71642+ && (current->acl != task->acl || (current->acl != current->role->root_label
71643+ && task_pid_nr(current) != task_pid_nr(task))))
71644+ return 1;
71645+
71646+ return 0;
71647+}
71648+
71649+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
71650+{
71651+ if (unlikely(!(gr_status & GR_READY)))
71652+ return;
71653+
71654+ if (!(current->role->roletype & GR_ROLE_GOD))
71655+ return;
71656+
71657+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
71658+ p->role->rolename, gr_task_roletype_to_char(p),
71659+ p->acl->filename);
71660+}
71661+
71662+int
71663+gr_handle_ptrace(struct task_struct *task, const long request)
71664+{
71665+ struct task_struct *tmp = task;
71666+ struct task_struct *curtemp = current;
71667+ __u32 retmode;
71668+
71669+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71670+ if (unlikely(!(gr_status & GR_READY)))
71671+ return 0;
71672+#endif
71673+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71674+ read_lock(&tasklist_lock);
71675+ while (task_pid_nr(tmp) > 0) {
71676+ if (tmp == curtemp)
71677+ break;
71678+ tmp = tmp->real_parent;
71679+ }
71680+
71681+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71682+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
71683+ read_unlock(&tasklist_lock);
71684+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71685+ return 1;
71686+ }
71687+ read_unlock(&tasklist_lock);
71688+ }
71689+
71690+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71691+ if (!(gr_status & GR_READY))
71692+ return 0;
71693+#endif
71694+
71695+ read_lock(&grsec_exec_file_lock);
71696+ if (unlikely(!task->exec_file)) {
71697+ read_unlock(&grsec_exec_file_lock);
71698+ return 0;
71699+ }
71700+
71701+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
71702+ read_unlock(&grsec_exec_file_lock);
71703+
71704+ if (retmode & GR_NOPTRACE) {
71705+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71706+ return 1;
71707+ }
71708+
71709+ if (retmode & GR_PTRACERD) {
71710+ switch (request) {
71711+ case PTRACE_SEIZE:
71712+ case PTRACE_POKETEXT:
71713+ case PTRACE_POKEDATA:
71714+ case PTRACE_POKEUSR:
71715+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
71716+ case PTRACE_SETREGS:
71717+ case PTRACE_SETFPREGS:
71718+#endif
71719+#ifdef CONFIG_X86
71720+ case PTRACE_SETFPXREGS:
71721+#endif
71722+#ifdef CONFIG_ALTIVEC
71723+ case PTRACE_SETVRREGS:
71724+#endif
71725+ return 1;
71726+ default:
71727+ return 0;
71728+ }
71729+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
71730+ !(current->role->roletype & GR_ROLE_GOD) &&
71731+ (current->acl != task->acl)) {
71732+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71733+ return 1;
71734+ }
71735+
71736+ return 0;
71737+}
71738+
71739+static int is_writable_mmap(const struct file *filp)
71740+{
71741+ struct task_struct *task = current;
71742+ struct acl_object_label *obj, *obj2;
71743+
71744+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
71745+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
71746+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71747+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
71748+ task->role->root_label);
71749+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
71750+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
71751+ return 1;
71752+ }
71753+ }
71754+ return 0;
71755+}
71756+
71757+int
71758+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
71759+{
71760+ __u32 mode;
71761+
71762+ if (unlikely(!file || !(prot & PROT_EXEC)))
71763+ return 1;
71764+
71765+ if (is_writable_mmap(file))
71766+ return 0;
71767+
71768+ mode =
71769+ gr_search_file(file->f_path.dentry,
71770+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71771+ file->f_path.mnt);
71772+
71773+ if (!gr_tpe_allow(file))
71774+ return 0;
71775+
71776+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71777+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71778+ return 0;
71779+ } else if (unlikely(!(mode & GR_EXEC))) {
71780+ return 0;
71781+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71782+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71783+ return 1;
71784+ }
71785+
71786+ return 1;
71787+}
71788+
71789+int
71790+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
71791+{
71792+ __u32 mode;
71793+
71794+ if (unlikely(!file || !(prot & PROT_EXEC)))
71795+ return 1;
71796+
71797+ if (is_writable_mmap(file))
71798+ return 0;
71799+
71800+ mode =
71801+ gr_search_file(file->f_path.dentry,
71802+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71803+ file->f_path.mnt);
71804+
71805+ if (!gr_tpe_allow(file))
71806+ return 0;
71807+
71808+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71809+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71810+ return 0;
71811+ } else if (unlikely(!(mode & GR_EXEC))) {
71812+ return 0;
71813+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71814+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71815+ return 1;
71816+ }
71817+
71818+ return 1;
71819+}
71820+
71821+void
71822+gr_acl_handle_psacct(struct task_struct *task, const long code)
71823+{
71824+ unsigned long runtime, cputime;
71825+ cputime_t utime, stime;
71826+ unsigned int wday, cday;
71827+ __u8 whr, chr;
71828+ __u8 wmin, cmin;
71829+ __u8 wsec, csec;
71830+ struct timespec curtime, starttime;
71831+
71832+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
71833+ !(task->acl->mode & GR_PROCACCT)))
71834+ return;
71835+
71836+ curtime = ns_to_timespec(ktime_get_ns());
71837+ starttime = ns_to_timespec(task->start_time);
71838+ runtime = curtime.tv_sec - starttime.tv_sec;
71839+ wday = runtime / (60 * 60 * 24);
71840+ runtime -= wday * (60 * 60 * 24);
71841+ whr = runtime / (60 * 60);
71842+ runtime -= whr * (60 * 60);
71843+ wmin = runtime / 60;
71844+ runtime -= wmin * 60;
71845+ wsec = runtime;
71846+
71847+ task_cputime(task, &utime, &stime);
71848+ cputime = cputime_to_secs(utime + stime);
71849+ cday = cputime / (60 * 60 * 24);
71850+ cputime -= cday * (60 * 60 * 24);
71851+ chr = cputime / (60 * 60);
71852+ cputime -= chr * (60 * 60);
71853+ cmin = cputime / 60;
71854+ cputime -= cmin * 60;
71855+ csec = cputime;
71856+
71857+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
71858+
71859+ return;
71860+}
71861+
71862+#ifdef CONFIG_TASKSTATS
71863+int gr_is_taskstats_denied(int pid)
71864+{
71865+ struct task_struct *task;
71866+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71867+ const struct cred *cred;
71868+#endif
71869+ int ret = 0;
71870+
71871+ /* restrict taskstats viewing to un-chrooted root users
71872+ who have the 'view' subject flag if the RBAC system is enabled
71873+ */
71874+
71875+ rcu_read_lock();
71876+ read_lock(&tasklist_lock);
71877+ task = find_task_by_vpid(pid);
71878+ if (task) {
71879+#ifdef CONFIG_GRKERNSEC_CHROOT
71880+ if (proc_is_chrooted(task))
71881+ ret = -EACCES;
71882+#endif
71883+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71884+ cred = __task_cred(task);
71885+#ifdef CONFIG_GRKERNSEC_PROC_USER
71886+ if (gr_is_global_nonroot(cred->uid))
71887+ ret = -EACCES;
71888+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71889+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
71890+ ret = -EACCES;
71891+#endif
71892+#endif
71893+ if (gr_status & GR_READY) {
71894+ if (!(task->acl->mode & GR_VIEW))
71895+ ret = -EACCES;
71896+ }
71897+ } else
71898+ ret = -ENOENT;
71899+
71900+ read_unlock(&tasklist_lock);
71901+ rcu_read_unlock();
71902+
71903+ return ret;
71904+}
71905+#endif
71906+
71907+/* AUXV entries are filled via a descendant of search_binary_handler
71908+ after we've already applied the subject for the target
71909+*/
71910+int gr_acl_enable_at_secure(void)
71911+{
71912+ if (unlikely(!(gr_status & GR_READY)))
71913+ return 0;
71914+
71915+ if (current->acl->mode & GR_ATSECURE)
71916+ return 1;
71917+
71918+ return 0;
71919+}
71920+
71921+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
71922+{
71923+ struct task_struct *task = current;
71924+ struct dentry *dentry = file->f_path.dentry;
71925+ struct vfsmount *mnt = file->f_path.mnt;
71926+ struct acl_object_label *obj, *tmp;
71927+ struct acl_subject_label *subj;
71928+ unsigned int bufsize;
71929+ int is_not_root;
71930+ char *path;
71931+ dev_t dev = __get_dev(dentry);
71932+
71933+ if (unlikely(!(gr_status & GR_READY)))
71934+ return 1;
71935+
71936+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71937+ return 1;
71938+
71939+ /* ignore Eric Biederman */
71940+ if (IS_PRIVATE(dentry->d_inode))
71941+ return 1;
71942+
71943+ subj = task->acl;
71944+ read_lock(&gr_inode_lock);
71945+ do {
71946+ obj = lookup_acl_obj_label(ino, dev, subj);
71947+ if (obj != NULL) {
71948+ read_unlock(&gr_inode_lock);
71949+ return (obj->mode & GR_FIND) ? 1 : 0;
71950+ }
71951+ } while ((subj = subj->parent_subject));
71952+ read_unlock(&gr_inode_lock);
71953+
71954+	/* this is purely an optimization, since we're looking up an object
71955+	   for the directory we're doing a readdir on;
71956+	   if it's possible for any globbed object to match the entry we're
71957+	   filling into the directory, then the object we find here will be
71958+	   an anchor point with attached globbed objects
71959+	*/
71960+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
71961+ if (obj->globbed == NULL)
71962+ return (obj->mode & GR_FIND) ? 1 : 0;
71963+
71964+ is_not_root = ((obj->filename[0] == '/') &&
71965+ (obj->filename[1] == '\0')) ? 0 : 1;
71966+ bufsize = PAGE_SIZE - namelen - is_not_root;
71967+
71968+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
71969+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
71970+ return 1;
71971+
71972+ preempt_disable();
71973+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
71974+ bufsize);
71975+
71976+ bufsize = strlen(path);
71977+
71978+ /* if base is "/", don't append an additional slash */
71979+ if (is_not_root)
71980+ *(path + bufsize) = '/';
71981+ memcpy(path + bufsize + is_not_root, name, namelen);
71982+ *(path + bufsize + namelen + is_not_root) = '\0';
71983+
71984+ tmp = obj->globbed;
71985+ while (tmp) {
71986+ if (!glob_match(tmp->filename, path)) {
71987+ preempt_enable();
71988+ return (tmp->mode & GR_FIND) ? 1 : 0;
71989+ }
71990+ tmp = tmp->next;
71991+ }
71992+ preempt_enable();
71993+ return (obj->mode & GR_FIND) ? 1 : 0;
71994+}
71995+
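/* Editorial note, not part of the original patch: the bufsize test in
 * gr_acl_handle_filldir() above, (bufsize - 1) > (PAGE_SIZE - 1), relies on
 * unsigned wraparound so that a single comparison rejects both bufsize == 0
 * (0 - 1 wraps to UINT_MAX) and bufsize > PAGE_SIZE.  A standalone
 * illustration, assuming a 4096-byte page:
 */
#include <assert.h>

#define PG 4096U

static int bufsize_invalid(unsigned int sz)
{
	return (sz - 1U) > (PG - 1U);
}

int main(void)
{
	assert(bufsize_invalid(0U));	/* 0 - 1 wraps to UINT_MAX */
	assert(bufsize_invalid(PG + 1U));
	assert(!bufsize_invalid(1U));
	assert(!bufsize_invalid(PG));
	return 0;
}
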
71996+void gr_put_exec_file(struct task_struct *task)
71997+{
71998+ struct file *filp;
71999+
72000+ write_lock(&grsec_exec_file_lock);
72001+ filp = task->exec_file;
72002+ task->exec_file = NULL;
72003+ write_unlock(&grsec_exec_file_lock);
72004+
72005+ if (filp)
72006+ fput(filp);
72007+
72008+ return;
72009+}
72010+
72011+
72012+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
72013+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
72014+#endif
72015+#ifdef CONFIG_SECURITY
72016+EXPORT_SYMBOL_GPL(gr_check_user_change);
72017+EXPORT_SYMBOL_GPL(gr_check_group_change);
72018+#endif
72019+
72020diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
72021new file mode 100644
72022index 0000000..18ffbbd
72023--- /dev/null
72024+++ b/grsecurity/gracl_alloc.c
72025@@ -0,0 +1,105 @@
72026+#include <linux/kernel.h>
72027+#include <linux/mm.h>
72028+#include <linux/slab.h>
72029+#include <linux/vmalloc.h>
72030+#include <linux/gracl.h>
72031+#include <linux/grsecurity.h>
72032+
72033+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
72034+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
72035+
72036+static __inline__ int
72037+alloc_pop(void)
72038+{
72039+ if (current_alloc_state->alloc_stack_next == 1)
72040+ return 0;
72041+
72042+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
72043+
72044+ current_alloc_state->alloc_stack_next--;
72045+
72046+ return 1;
72047+}
72048+
72049+static __inline__ int
72050+alloc_push(void *buf)
72051+{
72052+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
72053+ return 1;
72054+
72055+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
72056+
72057+ current_alloc_state->alloc_stack_next++;
72058+
72059+ return 0;
72060+}
72061+
72062+void *
72063+acl_alloc(unsigned long len)
72064+{
72065+ void *ret = NULL;
72066+
72067+ if (!len || len > PAGE_SIZE)
72068+ goto out;
72069+
72070+ ret = kmalloc(len, GFP_KERNEL);
72071+
72072+ if (ret) {
72073+ if (alloc_push(ret)) {
72074+ kfree(ret);
72075+ ret = NULL;
72076+ }
72077+ }
72078+
72079+out:
72080+ return ret;
72081+}
72082+
72083+void *
72084+acl_alloc_num(unsigned long num, unsigned long len)
72085+{
72086+ if (!len || (num > (PAGE_SIZE / len)))
72087+ return NULL;
72088+
72089+ return acl_alloc(num * len);
72090+}
72091+
72092+void
72093+acl_free_all(void)
72094+{
72095+ if (!current_alloc_state->alloc_stack)
72096+ return;
72097+
72098+ while (alloc_pop()) ;
72099+
72100+ if (current_alloc_state->alloc_stack) {
72101+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
72102+ kfree(current_alloc_state->alloc_stack);
72103+ else
72104+ vfree(current_alloc_state->alloc_stack);
72105+ }
72106+
72107+ current_alloc_state->alloc_stack = NULL;
72108+ current_alloc_state->alloc_stack_size = 1;
72109+ current_alloc_state->alloc_stack_next = 1;
72110+
72111+ return;
72112+}
72113+
72114+int
72115+acl_alloc_stack_init(unsigned long size)
72116+{
72117+ if ((size * sizeof (void *)) <= PAGE_SIZE)
72118+ current_alloc_state->alloc_stack =
72119+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
72120+ else
72121+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
72122+
72123+ current_alloc_state->alloc_stack_size = size;
72124+ current_alloc_state->alloc_stack_next = 1;
72125+
72126+ if (!current_alloc_state->alloc_stack)
72127+ return 0;
72128+ else
72129+ return 1;
72130+}
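
/* Editorial note, not part of the original patch: gracl_alloc.c implements a
 * pointer stack so that every allocation made while loading a policy can be
 * freed with one call.  A hypothetical caller, illustrating the API contract
 * (acl_alloc_stack_init() returns 0 on failure, acl_alloc() refuses sizes
 * above PAGE_SIZE, acl_alloc_num() additionally bounds num * len to
 * PAGE_SIZE):
 */
static int example_policy_load(void)
{
	char *hdr, *slots;

	if (!acl_alloc_stack_init(1024))
		return -1;

	hdr = acl_alloc(128);		/* single object */
	slots = acl_alloc_num(16, 32);	/* 16 elements of 32 bytes */
	if (hdr == NULL || slots == NULL) {
		acl_free_all();		/* frees everything pushed so far */
		return -1;
	}

	/* ... populate policy structures backed by hdr/slots ... */

	acl_free_all();			/* tear down when done */
	return 0;
}
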
72131diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
72132new file mode 100644
72133index 0000000..1a94c11
72134--- /dev/null
72135+++ b/grsecurity/gracl_cap.c
72136@@ -0,0 +1,127 @@
72137+#include <linux/kernel.h>
72138+#include <linux/module.h>
72139+#include <linux/sched.h>
72140+#include <linux/gracl.h>
72141+#include <linux/grsecurity.h>
72142+#include <linux/grinternal.h>
72143+
72144+extern const char *captab_log[];
72145+extern int captab_log_entries;
72146+
72147+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
72148+{
72149+ struct acl_subject_label *curracl;
72150+
72151+ if (!gr_acl_is_enabled())
72152+ return 1;
72153+
72154+ curracl = task->acl;
72155+
72156+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72157+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72158+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
72159+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
72160+ gr_to_filename(task->exec_file->f_path.dentry,
72161+ task->exec_file->f_path.mnt) : curracl->filename,
72162+ curracl->filename, 0UL,
72163+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
72164+ return 1;
72165+ }
72166+
72167+ return 0;
72168+}
72169+
72170+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72171+{
72172+ struct acl_subject_label *curracl;
72173+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72174+ kernel_cap_t cap_audit = __cap_empty_set;
72175+
72176+ if (!gr_acl_is_enabled())
72177+ return 1;
72178+
72179+ curracl = task->acl;
72180+
72181+ cap_drop = curracl->cap_lower;
72182+ cap_mask = curracl->cap_mask;
72183+ cap_audit = curracl->cap_invert_audit;
72184+
72185+ while ((curracl = curracl->parent_subject)) {
72186+		/* if the cap isn't yet decided by the computed mask but is
72187+		   specified by the current level subject, record that decision:
72188+		   mark it decided in the computed mask, and if the current level
72189+		   subject lowers it, add it to the set of dropped capabilities
72190+		*/
72191+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72192+ cap_raise(cap_mask, cap);
72193+ if (cap_raised(curracl->cap_lower, cap))
72194+ cap_raise(cap_drop, cap);
72195+ if (cap_raised(curracl->cap_invert_audit, cap))
72196+ cap_raise(cap_audit, cap);
72197+ }
72198+ }
72199+
72200+ if (!cap_raised(cap_drop, cap)) {
72201+ if (cap_raised(cap_audit, cap))
72202+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
72203+ return 1;
72204+ }
72205+
72206+	/* in the general case, only learn the capability use if the process
72207+	   has the capability; the two uses of gr_learn_cap in sys.c are an
72208+	   exception to this rule, to ensure any role transition involves what
72209+	   the full-learned policy believes is a privileged process
72210+	*/
72211+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
72212+ return 1;
72213+
72214+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
72215+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
72216+
72217+ return 0;
72218+}
72219+
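
The parent-subject walk above resolves a capability the way nested policy should: the nearest subject in the chain that says anything about the cap wins, and an unspecified cap is inherited from the closest ancestor that specifies it. A compressed userspace model of that walk, with kernel_cap_t reduced to a plain 64-bit mask and cap_raised()/cap_raise() reduced to bit tests (struct and function names here are illustrative, not the kernel API):

#include <stdio.h>
#include <stdint.h>

struct subject {
	uint64_t cap_mask;   /* caps this subject says anything about */
	uint64_t cap_lower;  /* caps this subject drops */
	struct subject *parent;
};

/* Walk toward the root; once a cap appears in the computed mask,
 * ancestors further up can no longer override it. */
static int cap_allowed(const struct subject *s, int cap)
{
	uint64_t bit = 1ULL << cap;
	uint64_t mask = s->cap_mask, drop = s->cap_lower;

	while ((s = s->parent)) {
		if (!(mask & bit) && (s->cap_mask & bit)) {
			mask |= bit;
			if (s->cap_lower & bit)
				drop |= bit;
		}
	}
	return !(drop & bit);  /* allowed unless some level dropped it */
}

int main(void)
{
	struct subject root  = { .cap_mask = 1 << 3, .cap_lower = 1 << 3 };
	struct subject child = { .cap_mask = 0, .cap_lower = 0, .parent = &root };

	/* cap 3 is unspecified in the child, so the root's drop applies */
	printf("cap 3 allowed: %d\n", cap_allowed(&child, 3)); /* 0 */
	printf("cap 5 allowed: %d\n", cap_allowed(&child, 5)); /* 1 */
	return 0;
}
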
72220+int
72221+gr_acl_is_capable(const int cap)
72222+{
72223+ return gr_task_acl_is_capable(current, current_cred(), cap);
72224+}
72225+
72226+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
72227+{
72228+ struct acl_subject_label *curracl;
72229+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72230+
72231+ if (!gr_acl_is_enabled())
72232+ return 1;
72233+
72234+ curracl = task->acl;
72235+
72236+ cap_drop = curracl->cap_lower;
72237+ cap_mask = curracl->cap_mask;
72238+
72239+ while ((curracl = curracl->parent_subject)) {
72240+ /* if the cap isn't specified in the current computed mask but is specified in the
72241+ current level subject, and is lowered in the current level subject, then add
72242+		   it to the set of dropped capabilities;
72243+		   otherwise, add the current level subject's mask to the current computed mask
72244+ */
72245+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72246+ cap_raise(cap_mask, cap);
72247+ if (cap_raised(curracl->cap_lower, cap))
72248+ cap_raise(cap_drop, cap);
72249+ }
72250+ }
72251+
72252+ if (!cap_raised(cap_drop, cap))
72253+ return 1;
72254+
72255+ return 0;
72256+}
72257+
72258+int
72259+gr_acl_is_capable_nolog(const int cap)
72260+{
72261+ return gr_task_acl_is_capable_nolog(current, cap);
72262+}
72263+
72264diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
72265new file mode 100644
72266index 0000000..a43dd06
72267--- /dev/null
72268+++ b/grsecurity/gracl_compat.c
72269@@ -0,0 +1,269 @@
72270+#include <linux/kernel.h>
72271+#include <linux/gracl.h>
72272+#include <linux/compat.h>
72273+#include <linux/gracl_compat.h>
72274+
72275+#include <asm/uaccess.h>
72276+
72277+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
72278+{
72279+ struct gr_arg_wrapper_compat uwrapcompat;
72280+
72281+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
72282+ return -EFAULT;
72283+
72284+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
72285+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
72286+ return -EINVAL;
72287+
72288+ uwrap->arg = compat_ptr(uwrapcompat.arg);
72289+ uwrap->version = uwrapcompat.version;
72290+ uwrap->size = sizeof(struct gr_arg);
72291+
72292+ return 0;
72293+}
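
Every helper in this file follows the same shape: copy the packed 32-bit layout from userspace in one copy_from_user(), then widen each compat_uptr_t into a native pointer with compat_ptr() and carry the scalar fields across. A userspace sketch of that widening step, with compat_uptr_t modeled as uint32_t and compat_ptr() as a zero-extending cast (real kernels define both per-architecture); the struct names are illustrative:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t compat_uptr_t;  /* a 32-bit userspace pointer */

struct wrapper_compat { compat_uptr_t arg; uint32_t version; uint32_t size; };
struct wrapper        { void *arg;         uint32_t version; uint32_t size; };

/* Model of compat_ptr(): zero-extend the 32-bit value to a native pointer. */
static void *compat_ptr(compat_uptr_t p) { return (void *)(uintptr_t)p; }

static void widen(struct wrapper *w, const struct wrapper_compat *c)
{
	w->arg     = compat_ptr(c->arg);      /* pointer field: widen */
	w->version = c->version;              /* scalars: copy as-is */
	w->size    = sizeof(struct wrapper);  /* size reflects the native layout */
}

int main(void)
{
	struct wrapper_compat c = { .arg = 0x1000, .version = 1 };
	struct wrapper w;

	widen(&w, &c);
	printf("arg=%p version=%u size=%u\n", w.arg, w.version, w.size);
	return 0;
}
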
72294+
72295+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
72296+{
72297+ struct gr_arg_compat argcompat;
72298+
72299+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
72300+ return -EFAULT;
72301+
72302+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
72303+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
72304+ arg->role_db.num_roles = argcompat.role_db.num_roles;
72305+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
72306+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
72307+ arg->role_db.num_objects = argcompat.role_db.num_objects;
72308+
72309+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
72310+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
72311+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
72312+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
72313+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
72314+ arg->segv_device = argcompat.segv_device;
72315+ arg->segv_inode = argcompat.segv_inode;
72316+ arg->segv_uid = argcompat.segv_uid;
72317+ arg->num_sprole_pws = argcompat.num_sprole_pws;
72318+ arg->mode = argcompat.mode;
72319+
72320+ return 0;
72321+}
72322+
72323+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
72324+{
72325+ struct acl_object_label_compat objcompat;
72326+
72327+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
72328+ return -EFAULT;
72329+
72330+ obj->filename = compat_ptr(objcompat.filename);
72331+ obj->inode = objcompat.inode;
72332+ obj->device = objcompat.device;
72333+ obj->mode = objcompat.mode;
72334+
72335+ obj->nested = compat_ptr(objcompat.nested);
72336+ obj->globbed = compat_ptr(objcompat.globbed);
72337+
72338+ obj->prev = compat_ptr(objcompat.prev);
72339+ obj->next = compat_ptr(objcompat.next);
72340+
72341+ return 0;
72342+}
72343+
72344+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
72345+{
72346+ unsigned int i;
72347+ struct acl_subject_label_compat subjcompat;
72348+
72349+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
72350+ return -EFAULT;
72351+
72352+ subj->filename = compat_ptr(subjcompat.filename);
72353+ subj->inode = subjcompat.inode;
72354+ subj->device = subjcompat.device;
72355+ subj->mode = subjcompat.mode;
72356+ subj->cap_mask = subjcompat.cap_mask;
72357+ subj->cap_lower = subjcompat.cap_lower;
72358+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
72359+
72360+ for (i = 0; i < GR_NLIMITS; i++) {
72361+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
72362+ subj->res[i].rlim_cur = RLIM_INFINITY;
72363+ else
72364+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
72365+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
72366+ subj->res[i].rlim_max = RLIM_INFINITY;
72367+ else
72368+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
72369+ }
72370+ subj->resmask = subjcompat.resmask;
72371+
72372+ subj->user_trans_type = subjcompat.user_trans_type;
72373+ subj->group_trans_type = subjcompat.group_trans_type;
72374+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
72375+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
72376+ subj->user_trans_num = subjcompat.user_trans_num;
72377+ subj->group_trans_num = subjcompat.group_trans_num;
72378+
72379+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
72380+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
72381+ subj->ip_type = subjcompat.ip_type;
72382+ subj->ips = compat_ptr(subjcompat.ips);
72383+ subj->ip_num = subjcompat.ip_num;
72384+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
72385+
72386+ subj->crashes = subjcompat.crashes;
72387+ subj->expires = subjcompat.expires;
72388+
72389+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
72390+ subj->hash = compat_ptr(subjcompat.hash);
72391+ subj->prev = compat_ptr(subjcompat.prev);
72392+ subj->next = compat_ptr(subjcompat.next);
72393+
72394+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
72395+ subj->obj_hash_size = subjcompat.obj_hash_size;
72396+ subj->pax_flags = subjcompat.pax_flags;
72397+
72398+ return 0;
72399+}
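
The rlimit loop above cannot copy values bit-for-bit: the 32-bit ABI's RLIM_INFINITY is a different constant than the 64-bit one, so "infinite" has to be translated explicitly rather than numerically widened. A sketch of just that translation, with both sentinels defined locally under the assumption of a 32-bit compat ABI on a 64-bit host (the kernel's actual values are ABI-specific):

#include <stdio.h>
#include <stdint.h>

#define COMPAT_RLIM_INFINITY 0xffffffffUL  /* assumed 32-bit ABI sentinel */
#define RLIM_INFINITY        (~0UL)        /* assumed 64-bit ABI sentinel */

static uint64_t widen_rlim(uint32_t v)
{
	/* translate the sentinel; widen everything else numerically */
	return (v == COMPAT_RLIM_INFINITY) ? RLIM_INFINITY : (uint64_t)v;
}

int main(void)
{
	printf("%llx\n", (unsigned long long)widen_rlim(4096));       /* 1000 */
	printf("%llx\n", (unsigned long long)widen_rlim(0xffffffff)); /* ffff... */
	return 0;
}
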
72400+
72401+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
72402+{
72403+ struct acl_role_label_compat rolecompat;
72404+
72405+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
72406+ return -EFAULT;
72407+
72408+ role->rolename = compat_ptr(rolecompat.rolename);
72409+ role->uidgid = rolecompat.uidgid;
72410+ role->roletype = rolecompat.roletype;
72411+
72412+ role->auth_attempts = rolecompat.auth_attempts;
72413+ role->expires = rolecompat.expires;
72414+
72415+ role->root_label = compat_ptr(rolecompat.root_label);
72416+ role->hash = compat_ptr(rolecompat.hash);
72417+
72418+ role->prev = compat_ptr(rolecompat.prev);
72419+ role->next = compat_ptr(rolecompat.next);
72420+
72421+ role->transitions = compat_ptr(rolecompat.transitions);
72422+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
72423+ role->domain_children = compat_ptr(rolecompat.domain_children);
72424+ role->domain_child_num = rolecompat.domain_child_num;
72425+
72426+ role->umask = rolecompat.umask;
72427+
72428+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
72429+ role->subj_hash_size = rolecompat.subj_hash_size;
72430+
72431+ return 0;
72432+}
72433+
72434+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
72435+{
72436+ struct role_allowed_ip_compat roleip_compat;
72437+
72438+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
72439+ return -EFAULT;
72440+
72441+ roleip->addr = roleip_compat.addr;
72442+ roleip->netmask = roleip_compat.netmask;
72443+
72444+ roleip->prev = compat_ptr(roleip_compat.prev);
72445+ roleip->next = compat_ptr(roleip_compat.next);
72446+
72447+ return 0;
72448+}
72449+
72450+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
72451+{
72452+ struct role_transition_compat trans_compat;
72453+
72454+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
72455+ return -EFAULT;
72456+
72457+ trans->rolename = compat_ptr(trans_compat.rolename);
72458+
72459+ trans->prev = compat_ptr(trans_compat.prev);
72460+ trans->next = compat_ptr(trans_compat.next);
72461+
72462+ return 0;
72463+
72464+}
72465+
72466+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
72467+{
72468+ struct gr_hash_struct_compat hash_compat;
72469+
72470+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
72471+ return -EFAULT;
72472+
72473+ hash->table = compat_ptr(hash_compat.table);
72474+ hash->nametable = compat_ptr(hash_compat.nametable);
72475+ hash->first = compat_ptr(hash_compat.first);
72476+
72477+ hash->table_size = hash_compat.table_size;
72478+ hash->used_size = hash_compat.used_size;
72479+
72480+ hash->type = hash_compat.type;
72481+
72482+ return 0;
72483+}
72484+
72485+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
72486+{
72487+ compat_uptr_t ptrcompat;
72488+
72489+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
72490+ return -EFAULT;
72491+
72492+ *(void **)ptr = compat_ptr(ptrcompat);
72493+
72494+ return 0;
72495+}
72496+
72497+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
72498+{
72499+ struct acl_ip_label_compat ip_compat;
72500+
72501+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
72502+ return -EFAULT;
72503+
72504+ ip->iface = compat_ptr(ip_compat.iface);
72505+ ip->addr = ip_compat.addr;
72506+ ip->netmask = ip_compat.netmask;
72507+ ip->low = ip_compat.low;
72508+ ip->high = ip_compat.high;
72509+ ip->mode = ip_compat.mode;
72510+ ip->type = ip_compat.type;
72511+
72512+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
72513+
72514+ ip->prev = compat_ptr(ip_compat.prev);
72515+ ip->next = compat_ptr(ip_compat.next);
72516+
72517+ return 0;
72518+}
72519+
72520+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
72521+{
72522+ struct sprole_pw_compat pw_compat;
72523+
72524+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
72525+ return -EFAULT;
72526+
72527+ pw->rolename = compat_ptr(pw_compat.rolename);
72528+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
72529+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
72530+
72531+ return 0;
72532+}
72533+
72534+size_t get_gr_arg_wrapper_size_compat(void)
72535+{
72536+ return sizeof(struct gr_arg_wrapper_compat);
72537+}
72538+
72539diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
72540new file mode 100644
72541index 0000000..8ee8e4f
72542--- /dev/null
72543+++ b/grsecurity/gracl_fs.c
72544@@ -0,0 +1,447 @@
72545+#include <linux/kernel.h>
72546+#include <linux/sched.h>
72547+#include <linux/types.h>
72548+#include <linux/fs.h>
72549+#include <linux/file.h>
72550+#include <linux/stat.h>
72551+#include <linux/grsecurity.h>
72552+#include <linux/grinternal.h>
72553+#include <linux/gracl.h>
72554+
72555+umode_t
72556+gr_acl_umask(void)
72557+{
72558+ if (unlikely(!gr_acl_is_enabled()))
72559+ return 0;
72560+
72561+ return current->role->umask;
72562+}
72563+
72564+__u32
72565+gr_acl_handle_hidden_file(const struct dentry * dentry,
72566+ const struct vfsmount * mnt)
72567+{
72568+ __u32 mode;
72569+
72570+ if (unlikely(d_is_negative(dentry)))
72571+ return GR_FIND;
72572+
72573+ mode =
72574+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
72575+
72576+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
72577+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72578+ return mode;
72579+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
72580+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72581+ return 0;
72582+ } else if (unlikely(!(mode & GR_FIND)))
72583+ return 0;
72584+
72585+ return GR_FIND;
72586+}
72587+
72588+__u32
72589+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
72590+ int acc_mode)
72591+{
72592+ __u32 reqmode = GR_FIND;
72593+ __u32 mode;
72594+
72595+ if (unlikely(d_is_negative(dentry)))
72596+ return reqmode;
72597+
72598+ if (acc_mode & MAY_APPEND)
72599+ reqmode |= GR_APPEND;
72600+ else if (acc_mode & MAY_WRITE)
72601+ reqmode |= GR_WRITE;
72602+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
72603+ reqmode |= GR_READ;
72604+
72605+ mode =
72606+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72607+ mnt);
72608+
72609+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72610+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72611+ reqmode & GR_READ ? " reading" : "",
72612+ reqmode & GR_WRITE ? " writing" : reqmode &
72613+ GR_APPEND ? " appending" : "");
72614+ return reqmode;
72615+ } else
72616+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72617+ {
72618+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72619+ reqmode & GR_READ ? " reading" : "",
72620+ reqmode & GR_WRITE ? " writing" : reqmode &
72621+ GR_APPEND ? " appending" : "");
72622+ return 0;
72623+ } else if (unlikely((mode & reqmode) != reqmode))
72624+ return 0;
72625+
72626+ return reqmode;
72627+}
72628+
72629+__u32
72630+gr_acl_handle_creat(const struct dentry * dentry,
72631+ const struct dentry * p_dentry,
72632+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
72633+ const int imode)
72634+{
72635+ __u32 reqmode = GR_WRITE | GR_CREATE;
72636+ __u32 mode;
72637+
72638+ if (acc_mode & MAY_APPEND)
72639+ reqmode |= GR_APPEND;
72640+ // if a directory was required or the directory already exists, then
72641+ // don't count this open as a read
72642+ if ((acc_mode & MAY_READ) &&
72643+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
72644+ reqmode |= GR_READ;
72645+ if ((open_flags & O_CREAT) &&
72646+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72647+ reqmode |= GR_SETID;
72648+
72649+ mode =
72650+ gr_check_create(dentry, p_dentry, p_mnt,
72651+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72652+
72653+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72654+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72655+ reqmode & GR_READ ? " reading" : "",
72656+ reqmode & GR_WRITE ? " writing" : reqmode &
72657+ GR_APPEND ? " appending" : "");
72658+ return reqmode;
72659+ } else
72660+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72661+ {
72662+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72663+ reqmode & GR_READ ? " reading" : "",
72664+ reqmode & GR_WRITE ? " writing" : reqmode &
72665+ GR_APPEND ? " appending" : "");
72666+ return 0;
72667+ } else if (unlikely((mode & reqmode) != reqmode))
72668+ return 0;
72669+
72670+ return reqmode;
72671+}
72672+
72673+__u32
72674+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
72675+ const int fmode)
72676+{
72677+ __u32 mode, reqmode = GR_FIND;
72678+
72679+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
72680+ reqmode |= GR_EXEC;
72681+ if (fmode & S_IWOTH)
72682+ reqmode |= GR_WRITE;
72683+ if (fmode & S_IROTH)
72684+ reqmode |= GR_READ;
72685+
72686+ mode =
72687+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72688+ mnt);
72689+
72690+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72691+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72692+ reqmode & GR_READ ? " reading" : "",
72693+ reqmode & GR_WRITE ? " writing" : "",
72694+ reqmode & GR_EXEC ? " executing" : "");
72695+ return reqmode;
72696+ } else
72697+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72698+ {
72699+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72700+ reqmode & GR_READ ? " reading" : "",
72701+ reqmode & GR_WRITE ? " writing" : "",
72702+ reqmode & GR_EXEC ? " executing" : "");
72703+ return 0;
72704+ } else if (unlikely((mode & reqmode) != reqmode))
72705+ return 0;
72706+
72707+ return reqmode;
72708+}
72709+
72710+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
72711+{
72712+ __u32 mode;
72713+
72714+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
72715+
72716+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72717+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
72718+ return mode;
72719+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72720+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
72721+ return 0;
72722+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72723+ return 0;
72724+
72725+ return (reqmode);
72726+}
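
Each handler in this file funnels into the same three-way decision: grant (with an audit line if any GR_AUDITS bit matched), deny with a denial log, or deny silently when GR_SUPPRESS is set on the matching object. A table-style userspace sketch of that decision; the flag values are illustrative stand-ins, not the kernel constants:

#include <stdio.h>

#define GR_AUDITS   0x1
#define GR_SUPPRESS 0x2
#define REQMODE     0x4  /* stands in for the requested access bits */

static const char *decide(unsigned int mode)
{
	if ((mode & REQMODE) == REQMODE)
		return (mode & GR_AUDITS) ? "grant + audit log" : "grant";
	return (mode & GR_SUPPRESS) ? "deny (suppressed)" : "deny + denial log";
}

int main(void)
{
	printf("%s\n", decide(REQMODE | GR_AUDITS)); /* grant + audit log */
	printf("%s\n", decide(GR_SUPPRESS));         /* deny (suppressed) */
	printf("%s\n", decide(0));                   /* deny + denial log */
	return 0;
}
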
72727+
72728+__u32
72729+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
72730+{
72731+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
72732+}
72733+
72734+__u32
72735+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
72736+{
72737+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
72738+}
72739+
72740+__u32
72741+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
72742+{
72743+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
72744+}
72745+
72746+__u32
72747+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
72748+{
72749+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
72750+}
72751+
72752+__u32
72753+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
72754+ umode_t *modeptr)
72755+{
72756+ umode_t mode;
72757+
72758+ *modeptr &= ~gr_acl_umask();
72759+ mode = *modeptr;
72760+
72761+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
72762+ return 1;
72763+
72764+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
72765+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
72766+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
72767+ GR_CHMOD_ACL_MSG);
72768+ } else {
72769+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
72770+ }
72771+}
72772+
72773+__u32
72774+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
72775+{
72776+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
72777+}
72778+
72779+__u32
72780+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
72781+{
72782+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
72783+}
72784+
72785+__u32
72786+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
72787+{
72788+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
72789+}
72790+
72791+__u32
72792+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
72793+{
72794+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
72795+}
72796+
72797+__u32
72798+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
72799+{
72800+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
72801+ GR_UNIXCONNECT_ACL_MSG);
72802+}
72803+
72804+/* hardlinks require at minimum create and link permission;
72805+ any additional privilege required is based on the
72806+ privilege of the file being linked to
72807+*/
72808+__u32
72809+gr_acl_handle_link(const struct dentry * new_dentry,
72810+ const struct dentry * parent_dentry,
72811+ const struct vfsmount * parent_mnt,
72812+ const struct dentry * old_dentry,
72813+ const struct vfsmount * old_mnt, const struct filename *to)
72814+{
72815+ __u32 mode;
72816+ __u32 needmode = GR_CREATE | GR_LINK;
72817+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
72818+
72819+ mode =
72820+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
72821+ old_mnt);
72822+
72823+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
72824+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
72825+ return mode;
72826+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
72827+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
72828+ return 0;
72829+ } else if (unlikely((mode & needmode) != needmode))
72830+ return 0;
72831+
72832+ return 1;
72833+}
72834+
72835+__u32
72836+gr_acl_handle_symlink(const struct dentry * new_dentry,
72837+ const struct dentry * parent_dentry,
72838+ const struct vfsmount * parent_mnt, const struct filename *from)
72839+{
72840+ __u32 needmode = GR_WRITE | GR_CREATE;
72841+ __u32 mode;
72842+
72843+ mode =
72844+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
72845+ GR_CREATE | GR_AUDIT_CREATE |
72846+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
72847+
72848+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
72849+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
72850+ return mode;
72851+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
72852+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
72853+ return 0;
72854+ } else if (unlikely((mode & needmode) != needmode))
72855+ return 0;
72856+
72857+ return (GR_WRITE | GR_CREATE);
72858+}
72859+
72860+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
72861+{
72862+ __u32 mode;
72863+
72864+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72865+
72866+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72867+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
72868+ return mode;
72869+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72870+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
72871+ return 0;
72872+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72873+ return 0;
72874+
72875+ return (reqmode);
72876+}
72877+
72878+__u32
72879+gr_acl_handle_mknod(const struct dentry * new_dentry,
72880+ const struct dentry * parent_dentry,
72881+ const struct vfsmount * parent_mnt,
72882+ const int mode)
72883+{
72884+ __u32 reqmode = GR_WRITE | GR_CREATE;
72885+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72886+ reqmode |= GR_SETID;
72887+
72888+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
72889+ reqmode, GR_MKNOD_ACL_MSG);
72890+}
72891+
72892+__u32
72893+gr_acl_handle_mkdir(const struct dentry *new_dentry,
72894+ const struct dentry *parent_dentry,
72895+ const struct vfsmount *parent_mnt)
72896+{
72897+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
72898+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
72899+}
72900+
72901+#define RENAME_CHECK_SUCCESS(old, new) \
72902+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
72903+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
72904+
72905+int
72906+gr_acl_handle_rename(struct dentry *new_dentry,
72907+ struct dentry *parent_dentry,
72908+ const struct vfsmount *parent_mnt,
72909+ struct dentry *old_dentry,
72910+ struct inode *old_parent_inode,
72911+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
72912+{
72913+ __u32 comp1, comp2;
72914+ int error = 0;
72915+
72916+ if (unlikely(!gr_acl_is_enabled()))
72917+ return 0;
72918+
72919+ if (flags & RENAME_EXCHANGE) {
72920+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
72921+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72922+ GR_SUPPRESS, parent_mnt);
72923+ comp2 =
72924+ gr_search_file(old_dentry,
72925+ GR_READ | GR_WRITE | GR_AUDIT_READ |
72926+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
72927+ } else if (d_is_negative(new_dentry)) {
72928+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
72929+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
72930+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
72931+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
72932+ GR_DELETE | GR_AUDIT_DELETE |
72933+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72934+ GR_SUPPRESS, old_mnt);
72935+ } else {
72936+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
72937+ GR_CREATE | GR_DELETE |
72938+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
72939+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72940+ GR_SUPPRESS, parent_mnt);
72941+ comp2 =
72942+ gr_search_file(old_dentry,
72943+ GR_READ | GR_WRITE | GR_AUDIT_READ |
72944+ GR_DELETE | GR_AUDIT_DELETE |
72945+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
72946+ }
72947+
72948+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
72949+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
72950+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
72951+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
72952+ && !(comp2 & GR_SUPPRESS)) {
72953+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
72954+ error = -EACCES;
72955+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
72956+ error = -EACCES;
72957+
72958+ return error;
72959+}
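
RENAME_CHECK_SUCCESS() demands both read and write on the source lookup and on the destination lookup before the rename may proceed. A two-case demonstration of the macro's truth table, with the flag values redefined locally for self-containment:

#include <stdio.h>

#define GR_READ  0x1
#define GR_WRITE 0x2

#define RENAME_CHECK_SUCCESS(old, new) \
	((((old) & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
	 (((new) & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))

int main(void)
{
	/* both sides readable and writable: rename allowed */
	printf("%d\n", RENAME_CHECK_SUCCESS(GR_READ | GR_WRITE, GR_READ | GR_WRITE));
	/* source lacks write: rename refused */
	printf("%d\n", RENAME_CHECK_SUCCESS(GR_READ, GR_READ | GR_WRITE));
	return 0;
}
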
72960+
72961+void
72962+gr_acl_handle_exit(void)
72963+{
72964+ u16 id;
72965+ char *rolename;
72966+
72967+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
72968+ !(current->role->roletype & GR_ROLE_PERSIST))) {
72969+ id = current->acl_role_id;
72970+ rolename = current->role->rolename;
72971+ gr_set_acls(1);
72972+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
72973+ }
72974+
72975+ gr_put_exec_file(current);
72976+ return;
72977+}
72978+
72979+int
72980+gr_acl_handle_procpidmem(const struct task_struct *task)
72981+{
72982+ if (unlikely(!gr_acl_is_enabled()))
72983+ return 0;
72984+
72985+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
72986+ !(current->acl->mode & GR_POVERRIDE) &&
72987+ !(current->role->roletype & GR_ROLE_GOD))
72988+ return -EACCES;
72989+
72990+ return 0;
72991+}
72992diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
72993new file mode 100644
72994index 0000000..f056b81
72995--- /dev/null
72996+++ b/grsecurity/gracl_ip.c
72997@@ -0,0 +1,386 @@
72998+#include <linux/kernel.h>
72999+#include <asm/uaccess.h>
73000+#include <asm/errno.h>
73001+#include <net/sock.h>
73002+#include <linux/file.h>
73003+#include <linux/fs.h>
73004+#include <linux/net.h>
73005+#include <linux/in.h>
73006+#include <linux/skbuff.h>
73007+#include <linux/ip.h>
73008+#include <linux/udp.h>
73009+#include <linux/types.h>
73010+#include <linux/sched.h>
73011+#include <linux/netdevice.h>
73012+#include <linux/inetdevice.h>
73013+#include <linux/gracl.h>
73014+#include <linux/grsecurity.h>
73015+#include <linux/grinternal.h>
73016+
73017+#define GR_BIND 0x01
73018+#define GR_CONNECT 0x02
73019+#define GR_INVERT 0x04
73020+#define GR_BINDOVERRIDE 0x08
73021+#define GR_CONNECTOVERRIDE 0x10
73022+#define GR_SOCK_FAMILY 0x20
73023+
73024+static const char * gr_protocols[IPPROTO_MAX] = {
73025+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
73026+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
73027+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
73028+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
73029+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
73030+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
73031+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
73032+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
73033+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
73034+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
73035+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
73036+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
73037+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
73038+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
73039+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
73040+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
73041+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
73042+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
73043+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
73044+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
73045+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
73046+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
73047+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
73048+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
73049+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
73050+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
73051+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
73052+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
73053+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
73054+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
73055+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
73056+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
73057+ };
73058+
73059+static const char * gr_socktypes[SOCK_MAX] = {
73060+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
73061+ "unknown:7", "unknown:8", "unknown:9", "packet"
73062+ };
73063+
73064+static const char * gr_sockfamilies[AF_MAX+1] = {
73065+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
73066+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
73067+	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
73068+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
73069+ };
73070+
73071+const char *
73072+gr_proto_to_name(unsigned char proto)
73073+{
73074+ return gr_protocols[proto];
73075+}
73076+
73077+const char *
73078+gr_socktype_to_name(unsigned char type)
73079+{
73080+ return gr_socktypes[type];
73081+}
73082+
73083+const char *
73084+gr_sockfamily_to_name(unsigned char family)
73085+{
73086+ return gr_sockfamilies[family];
73087+}
73088+
73089+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
73090+
73091+int
73092+gr_search_socket(const int domain, const int type, const int protocol)
73093+{
73094+ struct acl_subject_label *curr;
73095+ const struct cred *cred = current_cred();
73096+
73097+ if (unlikely(!gr_acl_is_enabled()))
73098+ goto exit;
73099+
73100+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
73101+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
73102+ goto exit; // let the kernel handle it
73103+
73104+ curr = current->acl;
73105+
73106+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
73107+		/* the family is allowed; if this is PF_INET, allow it only if
73108+		   the extra sock type/protocol checks pass */
73109+ if (domain == PF_INET)
73110+ goto inet_check;
73111+ goto exit;
73112+ } else {
73113+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73114+ __u32 fakeip = 0;
73115+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73116+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73117+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73118+ gr_to_filename(current->exec_file->f_path.dentry,
73119+ current->exec_file->f_path.mnt) :
73120+ curr->filename, curr->filename,
73121+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
73122+ &current->signal->saved_ip);
73123+ goto exit;
73124+ }
73125+ goto exit_fail;
73126+ }
73127+
73128+inet_check:
73129+ /* the rest of this checking is for IPv4 only */
73130+ if (!curr->ips)
73131+ goto exit;
73132+
73133+ if ((curr->ip_type & (1U << type)) &&
73134+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
73135+ goto exit;
73136+
73137+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73138+		/* we don't place acls on raw sockets, and sometimes
73139+ dgram/ip sockets are opened for ioctl and not
73140+ bind/connect, so we'll fake a bind learn log */
73141+ if (type == SOCK_RAW || type == SOCK_PACKET) {
73142+ __u32 fakeip = 0;
73143+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73144+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73145+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73146+ gr_to_filename(current->exec_file->f_path.dentry,
73147+ current->exec_file->f_path.mnt) :
73148+ curr->filename, curr->filename,
73149+ &fakeip, 0, type,
73150+ protocol, GR_CONNECT, &current->signal->saved_ip);
73151+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
73152+ __u32 fakeip = 0;
73153+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73154+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73155+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73156+ gr_to_filename(current->exec_file->f_path.dentry,
73157+ current->exec_file->f_path.mnt) :
73158+ curr->filename, curr->filename,
73159+ &fakeip, 0, type,
73160+ protocol, GR_BIND, &current->signal->saved_ip);
73161+ }
73162+ /* we'll log when they use connect or bind */
73163+ goto exit;
73164+ }
73165+
73166+exit_fail:
73167+ if (domain == PF_INET)
73168+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
73169+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
73170+ else if (rcu_access_pointer(net_families[domain]) != NULL)
73171+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
73172+ gr_socktype_to_name(type), protocol);
73173+
73174+ return 0;
73175+exit:
73176+ return 1;
73177+}
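
The sock_families test above treats the per-subject policy as a flat bitmap: word index domain / 32, bit index domain % 32. The same indexing in isolation, with PF_INET's value copied from the standard socket headers for self-containment:

#include <stdio.h>

#define PF_INET 2  /* value from <sys/socket.h>, inlined here */

int main(void)
{
	unsigned int sock_families[8] = { 0 };  /* 8 * 32 = 256 family bits */
	int domain = PF_INET;

	/* policy-compiler side: mark the family as allowed */
	sock_families[domain / 32] |= 1U << (domain % 32);

	/* enforcement side: the check used by gr_search_socket() */
	printf("PF_INET allowed: %s\n",
	       (sock_families[domain / 32] & (1U << (domain % 32))) ? "yes" : "no");
	return 0;
}
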
73178+
73179+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
73180+{
73181+ if ((ip->mode & mode) &&
73182+ (ip_port >= ip->low) &&
73183+ (ip_port <= ip->high) &&
73184+ ((ntohl(ip_addr) & our_netmask) ==
73185+ (ntohl(our_addr) & our_netmask))
73186+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
73187+ && (ip->type & (1U << type))) {
73188+ if (ip->mode & GR_INVERT)
73189+ return 2; // specifically denied
73190+ else
73191+ return 1; // allowed
73192+ }
73193+
73194+ return 0; // not specifically allowed, may continue parsing
73195+}
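
check_ip_policy() is deliberately tri-state: 1 short-circuits as an allow, 2 short-circuits as an explicit deny (a GR_INVERT rule), and 0 means "no opinion, keep scanning the rule list", with a fall-through deny when nothing matches. A sketch of how a caller consumes that contract; check_rule() is a hypothetical stand-in where a negative rule plays the role of an inverted one:

#include <stdio.h>

/* toy evaluator with the same contract as check_ip_policy():
 * 1 = allowed, 2 = explicitly denied (inverted rule), 0 = no match */
static int check_rule(int rule, int packet)
{
	if (rule < 0)  /* negative = inverted rule in this toy model */
		return (-rule == packet) ? 2 : 0;
	return (rule == packet) ? 1 : 0;
}

int main(void)
{
	int rules[] = { 10, -20, 30 };
	int packet = 20;

	for (int i = 0; i < 3; i++) {
		int ret = check_rule(rules[i], packet);
		if (ret == 1) { printf("allowed by rule %d\n", i); return 0; }
		if (ret == 2) { printf("denied by rule %d\n", i);  return 1; }
		/* ret == 0: fall through to the next rule */
	}
	printf("no rule matched: denied\n");
	return 1;
}
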
73196+
73197+static int
73198+gr_search_connectbind(const int full_mode, struct sock *sk,
73199+ struct sockaddr_in *addr, const int type)
73200+{
73201+ char iface[IFNAMSIZ] = {0};
73202+ struct acl_subject_label *curr;
73203+ struct acl_ip_label *ip;
73204+ struct inet_sock *isk;
73205+ struct net_device *dev;
73206+ struct in_device *idev;
73207+ unsigned long i;
73208+ int ret;
73209+ int mode = full_mode & (GR_BIND | GR_CONNECT);
73210+ __u32 ip_addr = 0;
73211+ __u32 our_addr;
73212+ __u32 our_netmask;
73213+ char *p;
73214+ __u16 ip_port = 0;
73215+ const struct cred *cred = current_cred();
73216+
73217+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
73218+ return 0;
73219+
73220+ curr = current->acl;
73221+ isk = inet_sk(sk);
73222+
73223+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
73224+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
73225+ addr->sin_addr.s_addr = curr->inaddr_any_override;
73226+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
73227+ struct sockaddr_in saddr;
73228+ int err;
73229+
73230+ saddr.sin_family = AF_INET;
73231+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
73232+ saddr.sin_port = isk->inet_sport;
73233+
73234+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73235+ if (err)
73236+ return err;
73237+
73238+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73239+ if (err)
73240+ return err;
73241+ }
73242+
73243+ if (!curr->ips)
73244+ return 0;
73245+
73246+ ip_addr = addr->sin_addr.s_addr;
73247+ ip_port = ntohs(addr->sin_port);
73248+
73249+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73250+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73251+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73252+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73253+ gr_to_filename(current->exec_file->f_path.dentry,
73254+ current->exec_file->f_path.mnt) :
73255+ curr->filename, curr->filename,
73256+ &ip_addr, ip_port, type,
73257+ sk->sk_protocol, mode, &current->signal->saved_ip);
73258+ return 0;
73259+ }
73260+
73261+ for (i = 0; i < curr->ip_num; i++) {
73262+ ip = *(curr->ips + i);
73263+ if (ip->iface != NULL) {
73264+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
73265+ p = strchr(iface, ':');
73266+ if (p != NULL)
73267+ *p = '\0';
73268+ dev = dev_get_by_name(sock_net(sk), iface);
73269+ if (dev == NULL)
73270+ continue;
73271+ idev = in_dev_get(dev);
73272+ if (idev == NULL) {
73273+ dev_put(dev);
73274+ continue;
73275+ }
73276+ rcu_read_lock();
73277+ for_ifa(idev) {
73278+ if (!strcmp(ip->iface, ifa->ifa_label)) {
73279+ our_addr = ifa->ifa_address;
73280+ our_netmask = 0xffffffff;
73281+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73282+ if (ret == 1) {
73283+ rcu_read_unlock();
73284+ in_dev_put(idev);
73285+ dev_put(dev);
73286+ return 0;
73287+ } else if (ret == 2) {
73288+ rcu_read_unlock();
73289+ in_dev_put(idev);
73290+ dev_put(dev);
73291+ goto denied;
73292+ }
73293+ }
73294+ } endfor_ifa(idev);
73295+ rcu_read_unlock();
73296+ in_dev_put(idev);
73297+ dev_put(dev);
73298+ } else {
73299+ our_addr = ip->addr;
73300+ our_netmask = ip->netmask;
73301+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73302+ if (ret == 1)
73303+ return 0;
73304+ else if (ret == 2)
73305+ goto denied;
73306+ }
73307+ }
73308+
73309+denied:
73310+ if (mode == GR_BIND)
73311+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73312+ else if (mode == GR_CONNECT)
73313+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73314+
73315+ return -EACCES;
73316+}
73317+
73318+int
73319+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
73320+{
73321+ /* always allow disconnection of dgram sockets with connect */
73322+ if (addr->sin_family == AF_UNSPEC)
73323+ return 0;
73324+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
73325+}
73326+
73327+int
73328+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
73329+{
73330+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
73331+}
73332+
73333+int gr_search_listen(struct socket *sock)
73334+{
73335+ struct sock *sk = sock->sk;
73336+ struct sockaddr_in addr;
73337+
73338+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73339+ addr.sin_port = inet_sk(sk)->inet_sport;
73340+
73341+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73342+}
73343+
73344+int gr_search_accept(struct socket *sock)
73345+{
73346+ struct sock *sk = sock->sk;
73347+ struct sockaddr_in addr;
73348+
73349+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73350+ addr.sin_port = inet_sk(sk)->inet_sport;
73351+
73352+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73353+}
73354+
73355+int
73356+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
73357+{
73358+ if (addr)
73359+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
73360+ else {
73361+ struct sockaddr_in sin;
73362+ const struct inet_sock *inet = inet_sk(sk);
73363+
73364+ sin.sin_addr.s_addr = inet->inet_daddr;
73365+ sin.sin_port = inet->inet_dport;
73366+
73367+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73368+ }
73369+}
73370+
73371+int
73372+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
73373+{
73374+ struct sockaddr_in sin;
73375+
73376+ if (unlikely(skb->len < sizeof (struct udphdr)))
73377+ return 0; // skip this packet
73378+
73379+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
73380+ sin.sin_port = udp_hdr(skb)->source;
73381+
73382+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73383+}
73384diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
73385new file mode 100644
73386index 0000000..25f54ef
73387--- /dev/null
73388+++ b/grsecurity/gracl_learn.c
73389@@ -0,0 +1,207 @@
73390+#include <linux/kernel.h>
73391+#include <linux/mm.h>
73392+#include <linux/sched.h>
73393+#include <linux/poll.h>
73394+#include <linux/string.h>
73395+#include <linux/file.h>
73396+#include <linux/types.h>
73397+#include <linux/vmalloc.h>
73398+#include <linux/grinternal.h>
73399+
73400+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
73401+ size_t count, loff_t *ppos);
73402+extern int gr_acl_is_enabled(void);
73403+
73404+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
73405+static int gr_learn_attached;
73406+
73407+/* use a 512k buffer */
73408+#define LEARN_BUFFER_SIZE (512 * 1024)
73409+
73410+static DEFINE_SPINLOCK(gr_learn_lock);
73411+static DEFINE_MUTEX(gr_learn_user_mutex);
73412+
73413+/* we need to maintain two buffers, so that the kernel context of grlearn
73414+   uses a mutex around the userspace copying, and the other kernel contexts
73415+ use a spinlock when copying into the buffer, since they cannot sleep
73416+*/
73417+static char *learn_buffer;
73418+static char *learn_buffer_user;
73419+static int learn_buffer_len;
73420+static int learn_buffer_user_len;
73421+
73422+static ssize_t
73423+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
73424+{
73425+ DECLARE_WAITQUEUE(wait, current);
73426+ ssize_t retval = 0;
73427+
73428+ add_wait_queue(&learn_wait, &wait);
73429+ set_current_state(TASK_INTERRUPTIBLE);
73430+ do {
73431+ mutex_lock(&gr_learn_user_mutex);
73432+ spin_lock(&gr_learn_lock);
73433+ if (learn_buffer_len)
73434+ break;
73435+ spin_unlock(&gr_learn_lock);
73436+ mutex_unlock(&gr_learn_user_mutex);
73437+ if (file->f_flags & O_NONBLOCK) {
73438+ retval = -EAGAIN;
73439+ goto out;
73440+ }
73441+ if (signal_pending(current)) {
73442+ retval = -ERESTARTSYS;
73443+ goto out;
73444+ }
73445+
73446+ schedule();
73447+ } while (1);
73448+
73449+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
73450+ learn_buffer_user_len = learn_buffer_len;
73451+ retval = learn_buffer_len;
73452+ learn_buffer_len = 0;
73453+
73454+ spin_unlock(&gr_learn_lock);
73455+
73456+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
73457+ retval = -EFAULT;
73458+
73459+ mutex_unlock(&gr_learn_user_mutex);
73460+out:
73461+ set_current_state(TASK_RUNNING);
73462+ remove_wait_queue(&learn_wait, &wait);
73463+ return retval;
73464+}
73465+
73466+static unsigned int
73467+poll_learn(struct file * file, poll_table * wait)
73468+{
73469+ poll_wait(file, &learn_wait, wait);
73470+
73471+ if (learn_buffer_len)
73472+ return (POLLIN | POLLRDNORM);
73473+
73474+ return 0;
73475+}
73476+
73477+void
73478+gr_clear_learn_entries(void)
73479+{
73480+ char *tmp;
73481+
73482+ mutex_lock(&gr_learn_user_mutex);
73483+ spin_lock(&gr_learn_lock);
73484+ tmp = learn_buffer;
73485+ learn_buffer = NULL;
73486+ spin_unlock(&gr_learn_lock);
73487+ if (tmp)
73488+ vfree(tmp);
73489+ if (learn_buffer_user != NULL) {
73490+ vfree(learn_buffer_user);
73491+ learn_buffer_user = NULL;
73492+ }
73493+ learn_buffer_len = 0;
73494+ mutex_unlock(&gr_learn_user_mutex);
73495+
73496+ return;
73497+}
73498+
73499+void
73500+gr_add_learn_entry(const char *fmt, ...)
73501+{
73502+ va_list args;
73503+ unsigned int len;
73504+
73505+ if (!gr_learn_attached)
73506+ return;
73507+
73508+ spin_lock(&gr_learn_lock);
73509+
73510+ /* leave a gap at the end so we know when it's "full" but don't have to
73511+ compute the exact length of the string we're trying to append
73512+ */
73513+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
73514+ spin_unlock(&gr_learn_lock);
73515+ wake_up_interruptible(&learn_wait);
73516+ return;
73517+ }
73518+ if (learn_buffer == NULL) {
73519+ spin_unlock(&gr_learn_lock);
73520+ return;
73521+ }
73522+
73523+ va_start(args, fmt);
73524+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
73525+ va_end(args);
73526+
73527+ learn_buffer_len += len + 1;
73528+
73529+ spin_unlock(&gr_learn_lock);
73530+ wake_up_interruptible(&learn_wait);
73531+
73532+ return;
73533+}
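
The producer/consumer split above is the point of the two buffers: writers append into learn_buffer under a spinlock because they may be in atomic context, while the reader snapshots into learn_buffer_user under a mutex before doing the (potentially sleeping) copy to userspace. A pthread sketch of that hand-off; the lock types mirror the kernel's spinlock/mutex split, and the buffer sizes are illustrative (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static char buf[256];   /* learn_buffer: filled under the spinlock */
static char snap[256];  /* learn_buffer_user: drained under the mutex */
static size_t buf_len;
static pthread_spinlock_t lock;
static pthread_mutex_t user_mutex = PTHREAD_MUTEX_INITIALIZER;

static void add_entry(const char *s)  /* producer: must not sleep */
{
	pthread_spin_lock(&lock);
	size_t n = strlen(s) + 1;
	if (buf_len + n <= sizeof(buf)) {  /* drop on overflow, like the gap check */
		memcpy(buf + buf_len, s, n);
		buf_len += n;
	}
	pthread_spin_unlock(&lock);
}

static size_t drain(void)  /* consumer: may block on userspace */
{
	pthread_mutex_lock(&user_mutex);
	pthread_spin_lock(&lock);
	size_t n = buf_len;
	memcpy(snap, buf, n);  /* snapshot, then release the spinlock quickly */
	buf_len = 0;
	pthread_spin_unlock(&lock);
	/* a slow copy_to_user() would happen here, spinlock already dropped */
	pthread_mutex_unlock(&user_mutex);
	return n;
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	add_entry("entry one");
	add_entry("entry two");
	printf("drained %zu bytes, first entry: %s\n", drain(), snap);
	return 0;
}
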
73534+
73535+static int
73536+open_learn(struct inode *inode, struct file *file)
73537+{
73538+ if (file->f_mode & FMODE_READ && gr_learn_attached)
73539+ return -EBUSY;
73540+ if (file->f_mode & FMODE_READ) {
73541+ int retval = 0;
73542+ mutex_lock(&gr_learn_user_mutex);
73543+ if (learn_buffer == NULL)
73544+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
73545+ if (learn_buffer_user == NULL)
73546+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
73547+ if (learn_buffer == NULL) {
73548+ retval = -ENOMEM;
73549+ goto out_error;
73550+ }
73551+ if (learn_buffer_user == NULL) {
73552+ retval = -ENOMEM;
73553+ goto out_error;
73554+ }
73555+ learn_buffer_len = 0;
73556+ learn_buffer_user_len = 0;
73557+ gr_learn_attached = 1;
73558+out_error:
73559+ mutex_unlock(&gr_learn_user_mutex);
73560+ return retval;
73561+ }
73562+ return 0;
73563+}
73564+
73565+static int
73566+close_learn(struct inode *inode, struct file *file)
73567+{
73568+ if (file->f_mode & FMODE_READ) {
73569+ char *tmp = NULL;
73570+ mutex_lock(&gr_learn_user_mutex);
73571+ spin_lock(&gr_learn_lock);
73572+ tmp = learn_buffer;
73573+ learn_buffer = NULL;
73574+ spin_unlock(&gr_learn_lock);
73575+ if (tmp)
73576+ vfree(tmp);
73577+ if (learn_buffer_user != NULL) {
73578+ vfree(learn_buffer_user);
73579+ learn_buffer_user = NULL;
73580+ }
73581+ learn_buffer_len = 0;
73582+ learn_buffer_user_len = 0;
73583+ gr_learn_attached = 0;
73584+ mutex_unlock(&gr_learn_user_mutex);
73585+ }
73586+
73587+ return 0;
73588+}
73589+
73590+const struct file_operations grsec_fops = {
73591+ .read = read_learn,
73592+ .write = write_grsec_handler,
73593+ .open = open_learn,
73594+ .release = close_learn,
73595+ .poll = poll_learn,
73596+};
73597diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
73598new file mode 100644
73599index 0000000..fd26052
73600--- /dev/null
73601+++ b/grsecurity/gracl_policy.c
73602@@ -0,0 +1,1781 @@
73603+#include <linux/kernel.h>
73604+#include <linux/module.h>
73605+#include <linux/sched.h>
73606+#include <linux/mm.h>
73607+#include <linux/file.h>
73608+#include <linux/fs.h>
73609+#include <linux/namei.h>
73610+#include <linux/mount.h>
73611+#include <linux/tty.h>
73612+#include <linux/proc_fs.h>
73613+#include <linux/lglock.h>
73614+#include <linux/slab.h>
73615+#include <linux/vmalloc.h>
73616+#include <linux/types.h>
73617+#include <linux/sysctl.h>
73618+#include <linux/netdevice.h>
73619+#include <linux/ptrace.h>
73620+#include <linux/gracl.h>
73621+#include <linux/gralloc.h>
73622+#include <linux/security.h>
73623+#include <linux/grinternal.h>
73624+#include <linux/pid_namespace.h>
73625+#include <linux/stop_machine.h>
73626+#include <linux/fdtable.h>
73627+#include <linux/percpu.h>
73628+#include <linux/lglock.h>
73629+#include <linux/hugetlb.h>
73630+#include <linux/posix-timers.h>
73631+#include "../fs/mount.h"
73632+
73633+#include <asm/uaccess.h>
73634+#include <asm/errno.h>
73635+#include <asm/mman.h>
73636+
73637+extern struct gr_policy_state *polstate;
73638+
73639+#define FOR_EACH_ROLE_START(role) \
73640+ role = polstate->role_list; \
73641+ while (role) {
73642+
73643+#define FOR_EACH_ROLE_END(role) \
73644+ role = role->prev; \
73645+ }
73646+
73647+struct path gr_real_root;
73648+
73649+extern struct gr_alloc_state *current_alloc_state;
73650+
73651+u16 acl_sp_role_value;
73652+
73653+static DEFINE_MUTEX(gr_dev_mutex);
73654+
73655+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
73656+extern void gr_clear_learn_entries(void);
73657+
73658+struct gr_arg *gr_usermode __read_only;
73659+unsigned char *gr_system_salt __read_only;
73660+unsigned char *gr_system_sum __read_only;
73661+
73662+static unsigned int gr_auth_attempts = 0;
73663+static unsigned long gr_auth_expires = 0UL;
73664+
73665+struct acl_object_label *fakefs_obj_rw;
73666+struct acl_object_label *fakefs_obj_rwx;
73667+
73668+extern int gr_init_uidset(void);
73669+extern void gr_free_uidset(void);
73670+extern void gr_remove_uid(uid_t uid);
73671+extern int gr_find_uid(uid_t uid);
73672+
73673+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
73674+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
73675+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
73676+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
73677+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
73678+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
73679+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
73680+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
73681+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
73682+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73683+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73684+extern void assign_special_role(const char *rolename);
73685+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
73686+extern int gr_rbac_disable(void *unused);
73687+extern void gr_enable_rbac_system(void);
73688+
73689+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
73690+{
73691+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
73692+ return -EFAULT;
73693+
73694+ return 0;
73695+}
73696+
73697+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73698+{
73699+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
73700+ return -EFAULT;
73701+
73702+ return 0;
73703+}
73704+
73705+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73706+{
73707+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
73708+ return -EFAULT;
73709+
73710+ return 0;
73711+}
73712+
73713+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
73714+{
73715+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
73716+ return -EFAULT;
73717+
73718+ return 0;
73719+}
73720+
73721+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73722+{
73723+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
73724+ return -EFAULT;
73725+
73726+ return 0;
73727+}
73728+
73729+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73730+{
73731+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
73732+ return -EFAULT;
73733+
73734+ return 0;
73735+}
73736+
73737+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73738+{
73739+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
73740+ return -EFAULT;
73741+
73742+ return 0;
73743+}
73744+
73745+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
73746+{
73747+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
73748+ return -EFAULT;
73749+
73750+ return 0;
73751+}
73752+
73753+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
73754+{
73755+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
73756+ return -EFAULT;
73757+
73758+ return 0;
73759+}
73760+
73761+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
73762+{
73763+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
73764+ return -EFAULT;
73765+
73766+ if ((uwrap->version != GRSECURITY_VERSION) ||
73767+ (uwrap->size != sizeof(struct gr_arg)))
73768+ return -EINVAL;
73769+
73770+ return 0;
73771+}
73772+
73773+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
73774+{
73775+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
73776+ return -EFAULT;
73777+
73778+ return 0;
73779+}
73780+
73781+static size_t get_gr_arg_wrapper_size_normal(void)
73782+{
73783+ return sizeof(struct gr_arg_wrapper);
73784+}
73785+
73786+#ifdef CONFIG_COMPAT
73787+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
73788+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
73789+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
73790+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
73791+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
73792+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
73793+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
73794+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
73795+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
73796+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
73797+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
73798+extern size_t get_gr_arg_wrapper_size_compat(void);
73799+
73800+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
73801+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
73802+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
73803+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
73804+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
73805+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
73806+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
73807+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
73808+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
73809+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
73810+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
73811+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
73812+
73813+#else
73814+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
73815+#define copy_gr_arg copy_gr_arg_normal
73816+#define copy_gr_hash_struct copy_gr_hash_struct_normal
73817+#define copy_acl_object_label copy_acl_object_label_normal
73818+#define copy_acl_subject_label copy_acl_subject_label_normal
73819+#define copy_acl_role_label copy_acl_role_label_normal
73820+#define copy_acl_ip_label copy_acl_ip_label_normal
73821+#define copy_pointer_from_array copy_pointer_from_array_normal
73822+#define copy_sprole_pw copy_sprole_pw_normal
73823+#define copy_role_transition copy_role_transition_normal
73824+#define copy_role_allowed_ip copy_role_allowed_ip_normal
73825+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
73826+#endif
73827+
73828+static struct acl_subject_label *
73829+lookup_subject_map(const struct acl_subject_label *userp)
73830+{
73831+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
73832+ struct subject_map *match;
73833+
73834+ match = polstate->subj_map_set.s_hash[index];
73835+
73836+ while (match && match->user != userp)
73837+ match = match->next;
73838+
73839+ if (match != NULL)
73840+ return match->kernel;
73841+ else
73842+ return NULL;
73843+}
73844+
73845+static void
73846+insert_subj_map_entry(struct subject_map *subjmap)
73847+{
73848+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
73849+ struct subject_map **curr;
73850+
73851+ subjmap->prev = NULL;
73852+
73853+ curr = &polstate->subj_map_set.s_hash[index];
73854+ if (*curr != NULL)
73855+ (*curr)->prev = subjmap;
73856+
73857+ subjmap->next = *curr;
73858+ *curr = subjmap;
73859+
73860+ return;
73861+}
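
The pair above — lookup_subject_map / insert_subj_map_entry — keeps a chained hash from userspace subject pointers to their kernel copies; this is what lets do_copy_user_subj further down copy each subject exactly once no matter how many objects or roles reference it. A minimal userland sketch of the same pattern follows; the table size and hash function are illustrative stand-ins for gr_shash(), and the prev back-links the kernel code maintains are omitted.

/* subj_map_demo.c -- userland sketch of a user->kernel pointer map;
 * hash and table size are illustrative, not the kernel's gr_shash(). */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define TABLE_SIZE 16

struct map_entry {
	const void *user;	/* key: pointer as supplied by userspace */
	void *kernel;		/* value: our copy */
	struct map_entry *next;
};

static struct map_entry *table[TABLE_SIZE];

static unsigned int hash_ptr(const void *p)
{
	return (unsigned int)(((uintptr_t)p >> 4) % TABLE_SIZE);
}

static void *map_lookup(const void *user)
{
	struct map_entry *e = table[hash_ptr(user)];

	while (e && e->user != user)
		e = e->next;
	return e ? e->kernel : NULL;
}

static void map_insert(const void *user, void *kernel)
{
	struct map_entry *e = malloc(sizeof(*e));

	e->user = user;
	e->kernel = kernel;
	e->next = table[hash_ptr(user)];	/* push onto the chain head */
	table[hash_ptr(user)] = e;
}

int main(void)
{
	int user_obj = 42, kernel_copy = 42;

	map_insert(&user_obj, &kernel_copy);
	printf("%s\n", map_lookup(&user_obj) == &kernel_copy ? "hit" : "miss");
	return 0;	/* prints: hit */
}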
73862+
73863+static void
73864+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
73865+{
73866+ unsigned int index =
73867+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
73868+ struct acl_role_label **curr;
73869+ struct acl_role_label *tmp, *tmp2;
73870+
73871+ curr = &polstate->acl_role_set.r_hash[index];
73872+
73873+ /* simple case, slot is empty, just set it to our role */
73874+ if (*curr == NULL) {
73875+ *curr = role;
73876+ } else {
73877+ /* example:
73878+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
73879+ 2 -> 3
73880+ */
73881+ /* first check to see if we can already be reached via this slot */
73882+ tmp = *curr;
73883+ while (tmp && tmp != role)
73884+ tmp = tmp->next;
73885+ if (tmp == role) {
73886+ /* we don't need to add ourselves to this slot's chain */
73887+ return;
73888+ }
73889+ /* we need to add ourselves to this chain, two cases */
73890+ if (role->next == NULL) {
73891+ /* simple case, append the current chain to our role */
73892+ role->next = *curr;
73893+ *curr = role;
73894+ } else {
73895+ /* 1 -> 2 -> 3 -> 4
73896+ 2 -> 3 -> 4
73897+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
73898+ */
73899+ /* trickier case: walk our role's chain until we find
73900+ the role for the start of the current slot's chain */
73901+ tmp = role;
73902+ tmp2 = *curr;
73903+ while (tmp->next && tmp->next != tmp2)
73904+ tmp = tmp->next;
73905+ if (tmp->next == tmp2) {
73906+ /* from example above, we found 3, so just
73907+ replace this slot's chain with ours */
73908+ *curr = role;
73909+ } else {
73910+				/* we didn't find a subset of our role's chain
73911+				   in the current slot's chain, so append their
73912+				   chain to ours, and set us as the first role in
73913+				   the slot's chain
73914+
73915+				   we could fold this case into the case above,
73916+				   but we keep it explicit for clarity
73917+				*/
73918+ tmp->next = tmp2;
73919+ *curr = role;
73920+ }
73921+ }
73922+ }
73923+
73924+ return;
73925+}
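
__insert_acl_role_label above lets several hash slots share a suffix of one role chain: a slot either already reaches the new role, gets the new role's chain prepended, or has its chain spliced onto the new role's tail. A standalone sketch of the same splice on integer-labelled nodes, with the two final branches folded into one as the comment above notes they could be:

/* role_chain_demo.c -- sketch of the shared-suffix chain insert above;
 * node ids stand in for roles; userland only. */
#include <stdio.h>

struct node { int id; struct node *next; };

/* one hash slot; mirrors *curr in __insert_acl_role_label */
static void insert_chain(struct node **curr, struct node *role)
{
	struct node *tmp, *tmp2;

	if (*curr == NULL) {			/* empty slot */
		*curr = role;
		return;
	}
	for (tmp = *curr; tmp && tmp != role; tmp = tmp->next)
		;
	if (tmp == role)			/* already reachable from this slot */
		return;
	if (role->next == NULL) {		/* append the slot's chain to us */
		role->next = *curr;
		*curr = role;
		return;
	}
	/* walk our chain looking for the head of the slot's chain */
	tmp = role;
	tmp2 = *curr;
	while (tmp->next && tmp->next != tmp2)
		tmp = tmp->next;
	if (tmp->next != tmp2)			/* no shared suffix: splice theirs on */
		tmp->next = tmp2;
	*curr = role;				/* either way we become the slot head */
}

int main(void)
{
	struct node n4 = {4, NULL}, n3 = {3, &n4}, n2 = {2, &n3}, n1 = {1, &n2};
	struct node *slot = NULL, *p;

	insert_chain(&slot, &n3);		/* slot: 3 -> 4 */
	insert_chain(&slot, &n1);		/* shared suffix found: 1 -> 2 -> 3 -> 4 */
	for (p = slot; p; p = p->next)
		printf("%d ", p->id);
	printf("\n");				/* prints: 1 2 3 4 */
	return 0;
}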
73926+
73927+static void
73928+insert_acl_role_label(struct acl_role_label *role)
73929+{
73930+ int i;
73931+
73932+ if (polstate->role_list == NULL) {
73933+ polstate->role_list = role;
73934+ role->prev = NULL;
73935+ } else {
73936+ role->prev = polstate->role_list;
73937+ polstate->role_list = role;
73938+ }
73939+
73940+ /* used for hash chains */
73941+ role->next = NULL;
73942+
73943+ if (role->roletype & GR_ROLE_DOMAIN) {
73944+ for (i = 0; i < role->domain_child_num; i++)
73945+ __insert_acl_role_label(role, role->domain_children[i]);
73946+ } else
73947+ __insert_acl_role_label(role, role->uidgid);
73948+}
73949+
73950+static int
73951+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
73952+{
73953+ struct name_entry **curr, *nentry;
73954+ struct inodev_entry *ientry;
73955+ unsigned int len = strlen(name);
73956+ unsigned int key = full_name_hash(name, len);
73957+ unsigned int index = key % polstate->name_set.n_size;
73958+
73959+ curr = &polstate->name_set.n_hash[index];
73960+
73961+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
73962+ curr = &((*curr)->next);
73963+
73964+ if (*curr != NULL)
73965+ return 1;
73966+
73967+ nentry = acl_alloc(sizeof (struct name_entry));
73968+ if (nentry == NULL)
73969+ return 0;
73970+ ientry = acl_alloc(sizeof (struct inodev_entry));
73971+ if (ientry == NULL)
73972+ return 0;
73973+ ientry->nentry = nentry;
73974+
73975+ nentry->key = key;
73976+ nentry->name = name;
73977+ nentry->inode = inode;
73978+ nentry->device = device;
73979+ nentry->len = len;
73980+ nentry->deleted = deleted;
73981+
73982+ nentry->prev = NULL;
73983+ curr = &polstate->name_set.n_hash[index];
73984+ if (*curr != NULL)
73985+ (*curr)->prev = nentry;
73986+ nentry->next = *curr;
73987+ *curr = nentry;
73988+
73989+ /* insert us into the table searchable by inode/dev */
73990+ __insert_inodev_entry(polstate, ientry);
73991+
73992+ return 1;
73993+}
73994+
73995+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
73996+
73997+static void *
73998+create_table(__u32 * len, int elementsize)
73999+{
74000+ unsigned int table_sizes[] = {
74001+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
74002+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
74003+ 4194301, 8388593, 16777213, 33554393, 67108859
74004+ };
74005+ void *newtable = NULL;
74006+ unsigned int pwr = 0;
74007+
74008+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
74009+ table_sizes[pwr] <= *len)
74010+ pwr++;
74011+
74012+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
74013+ return newtable;
74014+
74015+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
74016+ newtable =
74017+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
74018+ else
74019+ newtable = vmalloc(table_sizes[pwr] * elementsize);
74020+
74021+ *len = table_sizes[pwr];
74022+
74023+ return newtable;
74024+}
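
create_table rounds the requested element count up to the next prime from a fixed list, so each chained hash table keeps a load factor (lambda) near 1; the kmalloc/vmalloc split then simply picks an allocator by whether the table fits in a page. A userland sketch of the size selection alone, assuming the same prime list:

/* table_size_demo.c -- the prime-size selection from create_table above,
 * in userland; the kmalloc/vmalloc choice and overflow check are omitted. */
#include <stdio.h>

static const unsigned int table_sizes[] = {
	7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
	32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
	4194301, 8388593, 16777213, 33554393, 67108859
};

/* smallest listed prime strictly greater than wanted; 0 if none fits */
static unsigned int pick_size(unsigned int wanted)
{
	const unsigned int n = sizeof(table_sizes) / sizeof(table_sizes[0]);
	unsigned int pwr = 0;

	while (pwr < n - 1 && table_sizes[pwr] <= wanted)
		pwr++;
	if (table_sizes[pwr] <= wanted)
		return 0;
	return table_sizes[pwr];
}

int main(void)
{
	printf("%u\n", pick_size(0));	/* 7 */
	printf("%u\n", pick_size(7));	/* 13 */
	printf("%u\n", pick_size(100));	/* 127 */
	return 0;
}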
74025+
74026+static int
74027+init_variables(const struct gr_arg *arg, bool reload)
74028+{
74029+ struct task_struct *reaper = init_pid_ns.child_reaper;
74030+ unsigned int stacksize;
74031+
74032+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
74033+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
74034+ polstate->name_set.n_size = arg->role_db.num_objects;
74035+ polstate->inodev_set.i_size = arg->role_db.num_objects;
74036+
74037+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
74038+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
74039+ return 1;
74040+
74041+ if (!reload) {
74042+ if (!gr_init_uidset())
74043+ return 1;
74044+ }
74045+
74046+ /* set up the stack that holds allocation info */
74047+
74048+ stacksize = arg->role_db.num_pointers + 5;
74049+
74050+ if (!acl_alloc_stack_init(stacksize))
74051+ return 1;
74052+
74053+ if (!reload) {
74054+ /* grab reference for the real root dentry and vfsmount */
74055+ get_fs_root(reaper->fs, &gr_real_root);
74056+
74057+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74058+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
74059+#endif
74060+
74061+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74062+ if (fakefs_obj_rw == NULL)
74063+ return 1;
74064+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
74065+
74066+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74067+ if (fakefs_obj_rwx == NULL)
74068+ return 1;
74069+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
74070+ }
74071+
74072+ polstate->subj_map_set.s_hash =
74073+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
74074+ polstate->acl_role_set.r_hash =
74075+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
74076+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
74077+ polstate->inodev_set.i_hash =
74078+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
74079+
74080+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
74081+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
74082+ return 1;
74083+
74084+ memset(polstate->subj_map_set.s_hash, 0,
74085+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
74086+ memset(polstate->acl_role_set.r_hash, 0,
74087+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
74088+ memset(polstate->name_set.n_hash, 0,
74089+ sizeof (struct name_entry *) * polstate->name_set.n_size);
74090+ memset(polstate->inodev_set.i_hash, 0,
74091+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
74092+
74093+ return 0;
74094+}
74095+
74096+/* free information not needed after startup;
74097+   currently this is just the user->kernel pointer mappings for subjects
74098+*/
74099+
74100+static void
74101+free_init_variables(void)
74102+{
74103+ __u32 i;
74104+
74105+ if (polstate->subj_map_set.s_hash) {
74106+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
74107+ if (polstate->subj_map_set.s_hash[i]) {
74108+ kfree(polstate->subj_map_set.s_hash[i]);
74109+ polstate->subj_map_set.s_hash[i] = NULL;
74110+ }
74111+ }
74112+
74113+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
74114+ PAGE_SIZE)
74115+ kfree(polstate->subj_map_set.s_hash);
74116+ else
74117+ vfree(polstate->subj_map_set.s_hash);
74118+ }
74119+
74120+ return;
74121+}
74122+
74123+static void
74124+free_variables(bool reload)
74125+{
74126+ struct acl_subject_label *s;
74127+ struct acl_role_label *r;
74128+ struct task_struct *task, *task2;
74129+ unsigned int x;
74130+
74131+ if (!reload) {
74132+ gr_clear_learn_entries();
74133+
74134+ read_lock(&tasklist_lock);
74135+ do_each_thread(task2, task) {
74136+ task->acl_sp_role = 0;
74137+ task->acl_role_id = 0;
74138+ task->inherited = 0;
74139+ task->acl = NULL;
74140+ task->role = NULL;
74141+ } while_each_thread(task2, task);
74142+ read_unlock(&tasklist_lock);
74143+
74144+ kfree(fakefs_obj_rw);
74145+ fakefs_obj_rw = NULL;
74146+ kfree(fakefs_obj_rwx);
74147+ fakefs_obj_rwx = NULL;
74148+
74149+ /* release the reference to the real root dentry and vfsmount */
74150+ path_put(&gr_real_root);
74151+ memset(&gr_real_root, 0, sizeof(gr_real_root));
74152+ }
74153+
74154+ /* free all object hash tables */
74155+
74156+ FOR_EACH_ROLE_START(r)
74157+ if (r->subj_hash == NULL)
74158+ goto next_role;
74159+ FOR_EACH_SUBJECT_START(r, s, x)
74160+ if (s->obj_hash == NULL)
74161+ break;
74162+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74163+ kfree(s->obj_hash);
74164+ else
74165+ vfree(s->obj_hash);
74166+ FOR_EACH_SUBJECT_END(s, x)
74167+ FOR_EACH_NESTED_SUBJECT_START(r, s)
74168+ if (s->obj_hash == NULL)
74169+ break;
74170+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74171+ kfree(s->obj_hash);
74172+ else
74173+ vfree(s->obj_hash);
74174+ FOR_EACH_NESTED_SUBJECT_END(s)
74175+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
74176+ kfree(r->subj_hash);
74177+ else
74178+ vfree(r->subj_hash);
74179+ r->subj_hash = NULL;
74180+next_role:
74181+ FOR_EACH_ROLE_END(r)
74182+
74183+ acl_free_all();
74184+
74185+ if (polstate->acl_role_set.r_hash) {
74186+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
74187+ PAGE_SIZE)
74188+ kfree(polstate->acl_role_set.r_hash);
74189+ else
74190+ vfree(polstate->acl_role_set.r_hash);
74191+ }
74192+ if (polstate->name_set.n_hash) {
74193+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
74194+ PAGE_SIZE)
74195+ kfree(polstate->name_set.n_hash);
74196+ else
74197+ vfree(polstate->name_set.n_hash);
74198+ }
74199+
74200+ if (polstate->inodev_set.i_hash) {
74201+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
74202+ PAGE_SIZE)
74203+ kfree(polstate->inodev_set.i_hash);
74204+ else
74205+ vfree(polstate->inodev_set.i_hash);
74206+ }
74207+
74208+ if (!reload)
74209+ gr_free_uidset();
74210+
74211+ memset(&polstate->name_set, 0, sizeof (struct name_db));
74212+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
74213+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
74214+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
74215+
74216+ polstate->default_role = NULL;
74217+ polstate->kernel_role = NULL;
74218+ polstate->role_list = NULL;
74219+
74220+ return;
74221+}
74222+
74223+static struct acl_subject_label *
74224+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
74225+
74226+static int alloc_and_copy_string(char **name, unsigned int maxlen)
74227+{
74228+ unsigned int len = strnlen_user(*name, maxlen);
74229+ char *tmp;
74230+
74231+ if (!len || len >= maxlen)
74232+ return -EINVAL;
74233+
74234+ if ((tmp = (char *) acl_alloc(len)) == NULL)
74235+ return -ENOMEM;
74236+
74237+ if (copy_from_user(tmp, *name, len))
74238+ return -EFAULT;
74239+
74240+ tmp[len-1] = '\0';
74241+ *name = tmp;
74242+
74243+ return 0;
74244+}
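
alloc_and_copy_string is the usual bounded import of a user string: measure with strnlen_user (whose count includes the trailing NUL and is 0 on fault), reject strings with no NUL inside the bound, copy, then force termination because the user buffer can change between the measure and the copy. A userland analog, with strnlen()+1 standing in for strnlen_user:

/* bounded_copy_demo.c -- userland analog of the measure/bound/copy/terminate
 * pattern above; strnlen()+1 models strnlen_user's NUL-inclusive count
 * (the kernel additionally rejects a 0 return, which signals a fault). */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int copy_bounded_string(char **name, size_t maxlen)
{
	size_t len = strnlen(*name, maxlen) + 1;
	char *tmp;

	if (len >= maxlen)		/* no NUL found within the bound */
		return -EINVAL;
	tmp = malloc(len);
	if (tmp == NULL)
		return -ENOMEM;
	memcpy(tmp, *name, len);
	tmp[len - 1] = '\0';		/* re-terminate: the source could have
					   changed between measure and copy */
	*name = tmp;
	return 0;
}

int main(void)
{
	char *s = "sysadmin";

	if (copy_bounded_string(&s, 64) == 0)
		printf("%s\n", s);	/* prints: sysadmin */
	return 0;
}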
74245+
74246+static int
74247+copy_user_glob(struct acl_object_label *obj)
74248+{
74249+ struct acl_object_label *g_tmp, **guser;
74250+ int error;
74251+
74252+ if (obj->globbed == NULL)
74253+ return 0;
74254+
74255+ guser = &obj->globbed;
74256+ while (*guser) {
74257+ g_tmp = (struct acl_object_label *)
74258+ acl_alloc(sizeof (struct acl_object_label));
74259+ if (g_tmp == NULL)
74260+ return -ENOMEM;
74261+
74262+ if (copy_acl_object_label(g_tmp, *guser))
74263+ return -EFAULT;
74264+
74265+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
74266+ if (error)
74267+ return error;
74268+
74269+ *guser = g_tmp;
74270+ guser = &(g_tmp->next);
74271+ }
74272+
74273+ return 0;
74274+}
74275+
74276+static int
74277+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
74278+ struct acl_role_label *role)
74279+{
74280+ struct acl_object_label *o_tmp;
74281+ int ret;
74282+
74283+ while (userp) {
74284+ if ((o_tmp = (struct acl_object_label *)
74285+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
74286+ return -ENOMEM;
74287+
74288+ if (copy_acl_object_label(o_tmp, userp))
74289+ return -EFAULT;
74290+
74291+ userp = o_tmp->prev;
74292+
74293+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
74294+ if (ret)
74295+ return ret;
74296+
74297+ insert_acl_obj_label(o_tmp, subj);
74298+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
74299+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
74300+ return -ENOMEM;
74301+
74302+ ret = copy_user_glob(o_tmp);
74303+ if (ret)
74304+ return ret;
74305+
74306+ if (o_tmp->nested) {
74307+ int already_copied;
74308+
74309+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
74310+ if (IS_ERR(o_tmp->nested))
74311+ return PTR_ERR(o_tmp->nested);
74312+
74313+ /* insert into nested subject list if we haven't copied this one yet
74314+ to prevent duplicate entries */
74315+ if (!already_copied) {
74316+ o_tmp->nested->next = role->hash->first;
74317+ role->hash->first = o_tmp->nested;
74318+ }
74319+ }
74320+ }
74321+
74322+ return 0;
74323+}
74324+
74325+static __u32
74326+count_user_subjs(struct acl_subject_label *userp)
74327+{
74328+ struct acl_subject_label s_tmp;
74329+ __u32 num = 0;
74330+
74331+ while (userp) {
74332+ if (copy_acl_subject_label(&s_tmp, userp))
74333+ break;
74334+
74335+ userp = s_tmp.prev;
74336+ }
74337+
74338+ return num;
74339+}
74340+
74341+static int
74342+copy_user_allowedips(struct acl_role_label *rolep)
74343+{
74344+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
74345+
74346+ ruserip = rolep->allowed_ips;
74347+
74348+ while (ruserip) {
74349+ rlast = rtmp;
74350+
74351+ if ((rtmp = (struct role_allowed_ip *)
74352+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
74353+ return -ENOMEM;
74354+
74355+ if (copy_role_allowed_ip(rtmp, ruserip))
74356+ return -EFAULT;
74357+
74358+ ruserip = rtmp->prev;
74359+
74360+ if (!rlast) {
74361+ rtmp->prev = NULL;
74362+ rolep->allowed_ips = rtmp;
74363+ } else {
74364+ rlast->next = rtmp;
74365+ rtmp->prev = rlast;
74366+ }
74367+
74368+ if (!ruserip)
74369+ rtmp->next = NULL;
74370+ }
74371+
74372+ return 0;
74373+}
74374+
74375+static int
74376+copy_user_transitions(struct acl_role_label *rolep)
74377+{
74378+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
74379+ int error;
74380+
74381+ rusertp = rolep->transitions;
74382+
74383+ while (rusertp) {
74384+ rlast = rtmp;
74385+
74386+ if ((rtmp = (struct role_transition *)
74387+ acl_alloc(sizeof (struct role_transition))) == NULL)
74388+ return -ENOMEM;
74389+
74390+ if (copy_role_transition(rtmp, rusertp))
74391+ return -EFAULT;
74392+
74393+ rusertp = rtmp->prev;
74394+
74395+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
74396+ if (error)
74397+ return error;
74398+
74399+ if (!rlast) {
74400+ rtmp->prev = NULL;
74401+ rolep->transitions = rtmp;
74402+ } else {
74403+ rlast->next = rtmp;
74404+ rtmp->prev = rlast;
74405+ }
74406+
74407+ if (!rusertp)
74408+ rtmp->next = NULL;
74409+ }
74410+
74411+ return 0;
74412+}
74413+
74414+static __u32 count_user_objs(const struct acl_object_label __user *userp)
74415+{
74416+ struct acl_object_label o_tmp;
74417+ __u32 num = 0;
74418+
74419+ while (userp) {
74420+ if (copy_acl_object_label(&o_tmp, userp))
74421+ break;
74422+
74423+ userp = o_tmp.prev;
74424+ num++;
74425+ }
74426+
74427+ return num;
74428+}
74429+
74430+static struct acl_subject_label *
74431+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
74432+{
74433+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
74434+ __u32 num_objs;
74435+ struct acl_ip_label **i_tmp, *i_utmp2;
74436+ struct gr_hash_struct ghash;
74437+ struct subject_map *subjmap;
74438+ unsigned int i_num;
74439+ int err;
74440+
74441+ if (already_copied != NULL)
74442+ *already_copied = 0;
74443+
74444+ s_tmp = lookup_subject_map(userp);
74445+
74446+	/* we've already copied this subject into the kernel; just return
74447+	   the existing reference rather than copying it again
74448+	*/
74449+ if (s_tmp) {
74450+ if (already_copied != NULL)
74451+ *already_copied = 1;
74452+ return(s_tmp);
74453+ }
74454+
74455+ if ((s_tmp = (struct acl_subject_label *)
74456+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
74457+ return ERR_PTR(-ENOMEM);
74458+
74459+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
74460+ if (subjmap == NULL)
74461+ return ERR_PTR(-ENOMEM);
74462+
74463+ subjmap->user = userp;
74464+ subjmap->kernel = s_tmp;
74465+ insert_subj_map_entry(subjmap);
74466+
74467+ if (copy_acl_subject_label(s_tmp, userp))
74468+ return ERR_PTR(-EFAULT);
74469+
74470+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
74471+ if (err)
74472+ return ERR_PTR(err);
74473+
74474+ if (!strcmp(s_tmp->filename, "/"))
74475+ role->root_label = s_tmp;
74476+
74477+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
74478+ return ERR_PTR(-EFAULT);
74479+
74480+ /* copy user and group transition tables */
74481+
74482+ if (s_tmp->user_trans_num) {
74483+ uid_t *uidlist;
74484+
74485+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
74486+ if (uidlist == NULL)
74487+ return ERR_PTR(-ENOMEM);
74488+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
74489+ return ERR_PTR(-EFAULT);
74490+
74491+ s_tmp->user_transitions = uidlist;
74492+ }
74493+
74494+ if (s_tmp->group_trans_num) {
74495+ gid_t *gidlist;
74496+
74497+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
74498+ if (gidlist == NULL)
74499+ return ERR_PTR(-ENOMEM);
74500+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
74501+ return ERR_PTR(-EFAULT);
74502+
74503+ s_tmp->group_transitions = gidlist;
74504+ }
74505+
74506+ /* set up object hash table */
74507+ num_objs = count_user_objs(ghash.first);
74508+
74509+ s_tmp->obj_hash_size = num_objs;
74510+ s_tmp->obj_hash =
74511+ (struct acl_object_label **)
74512+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
74513+
74514+ if (!s_tmp->obj_hash)
74515+ return ERR_PTR(-ENOMEM);
74516+
74517+ memset(s_tmp->obj_hash, 0,
74518+ s_tmp->obj_hash_size *
74519+ sizeof (struct acl_object_label *));
74520+
74521+ /* add in objects */
74522+ err = copy_user_objs(ghash.first, s_tmp, role);
74523+
74524+ if (err)
74525+ return ERR_PTR(err);
74526+
74527+ /* set pointer for parent subject */
74528+ if (s_tmp->parent_subject) {
74529+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
74530+
74531+ if (IS_ERR(s_tmp2))
74532+ return s_tmp2;
74533+
74534+ s_tmp->parent_subject = s_tmp2;
74535+ }
74536+
74537+ /* add in ip acls */
74538+
74539+ if (!s_tmp->ip_num) {
74540+ s_tmp->ips = NULL;
74541+ goto insert;
74542+ }
74543+
74544+ i_tmp =
74545+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
74546+ sizeof (struct acl_ip_label *));
74547+
74548+ if (!i_tmp)
74549+ return ERR_PTR(-ENOMEM);
74550+
74551+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
74552+ *(i_tmp + i_num) =
74553+ (struct acl_ip_label *)
74554+ acl_alloc(sizeof (struct acl_ip_label));
74555+ if (!*(i_tmp + i_num))
74556+ return ERR_PTR(-ENOMEM);
74557+
74558+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
74559+ return ERR_PTR(-EFAULT);
74560+
74561+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
74562+ return ERR_PTR(-EFAULT);
74563+
74564+ if ((*(i_tmp + i_num))->iface == NULL)
74565+ continue;
74566+
74567+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
74568+ if (err)
74569+ return ERR_PTR(err);
74570+ }
74571+
74572+ s_tmp->ips = i_tmp;
74573+
74574+insert:
74575+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
74576+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
74577+ return ERR_PTR(-ENOMEM);
74578+
74579+ return s_tmp;
74580+}
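
Note the ordering in do_copy_user_subj: the user->kernel mapping is inserted into the subject map before the subject's contents — in particular parent_subject — are copied, so subjects that reference one another (or themselves) terminate instead of recursing forever. A userland sketch of that memoized deep copy, using a linear-scan memo table in place of the hash above; all names are hypothetical:

/* memo_copy_demo.c -- memoized deep copy; the memo is recorded BEFORE
 * recursing so cyclic references terminate. */
#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *parent; };

struct memo { const struct node *src; struct node *dst; };
static struct memo table[16];
static int table_used;

static struct node *memo_lookup(const struct node *src)
{
	int i;

	for (i = 0; i < table_used; i++)
		if (table[i].src == src)
			return table[i].dst;
	return NULL;
}

static struct node *deep_copy(const struct node *src)
{
	struct node *dst = memo_lookup(src);

	if (dst)			/* already copied: reuse the one copy */
		return dst;
	dst = calloc(1, sizeof(*dst));
	if (dst == NULL)
		exit(1);
	/* record the mapping before recursing so cycles terminate */
	table[table_used].src = src;
	table[table_used].dst = dst;
	table_used++;
	dst->v = src->v;
	if (src->parent)
		dst->parent = deep_copy(src->parent);
	return dst;
}

int main(void)
{
	struct node a = { 1, NULL };
	struct node *c;

	a.parent = &a;			/* self-cycle: must not loop forever */
	c = deep_copy(&a);
	printf("%d %s\n", c->v, c->parent == c ? "cycle preserved" : "broken");
	return 0;
}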
74581+
74582+static int
74583+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
74584+{
74585+ struct acl_subject_label s_pre;
74586+ struct acl_subject_label * ret;
74587+ int err;
74588+
74589+ while (userp) {
74590+ if (copy_acl_subject_label(&s_pre, userp))
74591+ return -EFAULT;
74592+
74593+ ret = do_copy_user_subj(userp, role, NULL);
74594+
74595+ err = PTR_ERR(ret);
74596+ if (IS_ERR(ret))
74597+ return err;
74598+
74599+ insert_acl_subj_label(ret, role);
74600+
74601+ userp = s_pre.prev;
74602+ }
74603+
74604+ return 0;
74605+}
74606+
74607+static int
74608+copy_user_acl(struct gr_arg *arg)
74609+{
74610+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
74611+ struct acl_subject_label *subj_list;
74612+ struct sprole_pw *sptmp;
74613+ struct gr_hash_struct *ghash;
74614+ uid_t *domainlist;
74615+ unsigned int r_num;
74616+ int err = 0;
74617+ __u16 i;
74618+ __u32 num_subjs;
74619+
74620+ /* we need a default and kernel role */
74621+ if (arg->role_db.num_roles < 2)
74622+ return -EINVAL;
74623+
74624+ /* copy special role authentication info from userspace */
74625+
74626+ polstate->num_sprole_pws = arg->num_sprole_pws;
74627+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
74628+
74629+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
74630+ return -ENOMEM;
74631+
74632+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74633+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
74634+ if (!sptmp)
74635+ return -ENOMEM;
74636+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
74637+ return -EFAULT;
74638+
74639+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
74640+ if (err)
74641+ return err;
74642+
74643+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74644+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
74645+#endif
74646+
74647+ polstate->acl_special_roles[i] = sptmp;
74648+ }
74649+
74650+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
74651+
74652+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
74653+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
74654+
74655+ if (!r_tmp)
74656+ return -ENOMEM;
74657+
74658+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
74659+ return -EFAULT;
74660+
74661+ if (copy_acl_role_label(r_tmp, r_utmp2))
74662+ return -EFAULT;
74663+
74664+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
74665+ if (err)
74666+ return err;
74667+
74668+ if (!strcmp(r_tmp->rolename, "default")
74669+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
74670+ polstate->default_role = r_tmp;
74671+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
74672+ polstate->kernel_role = r_tmp;
74673+ }
74674+
74675+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
74676+ return -ENOMEM;
74677+
74678+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
74679+ return -EFAULT;
74680+
74681+ r_tmp->hash = ghash;
74682+
74683+ num_subjs = count_user_subjs(r_tmp->hash->first);
74684+
74685+ r_tmp->subj_hash_size = num_subjs;
74686+ r_tmp->subj_hash =
74687+ (struct acl_subject_label **)
74688+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
74689+
74690+ if (!r_tmp->subj_hash)
74691+ return -ENOMEM;
74692+
74693+ err = copy_user_allowedips(r_tmp);
74694+ if (err)
74695+ return err;
74696+
74697+ /* copy domain info */
74698+ if (r_tmp->domain_children != NULL) {
74699+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
74700+ if (domainlist == NULL)
74701+ return -ENOMEM;
74702+
74703+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
74704+ return -EFAULT;
74705+
74706+ r_tmp->domain_children = domainlist;
74707+ }
74708+
74709+ err = copy_user_transitions(r_tmp);
74710+ if (err)
74711+ return err;
74712+
74713+ memset(r_tmp->subj_hash, 0,
74714+ r_tmp->subj_hash_size *
74715+ sizeof (struct acl_subject_label *));
74716+
74717+ /* acquire the list of subjects, then NULL out
74718+ the list prior to parsing the subjects for this role,
74719+ as during this parsing the list is replaced with a list
74720+ of *nested* subjects for the role
74721+ */
74722+ subj_list = r_tmp->hash->first;
74723+
74724+ /* set nested subject list to null */
74725+ r_tmp->hash->first = NULL;
74726+
74727+ err = copy_user_subjs(subj_list, r_tmp);
74728+
74729+ if (err)
74730+ return err;
74731+
74732+ insert_acl_role_label(r_tmp);
74733+ }
74734+
74735+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
74736+ return -EINVAL;
74737+
74738+ return err;
74739+}
74740+
74741+static int gracl_reload_apply_policies(void *reload)
74742+{
74743+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
74744+ struct task_struct *task, *task2;
74745+ struct acl_role_label *role, *rtmp;
74746+ struct acl_subject_label *subj;
74747+ const struct cred *cred;
74748+ int role_applied;
74749+ int ret = 0;
74750+
74751+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
74752+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
74753+
74754+ /* first make sure we'll be able to apply the new policy cleanly */
74755+ do_each_thread(task2, task) {
74756+ if (task->exec_file == NULL)
74757+ continue;
74758+ role_applied = 0;
74759+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74760+ /* preserve special roles */
74761+ FOR_EACH_ROLE_START(role)
74762+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74763+ rtmp = task->role;
74764+ task->role = role;
74765+ role_applied = 1;
74766+ break;
74767+ }
74768+ FOR_EACH_ROLE_END(role)
74769+ }
74770+ if (!role_applied) {
74771+ cred = __task_cred(task);
74772+ rtmp = task->role;
74773+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74774+ }
74775+		/* this handles non-nested inherited subjects; nested subjects will still
74776+		   be dropped currently */
74777+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74778+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
74779+ /* change the role back so that we've made no modifications to the policy */
74780+ task->role = rtmp;
74781+
74782+ if (subj == NULL || task->tmpacl == NULL) {
74783+ ret = -EINVAL;
74784+ goto out;
74785+ }
74786+ } while_each_thread(task2, task);
74787+
74788+ /* now actually apply the policy */
74789+
74790+ do_each_thread(task2, task) {
74791+ if (task->exec_file) {
74792+ role_applied = 0;
74793+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74794+ /* preserve special roles */
74795+ FOR_EACH_ROLE_START(role)
74796+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74797+ task->role = role;
74798+ role_applied = 1;
74799+ break;
74800+ }
74801+ FOR_EACH_ROLE_END(role)
74802+ }
74803+ if (!role_applied) {
74804+ cred = __task_cred(task);
74805+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74806+ }
74807+			/* this handles non-nested inherited subjects; nested subjects will still
74808+			   be dropped currently */
74809+ if (!reload_state->oldmode && task->inherited)
74810+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74811+ else {
74812+ /* looked up and tagged to the task previously */
74813+ subj = task->tmpacl;
74814+ }
74815+ /* subj will be non-null */
74816+ __gr_apply_subject_to_task(polstate, task, subj);
74817+ if (reload_state->oldmode) {
74818+ task->acl_role_id = 0;
74819+ task->acl_sp_role = 0;
74820+ task->inherited = 0;
74821+ }
74822+ } else {
74823+ // it's a kernel process
74824+ task->role = polstate->kernel_role;
74825+ task->acl = polstate->kernel_role->root_label;
74826+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
74827+ task->acl->mode &= ~GR_PROCFIND;
74828+#endif
74829+ }
74830+ } while_each_thread(task2, task);
74831+
74832+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
74833+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
74834+
74835+out:
74836+
74837+ return ret;
74838+}
74839+
74840+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
74841+{
74842+ struct gr_reload_state new_reload_state = { };
74843+ int err;
74844+
74845+ new_reload_state.oldpolicy_ptr = polstate;
74846+ new_reload_state.oldalloc_ptr = current_alloc_state;
74847+ new_reload_state.oldmode = oldmode;
74848+
74849+ current_alloc_state = &new_reload_state.newalloc;
74850+ polstate = &new_reload_state.newpolicy;
74851+
74852+ /* everything relevant is now saved off, copy in the new policy */
74853+ if (init_variables(args, true)) {
74854+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
74855+ err = -ENOMEM;
74856+ goto error;
74857+ }
74858+
74859+ err = copy_user_acl(args);
74860+ free_init_variables();
74861+ if (err)
74862+ goto error;
74863+	/* the new policy is copied in, with the old policy available via saved_state.
74864+	   First go through applying roles, making sure to preserve special roles;
74865+	   then apply new subjects, making sure to preserve inherited and nested subjects,
74866+	   though currently only inherited subjects will be preserved
74867+	*/
74868+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
74869+ if (err)
74870+ goto error;
74871+
74872+ /* we've now applied the new policy, so restore the old policy state to free it */
74873+ polstate = &new_reload_state.oldpolicy;
74874+ current_alloc_state = &new_reload_state.oldalloc;
74875+ free_variables(true);
74876+
74877+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
74878+ to running_polstate/current_alloc_state inside stop_machine
74879+ */
74880+ err = 0;
74881+ goto out;
74882+error:
74883+	/* on error loading the new policy, we'll just keep the previous
74884+	   policy set around
74885+	*/
74886+ free_variables(true);
74887+
74888+ /* doesn't affect runtime, but maintains consistent state */
74889+out:
74890+ polstate = new_reload_state.oldpolicy_ptr;
74891+ current_alloc_state = new_reload_state.oldalloc_ptr;
74892+
74893+ return err;
74894+}
74895+
74896+static int
74897+gracl_init(struct gr_arg *args)
74898+{
74899+ int error = 0;
74900+
74901+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
74902+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
74903+
74904+ if (init_variables(args, false)) {
74905+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
74906+ error = -ENOMEM;
74907+ goto out;
74908+ }
74909+
74910+ error = copy_user_acl(args);
74911+ free_init_variables();
74912+ if (error)
74913+ goto out;
74914+
74915+ error = gr_set_acls(0);
74916+ if (error)
74917+ goto out;
74918+
74919+ gr_enable_rbac_system();
74920+
74921+ return 0;
74922+
74923+out:
74924+ free_variables(false);
74925+ return error;
74926+}
74927+
74928+static int
74929+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
74930+ unsigned char **sum)
74931+{
74932+ struct acl_role_label *r;
74933+ struct role_allowed_ip *ipp;
74934+ struct role_transition *trans;
74935+ unsigned int i;
74936+ int found = 0;
74937+ u32 curr_ip = current->signal->curr_ip;
74938+
74939+ current->signal->saved_ip = curr_ip;
74940+
74941+ /* check transition table */
74942+
74943+ for (trans = current->role->transitions; trans; trans = trans->next) {
74944+ if (!strcmp(rolename, trans->rolename)) {
74945+ found = 1;
74946+ break;
74947+ }
74948+ }
74949+
74950+ if (!found)
74951+ return 0;
74952+
74953+ /* handle special roles that do not require authentication
74954+ and check ip */
74955+
74956+ FOR_EACH_ROLE_START(r)
74957+ if (!strcmp(rolename, r->rolename) &&
74958+ (r->roletype & GR_ROLE_SPECIAL)) {
74959+ found = 0;
74960+ if (r->allowed_ips != NULL) {
74961+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
74962+ if ((ntohl(curr_ip) & ipp->netmask) ==
74963+ (ntohl(ipp->addr) & ipp->netmask))
74964+ found = 1;
74965+ }
74966+ } else
74967+ found = 2;
74968+ if (!found)
74969+ return 0;
74970+
74971+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
74972+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
74973+ *salt = NULL;
74974+ *sum = NULL;
74975+ return 1;
74976+ }
74977+ }
74978+ FOR_EACH_ROLE_END(r)
74979+
74980+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74981+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
74982+ *salt = polstate->acl_special_roles[i]->salt;
74983+ *sum = polstate->acl_special_roles[i]->sum;
74984+ return 1;
74985+ }
74986+ }
74987+
74988+ return 0;
74989+}
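
The allowed-IP test in lookup_special_role_auth is plain prefix masking: a client matches an entry when both addresses agree on every bit covered by the netmask (found == 2 marks a role with no IP restriction at all). A sketch, assuming as the code above does that addresses arrive in network byte order and the mask in host order:

/* ipmask_demo.c -- sketch of the allowed-IP masking test above. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static int ip_allowed(uint32_t curr_ip, uint32_t addr, uint32_t netmask)
{
	/* addresses in network byte order, netmask in host byte order */
	return (ntohl(curr_ip) & netmask) == (ntohl(addr) & netmask);
}

int main(void)
{
	uint32_t client, net;

	inet_pton(AF_INET, "192.168.1.42", &client);
	inet_pton(AF_INET, "192.168.1.0", &net);
	printf("%d\n", ip_allowed(client, net, 0xffffff00));	/* 1: inside the /24 */
	printf("%d\n", ip_allowed(client, net, 0xffffffff));	/* 0: exact match only */
	return 0;
}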
74990+
74991+int gr_check_secure_terminal(struct task_struct *task)
74992+{
74993+ struct task_struct *p, *p2, *p3;
74994+ struct files_struct *files;
74995+ struct fdtable *fdt;
74996+ struct file *our_file = NULL, *file;
74997+ int i;
74998+
74999+ if (task->signal->tty == NULL)
75000+ return 1;
75001+
75002+ files = get_files_struct(task);
75003+ if (files != NULL) {
75004+ rcu_read_lock();
75005+ fdt = files_fdtable(files);
75006+ for (i=0; i < fdt->max_fds; i++) {
75007+ file = fcheck_files(files, i);
75008+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
75009+ get_file(file);
75010+ our_file = file;
75011+ }
75012+ }
75013+ rcu_read_unlock();
75014+ put_files_struct(files);
75015+ }
75016+
75017+ if (our_file == NULL)
75018+ return 1;
75019+
75020+ read_lock(&tasklist_lock);
75021+ do_each_thread(p2, p) {
75022+ files = get_files_struct(p);
75023+ if (files == NULL ||
75024+ (p->signal && p->signal->tty == task->signal->tty)) {
75025+ if (files != NULL)
75026+ put_files_struct(files);
75027+ continue;
75028+ }
75029+ rcu_read_lock();
75030+ fdt = files_fdtable(files);
75031+ for (i=0; i < fdt->max_fds; i++) {
75032+ file = fcheck_files(files, i);
75033+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
75034+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
75035+ p3 = task;
75036+ while (task_pid_nr(p3) > 0) {
75037+ if (p3 == p)
75038+ break;
75039+ p3 = p3->real_parent;
75040+ }
75041+ if (p3 == p)
75042+ break;
75043+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
75044+ gr_handle_alertkill(p);
75045+ rcu_read_unlock();
75046+ put_files_struct(files);
75047+ read_unlock(&tasklist_lock);
75048+ fput(our_file);
75049+ return 0;
75050+ }
75051+ }
75052+ rcu_read_unlock();
75053+ put_files_struct(files);
75054+ } while_each_thread(p2, p);
75055+ read_unlock(&tasklist_lock);
75056+
75057+ fput(our_file);
75058+ return 1;
75059+}
75060+
75061+ssize_t
75062+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
75063+{
75064+ struct gr_arg_wrapper uwrap;
75065+ unsigned char *sprole_salt = NULL;
75066+ unsigned char *sprole_sum = NULL;
75067+ int error = 0;
75068+ int error2 = 0;
75069+ size_t req_count = 0;
75070+ unsigned char oldmode = 0;
75071+
75072+ mutex_lock(&gr_dev_mutex);
75073+
75074+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
75075+ error = -EPERM;
75076+ goto out;
75077+ }
75078+
75079+#ifdef CONFIG_COMPAT
75080+ pax_open_kernel();
75081+ if (is_compat_task()) {
75082+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
75083+ copy_gr_arg = &copy_gr_arg_compat;
75084+ copy_acl_object_label = &copy_acl_object_label_compat;
75085+ copy_acl_subject_label = &copy_acl_subject_label_compat;
75086+ copy_acl_role_label = &copy_acl_role_label_compat;
75087+ copy_acl_ip_label = &copy_acl_ip_label_compat;
75088+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
75089+ copy_role_transition = &copy_role_transition_compat;
75090+ copy_sprole_pw = &copy_sprole_pw_compat;
75091+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
75092+ copy_pointer_from_array = &copy_pointer_from_array_compat;
75093+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
75094+ } else {
75095+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
75096+ copy_gr_arg = &copy_gr_arg_normal;
75097+ copy_acl_object_label = &copy_acl_object_label_normal;
75098+ copy_acl_subject_label = &copy_acl_subject_label_normal;
75099+ copy_acl_role_label = &copy_acl_role_label_normal;
75100+ copy_acl_ip_label = &copy_acl_ip_label_normal;
75101+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
75102+ copy_role_transition = &copy_role_transition_normal;
75103+ copy_sprole_pw = &copy_sprole_pw_normal;
75104+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
75105+ copy_pointer_from_array = &copy_pointer_from_array_normal;
75106+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
75107+ }
75108+ pax_close_kernel();
75109+#endif
75110+
75111+ req_count = get_gr_arg_wrapper_size();
75112+
75113+ if (count != req_count) {
75114+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
75115+ error = -EINVAL;
75116+ goto out;
75117+ }
75118+
75119+
75120+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
75121+ gr_auth_expires = 0;
75122+ gr_auth_attempts = 0;
75123+ }
75124+
75125+ error = copy_gr_arg_wrapper(buf, &uwrap);
75126+ if (error)
75127+ goto out;
75128+
75129+ error = copy_gr_arg(uwrap.arg, gr_usermode);
75130+ if (error)
75131+ goto out;
75132+
75133+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75134+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75135+ time_after(gr_auth_expires, get_seconds())) {
75136+ error = -EBUSY;
75137+ goto out;
75138+ }
75139+
75140+	/* if a non-root user is trying to do anything other than use a
75141+	   special role, do not attempt authentication and do not count the
75142+	   attempt toward authentication lockout
75143+	*/
75144+
75145+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
75146+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75147+ gr_is_global_nonroot(current_uid())) {
75148+ error = -EPERM;
75149+ goto out;
75150+ }
75151+
75152+ /* ensure pw and special role name are null terminated */
75153+
75154+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
75155+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
75156+
75157+	/* Okay.
75158+	 * We now have enough of the argument structure (we have yet
75159+	 * to copy_from_user the tables themselves). Copy the tables
75160+	 * only if we need them, i.e. for loading operations. */
75161+
75162+ switch (gr_usermode->mode) {
75163+ case GR_STATUS:
75164+ if (gr_acl_is_enabled()) {
75165+ error = 1;
75166+ if (!gr_check_secure_terminal(current))
75167+ error = 3;
75168+ } else
75169+ error = 2;
75170+ goto out;
75171+ case GR_SHUTDOWN:
75172+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75173+ stop_machine(gr_rbac_disable, NULL, NULL);
75174+ free_variables(false);
75175+ memset(gr_usermode, 0, sizeof(struct gr_arg));
75176+ memset(gr_system_salt, 0, GR_SALT_LEN);
75177+ memset(gr_system_sum, 0, GR_SHA_LEN);
75178+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
75179+ } else if (gr_acl_is_enabled()) {
75180+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
75181+ error = -EPERM;
75182+ } else {
75183+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
75184+ error = -EAGAIN;
75185+ }
75186+ break;
75187+ case GR_ENABLE:
75188+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
75189+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
75190+ else {
75191+ if (gr_acl_is_enabled())
75192+ error = -EAGAIN;
75193+ else
75194+ error = error2;
75195+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
75196+ }
75197+ break;
75198+ case GR_OLDRELOAD:
75199+		oldmode = 1;	/* fall through to GR_RELOAD */
75200+ case GR_RELOAD:
75201+ if (!gr_acl_is_enabled()) {
75202+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
75203+ error = -EAGAIN;
75204+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75205+ error2 = gracl_reload(gr_usermode, oldmode);
75206+ if (!error2)
75207+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
75208+ else {
75209+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75210+ error = error2;
75211+ }
75212+ } else {
75213+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75214+ error = -EPERM;
75215+ }
75216+ break;
75217+ case GR_SEGVMOD:
75218+ if (unlikely(!gr_acl_is_enabled())) {
75219+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
75220+ error = -EAGAIN;
75221+ break;
75222+ }
75223+
75224+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75225+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
75226+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
75227+ struct acl_subject_label *segvacl;
75228+ segvacl =
75229+ lookup_acl_subj_label(gr_usermode->segv_inode,
75230+ gr_usermode->segv_device,
75231+ current->role);
75232+ if (segvacl) {
75233+ segvacl->crashes = 0;
75234+ segvacl->expires = 0;
75235+ }
75236+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
75237+ gr_remove_uid(gr_usermode->segv_uid);
75238+ }
75239+ } else {
75240+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
75241+ error = -EPERM;
75242+ }
75243+ break;
75244+ case GR_SPROLE:
75245+ case GR_SPROLEPAM:
75246+ if (unlikely(!gr_acl_is_enabled())) {
75247+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
75248+ error = -EAGAIN;
75249+ break;
75250+ }
75251+
75252+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
75253+ current->role->expires = 0;
75254+ current->role->auth_attempts = 0;
75255+ }
75256+
75257+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75258+ time_after(current->role->expires, get_seconds())) {
75259+ error = -EBUSY;
75260+ goto out;
75261+ }
75262+
75263+ if (lookup_special_role_auth
75264+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
75265+ && ((!sprole_salt && !sprole_sum)
75266+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
75267+ char *p = "";
75268+ assign_special_role(gr_usermode->sp_role);
75269+ read_lock(&tasklist_lock);
75270+ if (current->real_parent)
75271+ p = current->real_parent->role->rolename;
75272+ read_unlock(&tasklist_lock);
75273+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
75274+ p, acl_sp_role_value);
75275+ } else {
75276+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
75277+ error = -EPERM;
75278+ if(!(current->role->auth_attempts++))
75279+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75280+
75281+ goto out;
75282+ }
75283+ break;
75284+ case GR_UNSPROLE:
75285+ if (unlikely(!gr_acl_is_enabled())) {
75286+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
75287+ error = -EAGAIN;
75288+ break;
75289+ }
75290+
75291+ if (current->role->roletype & GR_ROLE_SPECIAL) {
75292+ char *p = "";
75293+ int i = 0;
75294+
75295+ read_lock(&tasklist_lock);
75296+ if (current->real_parent) {
75297+ p = current->real_parent->role->rolename;
75298+ i = current->real_parent->acl_role_id;
75299+ }
75300+ read_unlock(&tasklist_lock);
75301+
75302+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
75303+ gr_set_acls(1);
75304+ } else {
75305+ error = -EPERM;
75306+ goto out;
75307+ }
75308+ break;
75309+ default:
75310+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
75311+ error = -EINVAL;
75312+ break;
75313+ }
75314+
75315+ if (error != -EPERM)
75316+ goto out;
75317+
75318+ if(!(gr_auth_attempts++))
75319+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75320+
75321+ out:
75322+ mutex_unlock(&gr_dev_mutex);
75323+
75324+ if (!error)
75325+ error = req_count;
75326+
75327+ return error;
75328+}
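
Under CONFIG_COMPAT, write_grsec_handler re-points the whole family of copy_* function pointers on each request, so a single code path serves both native and 32-bit userspace structure layouts. A minimal sketch of that dispatch, with one hypothetical handler pair and invented layouts:

/* compat_dispatch_demo.c -- per-request handler selection; the struct
 * layouts and handler names here are invented for illustration. */
#include <stdio.h>

struct arg { unsigned int mode; };		/* "native" layout */
struct arg32 { unsigned short mode; };		/* pretend 32-bit layout */

static int copy_arg_native(struct arg *dst, const void *src)
{
	*dst = *(const struct arg *)src;
	return 0;
}

static int copy_arg_compat(struct arg *dst, const void *src)
{
	dst->mode = ((const struct arg32 *)src)->mode;	/* widen the field */
	return 0;
}

static int (*copy_arg)(struct arg *, const void *);

static void select_handlers(int is_compat)
{
	copy_arg = is_compat ? copy_arg_compat : copy_arg_native;
}

int main(void)
{
	struct arg32 user32 = { 7 };
	struct arg karg;

	select_handlers(1);			/* caller detected as compat */
	copy_arg(&karg, &user32);
	printf("%u\n", karg.mode);		/* prints: 7 */
	return 0;
}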
75329+
75330+int
75331+gr_set_acls(const int type)
75332+{
75333+ struct task_struct *task, *task2;
75334+ struct acl_role_label *role = current->role;
75335+ struct acl_subject_label *subj;
75336+ __u16 acl_role_id = current->acl_role_id;
75337+ const struct cred *cred;
75338+ int ret;
75339+
75340+ rcu_read_lock();
75341+ read_lock(&tasklist_lock);
75342+ read_lock(&grsec_exec_file_lock);
75343+ do_each_thread(task2, task) {
75344+		/* check to see if we're called from the exit handler;
75345+		   if so, only replace ACLs that have inherited the admin
75346+		   ACL */
75347+
75348+ if (type && (task->role != role ||
75349+ task->acl_role_id != acl_role_id))
75350+ continue;
75351+
75352+ task->acl_role_id = 0;
75353+ task->acl_sp_role = 0;
75354+ task->inherited = 0;
75355+
75356+ if (task->exec_file) {
75357+ cred = __task_cred(task);
75358+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75359+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
75360+ if (subj == NULL) {
75361+ ret = -EINVAL;
75362+ read_unlock(&grsec_exec_file_lock);
75363+ read_unlock(&tasklist_lock);
75364+ rcu_read_unlock();
75365+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
75366+ return ret;
75367+ }
75368+ __gr_apply_subject_to_task(polstate, task, subj);
75369+ } else {
75370+ // it's a kernel process
75371+ task->role = polstate->kernel_role;
75372+ task->acl = polstate->kernel_role->root_label;
75373+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75374+ task->acl->mode &= ~GR_PROCFIND;
75375+#endif
75376+ }
75377+ } while_each_thread(task2, task);
75378+ read_unlock(&grsec_exec_file_lock);
75379+ read_unlock(&tasklist_lock);
75380+ rcu_read_unlock();
75381+
75382+ return 0;
75383+}
75384diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
75385new file mode 100644
75386index 0000000..39645c9
75387--- /dev/null
75388+++ b/grsecurity/gracl_res.c
75389@@ -0,0 +1,68 @@
75390+#include <linux/kernel.h>
75391+#include <linux/sched.h>
75392+#include <linux/gracl.h>
75393+#include <linux/grinternal.h>
75394+
75395+static const char *restab_log[] = {
75396+ [RLIMIT_CPU] = "RLIMIT_CPU",
75397+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
75398+ [RLIMIT_DATA] = "RLIMIT_DATA",
75399+ [RLIMIT_STACK] = "RLIMIT_STACK",
75400+ [RLIMIT_CORE] = "RLIMIT_CORE",
75401+ [RLIMIT_RSS] = "RLIMIT_RSS",
75402+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
75403+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
75404+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
75405+ [RLIMIT_AS] = "RLIMIT_AS",
75406+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
75407+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
75408+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
75409+ [RLIMIT_NICE] = "RLIMIT_NICE",
75410+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
75411+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
75412+ [GR_CRASH_RES] = "RLIMIT_CRASH"
75413+};
75414+
75415+void
75416+gr_log_resource(const struct task_struct *task,
75417+ const int res, const unsigned long wanted, const int gt)
75418+{
75419+ const struct cred *cred;
75420+ unsigned long rlim;
75421+
75422+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
75423+ return;
75424+
75425+	// not yet a supported resource
75426+ if (unlikely(!restab_log[res]))
75427+ return;
75428+
75429+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
75430+ rlim = task_rlimit_max(task, res);
75431+ else
75432+ rlim = task_rlimit(task, res);
75433+
75434+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
75435+ return;
75436+
75437+ rcu_read_lock();
75438+ cred = __task_cred(task);
75439+
75440+ if (res == RLIMIT_NPROC &&
75441+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
75442+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
75443+ goto out_rcu_unlock;
75444+ else if (res == RLIMIT_MEMLOCK &&
75445+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
75446+ goto out_rcu_unlock;
75447+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
75448+ goto out_rcu_unlock;
75449+ rcu_read_unlock();
75450+
75451+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
75452+
75453+ return;
75454+out_rcu_unlock:
75455+ rcu_read_unlock();
75456+ return;
75457+}
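
gr_log_resource stays silent until a request actually exceeds the limit; the gt flag selects whether the caller compares with <= (log only strictly past the limit) or < (log on reaching it), and RLIM_INFINITY never logs. The predicate in isolation, with a local stand-in for the kernel constant:

/* rlim_check_demo.c -- the threshold test from gr_log_resource, isolated. */
#include <stdio.h>

#define DEMO_RLIM_INFINITY (~0UL)	/* stand-in for the kernel's RLIM_INFINITY */

static int within_limit(unsigned long rlim, unsigned long wanted, int gt)
{
	return rlim == DEMO_RLIM_INFINITY ||
	       (gt ? wanted <= rlim : wanted < rlim);
}

int main(void)
{
	printf("%d\n", within_limit(100, 100, 1));	/* 1: only logs past the limit */
	printf("%d\n", within_limit(100, 100, 0));	/* 0: logs on reaching it */
	printf("%d\n", within_limit(DEMO_RLIM_INFINITY, 5, 0));	/* 1: never logs */
	return 0;
}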
75458diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
75459new file mode 100644
75460index 0000000..218b66b
75461--- /dev/null
75462+++ b/grsecurity/gracl_segv.c
75463@@ -0,0 +1,324 @@
75464+#include <linux/kernel.h>
75465+#include <linux/mm.h>
75466+#include <asm/uaccess.h>
75467+#include <asm/errno.h>
75468+#include <asm/mman.h>
75469+#include <net/sock.h>
75470+#include <linux/file.h>
75471+#include <linux/fs.h>
75472+#include <linux/net.h>
75473+#include <linux/in.h>
75474+#include <linux/slab.h>
75475+#include <linux/types.h>
75476+#include <linux/sched.h>
75477+#include <linux/timer.h>
75478+#include <linux/gracl.h>
75479+#include <linux/grsecurity.h>
75480+#include <linux/grinternal.h>
75481+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75482+#include <linux/magic.h>
75483+#include <linux/pagemap.h>
75484+#include "../fs/btrfs/async-thread.h"
75485+#include "../fs/btrfs/ctree.h"
75486+#include "../fs/btrfs/btrfs_inode.h"
75487+#endif
75488+
75489+static struct crash_uid *uid_set;
75490+static unsigned short uid_used;
75491+static DEFINE_SPINLOCK(gr_uid_lock);
75492+extern rwlock_t gr_inode_lock;
75493+extern struct acl_subject_label *
75494+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
75495+ struct acl_role_label *role);
75496+
75497+static inline dev_t __get_dev(const struct dentry *dentry)
75498+{
75499+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75500+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75501+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
75502+ else
75503+#endif
75504+ return dentry->d_sb->s_dev;
75505+}
75506+
75507+static inline u64 __get_ino(const struct dentry *dentry)
75508+{
75509+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75510+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75511+ return btrfs_ino(dentry->d_inode);
75512+ else
75513+#endif
75514+ return dentry->d_inode->i_ino;
75515+}
75516+
75517+int
75518+gr_init_uidset(void)
75519+{
75520+ uid_set =
75521+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
75522+ uid_used = 0;
75523+
75524+ return uid_set ? 1 : 0;
75525+}
75526+
75527+void
75528+gr_free_uidset(void)
75529+{
75530+ if (uid_set) {
75531+ struct crash_uid *tmpset;
75532+ spin_lock(&gr_uid_lock);
75533+ tmpset = uid_set;
75534+ uid_set = NULL;
75535+ uid_used = 0;
75536+ spin_unlock(&gr_uid_lock);
75537+ if (tmpset)
75538+ kfree(tmpset);
75539+ }
75540+
75541+ return;
75542+}
75543+
75544+int
75545+gr_find_uid(const uid_t uid)
75546+{
75547+ struct crash_uid *tmp = uid_set;
75548+ uid_t buid;
75549+ int low = 0, high = uid_used - 1, mid;
75550+
75551+ while (high >= low) {
75552+ mid = (low + high) >> 1;
75553+ buid = tmp[mid].uid;
75554+ if (buid == uid)
75555+ return mid;
75556+ if (buid > uid)
75557+ high = mid - 1;
75558+ if (buid < uid)
75559+ low = mid + 1;
75560+ }
75561+
75562+ return -1;
75563+}
75564+
75565+static __inline__ void
75566+gr_insertsort(void)
75567+{
75568+ unsigned short i, j;
75569+ struct crash_uid index;
75570+
75571+ for (i = 1; i < uid_used; i++) {
75572+ index = uid_set[i];
75573+ j = i;
75574+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
75575+ uid_set[j] = uid_set[j - 1];
75576+ j--;
75577+ }
75578+ uid_set[j] = index;
75579+ }
75580+
75581+ return;
75582+}
75583+
75584+static __inline__ void
75585+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
75586+{
75587+ int loc;
75588+ uid_t uid = GR_GLOBAL_UID(kuid);
75589+
75590+ if (uid_used == GR_UIDTABLE_MAX)
75591+ return;
75592+
75593+ loc = gr_find_uid(uid);
75594+
75595+ if (loc >= 0) {
75596+ uid_set[loc].expires = expires;
75597+ return;
75598+ }
75599+
75600+ uid_set[uid_used].uid = uid;
75601+ uid_set[uid_used].expires = expires;
75602+ uid_used++;
75603+
75604+ gr_insertsort();
75605+
75606+ return;
75607+}
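
gr_find_uid, gr_insertsort and gr_insert_uid together maintain a small sorted array of crashed UIDs: binary search for lookup, append-then-insertion-sort on insert, and an in-place expiry refresh when the UID is already present. A self-contained sketch (the GR_UIDTABLE_MAX overflow check and locking are omitted for brevity):

/* crash_uid_demo.c -- sorted-array bookkeeping as in the code above. */
#include <stdio.h>

struct crash_uid { unsigned int uid; unsigned long expires; };

static struct crash_uid uid_set[8];
static unsigned short uid_used;

static int find_uid(unsigned int uid)		/* mirrors gr_find_uid */
{
	int low = 0, high = uid_used - 1, mid;

	while (high >= low) {
		mid = (low + high) >> 1;
		if (uid_set[mid].uid == uid)
			return mid;
		if (uid_set[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert_uid(unsigned int uid, unsigned long expires)
{
	int loc = find_uid(uid);
	unsigned short j;

	if (loc >= 0) {				/* already present: refresh expiry */
		uid_set[loc].expires = expires;
		return;
	}
	/* append, then bubble the new entry into sorted position */
	uid_set[uid_used].uid = uid;
	uid_set[uid_used].expires = expires;
	for (j = uid_used++; j > 0 && uid_set[j - 1].uid > uid_set[j].uid; j--) {
		struct crash_uid tmp = uid_set[j];

		uid_set[j] = uid_set[j - 1];
		uid_set[j - 1] = tmp;
	}
}

int main(void)
{
	insert_uid(1000, 60);
	insert_uid(33, 60);
	insert_uid(1000, 120);			/* refresh, no duplicate */
	printf("%u %u (n=%u)\n", uid_set[0].uid, uid_set[1].uid, uid_used);
	return 0;				/* prints: 33 1000 (n=2) */
}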
75608+
75609+void
75610+gr_remove_uid(const unsigned short loc)
75611+{
75612+ unsigned short i;
75613+
75614+ for (i = loc + 1; i < uid_used; i++)
75615+ uid_set[i - 1] = uid_set[i];
75616+
75617+ uid_used--;
75618+
75619+ return;
75620+}
75621+
75622+int
75623+gr_check_crash_uid(const kuid_t kuid)
75624+{
75625+ int loc;
75626+ int ret = 0;
75627+ uid_t uid;
75628+
75629+ if (unlikely(!gr_acl_is_enabled()))
75630+ return 0;
75631+
75632+ uid = GR_GLOBAL_UID(kuid);
75633+
75634+ spin_lock(&gr_uid_lock);
75635+ loc = gr_find_uid(uid);
75636+
75637+ if (loc < 0)
75638+ goto out_unlock;
75639+
75640+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
75641+ gr_remove_uid(loc);
75642+ else
75643+ ret = 1;
75644+
75645+out_unlock:
75646+ spin_unlock(&gr_uid_lock);
75647+ return ret;
75648+}
75649+
75650+static __inline__ int
75651+proc_is_setxid(const struct cred *cred)
75652+{
75653+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
75654+ !uid_eq(cred->uid, cred->fsuid))
75655+ return 1;
75656+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
75657+ !gid_eq(cred->gid, cred->fsgid))
75658+ return 1;
75659+
75660+ return 0;
75661+}
75662+
75663+extern int gr_fake_force_sig(int sig, struct task_struct *t);
75664+
75665+void
75666+gr_handle_crash(struct task_struct *task, const int sig)
75667+{
75668+ struct acl_subject_label *curr;
75669+ struct task_struct *tsk, *tsk2;
75670+ const struct cred *cred;
75671+ const struct cred *cred2;
75672+
75673+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
75674+ return;
75675+
75676+ if (unlikely(!gr_acl_is_enabled()))
75677+ return;
75678+
75679+ curr = task->acl;
75680+
75681+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
75682+ return;
75683+
75684+ if (time_before_eq(curr->expires, get_seconds())) {
75685+ curr->expires = 0;
75686+ curr->crashes = 0;
75687+ }
75688+
75689+ curr->crashes++;
75690+
75691+ if (!curr->expires)
75692+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
75693+
75694+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75695+ time_after(curr->expires, get_seconds())) {
75696+ rcu_read_lock();
75697+ cred = __task_cred(task);
75698+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
75699+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75700+ spin_lock(&gr_uid_lock);
75701+ gr_insert_uid(cred->uid, curr->expires);
75702+ spin_unlock(&gr_uid_lock);
75703+ curr->expires = 0;
75704+ curr->crashes = 0;
75705+ read_lock(&tasklist_lock);
75706+ do_each_thread(tsk2, tsk) {
75707+ cred2 = __task_cred(tsk);
75708+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
75709+ gr_fake_force_sig(SIGKILL, tsk);
75710+ } while_each_thread(tsk2, tsk);
75711+ read_unlock(&tasklist_lock);
75712+ } else {
75713+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75714+ read_lock(&tasklist_lock);
75715+ read_lock(&grsec_exec_file_lock);
75716+ do_each_thread(tsk2, tsk) {
75717+ if (likely(tsk != task)) {
75718+ // if this thread has the same subject as the one that triggered
75719+ // RES_CRASH and it's the same binary, kill it
75720+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
75721+ gr_fake_force_sig(SIGKILL, tsk);
75722+ }
75723+ } while_each_thread(tsk2, tsk);
75724+ read_unlock(&grsec_exec_file_lock);
75725+ read_unlock(&tasklist_lock);
75726+ }
75727+ rcu_read_unlock();
75728+ }
75729+
75730+ return;
75731+}
75732+
75733+int
75734+gr_check_crash_exec(const struct file *filp)
75735+{
75736+ struct acl_subject_label *curr;
75737+ struct dentry *dentry;
75738+
75739+ if (unlikely(!gr_acl_is_enabled()))
75740+ return 0;
75741+
75742+ read_lock(&gr_inode_lock);
75743+ dentry = filp->f_path.dentry;
75744+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
75745+ current->role);
75746+ read_unlock(&gr_inode_lock);
75747+
75748+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
75749+ (!curr->crashes && !curr->expires))
75750+ return 0;
75751+
75752+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75753+ time_after(curr->expires, get_seconds()))
75754+ return 1;
75755+ else if (time_before_eq(curr->expires, get_seconds())) {
75756+ curr->crashes = 0;
75757+ curr->expires = 0;
75758+ }
75759+
75760+ return 0;
75761+}
75762+
75763+void
75764+gr_handle_alertkill(struct task_struct *task)
75765+{
75766+ struct acl_subject_label *curracl;
75767+ __u32 curr_ip;
75768+ struct task_struct *p, *p2;
75769+
75770+ if (unlikely(!gr_acl_is_enabled()))
75771+ return;
75772+
75773+ curracl = task->acl;
75774+ curr_ip = task->signal->curr_ip;
75775+
75776+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
75777+ read_lock(&tasklist_lock);
75778+ do_each_thread(p2, p) {
75779+ if (p->signal->curr_ip == curr_ip)
75780+ gr_fake_force_sig(SIGKILL, p);
75781+ } while_each_thread(p2, p);
75782+ read_unlock(&tasklist_lock);
75783+ } else if (curracl->mode & GR_KILLPROC)
75784+ gr_fake_force_sig(SIGKILL, task);
75785+
75786+ return;
75787+}
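The crash-uid table above is kept sorted by uid so that gr_find_uid() can binary-search it; gr_insert_uid() appends and then re-sorts, and since only the final element can be out of place, the insertion sort amounts to a single bubble-into-position pass. A minimal userspace sketch of the same search-then-insert discipline (the table size and every name here are illustrative stand-ins, not taken from the patch):

    #include <stdio.h>

    #define UIDTABLE_MAX 500        /* illustrative cap */

    struct crash_uid { unsigned int uid; unsigned long expires; };

    static struct crash_uid uid_set[UIDTABLE_MAX];
    static unsigned short uid_used;

    /* binary search over the sorted table; returns the index or -1 */
    static int find_uid(unsigned int uid)
    {
        int low = 0, high = (int)uid_used - 1;

        while (high >= low) {
            int mid = low + (high - low) / 2;

            if (uid_set[mid].uid == uid)
                return mid;
            if (uid_set[mid].uid > uid)
                high = mid - 1;
            else
                low = mid + 1;
        }
        return -1;
    }

    /* refresh an existing entry, or append and bubble into place */
    static void insert_uid(unsigned int uid, unsigned long expires)
    {
        int loc = find_uid(uid);
        unsigned short j;

        if (loc >= 0) {             /* already banned: extend the window */
            uid_set[loc].expires = expires;
            return;
        }
        if (uid_used == UIDTABLE_MAX)
            return;
        uid_set[uid_used].uid = uid;
        uid_set[uid_used].expires = expires;
        j = uid_used++;
        while (j > 0 && uid_set[j - 1].uid > uid_set[j].uid) {
            struct crash_uid tmp = uid_set[j];

            uid_set[j] = uid_set[j - 1];
            uid_set[j - 1] = tmp;
            j--;
        }
    }

    int main(void)
    {
        insert_uid(1000, 60);
        insert_uid(33, 60);
        insert_uid(500, 120);
        printf("index of uid 500: %d\n", find_uid(500)); /* prints 1 */
        return 0;
    }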
75788diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
75789new file mode 100644
75790index 0000000..6b0c9cc
75791--- /dev/null
75792+++ b/grsecurity/gracl_shm.c
75793@@ -0,0 +1,40 @@
75794+#include <linux/kernel.h>
75795+#include <linux/mm.h>
75796+#include <linux/sched.h>
75797+#include <linux/file.h>
75798+#include <linux/ipc.h>
75799+#include <linux/gracl.h>
75800+#include <linux/grsecurity.h>
75801+#include <linux/grinternal.h>
75802+
75803+int
75804+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
75805+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
75806+{
75807+ struct task_struct *task;
75808+
75809+ if (!gr_acl_is_enabled())
75810+ return 1;
75811+
75812+ rcu_read_lock();
75813+ read_lock(&tasklist_lock);
75814+
75815+ task = find_task_by_vpid(shm_cprid);
75816+
75817+ if (unlikely(!task))
75818+ task = find_task_by_vpid(shm_lapid);
75819+
75820+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
75821+ (task_pid_nr(task) == shm_lapid)) &&
75822+ (task->acl->mode & GR_PROTSHM) &&
75823+ (task->acl != current->acl))) {
75824+ read_unlock(&tasklist_lock);
75825+ rcu_read_unlock();
75826+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
75827+ return 0;
75828+ }
75829+ read_unlock(&tasklist_lock);
75830+ rcu_read_unlock();
75831+
75832+ return 1;
75833+}
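gr_handle_shmat() refuses an attach only when a live task found under the segment's creator PID (or last-attach PID) plausibly still is that creator, meaning its start time does not postdate the segment's creation and the PID has therefore not been recycled, and that task runs under a GR_PROTSHM subject different from the attacher's. The PID-reuse test in isolation, as a userspace sketch (struct and function names invented for illustration):

    #include <stdint.h>
    #include <stdbool.h>

    /* stand-in for the kernel's u64 nanosecond start_time */
    struct task { int pid; uint64_t start_time; };

    /*
     * A PID recorded at segment-creation time only still names the creator
     * if no newer task has been handed that PID since; the kernel-side test
     * encodes this as "the task did not start after the segment was made".
     */
    static bool pid_still_names_creator(const struct task *t,
                                        uint64_t shm_createtime)
    {
        return t->start_time <= shm_createtime;
    }

    int main(void)
    {
        struct task creator = { .pid = 4242, .start_time = 1000 };

        /* segment created at t=1500, after the task started: PID valid */
        return pid_still_names_creator(&creator, 1500) ? 0 : 1;
    }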
75834diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
75835new file mode 100644
75836index 0000000..bc0be01
75837--- /dev/null
75838+++ b/grsecurity/grsec_chdir.c
75839@@ -0,0 +1,19 @@
75840+#include <linux/kernel.h>
75841+#include <linux/sched.h>
75842+#include <linux/fs.h>
75843+#include <linux/file.h>
75844+#include <linux/grsecurity.h>
75845+#include <linux/grinternal.h>
75846+
75847+void
75848+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
75849+{
75850+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
75851+ if ((grsec_enable_chdir && grsec_enable_group &&
75852+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
75853+ !grsec_enable_group)) {
75854+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
75855+ }
75856+#endif
75857+ return;
75858+}
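The gating expression in gr_log_chdir(), (enabled && group-restricted && in-group) || (enabled && !group-restricted), reduces to "auditing is on, and either it is not group-restricted or the caller is in the audit group"; the same shape recurs in gr_handle_exec_args() further down. A tiny exhaustive check of that simplification, with A standing for grsec_enable_chdir, G for grsec_enable_group and P for in_group_p(grsec_audit_gid):

    #include <assert.h>
    #include <stdbool.h>

    int main(void)
    {
        for (int a = 0; a < 2; a++)
            for (int g = 0; g < 2; g++)
                for (int p = 0; p < 2; p++) {
                    bool original   = (a && g && p) || (a && !g);
                    bool simplified = a && (!g || p);

                    assert(original == simplified);
                }
        return 0;
    }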
75859diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
75860new file mode 100644
75861index 0000000..114ea4f
75862--- /dev/null
75863+++ b/grsecurity/grsec_chroot.c
75864@@ -0,0 +1,467 @@
75865+#include <linux/kernel.h>
75866+#include <linux/module.h>
75867+#include <linux/sched.h>
75868+#include <linux/file.h>
75869+#include <linux/fs.h>
75870+#include <linux/mount.h>
75871+#include <linux/types.h>
75872+#include "../fs/mount.h"
75873+#include <linux/grsecurity.h>
75874+#include <linux/grinternal.h>
75875+
75876+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75877+int gr_init_ran;
75878+#endif
75879+
75880+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
75881+{
75882+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75883+ struct dentry *tmpd = dentry;
75884+
75885+ read_seqlock_excl(&mount_lock);
75886+ write_seqlock(&rename_lock);
75887+
75888+ while (tmpd != mnt->mnt_root) {
75889+ atomic_inc(&tmpd->chroot_refcnt);
75890+ tmpd = tmpd->d_parent;
75891+ }
75892+ atomic_inc(&tmpd->chroot_refcnt);
75893+
75894+ write_sequnlock(&rename_lock);
75895+ read_sequnlock_excl(&mount_lock);
75896+#endif
75897+}
75898+
75899+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
75900+{
75901+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75902+ struct dentry *tmpd = dentry;
75903+
75904+ read_seqlock_excl(&mount_lock);
75905+ write_seqlock(&rename_lock);
75906+
75907+ while (tmpd != mnt->mnt_root) {
75908+ atomic_dec(&tmpd->chroot_refcnt);
75909+ tmpd = tmpd->d_parent;
75910+ }
75911+ atomic_dec(&tmpd->chroot_refcnt);
75912+
75913+ write_sequnlock(&rename_lock);
75914+ read_sequnlock_excl(&mount_lock);
75915+#endif
75916+}
75917+
75918+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75919+static struct dentry *get_closest_chroot(struct dentry *dentry)
75920+{
75921+ write_seqlock(&rename_lock);
75922+ do {
75923+ if (atomic_read(&dentry->chroot_refcnt)) {
75924+ write_sequnlock(&rename_lock);
75925+ return dentry;
75926+ }
75927+ dentry = dentry->d_parent;
75928+ } while (!IS_ROOT(dentry));
75929+ write_sequnlock(&rename_lock);
75930+ return NULL;
75931+}
75932+#endif
75933+
75934+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
75935+ struct dentry *newdentry, struct vfsmount *newmnt)
75936+{
75937+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75938+ struct dentry *chroot;
75939+
75940+ if (unlikely(!grsec_enable_chroot_rename))
75941+ return 0;
75942+
75943+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
75944+ return 0;
75945+
75946+ chroot = get_closest_chroot(olddentry);
75947+
75948+ if (chroot == NULL)
75949+ return 0;
75950+
75951+ if (is_subdir(newdentry, chroot))
75952+ return 0;
75953+
75954+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
75955+
75956+ return 1;
75957+#else
75958+ return 0;
75959+#endif
75960+}
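Under CONFIG_GRKERNSEC_CHROOT_RENAME, entering a chroot bumps chroot_refcnt on every dentry from the new root up to the mount root (gr_inc_chroot_refcnts above); get_closest_chroot() walks d_parent links to find the innermost directory currently serving as someone's chroot, and gr_bad_chroot_rename() then rejects any rename whose destination is not a subdirectory of it, since moving such a path could pull a confined process's root out from under it. A toy model of the ancestor walk (struct layout invented; the real walk runs under rename_lock):

    #include <stdio.h>

    struct dentry {
        struct dentry *d_parent;    /* a root points to itself, as in the VFS */
        int chroot_refcnt;
        const char *name;
    };

    /* walk toward the root, stopping at the first active chroot marker */
    static struct dentry *closest_chroot(struct dentry *d)
    {
        do {
            if (d->chroot_refcnt)
                return d;
            d = d->d_parent;
        } while (d->d_parent != d);     /* IS_ROOT() equivalent */
        return 0;
    }

    int main(void)
    {
        struct dentry root = { &root, 0, "/" };
        struct dentry jail = { &root, 1, "/jail" };     /* an active chroot */
        struct dentry dir  = { &jail, 0, "/jail/dir" };
        struct dentry *c   = closest_chroot(&dir);

        printf("closest chroot: %s\n", c ? c->name : "(none)");
        return 0;
    }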
75961+
75962+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
75963+{
75964+#ifdef CONFIG_GRKERNSEC
75965+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
75966+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
75967+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75968+ && gr_init_ran
75969+#endif
75970+ )
75971+ task->gr_is_chrooted = 1;
75972+ else {
75973+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75974+ if (task_pid_nr(task) == 1 && !gr_init_ran)
75975+ gr_init_ran = 1;
75976+#endif
75977+ task->gr_is_chrooted = 0;
75978+ }
75979+
75980+ task->gr_chroot_dentry = path->dentry;
75981+#endif
75982+ return;
75983+}
75984+
75985+void gr_clear_chroot_entries(struct task_struct *task)
75986+{
75987+#ifdef CONFIG_GRKERNSEC
75988+ task->gr_is_chrooted = 0;
75989+ task->gr_chroot_dentry = NULL;
75990+#endif
75991+ return;
75992+}
75993+
75994+int
75995+gr_handle_chroot_unix(const pid_t pid)
75996+{
75997+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
75998+ struct task_struct *p;
75999+
76000+ if (unlikely(!grsec_enable_chroot_unix))
76001+ return 1;
76002+
76003+ if (likely(!proc_is_chrooted(current)))
76004+ return 1;
76005+
76006+ rcu_read_lock();
76007+ read_lock(&tasklist_lock);
76008+ p = find_task_by_vpid_unrestricted(pid);
76009+ if (unlikely(p && !have_same_root(current, p))) {
76010+ read_unlock(&tasklist_lock);
76011+ rcu_read_unlock();
76012+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
76013+ return 0;
76014+ }
76015+ read_unlock(&tasklist_lock);
76016+ rcu_read_unlock();
76017+#endif
76018+ return 1;
76019+}
76020+
76021+int
76022+gr_handle_chroot_nice(void)
76023+{
76024+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76025+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
76026+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
76027+ return -EPERM;
76028+ }
76029+#endif
76030+ return 0;
76031+}
76032+
76033+int
76034+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
76035+{
76036+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76037+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
76038+ && proc_is_chrooted(current)) {
76039+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
76040+ return -EACCES;
76041+ }
76042+#endif
76043+ return 0;
76044+}
76045+
76046+int
76047+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
76048+{
76049+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76050+ struct task_struct *p;
76051+ int ret = 0;
76052+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
76053+ return ret;
76054+
76055+ read_lock(&tasklist_lock);
76056+ do_each_pid_task(pid, type, p) {
76057+ if (!have_same_root(current, p)) {
76058+ ret = 1;
76059+ goto out;
76060+ }
76061+ } while_each_pid_task(pid, type, p);
76062+out:
76063+ read_unlock(&tasklist_lock);
76064+ return ret;
76065+#endif
76066+ return 0;
76067+}
76068+
76069+int
76070+gr_pid_is_chrooted(struct task_struct *p)
76071+{
76072+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76073+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
76074+ return 0;
76075+
76076+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
76077+ !have_same_root(current, p)) {
76078+ return 1;
76079+ }
76080+#endif
76081+ return 0;
76082+}
76083+
76084+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
76085+
76086+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
76087+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
76088+{
76089+ struct path path, currentroot;
76090+ int ret = 0;
76091+
76092+ path.dentry = (struct dentry *)u_dentry;
76093+ path.mnt = (struct vfsmount *)u_mnt;
76094+ get_fs_root(current->fs, &currentroot);
76095+ if (path_is_under(&path, &currentroot))
76096+ ret = 1;
76097+ path_put(&currentroot);
76098+
76099+ return ret;
76100+}
76101+#endif
76102+
76103+int
76104+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
76105+{
76106+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76107+ if (!grsec_enable_chroot_fchdir)
76108+ return 1;
76109+
76110+ if (!proc_is_chrooted(current))
76111+ return 1;
76112+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
76113+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
76114+ return 0;
76115+ }
76116+#endif
76117+ return 1;
76118+}
76119+
76120+int
76121+gr_chroot_fhandle(void)
76122+{
76123+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76124+ if (!grsec_enable_chroot_fchdir)
76125+ return 1;
76126+
76127+ if (!proc_is_chrooted(current))
76128+ return 1;
76129+ else {
76130+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
76131+ return 0;
76132+ }
76133+#endif
76134+ return 1;
76135+}
76136+
76137+int
76138+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76139+ const u64 shm_createtime)
76140+{
76141+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76142+ struct task_struct *p;
76143+
76144+ if (unlikely(!grsec_enable_chroot_shmat))
76145+ return 1;
76146+
76147+ if (likely(!proc_is_chrooted(current)))
76148+ return 1;
76149+
76150+ rcu_read_lock();
76151+ read_lock(&tasklist_lock);
76152+
76153+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
76154+ if (time_before_eq64(p->start_time, shm_createtime)) {
76155+ if (have_same_root(current, p)) {
76156+ goto allow;
76157+ } else {
76158+ read_unlock(&tasklist_lock);
76159+ rcu_read_unlock();
76160+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76161+ return 0;
76162+ }
76163+ }
76164+ /* creator exited, pid reuse, fall through to next check */
76165+ }
76166+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
76167+ if (unlikely(!have_same_root(current, p))) {
76168+ read_unlock(&tasklist_lock);
76169+ rcu_read_unlock();
76170+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76171+ return 0;
76172+ }
76173+ }
76174+
76175+allow:
76176+ read_unlock(&tasklist_lock);
76177+ rcu_read_unlock();
76178+#endif
76179+ return 1;
76180+}
76181+
76182+void
76183+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
76184+{
76185+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
76186+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
76187+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
76188+#endif
76189+ return;
76190+}
76191+
76192+int
76193+gr_handle_chroot_mknod(const struct dentry *dentry,
76194+ const struct vfsmount *mnt, const int mode)
76195+{
76196+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
76197+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
76198+ proc_is_chrooted(current)) {
76199+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
76200+ return -EPERM;
76201+ }
76202+#endif
76203+ return 0;
76204+}
76205+
76206+int
76207+gr_handle_chroot_mount(const struct dentry *dentry,
76208+ const struct vfsmount *mnt, const char *dev_name)
76209+{
76210+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
76211+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
76212+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
76213+ return -EPERM;
76214+ }
76215+#endif
76216+ return 0;
76217+}
76218+
76219+int
76220+gr_handle_chroot_pivot(void)
76221+{
76222+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
76223+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
76224+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
76225+ return -EPERM;
76226+ }
76227+#endif
76228+ return 0;
76229+}
76230+
76231+int
76232+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
76233+{
76234+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
76235+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
76236+ !gr_is_outside_chroot(dentry, mnt)) {
76237+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
76238+ return -EPERM;
76239+ }
76240+#endif
76241+ return 0;
76242+}
76243+
76244+extern const char *captab_log[];
76245+extern int captab_log_entries;
76246+
76247+int
76248+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76249+{
76250+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76251+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76252+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76253+ if (cap_raised(chroot_caps, cap)) {
76254+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
76255+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
76256+ }
76257+ return 0;
76258+ }
76259+ }
76260+#endif
76261+ return 1;
76262+}
76263+
76264+int
76265+gr_chroot_is_capable(const int cap)
76266+{
76267+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76268+ return gr_task_chroot_is_capable(current, current_cred(), cap);
76269+#endif
76270+ return 1;
76271+}
76272+
76273+int
76274+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
76275+{
76276+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76277+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76278+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76279+ if (cap_raised(chroot_caps, cap)) {
76280+ return 0;
76281+ }
76282+ }
76283+#endif
76284+ return 1;
76285+}
76286+
76287+int
76288+gr_chroot_is_capable_nolog(const int cap)
76289+{
76290+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76291+ return gr_task_chroot_is_capable_nolog(current, cap);
76292+#endif
76293+ return 1;
76294+}
76295+
76296+int
76297+gr_handle_chroot_sysctl(const int op)
76298+{
76299+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
76300+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
76301+ proc_is_chrooted(current))
76302+ return -EACCES;
76303+#endif
76304+ return 0;
76305+}
76306+
76307+void
76308+gr_handle_chroot_chdir(const struct path *path)
76309+{
76310+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
76311+ if (grsec_enable_chroot_chdir)
76312+ set_fs_pwd(current->fs, path);
76313+#endif
76314+ return;
76315+}
76316+
76317+int
76318+gr_handle_chroot_chmod(const struct dentry *dentry,
76319+ const struct vfsmount *mnt, const int mode)
76320+{
76321+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
76322+ /* allow chmod +s on directories, but not files */
76323+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
76324+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
76325+ proc_is_chrooted(current)) {
76326+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
76327+ return -EPERM;
76328+ }
76329+#endif
76330+ return 0;
76331+}
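gr_task_chroot_is_capable() consults GR_CHROOT_CAPS, a fixed mask of capabilities withheld from chrooted tasks: if the requested capability is raised in that mask the check fails, and an alert is logged only when the task would actually have held the capability, so routine unprivileged failures stay quiet. A userspace sketch of the bit test (the capability numbers match Linux's; treating exactly these three as chroot-denied is illustrative, since the real mask is defined elsewhere in the patch):

    #include <stdio.h>
    #include <stdint.h>

    #define CAP_SYS_MODULE 16
    #define CAP_SYS_ADMIN  21
    #define CAP_MKNOD      27

    /* toy 64-bit mask standing in for kernel_cap_t */
    static const uint64_t chroot_denied =
        (1ULL << CAP_SYS_MODULE) | (1ULL << CAP_SYS_ADMIN) |
        (1ULL << CAP_MKNOD);

    /* mirrors cap_raised(): is bit `cap` set in the mask? */
    static int cap_raised(uint64_t mask, int cap)
    {
        return (mask >> cap) & 1;
    }

    int main(void)
    {
        printf("CAP_SYS_ADMIN in a chroot: %s\n",
               cap_raised(chroot_denied, CAP_SYS_ADMIN) ? "refused" : "allowed");
        return 0;
    }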
76332diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
76333new file mode 100644
76334index 0000000..946f750
76335--- /dev/null
76336+++ b/grsecurity/grsec_disabled.c
76337@@ -0,0 +1,445 @@
76338+#include <linux/kernel.h>
76339+#include <linux/module.h>
76340+#include <linux/sched.h>
76341+#include <linux/file.h>
76342+#include <linux/fs.h>
76343+#include <linux/kdev_t.h>
76344+#include <linux/net.h>
76345+#include <linux/in.h>
76346+#include <linux/ip.h>
76347+#include <linux/skbuff.h>
76348+#include <linux/sysctl.h>
76349+
76350+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76351+void
76352+pax_set_initial_flags(struct linux_binprm *bprm)
76353+{
76354+ return;
76355+}
76356+#endif
76357+
76358+#ifdef CONFIG_SYSCTL
76359+__u32
76360+gr_handle_sysctl(const struct ctl_table * table, const int op)
76361+{
76362+ return 0;
76363+}
76364+#endif
76365+
76366+#ifdef CONFIG_TASKSTATS
76367+int gr_is_taskstats_denied(int pid)
76368+{
76369+ return 0;
76370+}
76371+#endif
76372+
76373+int
76374+gr_acl_is_enabled(void)
76375+{
76376+ return 0;
76377+}
76378+
76379+int
76380+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
76381+{
76382+ return 0;
76383+}
76384+
76385+void
76386+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
76387+{
76388+ return;
76389+}
76390+
76391+int
76392+gr_handle_rawio(const struct inode *inode)
76393+{
76394+ return 0;
76395+}
76396+
76397+void
76398+gr_acl_handle_psacct(struct task_struct *task, const long code)
76399+{
76400+ return;
76401+}
76402+
76403+int
76404+gr_handle_ptrace(struct task_struct *task, const long request)
76405+{
76406+ return 0;
76407+}
76408+
76409+int
76410+gr_handle_proc_ptrace(struct task_struct *task)
76411+{
76412+ return 0;
76413+}
76414+
76415+int
76416+gr_set_acls(const int type)
76417+{
76418+ return 0;
76419+}
76420+
76421+int
76422+gr_check_hidden_task(const struct task_struct *tsk)
76423+{
76424+ return 0;
76425+}
76426+
76427+int
76428+gr_check_protected_task(const struct task_struct *task)
76429+{
76430+ return 0;
76431+}
76432+
76433+int
76434+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
76435+{
76436+ return 0;
76437+}
76438+
76439+void
76440+gr_copy_label(struct task_struct *tsk)
76441+{
76442+ return;
76443+}
76444+
76445+void
76446+gr_set_pax_flags(struct task_struct *task)
76447+{
76448+ return;
76449+}
76450+
76451+int
76452+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
76453+ const int unsafe_share)
76454+{
76455+ return 0;
76456+}
76457+
76458+void
76459+gr_handle_delete(const u64 ino, const dev_t dev)
76460+{
76461+ return;
76462+}
76463+
76464+void
76465+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
76466+{
76467+ return;
76468+}
76469+
76470+void
76471+gr_handle_crash(struct task_struct *task, const int sig)
76472+{
76473+ return;
76474+}
76475+
76476+int
76477+gr_check_crash_exec(const struct file *filp)
76478+{
76479+ return 0;
76480+}
76481+
76482+int
76483+gr_check_crash_uid(const kuid_t uid)
76484+{
76485+ return 0;
76486+}
76487+
76488+void
76489+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
76490+ struct dentry *old_dentry,
76491+ struct dentry *new_dentry,
76492+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
76493+{
76494+ return;
76495+}
76496+
76497+int
76498+gr_search_socket(const int family, const int type, const int protocol)
76499+{
76500+ return 1;
76501+}
76502+
76503+int
76504+gr_search_connectbind(const int mode, const struct socket *sock,
76505+ const struct sockaddr_in *addr)
76506+{
76507+ return 0;
76508+}
76509+
76510+void
76511+gr_handle_alertkill(struct task_struct *task)
76512+{
76513+ return;
76514+}
76515+
76516+__u32
76517+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
76518+{
76519+ return 1;
76520+}
76521+
76522+__u32
76523+gr_acl_handle_hidden_file(const struct dentry * dentry,
76524+ const struct vfsmount * mnt)
76525+{
76526+ return 1;
76527+}
76528+
76529+__u32
76530+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
76531+ int acc_mode)
76532+{
76533+ return 1;
76534+}
76535+
76536+__u32
76537+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
76538+{
76539+ return 1;
76540+}
76541+
76542+__u32
76543+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
76544+{
76545+ return 1;
76546+}
76547+
76548+int
76549+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
76550+ unsigned int *vm_flags)
76551+{
76552+ return 1;
76553+}
76554+
76555+__u32
76556+gr_acl_handle_truncate(const struct dentry * dentry,
76557+ const struct vfsmount * mnt)
76558+{
76559+ return 1;
76560+}
76561+
76562+__u32
76563+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
76564+{
76565+ return 1;
76566+}
76567+
76568+__u32
76569+gr_acl_handle_access(const struct dentry * dentry,
76570+ const struct vfsmount * mnt, const int fmode)
76571+{
76572+ return 1;
76573+}
76574+
76575+__u32
76576+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
76577+ umode_t *mode)
76578+{
76579+ return 1;
76580+}
76581+
76582+__u32
76583+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
76584+{
76585+ return 1;
76586+}
76587+
76588+__u32
76589+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
76590+{
76591+ return 1;
76592+}
76593+
76594+__u32
76595+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
76596+{
76597+ return 1;
76598+}
76599+
76600+void
76601+grsecurity_init(void)
76602+{
76603+ return;
76604+}
76605+
76606+umode_t gr_acl_umask(void)
76607+{
76608+ return 0;
76609+}
76610+
76611+__u32
76612+gr_acl_handle_mknod(const struct dentry * new_dentry,
76613+ const struct dentry * parent_dentry,
76614+ const struct vfsmount * parent_mnt,
76615+ const int mode)
76616+{
76617+ return 1;
76618+}
76619+
76620+__u32
76621+gr_acl_handle_mkdir(const struct dentry * new_dentry,
76622+ const struct dentry * parent_dentry,
76623+ const struct vfsmount * parent_mnt)
76624+{
76625+ return 1;
76626+}
76627+
76628+__u32
76629+gr_acl_handle_symlink(const struct dentry * new_dentry,
76630+ const struct dentry * parent_dentry,
76631+ const struct vfsmount * parent_mnt, const struct filename *from)
76632+{
76633+ return 1;
76634+}
76635+
76636+__u32
76637+gr_acl_handle_link(const struct dentry * new_dentry,
76638+ const struct dentry * parent_dentry,
76639+ const struct vfsmount * parent_mnt,
76640+ const struct dentry * old_dentry,
76641+ const struct vfsmount * old_mnt, const struct filename *to)
76642+{
76643+ return 1;
76644+}
76645+
76646+int
76647+gr_acl_handle_rename(const struct dentry *new_dentry,
76648+ const struct dentry *parent_dentry,
76649+ const struct vfsmount *parent_mnt,
76650+ const struct dentry *old_dentry,
76651+ const struct inode *old_parent_inode,
76652+ const struct vfsmount *old_mnt, const struct filename *newname,
76653+ unsigned int flags)
76654+{
76655+ return 0;
76656+}
76657+
76658+int
76659+gr_acl_handle_filldir(const struct file *file, const char *name,
76660+ const int namelen, const u64 ino)
76661+{
76662+ return 1;
76663+}
76664+
76665+int
76666+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76667+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76668+{
76669+ return 1;
76670+}
76671+
76672+int
76673+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
76674+{
76675+ return 0;
76676+}
76677+
76678+int
76679+gr_search_accept(const struct socket *sock)
76680+{
76681+ return 0;
76682+}
76683+
76684+int
76685+gr_search_listen(const struct socket *sock)
76686+{
76687+ return 0;
76688+}
76689+
76690+int
76691+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
76692+{
76693+ return 0;
76694+}
76695+
76696+__u32
76697+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
76698+{
76699+ return 1;
76700+}
76701+
76702+__u32
76703+gr_acl_handle_creat(const struct dentry * dentry,
76704+ const struct dentry * p_dentry,
76705+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
76706+ const int imode)
76707+{
76708+ return 1;
76709+}
76710+
76711+void
76712+gr_acl_handle_exit(void)
76713+{
76714+ return;
76715+}
76716+
76717+int
76718+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
76719+{
76720+ return 1;
76721+}
76722+
76723+void
76724+gr_set_role_label(const kuid_t uid, const kgid_t gid)
76725+{
76726+ return;
76727+}
76728+
76729+int
76730+gr_acl_handle_procpidmem(const struct task_struct *task)
76731+{
76732+ return 0;
76733+}
76734+
76735+int
76736+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
76737+{
76738+ return 0;
76739+}
76740+
76741+int
76742+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
76743+{
76744+ return 0;
76745+}
76746+
76747+int
76748+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
76749+{
76750+ return 0;
76751+}
76752+
76753+int
76754+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
76755+{
76756+ return 0;
76757+}
76758+
76759+int gr_acl_enable_at_secure(void)
76760+{
76761+ return 0;
76762+}
76763+
76764+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
76765+{
76766+ return dentry->d_sb->s_dev;
76767+}
76768+
76769+u64 gr_get_ino_from_dentry(struct dentry *dentry)
76770+{
76771+ return dentry->d_inode->i_ino;
76772+}
76773+
76774+void gr_put_exec_file(struct task_struct *task)
76775+{
76776+ return;
76777+}
76778+
76779+#ifdef CONFIG_SECURITY
76780+EXPORT_SYMBOL_GPL(gr_check_user_change);
76781+EXPORT_SYMBOL_GPL(gr_check_group_change);
76782+#endif
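grsec_disabled.c exists so that the gr_* hooks scattered through the kernel can be called unconditionally: when grsecurity is configured out, the build links these permit-everything/no-op definitions in place of the real ones and the hooks collapse to nothing, with no #ifdef needed at any call site. The shape of the pattern, compressed into one self-contained file with an #ifdef (CONFIG_POLICY and all names here are invented):

    #include <stdio.h>

    int policy_check_open(const char *path);    /* one declaration */

    #ifdef CONFIG_POLICY
    /* the real implementation, built when the feature is enabled */
    int policy_check_open(const char *path)
    {
        return path && path[0] == '/';          /* toy policy */
    }
    #else
    /* permit-everything stub, analogous to grsec_disabled.c */
    int policy_check_open(const char *path)
    {
        (void)path;
        return 1;
    }
    #endif

    int main(void)
    {
        /* the call site is identical either way */
        printf("open allowed: %d\n", policy_check_open("relative/path"));
        return 0;
    }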
76783diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
76784new file mode 100644
76785index 0000000..14638ff
76786--- /dev/null
76787+++ b/grsecurity/grsec_exec.c
76788@@ -0,0 +1,188 @@
76789+#include <linux/kernel.h>
76790+#include <linux/sched.h>
76791+#include <linux/file.h>
76792+#include <linux/binfmts.h>
76793+#include <linux/fs.h>
76794+#include <linux/types.h>
76795+#include <linux/grdefs.h>
76796+#include <linux/grsecurity.h>
76797+#include <linux/grinternal.h>
76798+#include <linux/capability.h>
76799+#include <linux/module.h>
76800+#include <linux/compat.h>
76801+
76802+#include <asm/uaccess.h>
76803+
76804+#ifdef CONFIG_GRKERNSEC_EXECLOG
76805+static char gr_exec_arg_buf[132];
76806+static DEFINE_MUTEX(gr_exec_arg_mutex);
76807+#endif
76808+
76809+struct user_arg_ptr {
76810+#ifdef CONFIG_COMPAT
76811+ bool is_compat;
76812+#endif
76813+ union {
76814+ const char __user *const __user *native;
76815+#ifdef CONFIG_COMPAT
76816+ const compat_uptr_t __user *compat;
76817+#endif
76818+ } ptr;
76819+};
76820+
76821+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
76822+
76823+void
76824+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
76825+{
76826+#ifdef CONFIG_GRKERNSEC_EXECLOG
76827+ char *grarg = gr_exec_arg_buf;
76828+ unsigned int i, x, execlen = 0;
76829+ char c;
76830+
76831+ if (!((grsec_enable_execlog && grsec_enable_group &&
76832+ in_group_p(grsec_audit_gid))
76833+ || (grsec_enable_execlog && !grsec_enable_group)))
76834+ return;
76835+
76836+ mutex_lock(&gr_exec_arg_mutex);
76837+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
76838+
76839+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
76840+ const char __user *p;
76841+ unsigned int len;
76842+
76843+ p = get_user_arg_ptr(argv, i);
76844+ if (IS_ERR(p))
76845+ goto log;
76846+
76847+ len = strnlen_user(p, 128 - execlen);
76848+ if (len > 128 - execlen)
76849+ len = 128 - execlen;
76850+ else if (len > 0)
76851+ len--;
76852+ if (copy_from_user(grarg + execlen, p, len))
76853+ goto log;
76854+
76855+ /* rewrite unprintable characters */
76856+ for (x = 0; x < len; x++) {
76857+ c = *(grarg + execlen + x);
76858+ if (c < 32 || c > 126)
76859+ *(grarg + execlen + x) = ' ';
76860+ }
76861+
76862+ execlen += len;
76863+ *(grarg + execlen) = ' ';
76864+ *(grarg + execlen + 1) = '\0';
76865+ execlen++;
76866+ }
76867+
76868+ log:
76869+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
76870+ bprm->file->f_path.mnt, grarg);
76871+ mutex_unlock(&gr_exec_arg_mutex);
76872+#endif
76873+ return;
76874+}
76875+
76876+#ifdef CONFIG_GRKERNSEC
76877+extern int gr_acl_is_capable(const int cap);
76878+extern int gr_acl_is_capable_nolog(const int cap);
76879+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76880+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
76881+extern int gr_chroot_is_capable(const int cap);
76882+extern int gr_chroot_is_capable_nolog(const int cap);
76883+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76884+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
76885+#endif
76886+
76887+const char *captab_log[] = {
76888+ "CAP_CHOWN",
76889+ "CAP_DAC_OVERRIDE",
76890+ "CAP_DAC_READ_SEARCH",
76891+ "CAP_FOWNER",
76892+ "CAP_FSETID",
76893+ "CAP_KILL",
76894+ "CAP_SETGID",
76895+ "CAP_SETUID",
76896+ "CAP_SETPCAP",
76897+ "CAP_LINUX_IMMUTABLE",
76898+ "CAP_NET_BIND_SERVICE",
76899+ "CAP_NET_BROADCAST",
76900+ "CAP_NET_ADMIN",
76901+ "CAP_NET_RAW",
76902+ "CAP_IPC_LOCK",
76903+ "CAP_IPC_OWNER",
76904+ "CAP_SYS_MODULE",
76905+ "CAP_SYS_RAWIO",
76906+ "CAP_SYS_CHROOT",
76907+ "CAP_SYS_PTRACE",
76908+ "CAP_SYS_PACCT",
76909+ "CAP_SYS_ADMIN",
76910+ "CAP_SYS_BOOT",
76911+ "CAP_SYS_NICE",
76912+ "CAP_SYS_RESOURCE",
76913+ "CAP_SYS_TIME",
76914+ "CAP_SYS_TTY_CONFIG",
76915+ "CAP_MKNOD",
76916+ "CAP_LEASE",
76917+ "CAP_AUDIT_WRITE",
76918+ "CAP_AUDIT_CONTROL",
76919+ "CAP_SETFCAP",
76920+ "CAP_MAC_OVERRIDE",
76921+ "CAP_MAC_ADMIN",
76922+ "CAP_SYSLOG",
76923+ "CAP_WAKE_ALARM",
76924+ "CAP_BLOCK_SUSPEND"
76925+};
76926+
76927+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
76928+
76929+int gr_is_capable(const int cap)
76930+{
76931+#ifdef CONFIG_GRKERNSEC
76932+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
76933+ return 1;
76934+ return 0;
76935+#else
76936+ return 1;
76937+#endif
76938+}
76939+
76940+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76941+{
76942+#ifdef CONFIG_GRKERNSEC
76943+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
76944+ return 1;
76945+ return 0;
76946+#else
76947+ return 1;
76948+#endif
76949+}
76950+
76951+int gr_is_capable_nolog(const int cap)
76952+{
76953+#ifdef CONFIG_GRKERNSEC
76954+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
76955+ return 1;
76956+ return 0;
76957+#else
76958+ return 1;
76959+#endif
76960+}
76961+
76962+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
76963+{
76964+#ifdef CONFIG_GRKERNSEC
76965+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
76966+ return 1;
76967+ return 0;
76968+#else
76969+ return 1;
76970+#endif
76971+}
76972+
76973+EXPORT_SYMBOL_GPL(gr_is_capable);
76974+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
76975+EXPORT_SYMBOL_GPL(gr_task_is_capable);
76976+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
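gr_handle_exec_args() serializes on a mutex, copies at most 128 bytes of argv into a static buffer, and rewrites every byte outside printable ASCII to a space before handing the string to the logger, so control characters in arguments cannot corrupt or spoof log lines. The sanitation step in isolation, as a userspace sketch:

    #include <stdio.h>
    #include <string.h>

    /* replace anything outside printable ASCII (0x20..0x7e) with a space,
     * as the inner loop of gr_handle_exec_args() does */
    static void sanitize(char *buf, size_t len)
    {
        for (size_t i = 0; i < len; i++)
            if (buf[i] < 32 || buf[i] > 126)
                buf[i] = ' ';
    }

    int main(void)
    {
        char arg[] = "ls\t-l\x01/tmp";

        sanitize(arg, strlen(arg));
        printf("%s\n", arg);        /* prints: ls -l /tmp */
        return 0;
    }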
76977diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
76978new file mode 100644
76979index 0000000..06cc6ea
76980--- /dev/null
76981+++ b/grsecurity/grsec_fifo.c
76982@@ -0,0 +1,24 @@
76983+#include <linux/kernel.h>
76984+#include <linux/sched.h>
76985+#include <linux/fs.h>
76986+#include <linux/file.h>
76987+#include <linux/grinternal.h>
76988+
76989+int
76990+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
76991+ const struct dentry *dir, const int flag, const int acc_mode)
76992+{
76993+#ifdef CONFIG_GRKERNSEC_FIFO
76994+ const struct cred *cred = current_cred();
76995+
76996+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
76997+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
76998+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
76999+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
77000+ if (!inode_permission(dentry->d_inode, acc_mode))
77001+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
77002+ return -EACCES;
77003+ }
77004+#endif
77005+ return 0;
77006+}
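The FIFO check refuses to open a FIFO sitting in a sticky directory when the FIFO belongs to neither the directory's owner nor the opener; O_EXCL is exempt because the caller is then creating the FIFO itself. The -EACCES is unconditional once the predicate matches; inode_permission() only decides whether the event deserves an alert, so a caller whom DAC would have refused anyway fails silently. A boolean restatement (parameter names invented; the grsec_enable_fifo toggle omitted):

    #include <stdio.h>
    #include <stdbool.h>
    #include <sys/types.h>

    static bool fifo_open_denied(bool is_fifo, bool o_excl, bool dir_sticky,
                                 uid_t fifo_uid, uid_t dir_uid, uid_t fsuid)
    {
        return is_fifo && !o_excl && dir_sticky &&
               fifo_uid != dir_uid && fsuid != fifo_uid;
    }

    int main(void)
    {
        /* sticky /tmp owned by root; uid 1001 opens a FIFO owned by 1000 */
        printf("%s\n", fifo_open_denied(true, false, true, 1000, 0, 1001)
                       ? "denied" : "allowed");
        return 0;
    }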
77007diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
77008new file mode 100644
77009index 0000000..8ca18bf
77010--- /dev/null
77011+++ b/grsecurity/grsec_fork.c
77012@@ -0,0 +1,23 @@
77013+#include <linux/kernel.h>
77014+#include <linux/sched.h>
77015+#include <linux/grsecurity.h>
77016+#include <linux/grinternal.h>
77017+#include <linux/errno.h>
77018+
77019+void
77020+gr_log_forkfail(const int retval)
77021+{
77022+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77023+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
77024+ switch (retval) {
77025+ case -EAGAIN:
77026+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
77027+ break;
77028+ case -ENOMEM:
77029+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
77030+ break;
77031+ }
77032+ }
77033+#endif
77034+ return;
77035+}
77036diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
77037new file mode 100644
77038index 0000000..4ed9e7d
77039--- /dev/null
77040+++ b/grsecurity/grsec_init.c
77041@@ -0,0 +1,290 @@
77042+#include <linux/kernel.h>
77043+#include <linux/sched.h>
77044+#include <linux/mm.h>
77045+#include <linux/gracl.h>
77046+#include <linux/slab.h>
77047+#include <linux/vmalloc.h>
77048+#include <linux/percpu.h>
77049+#include <linux/module.h>
77050+
77051+int grsec_enable_ptrace_readexec;
77052+int grsec_enable_setxid;
77053+int grsec_enable_symlinkown;
77054+kgid_t grsec_symlinkown_gid;
77055+int grsec_enable_brute;
77056+int grsec_enable_link;
77057+int grsec_enable_dmesg;
77058+int grsec_enable_harden_ptrace;
77059+int grsec_enable_harden_ipc;
77060+int grsec_enable_fifo;
77061+int grsec_enable_execlog;
77062+int grsec_enable_signal;
77063+int grsec_enable_forkfail;
77064+int grsec_enable_audit_ptrace;
77065+int grsec_enable_time;
77066+int grsec_enable_group;
77067+kgid_t grsec_audit_gid;
77068+int grsec_enable_chdir;
77069+int grsec_enable_mount;
77070+int grsec_enable_rofs;
77071+int grsec_deny_new_usb;
77072+int grsec_enable_chroot_findtask;
77073+int grsec_enable_chroot_mount;
77074+int grsec_enable_chroot_shmat;
77075+int grsec_enable_chroot_fchdir;
77076+int grsec_enable_chroot_double;
77077+int grsec_enable_chroot_pivot;
77078+int grsec_enable_chroot_chdir;
77079+int grsec_enable_chroot_chmod;
77080+int grsec_enable_chroot_mknod;
77081+int grsec_enable_chroot_nice;
77082+int grsec_enable_chroot_execlog;
77083+int grsec_enable_chroot_caps;
77084+int grsec_enable_chroot_rename;
77085+int grsec_enable_chroot_sysctl;
77086+int grsec_enable_chroot_unix;
77087+int grsec_enable_tpe;
77088+kgid_t grsec_tpe_gid;
77089+int grsec_enable_blackhole;
77090+#ifdef CONFIG_IPV6_MODULE
77091+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
77092+#endif
77093+int grsec_lastack_retries;
77094+int grsec_enable_tpe_all;
77095+int grsec_enable_tpe_invert;
77096+int grsec_enable_socket_all;
77097+kgid_t grsec_socket_all_gid;
77098+int grsec_enable_socket_client;
77099+kgid_t grsec_socket_client_gid;
77100+int grsec_enable_socket_server;
77101+kgid_t grsec_socket_server_gid;
77102+int grsec_resource_logging;
77103+int grsec_disable_privio;
77104+int grsec_enable_log_rwxmaps;
77105+int grsec_lock;
77106+
77107+DEFINE_SPINLOCK(grsec_alert_lock);
77108+unsigned long grsec_alert_wtime = 0;
77109+unsigned long grsec_alert_fyet = 0;
77110+
77111+DEFINE_SPINLOCK(grsec_audit_lock);
77112+
77113+DEFINE_RWLOCK(grsec_exec_file_lock);
77114+
77115+char *gr_shared_page[4];
77116+
77117+char *gr_alert_log_fmt;
77118+char *gr_audit_log_fmt;
77119+char *gr_alert_log_buf;
77120+char *gr_audit_log_buf;
77121+
77122+extern struct gr_arg *gr_usermode;
77123+extern unsigned char *gr_system_salt;
77124+extern unsigned char *gr_system_sum;
77125+
77126+void __init
77127+grsecurity_init(void)
77128+{
77129+ int j;
77130+ /* create the per-cpu shared pages */
77131+
77132+#ifdef CONFIG_X86
77133+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
77134+#endif
77135+
77136+ for (j = 0; j < 4; j++) {
77137+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
77138+ if (gr_shared_page[j] == NULL) {
77139+ panic("Unable to allocate grsecurity shared page");
77140+ return;
77141+ }
77142+ }
77143+
77144+ /* allocate log buffers */
77145+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
77146+ if (!gr_alert_log_fmt) {
77147+ panic("Unable to allocate grsecurity alert log format buffer");
77148+ return;
77149+ }
77150+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
77151+ if (!gr_audit_log_fmt) {
77152+ panic("Unable to allocate grsecurity audit log format buffer");
77153+ return;
77154+ }
77155+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77156+ if (!gr_alert_log_buf) {
77157+ panic("Unable to allocate grsecurity alert log buffer");
77158+ return;
77159+ }
77160+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77161+ if (!gr_audit_log_buf) {
77162+ panic("Unable to allocate grsecurity audit log buffer");
77163+ return;
77164+ }
77165+
77166+ /* allocate memory for authentication structure */
77167+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
77168+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
77169+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
77170+
77171+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
77172+ panic("Unable to allocate grsecurity authentication structure");
77173+ return;
77174+ }
77175+
77176+#ifdef CONFIG_GRKERNSEC_IO
77177+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
77178+ grsec_disable_privio = 1;
77179+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77180+ grsec_disable_privio = 1;
77181+#else
77182+ grsec_disable_privio = 0;
77183+#endif
77184+#endif
77185+
77186+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
77187+ /* for backward compatibility, tpe_invert always defaults to on if
77188+ enabled in the kernel
77189+ */
77190+ grsec_enable_tpe_invert = 1;
77191+#endif
77192+
77193+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77194+#ifndef CONFIG_GRKERNSEC_SYSCTL
77195+ grsec_lock = 1;
77196+#endif
77197+
77198+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77199+ grsec_enable_log_rwxmaps = 1;
77200+#endif
77201+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
77202+ grsec_enable_group = 1;
77203+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
77204+#endif
77205+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77206+ grsec_enable_ptrace_readexec = 1;
77207+#endif
77208+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77209+ grsec_enable_chdir = 1;
77210+#endif
77211+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
77212+ grsec_enable_harden_ptrace = 1;
77213+#endif
77214+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77215+ grsec_enable_harden_ipc = 1;
77216+#endif
77217+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77218+ grsec_enable_mount = 1;
77219+#endif
77220+#ifdef CONFIG_GRKERNSEC_LINK
77221+ grsec_enable_link = 1;
77222+#endif
77223+#ifdef CONFIG_GRKERNSEC_BRUTE
77224+ grsec_enable_brute = 1;
77225+#endif
77226+#ifdef CONFIG_GRKERNSEC_DMESG
77227+ grsec_enable_dmesg = 1;
77228+#endif
77229+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77230+ grsec_enable_blackhole = 1;
77231+ grsec_lastack_retries = 4;
77232+#endif
77233+#ifdef CONFIG_GRKERNSEC_FIFO
77234+ grsec_enable_fifo = 1;
77235+#endif
77236+#ifdef CONFIG_GRKERNSEC_EXECLOG
77237+ grsec_enable_execlog = 1;
77238+#endif
77239+#ifdef CONFIG_GRKERNSEC_SETXID
77240+ grsec_enable_setxid = 1;
77241+#endif
77242+#ifdef CONFIG_GRKERNSEC_SIGNAL
77243+ grsec_enable_signal = 1;
77244+#endif
77245+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77246+ grsec_enable_forkfail = 1;
77247+#endif
77248+#ifdef CONFIG_GRKERNSEC_TIME
77249+ grsec_enable_time = 1;
77250+#endif
77251+#ifdef CONFIG_GRKERNSEC_RESLOG
77252+ grsec_resource_logging = 1;
77253+#endif
77254+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77255+ grsec_enable_chroot_findtask = 1;
77256+#endif
77257+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77258+ grsec_enable_chroot_unix = 1;
77259+#endif
77260+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77261+ grsec_enable_chroot_mount = 1;
77262+#endif
77263+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77264+ grsec_enable_chroot_fchdir = 1;
77265+#endif
77266+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77267+ grsec_enable_chroot_shmat = 1;
77268+#endif
77269+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77270+ grsec_enable_audit_ptrace = 1;
77271+#endif
77272+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77273+ grsec_enable_chroot_double = 1;
77274+#endif
77275+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77276+ grsec_enable_chroot_pivot = 1;
77277+#endif
77278+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77279+ grsec_enable_chroot_chdir = 1;
77280+#endif
77281+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77282+ grsec_enable_chroot_chmod = 1;
77283+#endif
77284+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77285+ grsec_enable_chroot_mknod = 1;
77286+#endif
77287+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77288+ grsec_enable_chroot_nice = 1;
77289+#endif
77290+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77291+ grsec_enable_chroot_execlog = 1;
77292+#endif
77293+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77294+ grsec_enable_chroot_caps = 1;
77295+#endif
77296+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77297+ grsec_enable_chroot_rename = 1;
77298+#endif
77299+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77300+ grsec_enable_chroot_sysctl = 1;
77301+#endif
77302+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77303+ grsec_enable_symlinkown = 1;
77304+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
77305+#endif
77306+#ifdef CONFIG_GRKERNSEC_TPE
77307+ grsec_enable_tpe = 1;
77308+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
77309+#ifdef CONFIG_GRKERNSEC_TPE_ALL
77310+ grsec_enable_tpe_all = 1;
77311+#endif
77312+#endif
77313+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
77314+ grsec_enable_socket_all = 1;
77315+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
77316+#endif
77317+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
77318+ grsec_enable_socket_client = 1;
77319+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
77320+#endif
77321+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
77322+ grsec_enable_socket_server = 1;
77323+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
77324+#endif
77325+#endif
77326+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
77327+ grsec_deny_new_usb = 1;
77328+#endif
77329+
77330+ return;
77331+}
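Two details of grsecurity_init() are easy to miss. The x86 memset clears 36 bytes at physical address 0x41a: the BIOS data area's keyboard-buffer head pointer (0x41a), tail pointer (0x41c), and the 32-byte ring buffer itself (0x41e-0x43d), so keystrokes typed at early boot prompts (a disk-encryption passphrase, say) cannot be read back out of memory later. And when there is no sysctl interface, or CONFIG_GRKERNSEC_SYSCTL_ON forces it, every compiled-in feature defaults to enabled, with grsec_lock pinning the configuration in the no-sysctl case where it could not be changed at runtime anyway. The address arithmetic, spelled out:

    #include <stdio.h>

    /* BIOS data area fields wiped by the memset in grsecurity_init() */
    int main(void)
    {
        unsigned start = 0x41a;
        unsigned len = 2 /* head */ + 2 /* tail */ + 32 /* ring buffer */;

        printf("head ptr : 0x41a..0x41b\n");
        printf("tail ptr : 0x41c..0x41d\n");
        printf("ring buf : 0x41e..0x43d\n");
        printf("memset   : 0x%x..0x%x (%u bytes)\n",
               start, start + len - 1, len);
        return 0;
    }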
77332diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
77333new file mode 100644
77334index 0000000..1773300
77335--- /dev/null
77336+++ b/grsecurity/grsec_ipc.c
77337@@ -0,0 +1,48 @@
77338+#include <linux/kernel.h>
77339+#include <linux/mm.h>
77340+#include <linux/sched.h>
77341+#include <linux/file.h>
77342+#include <linux/ipc.h>
77343+#include <linux/ipc_namespace.h>
77344+#include <linux/grsecurity.h>
77345+#include <linux/grinternal.h>
77346+
77347+int
77348+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
77349+{
77350+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77351+ int write;
77352+ int orig_granted_mode;
77353+ kuid_t euid;
77354+ kgid_t egid;
77355+
77356+ if (!grsec_enable_harden_ipc)
77357+ return 1;
77358+
77359+ euid = current_euid();
77360+ egid = current_egid();
77361+
77362+ write = requested_mode & 00002;
77363+ orig_granted_mode = ipcp->mode;
77364+
77365+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
77366+ orig_granted_mode >>= 6;
77367+ else {
77368+ /* if likely wrong permissions, lock to user */
77369+ if (orig_granted_mode & 0007)
77370+ orig_granted_mode = 0;
77371+	/* otherwise do an egid-only check */
77372+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
77373+ orig_granted_mode >>= 3;
77374+ /* otherwise, no access */
77375+ else
77376+ orig_granted_mode = 0;
77377+ }
77378+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
77379+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
77380+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
77381+ return 0;
77382+ }
77383+#endif
77384+ return 1;
77385+}
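gr_ipc_permitted() recomputes from scratch what the object's mode bits would grant the caller: an owner match selects the owner bits (mode >> 6), a group match the group bits (mode >> 3), with one hardening twist: an object whose "other" bits are set but whose owner does not match is treated as likely misconfigured and grants nothing. Access is refused only when the kernel's granted mode satisfied a request that this recomputed mode would not, and the caller lacks CAP_IPC_OWNER. A simplified sketch of the recomputation (it collapses creator uid/gid and current uid/gid into one owner and one group, unlike the real check):

    #include <stdio.h>
    #include <sys/types.h>

    static int effective_bits(int mode, uid_t euid, uid_t owner,
                              gid_t egid, gid_t group)
    {
        if (euid == owner)
            return (mode >> 6) & 7;
        if (mode & 0007)        /* world bits set, owner mismatched: */
            return 0;           /* treat as misconfigured, grant nothing */
        if (egid == group)
            return (mode >> 3) & 7;
        return 0;
    }

    int main(void)
    {
        /* a 0660 segment owned by uid 1000 / gid 100, probed by
         * uid 1001 who is in gid 100: the group bits apply */
        printf("granted bits: %o\n",
               effective_bits(0660, 1001, 1000, 100, 100));
        return 0;
    }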
77386diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
77387new file mode 100644
77388index 0000000..5e05e20
77389--- /dev/null
77390+++ b/grsecurity/grsec_link.c
77391@@ -0,0 +1,58 @@
77392+#include <linux/kernel.h>
77393+#include <linux/sched.h>
77394+#include <linux/fs.h>
77395+#include <linux/file.h>
77396+#include <linux/grinternal.h>
77397+
77398+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
77399+{
77400+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77401+ const struct inode *link_inode = link->dentry->d_inode;
77402+
77403+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
77404+ /* ignore root-owned links, e.g. /proc/self */
77405+ gr_is_global_nonroot(link_inode->i_uid) && target &&
77406+ !uid_eq(link_inode->i_uid, target->i_uid)) {
77407+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
77408+ return 1;
77409+ }
77410+#endif
77411+ return 0;
77412+}
77413+
77414+int
77415+gr_handle_follow_link(const struct inode *parent,
77416+ const struct inode *inode,
77417+ const struct dentry *dentry, const struct vfsmount *mnt)
77418+{
77419+#ifdef CONFIG_GRKERNSEC_LINK
77420+ const struct cred *cred = current_cred();
77421+
77422+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
77423+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
77424+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
77425+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
77426+ return -EACCES;
77427+ }
77428+#endif
77429+ return 0;
77430+}
77431+
77432+int
77433+gr_handle_hardlink(const struct dentry *dentry,
77434+ const struct vfsmount *mnt,
77435+ struct inode *inode, const int mode, const struct filename *to)
77436+{
77437+#ifdef CONFIG_GRKERNSEC_LINK
77438+ const struct cred *cred = current_cred();
77439+
77440+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
77441+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
77442+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
77443+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
77444+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
77445+ return -EPERM;
77446+ }
77447+#endif
77448+ return 0;
77449+}
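gr_handle_follow_link() is the Openwall-derived restriction that mainline later adopted as the fs.protected_symlinks sysctl: in a sticky, world-writable directory, a symlink may only be followed if it is owned by the directory's owner or by the process following it, which defuses the classic /tmp symlink race. A boolean restatement (parameter names invented; the grsec_enable_link toggle omitted):

    #include <stdio.h>
    #include <stdbool.h>
    #include <sys/types.h>
    #include <sys/stat.h>

    static bool follow_denied(mode_t dir_mode, uid_t dir_uid,
                              uid_t link_uid, uid_t fsuid)
    {
        return (dir_mode & S_ISVTX) && (dir_mode & S_IWOTH) &&
               dir_uid != link_uid && fsuid != link_uid;
    }

    int main(void)
    {
        /* a link owned by uid 1000 in root-owned 01777 /tmp,
         * followed by uid 1001: denied */
        printf("%s\n", follow_denied(01777, 0, 1000, 1001)
                       ? "denied" : "followed");
        return 0;
    }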
77450diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
77451new file mode 100644
77452index 0000000..dbe0a6b
77453--- /dev/null
77454+++ b/grsecurity/grsec_log.c
77455@@ -0,0 +1,341 @@
77456+#include <linux/kernel.h>
77457+#include <linux/sched.h>
77458+#include <linux/file.h>
77459+#include <linux/tty.h>
77460+#include <linux/fs.h>
77461+#include <linux/mm.h>
77462+#include <linux/grinternal.h>
77463+
77464+#ifdef CONFIG_TREE_PREEMPT_RCU
77465+#define DISABLE_PREEMPT() preempt_disable()
77466+#define ENABLE_PREEMPT() preempt_enable()
77467+#else
77468+#define DISABLE_PREEMPT()
77469+#define ENABLE_PREEMPT()
77470+#endif
77471+
77472+#define BEGIN_LOCKS(x) \
77473+ DISABLE_PREEMPT(); \
77474+ rcu_read_lock(); \
77475+ read_lock(&tasklist_lock); \
77476+ read_lock(&grsec_exec_file_lock); \
77477+ if (x != GR_DO_AUDIT) \
77478+ spin_lock(&grsec_alert_lock); \
77479+ else \
77480+ spin_lock(&grsec_audit_lock)
77481+
77482+#define END_LOCKS(x) \
77483+ if (x != GR_DO_AUDIT) \
77484+ spin_unlock(&grsec_alert_lock); \
77485+ else \
77486+ spin_unlock(&grsec_audit_lock); \
77487+ read_unlock(&grsec_exec_file_lock); \
77488+ read_unlock(&tasklist_lock); \
77489+ rcu_read_unlock(); \
77490+ ENABLE_PREEMPT(); \
77491+ if (x == GR_DONT_AUDIT) \
77492+ gr_handle_alertkill(current)
77493+
77494+enum {
77495+ FLOODING,
77496+ NO_FLOODING
77497+};
77498+
77499+extern char *gr_alert_log_fmt;
77500+extern char *gr_audit_log_fmt;
77501+extern char *gr_alert_log_buf;
77502+extern char *gr_audit_log_buf;
77503+
77504+static int gr_log_start(int audit)
77505+{
77506+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
77507+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
77508+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77509+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
77510+ unsigned long curr_secs = get_seconds();
77511+
77512+ if (audit == GR_DO_AUDIT)
77513+ goto set_fmt;
77514+
77515+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
77516+ grsec_alert_wtime = curr_secs;
77517+ grsec_alert_fyet = 0;
77518+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
77519+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
77520+ grsec_alert_fyet++;
77521+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
77522+ grsec_alert_wtime = curr_secs;
77523+ grsec_alert_fyet++;
77524+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
77525+ return FLOODING;
77526+ }
77527+ else return FLOODING;
77528+
77529+set_fmt:
77530+#endif
77531+ memset(buf, 0, PAGE_SIZE);
77532+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
77533+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
77534+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77535+ } else if (current->signal->curr_ip) {
77536+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
77537+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
77538+ } else if (gr_acl_is_enabled()) {
77539+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
77540+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77541+ } else {
77542+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
77543+ strcpy(buf, fmt);
77544+ }
77545+
77546+ return NO_FLOODING;
77547+}
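gr_log_start() rate-limits alerts with a fixed window: the first alert opens a CONFIG_GRKERNSEC_FLOODTIME-second window, up to CONFIG_GRKERNSEC_FLOODBURST further alerts pass inside it, the one that exceeds the burst prints a single "logging disabled" notice and restarts the window, and everything after that is dropped until the window expires. Audit messages (GR_DO_AUDIT) bypass the limiter entirely. A userspace sketch with illustrative constants:

    #include <stdio.h>
    #include <time.h>

    #define FLOODTIME  10   /* window length in seconds (illustrative) */
    #define FLOODBURST 6    /* extra alerts allowed per window */

    static time_t window_start;
    static unsigned count;

    /* returns 1 if this alert may be logged, 0 if we are flooding */
    static int alert_allowed(time_t now)
    {
        if (!window_start || now > window_start + FLOODTIME) {
            window_start = now;         /* open a fresh window */
            count = 0;
        } else if (count < FLOODBURST) {
            count++;
        } else if (count == FLOODBURST) {
            window_start = now;         /* extend the silent period */
            count++;
            printf("more alerts, logging disabled for %d seconds\n",
                   FLOODTIME);
            return 0;
        } else {
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        time_t now = time(0);

        for (int i = 0; i < 10; i++)
            printf("alert %d: %s\n", i,
                   alert_allowed(now) ? "logged" : "dropped");
        return 0;
    }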
77548+
77549+static void gr_log_middle(int audit, const char *msg, va_list ap)
77550+ __attribute__ ((format (printf, 2, 0)));
77551+
77552+static void gr_log_middle(int audit, const char *msg, va_list ap)
77553+{
77554+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77555+ unsigned int len = strlen(buf);
77556+
77557+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77558+
77559+ return;
77560+}
77561+
77562+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77563+ __attribute__ ((format (printf, 2, 3)));
77564+
77565+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77566+{
77567+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77568+ unsigned int len = strlen(buf);
77569+ va_list ap;
77570+
77571+ va_start(ap, msg);
77572+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77573+ va_end(ap);
77574+
77575+ return;
77576+}
77577+
77578+static void gr_log_end(int audit, int append_default)
77579+{
77580+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77581+ if (append_default) {
77582+ struct task_struct *task = current;
77583+ struct task_struct *parent = task->real_parent;
77584+ const struct cred *cred = __task_cred(task);
77585+ const struct cred *pcred = __task_cred(parent);
77586+ unsigned int len = strlen(buf);
77587+
77588+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77589+ }
77590+
77591+ printk("%s\n", buf);
77592+
77593+ return;
77594+}
77595+
77596+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
77597+{
77598+ int logtype;
77599+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
77600+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
77601+ void *voidptr = NULL;
77602+ int num1 = 0, num2 = 0;
77603+ unsigned long ulong1 = 0, ulong2 = 0;
77604+ struct dentry *dentry = NULL;
77605+ struct vfsmount *mnt = NULL;
77606+ struct file *file = NULL;
77607+ struct task_struct *task = NULL;
77608+ struct vm_area_struct *vma = NULL;
77609+ const struct cred *cred, *pcred;
77610+ va_list ap;
77611+
77612+ BEGIN_LOCKS(audit);
77613+ logtype = gr_log_start(audit);
77614+ if (logtype == FLOODING) {
77615+ END_LOCKS(audit);
77616+ return;
77617+ }
77618+ va_start(ap, argtypes);
77619+ switch (argtypes) {
77620+ case GR_TTYSNIFF:
77621+ task = va_arg(ap, struct task_struct *);
77622+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
77623+ break;
77624+ case GR_SYSCTL_HIDDEN:
77625+ str1 = va_arg(ap, char *);
77626+ gr_log_middle_varargs(audit, msg, result, str1);
77627+ break;
77628+ case GR_RBAC:
77629+ dentry = va_arg(ap, struct dentry *);
77630+ mnt = va_arg(ap, struct vfsmount *);
77631+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
77632+ break;
77633+ case GR_RBAC_STR:
77634+ dentry = va_arg(ap, struct dentry *);
77635+ mnt = va_arg(ap, struct vfsmount *);
77636+ str1 = va_arg(ap, char *);
77637+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
77638+ break;
77639+ case GR_STR_RBAC:
77640+ str1 = va_arg(ap, char *);
77641+ dentry = va_arg(ap, struct dentry *);
77642+ mnt = va_arg(ap, struct vfsmount *);
77643+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
77644+ break;
77645+ case GR_RBAC_MODE2:
77646+ dentry = va_arg(ap, struct dentry *);
77647+ mnt = va_arg(ap, struct vfsmount *);
77648+ str1 = va_arg(ap, char *);
77649+ str2 = va_arg(ap, char *);
77650+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
77651+ break;
77652+ case GR_RBAC_MODE3:
77653+ dentry = va_arg(ap, struct dentry *);
77654+ mnt = va_arg(ap, struct vfsmount *);
77655+ str1 = va_arg(ap, char *);
77656+ str2 = va_arg(ap, char *);
77657+ str3 = va_arg(ap, char *);
77658+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
77659+ break;
77660+ case GR_FILENAME:
77661+ dentry = va_arg(ap, struct dentry *);
77662+ mnt = va_arg(ap, struct vfsmount *);
77663+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
77664+ break;
77665+ case GR_STR_FILENAME:
77666+ str1 = va_arg(ap, char *);
77667+ dentry = va_arg(ap, struct dentry *);
77668+ mnt = va_arg(ap, struct vfsmount *);
77669+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
77670+ break;
77671+ case GR_FILENAME_STR:
77672+ dentry = va_arg(ap, struct dentry *);
77673+ mnt = va_arg(ap, struct vfsmount *);
77674+ str1 = va_arg(ap, char *);
77675+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
77676+ break;
77677+ case GR_FILENAME_TWO_INT:
77678+ dentry = va_arg(ap, struct dentry *);
77679+ mnt = va_arg(ap, struct vfsmount *);
77680+ num1 = va_arg(ap, int);
77681+ num2 = va_arg(ap, int);
77682+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
77683+ break;
77684+ case GR_FILENAME_TWO_INT_STR:
77685+ dentry = va_arg(ap, struct dentry *);
77686+ mnt = va_arg(ap, struct vfsmount *);
77687+ num1 = va_arg(ap, int);
77688+ num2 = va_arg(ap, int);
77689+ str1 = va_arg(ap, char *);
77690+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
77691+ break;
77692+ case GR_TEXTREL:
77693+ file = va_arg(ap, struct file *);
77694+ ulong1 = va_arg(ap, unsigned long);
77695+ ulong2 = va_arg(ap, unsigned long);
77696+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
77697+ break;
77698+ case GR_PTRACE:
77699+ task = va_arg(ap, struct task_struct *);
77700+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
77701+ break;
77702+ case GR_RESOURCE:
77703+ task = va_arg(ap, struct task_struct *);
77704+ cred = __task_cred(task);
77705+ pcred = __task_cred(task->real_parent);
77706+ ulong1 = va_arg(ap, unsigned long);
77707+ str1 = va_arg(ap, char *);
77708+ ulong2 = va_arg(ap, unsigned long);
77709+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77710+ break;
77711+ case GR_CAP:
77712+ task = va_arg(ap, struct task_struct *);
77713+ cred = __task_cred(task);
77714+ pcred = __task_cred(task->real_parent);
77715+ str1 = va_arg(ap, char *);
77716+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77717+ break;
77718+ case GR_SIG:
77719+ str1 = va_arg(ap, char *);
77720+ voidptr = va_arg(ap, void *);
77721+ gr_log_middle_varargs(audit, msg, str1, voidptr);
77722+ break;
77723+ case GR_SIG2:
77724+ task = va_arg(ap, struct task_struct *);
77725+ cred = __task_cred(task);
77726+ pcred = __task_cred(task->real_parent);
77727+ num1 = va_arg(ap, int);
77728+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77729+ break;
77730+ case GR_CRASH1:
77731+ task = va_arg(ap, struct task_struct *);
77732+ cred = __task_cred(task);
77733+ pcred = __task_cred(task->real_parent);
77734+ ulong1 = va_arg(ap, unsigned long);
77735+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
77736+ break;
77737+ case GR_CRASH2:
77738+ task = va_arg(ap, struct task_struct *);
77739+ cred = __task_cred(task);
77740+ pcred = __task_cred(task->real_parent);
77741+ ulong1 = va_arg(ap, unsigned long);
77742+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
77743+ break;
77744+ case GR_RWXMAP:
77745+ file = va_arg(ap, struct file *);
77746+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
77747+ break;
77748+ case GR_RWXMAPVMA:
77749+ vma = va_arg(ap, struct vm_area_struct *);
77750+ if (vma->vm_file)
77751+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
77752+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
77753+ str1 = "<stack>";
77754+ else if (vma->vm_start <= current->mm->brk &&
77755+ vma->vm_end >= current->mm->start_brk)
77756+ str1 = "<heap>";
77757+ else
77758+ str1 = "<anonymous mapping>";
77759+ gr_log_middle_varargs(audit, msg, str1);
77760+ break;
77761+ case GR_PSACCT:
77762+ {
77763+ unsigned int wday, cday;
77764+ __u8 whr, chr;
77765+ __u8 wmin, cmin;
77766+ __u8 wsec, csec;
77767+ char cur_tty[64] = { 0 };
77768+ char parent_tty[64] = { 0 };
77769+
77770+ task = va_arg(ap, struct task_struct *);
77771+ wday = va_arg(ap, unsigned int);
77772+ cday = va_arg(ap, unsigned int);
77773+ whr = va_arg(ap, int);
77774+ chr = va_arg(ap, int);
77775+ wmin = va_arg(ap, int);
77776+ cmin = va_arg(ap, int);
77777+ wsec = va_arg(ap, int);
77778+ csec = va_arg(ap, int);
77779+ ulong1 = va_arg(ap, unsigned long);
77780+ cred = __task_cred(task);
77781+ pcred = __task_cred(task->real_parent);
77782+
77783+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77784+ }
77785+ break;
77786+ default:
77787+ gr_log_middle(audit, msg, ap);
77788+ }
77789+ va_end(ap);
77790+ // these don't need DEFAULTSECARGS printed on the end
77791+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
77792+ gr_log_end(audit, 0);
77793+ else
77794+ gr_log_end(audit, 1);
77795+ END_LOCKS(audit);
77796+}
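The logging core above funnels every event through one varargs entry point: gr_log_varargs() takes an argtypes discriminator, decodes the matching arguments with va_arg in a single switch, and appends each fragment to a per-severity buffer with vsnprintf. For illustration, here is a minimal userspace sketch of the same dispatch pattern; all names (log_varargs, LOG_STR_INT, logbuf) are hypothetical stand-ins, not part of the patch:

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

enum log_argtypes { LOG_PLAIN, LOG_STR, LOG_STR_INT };

static char logbuf[1024];

/* append a formatted fragment to the shared buffer, mirroring
 * gr_log_middle_varargs() above */
static void log_middle(const char *fmt, ...)
{
	size_t len = strlen(logbuf);
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(logbuf + len, sizeof(logbuf) - len, fmt, ap);
	va_end(ap);
}

/* one entry point; argtypes selects how the va_list is decoded */
static void log_varargs(const char *msg, enum log_argtypes argtypes, ...)
{
	va_list ap;

	logbuf[0] = '\0';
	va_start(ap, argtypes);
	switch (argtypes) {
	case LOG_STR:
		log_middle(msg, va_arg(ap, char *));
		break;
	case LOG_STR_INT: {
		char *s = va_arg(ap, char *);
		int n = va_arg(ap, int);

		log_middle(msg, s, n);
		break;
	}
	default:	/* LOG_PLAIN: pass the whole va_list through */
		vsnprintf(logbuf, sizeof(logbuf), msg, ap);
	}
	va_end(ap);
	printf("%s\n", logbuf);
}

int main(void)
{
	log_varargs("denied open of %.64s by pid %d", LOG_STR_INT,
		    "/etc/shadow", 1234);
	return 0;
}

The design keeps every call site type-checked against a known argtypes case while still allowing one shared formatting and flood-control path, at the cost of the switch having to enumerate each argument layout.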
77797diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
77798new file mode 100644
77799index 0000000..0e39d8c
77800--- /dev/null
77801+++ b/grsecurity/grsec_mem.c
77802@@ -0,0 +1,48 @@
77803+#include <linux/kernel.h>
77804+#include <linux/sched.h>
77805+#include <linux/mm.h>
77806+#include <linux/mman.h>
77807+#include <linux/module.h>
77808+#include <linux/grinternal.h>
77809+
77810+void gr_handle_msr_write(void)
77811+{
77812+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
77813+ return;
77814+}
77815+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
77816+
77817+void
77818+gr_handle_ioperm(void)
77819+{
77820+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
77821+ return;
77822+}
77823+
77824+void
77825+gr_handle_iopl(void)
77826+{
77827+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
77828+ return;
77829+}
77830+
77831+void
77832+gr_handle_mem_readwrite(u64 from, u64 to)
77833+{
77834+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
77835+ return;
77836+}
77837+
77838+void
77839+gr_handle_vm86(void)
77840+{
77841+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
77842+ return;
77843+}
77844+
77845+void
77846+gr_log_badprocpid(const char *entry)
77847+{
77848+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
77849+ return;
77850+}
77851diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
77852new file mode 100644
77853index 0000000..6f9eb73
77854--- /dev/null
77855+++ b/grsecurity/grsec_mount.c
77856@@ -0,0 +1,65 @@
77857+#include <linux/kernel.h>
77858+#include <linux/sched.h>
77859+#include <linux/mount.h>
77860+#include <linux/major.h>
77861+#include <linux/grsecurity.h>
77862+#include <linux/grinternal.h>
77863+
77864+void
77865+gr_log_remount(const char *devname, const int retval)
77866+{
77867+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77868+ if (grsec_enable_mount && (retval >= 0))
77869+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
77870+#endif
77871+ return;
77872+}
77873+
77874+void
77875+gr_log_unmount(const char *devname, const int retval)
77876+{
77877+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77878+ if (grsec_enable_mount && (retval >= 0))
77879+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
77880+#endif
77881+ return;
77882+}
77883+
77884+void
77885+gr_log_mount(const char *from, struct path *to, const int retval)
77886+{
77887+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77888+ if (grsec_enable_mount && (retval >= 0))
77889+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
77890+#endif
77891+ return;
77892+}
77893+
77894+int
77895+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
77896+{
77897+#ifdef CONFIG_GRKERNSEC_ROFS
77898+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
77899+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
77900+ return -EPERM;
77901+ } else
77902+ return 0;
77903+#endif
77904+ return 0;
77905+}
77906+
77907+int
77908+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
77909+{
77910+#ifdef CONFIG_GRKERNSEC_ROFS
77911+ struct inode *inode = dentry->d_inode;
77912+
77913+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
77914+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
77915+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
77916+ return -EPERM;
77917+ } else
77918+ return 0;
77919+#endif
77920+ return 0;
77921+}
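gr_handle_rofs_blockwrite() above refuses MAY_WRITE opens of block devices, and of character devices on RAW_MAJOR, while romount_protect is active. A minimal userspace sketch of the same mode test follows; it inspects an on-disk node with stat(2) instead of an inode, and assumes RAW_MAJOR is 162 as defined in linux/major.h:

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

#define RAW_MAJOR 162	/* assumed value from linux/major.h */

/* mirror of the writability test in gr_handle_rofs_blockwrite() */
static int rofs_would_deny(const struct stat *st, int want_write)
{
	if (!want_write)
		return 0;
	if (S_ISBLK(st->st_mode))
		return 1;
	if (S_ISCHR(st->st_mode) && major(st->st_rdev) == RAW_MAJOR)
		return 1;
	return 0;
}

int main(void)
{
	struct stat st;

	if (stat("/dev/sda", &st) == 0)
		printf("write open of /dev/sda %s be denied\n",
		       rofs_would_deny(&st, 1) ? "would" : "would not");
	return 0;
}

Blocking writes to the underlying block device closes the obvious bypass of a read-only mount: writing to /dev/sda directly instead of through the filesystem.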
77922diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
77923new file mode 100644
77924index 0000000..6ee9d50
77925--- /dev/null
77926+++ b/grsecurity/grsec_pax.c
77927@@ -0,0 +1,45 @@
77928+#include <linux/kernel.h>
77929+#include <linux/sched.h>
77930+#include <linux/mm.h>
77931+#include <linux/file.h>
77932+#include <linux/grinternal.h>
77933+#include <linux/grsecurity.h>
77934+
77935+void
77936+gr_log_textrel(struct vm_area_struct * vma)
77937+{
77938+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77939+ if (grsec_enable_log_rwxmaps)
77940+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
77941+#endif
77942+ return;
77943+}
77944+
77945+void gr_log_ptgnustack(struct file *file)
77946+{
77947+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77948+ if (grsec_enable_log_rwxmaps)
77949+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
77950+#endif
77951+ return;
77952+}
77953+
77954+void
77955+gr_log_rwxmmap(struct file *file)
77956+{
77957+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77958+ if (grsec_enable_log_rwxmaps)
77959+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
77960+#endif
77961+ return;
77962+}
77963+
77964+void
77965+gr_log_rwxmprotect(struct vm_area_struct *vma)
77966+{
77967+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77968+ if (grsec_enable_log_rwxmaps)
77969+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
77970+#endif
77971+ return;
77972+}
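The RWX logging wrappers above ultimately reach the GR_RWXMAPVMA case in gr_log_varargs(), which labels an offending mapping by inspecting the vma: file-backed mappings get their path, growable segments are reported as the stack, anything overlapping [start_brk, brk) as the heap, and the rest as anonymous. A standalone sketch of that classification, with the vma fields flattened into plain parameters (the "<file-backed>" label stands in for the real pathname lookup):

#include <stdio.h>

/* classify a mapping the way the GR_RWXMAPVMA case does */
static const char *classify_vma(int has_file, int grows,
				unsigned long vm_start, unsigned long vm_end,
				unsigned long start_brk, unsigned long brk)
{
	if (has_file)
		return "<file-backed>";
	if (grows)
		return "<stack>";
	if (vm_start <= brk && vm_end >= start_brk)
		return "<heap>";
	return "<anonymous mapping>";
}

int main(void)
{
	/* a mapping inside a hypothetical heap spanning [0x1000, 0x9000) */
	printf("%s\n", classify_vma(0, 0, 0x2000, 0x3000, 0x1000, 0x9000));
	return 0;
}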
77973diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
77974new file mode 100644
77975index 0000000..2005a3a
77976--- /dev/null
77977+++ b/grsecurity/grsec_proc.c
77978@@ -0,0 +1,20 @@
77979+#include <linux/kernel.h>
77980+#include <linux/sched.h>
77981+#include <linux/grsecurity.h>
77982+#include <linux/grinternal.h>
77983+
77984+int gr_proc_is_restricted(void)
77985+{
77986+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77987+ const struct cred *cred = current_cred();
77988+#endif
77989+
77990+#ifdef CONFIG_GRKERNSEC_PROC_USER
77991+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
77992+ return -EACCES;
77993+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77994+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
77995+ return -EACCES;
77996+#endif
77997+ return 0;
77998+}
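gr_proc_is_restricted() gates /proc visibility on the caller's fsuid, and in the USERGROUP configuration additionally admits members of grsec_proc_gid. A rough userspace analogue of the USERGROUP branch, using the effective uid and the supplementary group list (proc_gid here is a hypothetical stand-in for grsec_proc_gid):

#include <stdio.h>
#include <unistd.h>

/* crude analogue of in_group_p(): effective gid or supplementary list */
static int in_group(gid_t gid)
{
	gid_t groups[64];
	int i, n = getgroups(64, groups);

	if (getegid() == gid)
		return 1;
	for (i = 0; i < n; i++)
		if (groups[i] == gid)
			return 1;
	return 0;
}

/* analogue of the CONFIG_GRKERNSEC_PROC_USERGROUP branch above */
static int proc_is_restricted(gid_t proc_gid)
{
	if (geteuid() == 0)
		return 0;
	return in_group(proc_gid) ? 0 : -1;	/* -EACCES in the kernel */
}

int main(void)
{
	printf("restricted: %d\n", proc_is_restricted(10) != 0);
	return 0;
}

Note the kernel checks fsuid rather than euid; the two only differ for processes that have called setfsuid(), so euid is a reasonable approximation outside the kernel.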
77999diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
78000new file mode 100644
78001index 0000000..f7f29aa
78002--- /dev/null
78003+++ b/grsecurity/grsec_ptrace.c
78004@@ -0,0 +1,30 @@
78005+#include <linux/kernel.h>
78006+#include <linux/sched.h>
78007+#include <linux/grinternal.h>
78008+#include <linux/security.h>
78009+
78010+void
78011+gr_audit_ptrace(struct task_struct *task)
78012+{
78013+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78014+ if (grsec_enable_audit_ptrace)
78015+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
78016+#endif
78017+ return;
78018+}
78019+
78020+int
78021+gr_ptrace_readexec(struct file *file, int unsafe_flags)
78022+{
78023+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78024+ const struct dentry *dentry = file->f_path.dentry;
78025+ const struct vfsmount *mnt = file->f_path.mnt;
78026+
78027+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
78028+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
78029+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
78030+ return -EACCES;
78031+ }
78032+#endif
78033+ return 0;
78034+}
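The decision in gr_ptrace_readexec() is easy to misread because inode_permission() returns 0 on success while gr_acl_handle_open() returns nonzero when access is granted. Reduced to a truth table: with the feature enabled and the exec happening under LSM_UNSAFE_PTRACE, the exec is denied whenever either the DAC read check or the RBAC open-for-read check fails. A small sketch with boolean inputs (names hypothetical):

#include <stdio.h>

/* truth table for gr_ptrace_readexec() */
static int would_deny_readexec(int enabled, int unsafe_ptrace,
			       int dac_read_ok, int rbac_read_ok)
{
	return enabled && unsafe_ptrace && (!dac_read_ok || !rbac_read_ok);
}

int main(void)
{
	printf("traced, unreadable binary -> deny: %d\n",
	       would_deny_readexec(1, 1, 0, 1));
	printf("traced, readable binary  -> deny: %d\n",
	       would_deny_readexec(1, 1, 1, 1));
	return 0;
}

The point of the check is to stop a tracer from using ptrace on an exec'd setuid or execute-only binary to read code it could not open directly.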
78035diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
78036new file mode 100644
78037index 0000000..3860c7e
78038--- /dev/null
78039+++ b/grsecurity/grsec_sig.c
78040@@ -0,0 +1,236 @@
78041+#include <linux/kernel.h>
78042+#include <linux/sched.h>
78043+#include <linux/fs.h>
78044+#include <linux/delay.h>
78045+#include <linux/grsecurity.h>
78046+#include <linux/grinternal.h>
78047+#include <linux/hardirq.h>
78048+
78049+char *signames[] = {
78050+ [SIGSEGV] = "Segmentation fault",
78051+ [SIGILL] = "Illegal instruction",
78052+ [SIGABRT] = "Abort",
78053+ [SIGBUS] = "Invalid alignment/Bus error"
78054+};
78055+
78056+void
78057+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
78058+{
78059+#ifdef CONFIG_GRKERNSEC_SIGNAL
78060+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
78061+ (sig == SIGABRT) || (sig == SIGBUS))) {
78062+ if (task_pid_nr(t) == task_pid_nr(current)) {
78063+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
78064+ } else {
78065+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
78066+ }
78067+ }
78068+#endif
78069+ return;
78070+}
78071+
78072+int
78073+gr_handle_signal(const struct task_struct *p, const int sig)
78074+{
78075+#ifdef CONFIG_GRKERNSEC
78076+ /* ignore the 0 signal for protected task checks */
78077+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
78078+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
78079+ return -EPERM;
78080+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
78081+ return -EPERM;
78082+ }
78083+#endif
78084+ return 0;
78085+}
78086+
78087+#ifdef CONFIG_GRKERNSEC
78088+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
78089+
78090+int gr_fake_force_sig(int sig, struct task_struct *t)
78091+{
78092+ unsigned long int flags;
78093+ int ret, blocked, ignored;
78094+ struct k_sigaction *action;
78095+
78096+ spin_lock_irqsave(&t->sighand->siglock, flags);
78097+ action = &t->sighand->action[sig-1];
78098+ ignored = action->sa.sa_handler == SIG_IGN;
78099+ blocked = sigismember(&t->blocked, sig);
78100+ if (blocked || ignored) {
78101+ action->sa.sa_handler = SIG_DFL;
78102+ if (blocked) {
78103+ sigdelset(&t->blocked, sig);
78104+ recalc_sigpending_and_wake(t);
78105+ }
78106+ }
78107+ if (action->sa.sa_handler == SIG_DFL)
78108+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
78109+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
78110+
78111+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
78112+
78113+ return ret;
78114+}
78115+#endif
78116+
78117+#define GR_USER_BAN_TIME (15 * 60)
78118+#define GR_DAEMON_BRUTE_TIME (30 * 60)
78119+
78120+void gr_handle_brute_attach(int dumpable)
78121+{
78122+#ifdef CONFIG_GRKERNSEC_BRUTE
78123+ struct task_struct *p = current;
78124+ kuid_t uid = GLOBAL_ROOT_UID;
78125+ int daemon = 0;
78126+
78127+ if (!grsec_enable_brute)
78128+ return;
78129+
78130+ rcu_read_lock();
78131+ read_lock(&tasklist_lock);
78132+ read_lock(&grsec_exec_file_lock);
78133+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
78134+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
78135+ p->real_parent->brute = 1;
78136+ daemon = 1;
78137+ } else {
78138+ const struct cred *cred = __task_cred(p), *cred2;
78139+ struct task_struct *tsk, *tsk2;
78140+
78141+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
78142+ struct user_struct *user;
78143+
78144+ uid = cred->uid;
78145+
77546+ /* this ref is put via free_uid() on a later exec once the ban expires */
78147+ user = find_user(uid);
78148+ if (user == NULL)
78149+ goto unlock;
78150+ user->suid_banned = 1;
78151+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
78152+ if (user->suid_ban_expires == ~0UL)
78153+ user->suid_ban_expires--;
78154+
78155+ /* only kill other threads of the same binary, from the same user */
78156+ do_each_thread(tsk2, tsk) {
78157+ cred2 = __task_cred(tsk);
78158+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
78159+ gr_fake_force_sig(SIGKILL, tsk);
78160+ } while_each_thread(tsk2, tsk);
78161+ }
78162+ }
78163+unlock:
78164+ read_unlock(&grsec_exec_file_lock);
78165+ read_unlock(&tasklist_lock);
78166+ rcu_read_unlock();
78167+
78168+ if (gr_is_global_nonroot(uid))
78169+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
78170+ else if (daemon)
78171+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
78172+
78173+#endif
78174+ return;
78175+}
78176+
78177+void gr_handle_brute_check(void)
78178+{
78179+#ifdef CONFIG_GRKERNSEC_BRUTE
78180+ struct task_struct *p = current;
78181+
78182+ if (unlikely(p->brute)) {
78183+ if (!grsec_enable_brute)
78184+ p->brute = 0;
78185+ else if (time_before(get_seconds(), p->brute_expires))
78186+ msleep(30 * 1000);
78187+ }
78188+#endif
78189+ return;
78190+}
78191+
78192+void gr_handle_kernel_exploit(void)
78193+{
78194+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78195+ const struct cred *cred;
78196+ struct task_struct *tsk, *tsk2;
78197+ struct user_struct *user;
78198+ kuid_t uid;
78199+
78200+ if (in_irq() || in_serving_softirq() || in_nmi())
78201+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
78202+
78203+ uid = current_uid();
78204+
78205+ if (gr_is_global_root(uid))
78206+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
78207+ else {
78208+ /* kill all the processes of this user, hold a reference
78209+ to their user_struct, and prevent them from creating
78209+ to their user_struct, and prevent them from creating
78210+ another process until system reset
78211+ */
78212+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
78213+ GR_GLOBAL_UID(uid));
78214+ /* we intentionally leak this ref */
78215+ user = get_uid(current->cred->user);
78216+ if (user)
78217+ user->kernel_banned = 1;
78218+
78219+ /* kill all processes of this user */
78220+ read_lock(&tasklist_lock);
78221+ do_each_thread(tsk2, tsk) {
78222+ cred = __task_cred(tsk);
78223+ if (uid_eq(cred->uid, uid))
78224+ gr_fake_force_sig(SIGKILL, tsk);
78225+ } while_each_thread(tsk2, tsk);
78226+ read_unlock(&tasklist_lock);
78227+ }
78228+#endif
78229+}
78230+
78231+#ifdef CONFIG_GRKERNSEC_BRUTE
78232+static bool suid_ban_expired(struct user_struct *user)
78233+{
78234+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
78235+ user->suid_banned = 0;
78236+ user->suid_ban_expires = 0;
78237+ free_uid(user);
78238+ return true;
78239+ }
78240+
78241+ return false;
78242+}
78243+#endif
78244+
78245+int gr_process_kernel_exec_ban(void)
78246+{
78247+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78248+ if (unlikely(current->cred->user->kernel_banned))
78249+ return -EPERM;
78250+#endif
78251+ return 0;
78252+}
78253+
78254+int gr_process_kernel_setuid_ban(struct user_struct *user)
78255+{
78256+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78257+ if (unlikely(user->kernel_banned))
78258+ gr_fake_force_sig(SIGKILL, current);
78259+#endif
78260+ return 0;
78261+}
78262+
78263+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
78264+{
78265+#ifdef CONFIG_GRKERNSEC_BRUTE
78266+ struct user_struct *user = current->cred->user;
78267+ if (unlikely(user->suid_banned)) {
78268+ if (suid_ban_expired(user))
78269+ return 0;
78270+ /* disallow execution of suid binaries only */
78271+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
78272+ return -EPERM;
78273+ }
78274+#endif
78275+ return 0;
78276+}
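The brute-force deterrence above works on coarse one-second timestamps: a cracked suid binary's user is banned for GR_USER_BAN_TIME (15 minutes), a respawning daemon is throttled for GR_DAEMON_BRUTE_TIME (30 minutes), and ~0UL is reserved as a "never expires" sentinel, so an expiry that wraps to it is backed off by one. A minimal sketch of the arm/expire cycle under those assumptions; it uses a plain >= comparison where the kernel uses the wrap-safe time_after_eq():

#include <stdio.h>
#include <time.h>

#define USER_BAN_TIME (15 * 60)

struct user_ban {
	int banned;
	unsigned long expires;	/* ~0UL reserved as "never expires" */
};

/* mirror of the ban arming in gr_handle_brute_attach() */
static void arm_ban(struct user_ban *b, unsigned long now)
{
	b->banned = 1;
	b->expires = now + USER_BAN_TIME;
	if (b->expires == ~0UL)	/* don't collide with the sentinel */
		b->expires--;
}

/* mirror of suid_ban_expired(): clears the ban once the window passes */
static int ban_expired(struct user_ban *b, unsigned long now)
{
	if (b->expires != ~0UL && now >= b->expires) {
		b->banned = 0;
		b->expires = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct user_ban b = { 0, 0 };
	unsigned long now = (unsigned long)time(NULL);

	arm_ban(&b, now);
	printf("expired immediately? %d\n", ban_expired(&b, now));
	printf("expired after the window? %d\n",
	       ban_expired(&b, now + USER_BAN_TIME));
	return 0;
}

Checking and clearing the ban lazily at the next exec (rather than with a timer) keeps the mechanism stateless apart from the two fields on the user_struct.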
78277diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
78278new file mode 100644
78279index 0000000..e3650b6
78280--- /dev/null
78281+++ b/grsecurity/grsec_sock.c
78282@@ -0,0 +1,244 @@
78283+#include <linux/kernel.h>
78284+#include <linux/module.h>
78285+#include <linux/sched.h>
78286+#include <linux/file.h>
78287+#include <linux/net.h>
78288+#include <linux/in.h>
78289+#include <linux/ip.h>
78290+#include <net/sock.h>
78291+#include <net/inet_sock.h>
78292+#include <linux/grsecurity.h>
78293+#include <linux/grinternal.h>
78294+#include <linux/gracl.h>
78295+
78296+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
78297+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
78298+
78299+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
78300+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
78301+
78302+#ifdef CONFIG_UNIX_MODULE
78303+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
78304+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
78305+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
78306+EXPORT_SYMBOL_GPL(gr_handle_create);
78307+#endif
78308+
78309+#ifdef CONFIG_GRKERNSEC
78310+#define gr_conn_table_size 32749
78311+struct conn_table_entry {
78312+ struct conn_table_entry *next;
78313+ struct signal_struct *sig;
78314+};
78315+
78316+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
78317+DEFINE_SPINLOCK(gr_conn_table_lock);
78318+
78319+extern const char * gr_socktype_to_name(unsigned char type);
78320+extern const char * gr_proto_to_name(unsigned char proto);
78321+extern const char * gr_sockfamily_to_name(unsigned char family);
78322+
78323+static __inline__ int
78324+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
78325+{
78326+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
78327+}
78328+
78329+static __inline__ int
78330+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
78331+ __u16 sport, __u16 dport)
78332+{
78333+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
78334+ sig->gr_sport == sport && sig->gr_dport == dport))
78335+ return 1;
78336+ else
78337+ return 0;
78338+}
78339+
78340+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
78341+{
78342+ struct conn_table_entry **match;
78343+ unsigned int index;
78344+
78345+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78346+ sig->gr_sport, sig->gr_dport,
78347+ gr_conn_table_size);
78348+
78349+ newent->sig = sig;
78350+
78351+ match = &gr_conn_table[index];
78352+ newent->next = *match;
78353+ *match = newent;
78354+
78355+ return;
78356+}
78357+
78358+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
78359+{
78360+ struct conn_table_entry *match, *last = NULL;
78361+ unsigned int index;
78362+
78363+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78364+ sig->gr_sport, sig->gr_dport,
78365+ gr_conn_table_size);
78366+
78367+ match = gr_conn_table[index];
78368+ while (match && !conn_match(match->sig,
78369+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
78370+ sig->gr_dport)) {
78371+ last = match;
78372+ match = match->next;
78373+ }
78374+
78375+ if (match) {
78376+ if (last)
78377+ last->next = match->next;
78378+ else
78379+ gr_conn_table[index] = NULL;
78380+ kfree(match);
78381+ }
78382+
78383+ return;
78384+}
78385+
78386+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
78387+ __u16 sport, __u16 dport)
78388+{
78389+ struct conn_table_entry *match;
78390+ unsigned int index;
78391+
78392+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
78393+
78394+ match = gr_conn_table[index];
78395+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
78396+ match = match->next;
78397+
78398+ if (match)
78399+ return match->sig;
78400+ else
78401+ return NULL;
78402+}
78403+
78404+#endif
78405+
78406+void gr_update_task_in_ip_table(const struct inet_sock *inet)
78407+{
78408+#ifdef CONFIG_GRKERNSEC
78409+ struct signal_struct *sig = current->signal;
78410+ struct conn_table_entry *newent;
78411+
78412+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
78413+ if (newent == NULL)
78414+ return;
78415+ /* no bh lock needed since we are called with bh disabled */
78416+ spin_lock(&gr_conn_table_lock);
78417+ gr_del_task_from_ip_table_nolock(sig);
78418+ sig->gr_saddr = inet->inet_rcv_saddr;
78419+ sig->gr_daddr = inet->inet_daddr;
78420+ sig->gr_sport = inet->inet_sport;
78421+ sig->gr_dport = inet->inet_dport;
78422+ gr_add_to_task_ip_table_nolock(sig, newent);
78423+ spin_unlock(&gr_conn_table_lock);
78424+#endif
78425+ return;
78426+}
78427+
78428+void gr_del_task_from_ip_table(struct task_struct *task)
78429+{
78430+#ifdef CONFIG_GRKERNSEC
78431+ spin_lock_bh(&gr_conn_table_lock);
78432+ gr_del_task_from_ip_table_nolock(task->signal);
78433+ spin_unlock_bh(&gr_conn_table_lock);
78434+#endif
78435+ return;
78436+}
78437+
78438+void
78439+gr_attach_curr_ip(const struct sock *sk)
78440+{
78441+#ifdef CONFIG_GRKERNSEC
78442+ struct signal_struct *p, *set;
78443+ const struct inet_sock *inet = inet_sk(sk);
78444+
78445+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
78446+ return;
78447+
78448+ set = current->signal;
78449+
78450+ spin_lock_bh(&gr_conn_table_lock);
78451+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
78452+ inet->inet_dport, inet->inet_sport);
78453+ if (unlikely(p != NULL)) {
78454+ set->curr_ip = p->curr_ip;
78455+ set->used_accept = 1;
78456+ gr_del_task_from_ip_table_nolock(p);
78457+ spin_unlock_bh(&gr_conn_table_lock);
78458+ return;
78459+ }
78460+ spin_unlock_bh(&gr_conn_table_lock);
78461+
78462+ set->curr_ip = inet->inet_daddr;
78463+ set->used_accept = 1;
78464+#endif
78465+ return;
78466+}
78467+
78468+int
78469+gr_handle_sock_all(const int family, const int type, const int protocol)
78470+{
78471+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78472+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
78473+ (family != AF_UNIX)) {
78474+ if (family == AF_INET)
78475+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
78476+ else
78477+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
78478+ return -EACCES;
78479+ }
78480+#endif
78481+ return 0;
78482+}
78483+
78484+int
78485+gr_handle_sock_server(const struct sockaddr *sck)
78486+{
78487+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78488+ if (grsec_enable_socket_server &&
78489+ in_group_p(grsec_socket_server_gid) &&
78490+ sck && (sck->sa_family != AF_UNIX) &&
78491+ (sck->sa_family != AF_LOCAL)) {
78492+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78493+ return -EACCES;
78494+ }
78495+#endif
78496+ return 0;
78497+}
78498+
78499+int
78500+gr_handle_sock_server_other(const struct sock *sck)
78501+{
78502+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78503+ if (grsec_enable_socket_server &&
78504+ in_group_p(grsec_socket_server_gid) &&
78505+ sck && (sck->sk_family != AF_UNIX) &&
78506+ (sck->sk_family != AF_LOCAL)) {
78507+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78508+ return -EACCES;
78509+ }
78510+#endif
78511+ return 0;
78512+}
78513+
78514+int
78515+gr_handle_sock_client(const struct sockaddr *sck)
78516+{
78517+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78518+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
78519+ sck && (sck->sa_family != AF_UNIX) &&
78520+ (sck->sa_family != AF_LOCAL)) {
78521+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
78522+ return -EACCES;
78523+ }
78524+#endif
78525+ return 0;
78526+}
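The connection table above pairs a TCP 4-tuple with the owning signal_struct so that a task accepting a connection can inherit curr_ip from the task that created it. Buckets are a prime-sized array (32749) of singly linked chains, indexed by folding the addresses and ports together. A self-contained userspace sketch of the same hash, insert, and lookup (delete omitted for brevity; all names hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TABLE_SIZE 32749	/* prime, as in gr_conn_table_size */

struct conn {
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	struct conn *next;
};

static struct conn *table[TABLE_SIZE];

/* same mixing as conn_hash(): fold the 4-tuple into a bucket index */
static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void conn_add(struct conn *c)
{
	unsigned int i = conn_hash(c->saddr, c->daddr, c->sport, c->dport);

	c->next = table[i];	/* push onto the chain head */
	table[i] = c;
}

static struct conn *conn_lookup(uint32_t saddr, uint32_t daddr,
				uint16_t sport, uint16_t dport)
{
	unsigned int i = conn_hash(saddr, daddr, sport, dport);
	struct conn *c;

	for (c = table[i]; c; c = c->next)
		if (c->saddr == saddr && c->daddr == daddr &&
		    c->sport == sport && c->dport == dport)
			return c;
	return NULL;
}

int main(void)
{
	struct conn *c = calloc(1, sizeof(*c));

	c->saddr = 0x0100007f;	/* 127.0.0.1, network byte order */
	c->daddr = 0x0100007f;
	c->sport = 12345;
	c->dport = 80;
	conn_add(c);
	printf("found: %p\n",
	       (void *)conn_lookup(0x0100007f, 0x0100007f, 12345, 80));
	free(c);
	return 0;
}

In the kernel version all table operations run under gr_conn_table_lock, taken with spin_lock_bh() except on the socket-creation path, which is already called with bottom halves disabled.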
78527diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
78528new file mode 100644
78529index 0000000..cce889e
78530--- /dev/null
78531+++ b/grsecurity/grsec_sysctl.c
78532@@ -0,0 +1,488 @@
78533+#include <linux/kernel.h>
78534+#include <linux/sched.h>
78535+#include <linux/sysctl.h>
78536+#include <linux/grsecurity.h>
78537+#include <linux/grinternal.h>
78538+
78539+int
78540+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
78541+{
78542+#ifdef CONFIG_GRKERNSEC_SYSCTL
78543+ if (dirname == NULL || name == NULL)
78544+ return 0;
78545+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
78546+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
78547+ return -EACCES;
78548+ }
78549+#endif
78550+ return 0;
78551+}
78552+
78553+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
78554+static int __maybe_unused __read_only one = 1;
78555+#endif
78556+
78557+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
78558+ defined(CONFIG_GRKERNSEC_DENYUSB)
78559+struct ctl_table grsecurity_table[] = {
78560+#ifdef CONFIG_GRKERNSEC_SYSCTL
78561+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
78562+#ifdef CONFIG_GRKERNSEC_IO
78563+ {
78564+ .procname = "disable_priv_io",
78565+ .data = &grsec_disable_privio,
78566+ .maxlen = sizeof(int),
78567+ .mode = 0600,
78568+ .proc_handler = &proc_dointvec,
78569+ },
78570+#endif
78571+#endif
78572+#ifdef CONFIG_GRKERNSEC_LINK
78573+ {
78574+ .procname = "linking_restrictions",
78575+ .data = &grsec_enable_link,
78576+ .maxlen = sizeof(int),
78577+ .mode = 0600,
78578+ .proc_handler = &proc_dointvec,
78579+ },
78580+#endif
78581+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78582+ {
78583+ .procname = "enforce_symlinksifowner",
78584+ .data = &grsec_enable_symlinkown,
78585+ .maxlen = sizeof(int),
78586+ .mode = 0600,
78587+ .proc_handler = &proc_dointvec,
78588+ },
78589+ {
78590+ .procname = "symlinkown_gid",
78591+ .data = &grsec_symlinkown_gid,
78592+ .maxlen = sizeof(int),
78593+ .mode = 0600,
78594+ .proc_handler = &proc_dointvec,
78595+ },
78596+#endif
78597+#ifdef CONFIG_GRKERNSEC_BRUTE
78598+ {
78599+ .procname = "deter_bruteforce",
78600+ .data = &grsec_enable_brute,
78601+ .maxlen = sizeof(int),
78602+ .mode = 0600,
78603+ .proc_handler = &proc_dointvec,
78604+ },
78605+#endif
78606+#ifdef CONFIG_GRKERNSEC_FIFO
78607+ {
78608+ .procname = "fifo_restrictions",
78609+ .data = &grsec_enable_fifo,
78610+ .maxlen = sizeof(int),
78611+ .mode = 0600,
78612+ .proc_handler = &proc_dointvec,
78613+ },
78614+#endif
78615+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78616+ {
78617+ .procname = "ptrace_readexec",
78618+ .data = &grsec_enable_ptrace_readexec,
78619+ .maxlen = sizeof(int),
78620+ .mode = 0600,
78621+ .proc_handler = &proc_dointvec,
78622+ },
78623+#endif
78624+#ifdef CONFIG_GRKERNSEC_SETXID
78625+ {
78626+ .procname = "consistent_setxid",
78627+ .data = &grsec_enable_setxid,
78628+ .maxlen = sizeof(int),
78629+ .mode = 0600,
78630+ .proc_handler = &proc_dointvec,
78631+ },
78632+#endif
78633+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78634+ {
78635+ .procname = "ip_blackhole",
78636+ .data = &grsec_enable_blackhole,
78637+ .maxlen = sizeof(int),
78638+ .mode = 0600,
78639+ .proc_handler = &proc_dointvec,
78640+ },
78641+ {
78642+ .procname = "lastack_retries",
78643+ .data = &grsec_lastack_retries,
78644+ .maxlen = sizeof(int),
78645+ .mode = 0600,
78646+ .proc_handler = &proc_dointvec,
78647+ },
78648+#endif
78649+#ifdef CONFIG_GRKERNSEC_EXECLOG
78650+ {
78651+ .procname = "exec_logging",
78652+ .data = &grsec_enable_execlog,
78653+ .maxlen = sizeof(int),
78654+ .mode = 0600,
78655+ .proc_handler = &proc_dointvec,
78656+ },
78657+#endif
78658+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78659+ {
78660+ .procname = "rwxmap_logging",
78661+ .data = &grsec_enable_log_rwxmaps,
78662+ .maxlen = sizeof(int),
78663+ .mode = 0600,
78664+ .proc_handler = &proc_dointvec,
78665+ },
78666+#endif
78667+#ifdef CONFIG_GRKERNSEC_SIGNAL
78668+ {
78669+ .procname = "signal_logging",
78670+ .data = &grsec_enable_signal,
78671+ .maxlen = sizeof(int),
78672+ .mode = 0600,
78673+ .proc_handler = &proc_dointvec,
78674+ },
78675+#endif
78676+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78677+ {
78678+ .procname = "forkfail_logging",
78679+ .data = &grsec_enable_forkfail,
78680+ .maxlen = sizeof(int),
78681+ .mode = 0600,
78682+ .proc_handler = &proc_dointvec,
78683+ },
78684+#endif
78685+#ifdef CONFIG_GRKERNSEC_TIME
78686+ {
78687+ .procname = "timechange_logging",
78688+ .data = &grsec_enable_time,
78689+ .maxlen = sizeof(int),
78690+ .mode = 0600,
78691+ .proc_handler = &proc_dointvec,
78692+ },
78693+#endif
78694+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78695+ {
78696+ .procname = "chroot_deny_shmat",
78697+ .data = &grsec_enable_chroot_shmat,
78698+ .maxlen = sizeof(int),
78699+ .mode = 0600,
78700+ .proc_handler = &proc_dointvec,
78701+ },
78702+#endif
78703+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78704+ {
78705+ .procname = "chroot_deny_unix",
78706+ .data = &grsec_enable_chroot_unix,
78707+ .maxlen = sizeof(int),
78708+ .mode = 0600,
78709+ .proc_handler = &proc_dointvec,
78710+ },
78711+#endif
78712+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78713+ {
78714+ .procname = "chroot_deny_mount",
78715+ .data = &grsec_enable_chroot_mount,
78716+ .maxlen = sizeof(int),
78717+ .mode = 0600,
78718+ .proc_handler = &proc_dointvec,
78719+ },
78720+#endif
78721+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78722+ {
78723+ .procname = "chroot_deny_fchdir",
78724+ .data = &grsec_enable_chroot_fchdir,
78725+ .maxlen = sizeof(int),
78726+ .mode = 0600,
78727+ .proc_handler = &proc_dointvec,
78728+ },
78729+#endif
78730+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78731+ {
78732+ .procname = "chroot_deny_chroot",
78733+ .data = &grsec_enable_chroot_double,
78734+ .maxlen = sizeof(int),
78735+ .mode = 0600,
78736+ .proc_handler = &proc_dointvec,
78737+ },
78738+#endif
78739+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78740+ {
78741+ .procname = "chroot_deny_pivot",
78742+ .data = &grsec_enable_chroot_pivot,
78743+ .maxlen = sizeof(int),
78744+ .mode = 0600,
78745+ .proc_handler = &proc_dointvec,
78746+ },
78747+#endif
78748+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78749+ {
78750+ .procname = "chroot_enforce_chdir",
78751+ .data = &grsec_enable_chroot_chdir,
78752+ .maxlen = sizeof(int),
78753+ .mode = 0600,
78754+ .proc_handler = &proc_dointvec,
78755+ },
78756+#endif
78757+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78758+ {
78759+ .procname = "chroot_deny_chmod",
78760+ .data = &grsec_enable_chroot_chmod,
78761+ .maxlen = sizeof(int),
78762+ .mode = 0600,
78763+ .proc_handler = &proc_dointvec,
78764+ },
78765+#endif
78766+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78767+ {
78768+ .procname = "chroot_deny_mknod",
78769+ .data = &grsec_enable_chroot_mknod,
78770+ .maxlen = sizeof(int),
78771+ .mode = 0600,
78772+ .proc_handler = &proc_dointvec,
78773+ },
78774+#endif
78775+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78776+ {
78777+ .procname = "chroot_restrict_nice",
78778+ .data = &grsec_enable_chroot_nice,
78779+ .maxlen = sizeof(int),
78780+ .mode = 0600,
78781+ .proc_handler = &proc_dointvec,
78782+ },
78783+#endif
78784+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78785+ {
78786+ .procname = "chroot_execlog",
78787+ .data = &grsec_enable_chroot_execlog,
78788+ .maxlen = sizeof(int),
78789+ .mode = 0600,
78790+ .proc_handler = &proc_dointvec,
78791+ },
78792+#endif
78793+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78794+ {
78795+ .procname = "chroot_caps",
78796+ .data = &grsec_enable_chroot_caps,
78797+ .maxlen = sizeof(int),
78798+ .mode = 0600,
78799+ .proc_handler = &proc_dointvec,
78800+ },
78801+#endif
78802+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
78803+ {
78804+ .procname = "chroot_deny_bad_rename",
78805+ .data = &grsec_enable_chroot_rename,
78806+ .maxlen = sizeof(int),
78807+ .mode = 0600,
78808+ .proc_handler = &proc_dointvec,
78809+ },
78810+#endif
78811+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78812+ {
78813+ .procname = "chroot_deny_sysctl",
78814+ .data = &grsec_enable_chroot_sysctl,
78815+ .maxlen = sizeof(int),
78816+ .mode = 0600,
78817+ .proc_handler = &proc_dointvec,
78818+ },
78819+#endif
78820+#ifdef CONFIG_GRKERNSEC_TPE
78821+ {
78822+ .procname = "tpe",
78823+ .data = &grsec_enable_tpe,
78824+ .maxlen = sizeof(int),
78825+ .mode = 0600,
78826+ .proc_handler = &proc_dointvec,
78827+ },
78828+ {
78829+ .procname = "tpe_gid",
78830+ .data = &grsec_tpe_gid,
78831+ .maxlen = sizeof(int),
78832+ .mode = 0600,
78833+ .proc_handler = &proc_dointvec,
78834+ },
78835+#endif
78836+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78837+ {
78838+ .procname = "tpe_invert",
78839+ .data = &grsec_enable_tpe_invert,
78840+ .maxlen = sizeof(int),
78841+ .mode = 0600,
78842+ .proc_handler = &proc_dointvec,
78843+ },
78844+#endif
78845+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78846+ {
78847+ .procname = "tpe_restrict_all",
78848+ .data = &grsec_enable_tpe_all,
78849+ .maxlen = sizeof(int),
78850+ .mode = 0600,
78851+ .proc_handler = &proc_dointvec,
78852+ },
78853+#endif
78854+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78855+ {
78856+ .procname = "socket_all",
78857+ .data = &grsec_enable_socket_all,
78858+ .maxlen = sizeof(int),
78859+ .mode = 0600,
78860+ .proc_handler = &proc_dointvec,
78861+ },
78862+ {
78863+ .procname = "socket_all_gid",
78864+ .data = &grsec_socket_all_gid,
78865+ .maxlen = sizeof(int),
78866+ .mode = 0600,
78867+ .proc_handler = &proc_dointvec,
78868+ },
78869+#endif
78870+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78871+ {
78872+ .procname = "socket_client",
78873+ .data = &grsec_enable_socket_client,
78874+ .maxlen = sizeof(int),
78875+ .mode = 0600,
78876+ .proc_handler = &proc_dointvec,
78877+ },
78878+ {
78879+ .procname = "socket_client_gid",
78880+ .data = &grsec_socket_client_gid,
78881+ .maxlen = sizeof(int),
78882+ .mode = 0600,
78883+ .proc_handler = &proc_dointvec,
78884+ },
78885+#endif
78886+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78887+ {
78888+ .procname = "socket_server",
78889+ .data = &grsec_enable_socket_server,
78890+ .maxlen = sizeof(int),
78891+ .mode = 0600,
78892+ .proc_handler = &proc_dointvec,
78893+ },
78894+ {
78895+ .procname = "socket_server_gid",
78896+ .data = &grsec_socket_server_gid,
78897+ .maxlen = sizeof(int),
78898+ .mode = 0600,
78899+ .proc_handler = &proc_dointvec,
78900+ },
78901+#endif
78902+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78903+ {
78904+ .procname = "audit_group",
78905+ .data = &grsec_enable_group,
78906+ .maxlen = sizeof(int),
78907+ .mode = 0600,
78908+ .proc_handler = &proc_dointvec,
78909+ },
78910+ {
78911+ .procname = "audit_gid",
78912+ .data = &grsec_audit_gid,
78913+ .maxlen = sizeof(int),
78914+ .mode = 0600,
78915+ .proc_handler = &proc_dointvec,
78916+ },
78917+#endif
78918+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78919+ {
78920+ .procname = "audit_chdir",
78921+ .data = &grsec_enable_chdir,
78922+ .maxlen = sizeof(int),
78923+ .mode = 0600,
78924+ .proc_handler = &proc_dointvec,
78925+ },
78926+#endif
78927+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78928+ {
78929+ .procname = "audit_mount",
78930+ .data = &grsec_enable_mount,
78931+ .maxlen = sizeof(int),
78932+ .mode = 0600,
78933+ .proc_handler = &proc_dointvec,
78934+ },
78935+#endif
78936+#ifdef CONFIG_GRKERNSEC_DMESG
78937+ {
78938+ .procname = "dmesg",
78939+ .data = &grsec_enable_dmesg,
78940+ .maxlen = sizeof(int),
78941+ .mode = 0600,
78942+ .proc_handler = &proc_dointvec,
78943+ },
78944+#endif
78945+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78946+ {
78947+ .procname = "chroot_findtask",
78948+ .data = &grsec_enable_chroot_findtask,
78949+ .maxlen = sizeof(int),
78950+ .mode = 0600,
78951+ .proc_handler = &proc_dointvec,
78952+ },
78953+#endif
78954+#ifdef CONFIG_GRKERNSEC_RESLOG
78955+ {
78956+ .procname = "resource_logging",
78957+ .data = &grsec_resource_logging,
78958+ .maxlen = sizeof(int),
78959+ .mode = 0600,
78960+ .proc_handler = &proc_dointvec,
78961+ },
78962+#endif
78963+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78964+ {
78965+ .procname = "audit_ptrace",
78966+ .data = &grsec_enable_audit_ptrace,
78967+ .maxlen = sizeof(int),
78968+ .mode = 0600,
78969+ .proc_handler = &proc_dointvec,
78970+ },
78971+#endif
78972+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78973+ {
78974+ .procname = "harden_ptrace",
78975+ .data = &grsec_enable_harden_ptrace,
78976+ .maxlen = sizeof(int),
78977+ .mode = 0600,
78978+ .proc_handler = &proc_dointvec,
78979+ },
78980+#endif
78981+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78982+ {
78983+ .procname = "harden_ipc",
78984+ .data = &grsec_enable_harden_ipc,
78985+ .maxlen = sizeof(int),
78986+ .mode = 0600,
78987+ .proc_handler = &proc_dointvec,
78988+ },
78989+#endif
78990+ {
78991+ .procname = "grsec_lock",
78992+ .data = &grsec_lock,
78993+ .maxlen = sizeof(int),
78994+ .mode = 0600,
78995+ .proc_handler = &proc_dointvec,
78996+ },
78997+#endif
78998+#ifdef CONFIG_GRKERNSEC_ROFS
78999+ {
79000+ .procname = "romount_protect",
79001+ .data = &grsec_enable_rofs,
79002+ .maxlen = sizeof(int),
79003+ .mode = 0600,
79004+ .proc_handler = &proc_dointvec_minmax,
79005+ .extra1 = &one,
79006+ .extra2 = &one,
79007+ },
79008+#endif
79009+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
79010+ {
79011+ .procname = "deny_new_usb",
79012+ .data = &grsec_deny_new_usb,
79013+ .maxlen = sizeof(int),
79014+ .mode = 0600,
79015+ .proc_handler = &proc_dointvec,
79016+ },
79017+#endif
79018+ { }
79019+};
79020+#endif
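Every knob in grsecurity_table follows one shape: an int behind proc_dointvec with mode 0600, so only root may read or toggle it, all of it locked down once grsec_lock is set. The one exception is romount_protect, which uses proc_dointvec_minmax with extra1 = extra2 = &one, making 1 the only writable value: a one-way switch that can be enabled at runtime but never disabled. The patch wires this table into the kernel's sysctl tree elsewhere; purely as an illustration of the ctl_table shape, a hypothetical module registering one knob of the same form might look like this:

#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/errno.h>

static int example_toggle;

/* one 0600 int knob handled by proc_dointvec, in the same shape as
 * the grsecurity_table entries; "example" names are hypothetical */
static struct ctl_table example_table[] = {
	{
		.procname	= "example_toggle",
		.data		= &example_toggle,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= &proc_dointvec,
	},
	{ }
};

static struct ctl_table_header *hdr;

static int __init example_init(void)
{
	hdr = register_sysctl("kernel", example_table);
	return hdr ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	unregister_sysctl_table(hdr);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");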
79021diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
79022new file mode 100644
79023index 0000000..61b514e
79024--- /dev/null
79025+++ b/grsecurity/grsec_time.c
79026@@ -0,0 +1,16 @@
79027+#include <linux/kernel.h>
79028+#include <linux/sched.h>
79029+#include <linux/grinternal.h>
79030+#include <linux/module.h>
79031+
79032+void
79033+gr_log_timechange(void)
79034+{
79035+#ifdef CONFIG_GRKERNSEC_TIME
79036+ if (grsec_enable_time)
79037+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
79038+#endif
79039+ return;
79040+}
79041+
79042+EXPORT_SYMBOL_GPL(gr_log_timechange);
79043diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
79044new file mode 100644
79045index 0000000..d1953de
79046--- /dev/null
79047+++ b/grsecurity/grsec_tpe.c
79048@@ -0,0 +1,78 @@
79049+#include <linux/kernel.h>
79050+#include <linux/sched.h>
79051+#include <linux/file.h>
79052+#include <linux/fs.h>
79053+#include <linux/grinternal.h>
79054+
79055+extern int gr_acl_tpe_check(void);
79056+
79057+int
79058+gr_tpe_allow(const struct file *file)
79059+{
79060+#ifdef CONFIG_GRKERNSEC
79061+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
79062+ struct inode *file_inode = file->f_path.dentry->d_inode;
79063+ const struct cred *cred = current_cred();
79064+ char *msg = NULL;
79065+ char *msg2 = NULL;
79066+
79067+ // never restrict root
79068+ if (gr_is_global_root(cred->uid))
79069+ return 1;
79070+
79071+ if (grsec_enable_tpe) {
79072+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79073+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
79074+ msg = "not being in trusted group";
79075+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
79076+ msg = "being in untrusted group";
79077+#else
79078+ if (in_group_p(grsec_tpe_gid))
79079+ msg = "being in untrusted group";
79080+#endif
79081+ }
79082+ if (!msg && gr_acl_tpe_check())
79083+ msg = "being in untrusted role";
79084+
79085+ // not in any affected group/role
79086+ if (!msg)
79087+ goto next_check;
79088+
79089+ if (gr_is_global_nonroot(inode->i_uid))
79090+ msg2 = "file in non-root-owned directory";
79091+ else if (inode->i_mode & S_IWOTH)
79092+ msg2 = "file in world-writable directory";
79093+ else if (inode->i_mode & S_IWGRP)
79094+ msg2 = "file in group-writable directory";
79095+ else if (file_inode->i_mode & S_IWOTH)
79096+ msg2 = "file is world-writable";
79097+
79098+ if (msg && msg2) {
79099+ char fullmsg[70] = {0};
79100+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
79101+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
79102+ return 0;
79103+ }
79104+ msg = NULL;
79105+next_check:
79106+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79107+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
79108+ return 1;
79109+
79110+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
79111+ msg = "directory not owned by user";
79112+ else if (inode->i_mode & S_IWOTH)
79113+ msg = "file in world-writable directory";
79114+ else if (inode->i_mode & S_IWGRP)
79115+ msg = "file in group-writable directory";
79116+ else if (file_inode->i_mode & S_IWOTH)
79117+ msg = "file is world-writable";
79118+
79119+ if (msg) {
79120+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
79121+ return 0;
79122+ }
79123+#endif
79124+#endif
79125+ return 1;
79126+}
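gr_tpe_allow() rejects execution for untrusted users when either the containing directory is writable by someone other than root (non-root-owned, group-writable, or world-writable) or the file itself is world-writable; the TPE_ALL variant repeats the checks with "not owned by the user" in place of "non-root-owned". The permission tests reduce to plain mode-bit checks, sketched here as a standalone userspace checker (tpe_reject_reason and the exact labels are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <libgen.h>

/* mirror of the per-file tests in gr_tpe_allow() */
static const char *tpe_reject_reason(const struct stat *dir,
				     const struct stat *file)
{
	if (dir->st_uid != 0)
		return "file in non-root-owned directory";
	if (dir->st_mode & S_IWOTH)
		return "file in world-writable directory";
	if (dir->st_mode & S_IWGRP)
		return "file in group-writable directory";
	if (file->st_mode & S_IWOTH)
		return "file is world-writable";
	return NULL;
}

int main(int argc, char **argv)
{
	struct stat dirst, filest;
	char *path = argc > 1 ? argv[1] : "/bin/ls";
	char *copy = strdup(path);	/* dirname() may modify its arg */
	const char *why;

	if (stat(path, &filest) || stat(dirname(copy), &dirst))
		return 1;
	why = tpe_reject_reason(&dirst, &filest);
	printf("%s: %s\n", path, why ? why : "would be allowed");
	free(copy);
	return 0;
}

The rationale: an attacker who can write to the directory can replace the binary (or drop a new one), so trusting execution from such a path defeats the point of restricting what untrusted users may run.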
79127diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
79128new file mode 100644
79129index 0000000..ae02d8e
79130--- /dev/null
79131+++ b/grsecurity/grsec_usb.c
79132@@ -0,0 +1,15 @@
79133+#include <linux/kernel.h>
79134+#include <linux/grinternal.h>
79135+#include <linux/module.h>
79136+
79137+int gr_handle_new_usb(void)
79138+{
79139+#ifdef CONFIG_GRKERNSEC_DENYUSB
79140+ if (grsec_deny_new_usb) {
79141+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
79142+ return 1;
79143+ }
79144+#endif
79145+ return 0;
79146+}
79147+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
79148diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
79149new file mode 100644
79150index 0000000..158b330
79151--- /dev/null
79152+++ b/grsecurity/grsum.c
79153@@ -0,0 +1,64 @@
79154+#include <linux/err.h>
79155+#include <linux/kernel.h>
79156+#include <linux/sched.h>
79157+#include <linux/mm.h>
79158+#include <linux/scatterlist.h>
79159+#include <linux/crypto.h>
79160+#include <linux/gracl.h>
79161+
79162+
79163+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
79164+#error "crypto and sha256 must be built into the kernel"
79165+#endif
79166+
79167+int
79168+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
79169+{
79170+ struct crypto_hash *tfm;
79171+ struct hash_desc desc;
79172+ struct scatterlist sg[2];
79173+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
79174+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
79175+ unsigned long *sumptr = (unsigned long *)sum;
79176+ int cryptres;
79177+ int retval = 1;
79178+ volatile int mismatched = 0;
79179+ volatile int dummy = 0;
79180+ unsigned int i;
79181+
79182+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
79183+ if (IS_ERR(tfm)) {
79184+ /* should never happen, since sha256 should be built in */
79185+ memset(entry->pw, 0, GR_PW_LEN);
79186+ return 1;
79187+ }
79188+
79189+ sg_init_table(sg, 2);
79190+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
79191+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
79192+
79193+ desc.tfm = tfm;
79194+ desc.flags = 0;
79195+
79196+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
79197+ temp_sum);
79198+
79199+ memset(entry->pw, 0, GR_PW_LEN);
79200+
79201+ if (cryptres)
79202+ goto out;
79203+
79204+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
79205+ if (sumptr[i] != tmpsumptr[i])
79206+ mismatched = 1;
79207+ else
79208+ dummy = 1; // waste a cycle
79209+
79210+ if (!mismatched)
79211+ retval = dummy - 1;
79212+
79213+out:
79214+ crypto_free_hash(tfm);
79215+
79216+ return retval;
79217+}
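chkpw() compares the computed SHA-256 digest against the stored one word by word, deliberately continuing past the first mismatch (the volatile mismatched/dummy pair keeps the compiler from short-circuiting) so that response time does not reveal how many leading bytes matched. The more common formulation of the same constant-time idea accumulates the XOR of every byte; a minimal sketch, with ct_memcmp as a hypothetical name:

#include <stdio.h>
#include <string.h>

/* constant-time digest comparison in the spirit of chkpw(): every
 * byte is examined regardless of earlier mismatches, so timing does
 * not leak the length of the matching prefix */
static int ct_memcmp(const unsigned char *a, const unsigned char *b,
		     size_t len)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];
	return diff != 0;	/* 0 = equal, like chkpw()'s retval */
}

int main(void)
{
	unsigned char x[32], y[32];

	memset(x, 0xab, sizeof(x));
	memset(y, 0xab, sizeof(y));
	y[31] ^= 1;
	printf("equal buffers differ? %d\n", ct_memcmp(x, x, 32));
	printf("tampered buffer differs? %d\n", ct_memcmp(x, y, 32));
	return 0;
}

Note also that chkpw() zeroes entry->pw before any early return, so the plaintext password never outlives the comparison regardless of which path is taken.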
79218diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
79219index 77ff547..181834f 100644
79220--- a/include/asm-generic/4level-fixup.h
79221+++ b/include/asm-generic/4level-fixup.h
79222@@ -13,8 +13,10 @@
79223 #define pmd_alloc(mm, pud, address) \
79224 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
79225 NULL: pmd_offset(pud, address))
79226+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
79227
79228 #define pud_alloc(mm, pgd, address) (pgd)
79229+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
79230 #define pud_offset(pgd, start) (pgd)
79231 #define pud_none(pud) 0
79232 #define pud_bad(pud) 0
79233diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
79234index b7babf0..1e4b4f1 100644
79235--- a/include/asm-generic/atomic-long.h
79236+++ b/include/asm-generic/atomic-long.h
79237@@ -22,6 +22,12 @@
79238
79239 typedef atomic64_t atomic_long_t;
79240
79241+#ifdef CONFIG_PAX_REFCOUNT
79242+typedef atomic64_unchecked_t atomic_long_unchecked_t;
79243+#else
79244+typedef atomic64_t atomic_long_unchecked_t;
79245+#endif
79246+
79247 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
79248
79249 static inline long atomic_long_read(atomic_long_t *l)
79250@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79251 return (long)atomic64_read(v);
79252 }
79253
79254+#ifdef CONFIG_PAX_REFCOUNT
79255+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79256+{
79257+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79258+
79259+ return (long)atomic64_read_unchecked(v);
79260+}
79261+#endif
79262+
79263 static inline void atomic_long_set(atomic_long_t *l, long i)
79264 {
79265 atomic64_t *v = (atomic64_t *)l;
79266@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79267 atomic64_set(v, i);
79268 }
79269
79270+#ifdef CONFIG_PAX_REFCOUNT
79271+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79272+{
79273+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79274+
79275+ atomic64_set_unchecked(v, i);
79276+}
79277+#endif
79278+
79279 static inline void atomic_long_inc(atomic_long_t *l)
79280 {
79281 atomic64_t *v = (atomic64_t *)l;
79282@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79283 atomic64_inc(v);
79284 }
79285
79286+#ifdef CONFIG_PAX_REFCOUNT
79287+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79288+{
79289+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79290+
79291+ atomic64_inc_unchecked(v);
79292+}
79293+#endif
79294+
79295 static inline void atomic_long_dec(atomic_long_t *l)
79296 {
79297 atomic64_t *v = (atomic64_t *)l;
79298@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79299 atomic64_dec(v);
79300 }
79301
79302+#ifdef CONFIG_PAX_REFCOUNT
79303+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79304+{
79305+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79306+
79307+ atomic64_dec_unchecked(v);
79308+}
79309+#endif
79310+
79311 static inline void atomic_long_add(long i, atomic_long_t *l)
79312 {
79313 atomic64_t *v = (atomic64_t *)l;
79314@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79315 atomic64_add(i, v);
79316 }
79317
79318+#ifdef CONFIG_PAX_REFCOUNT
79319+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79320+{
79321+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79322+
79323+ atomic64_add_unchecked(i, v);
79324+}
79325+#endif
79326+
79327 static inline void atomic_long_sub(long i, atomic_long_t *l)
79328 {
79329 atomic64_t *v = (atomic64_t *)l;
79330@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79331 atomic64_sub(i, v);
79332 }
79333
79334+#ifdef CONFIG_PAX_REFCOUNT
79335+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79336+{
79337+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79338+
79339+ atomic64_sub_unchecked(i, v);
79340+}
79341+#endif
79342+
79343 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79344 {
79345 atomic64_t *v = (atomic64_t *)l;
79346@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79347 return atomic64_add_negative(i, v);
79348 }
79349
79350-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79351+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79352 {
79353 atomic64_t *v = (atomic64_t *)l;
79354
79355 return (long)atomic64_add_return(i, v);
79356 }
79357
79358+#ifdef CONFIG_PAX_REFCOUNT
79359+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79360+{
79361+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79362+
79363+ return (long)atomic64_add_return_unchecked(i, v);
79364+}
79365+#endif
79366+
79367 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79368 {
79369 atomic64_t *v = (atomic64_t *)l;
79370@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79371 return (long)atomic64_inc_return(v);
79372 }
79373
79374+#ifdef CONFIG_PAX_REFCOUNT
79375+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79376+{
79377+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79378+
79379+ return (long)atomic64_inc_return_unchecked(v);
79380+}
79381+#endif
79382+
79383 static inline long atomic_long_dec_return(atomic_long_t *l)
79384 {
79385 atomic64_t *v = (atomic64_t *)l;
79386@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79387
79388 typedef atomic_t atomic_long_t;
79389
79390+#ifdef CONFIG_PAX_REFCOUNT
79391+typedef atomic_unchecked_t atomic_long_unchecked_t;
79392+#else
79393+typedef atomic_t atomic_long_unchecked_t;
79394+#endif
79395+
79396 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
79397 static inline long atomic_long_read(atomic_long_t *l)
79398 {
79399@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79400 return (long)atomic_read(v);
79401 }
79402
79403+#ifdef CONFIG_PAX_REFCOUNT
79404+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79405+{
79406+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79407+
79408+ return (long)atomic_read_unchecked(v);
79409+}
79410+#endif
79411+
79412 static inline void atomic_long_set(atomic_long_t *l, long i)
79413 {
79414 atomic_t *v = (atomic_t *)l;
79415@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79416 atomic_set(v, i);
79417 }
79418
79419+#ifdef CONFIG_PAX_REFCOUNT
79420+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79421+{
79422+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79423+
79424+ atomic_set_unchecked(v, i);
79425+}
79426+#endif
79427+
79428 static inline void atomic_long_inc(atomic_long_t *l)
79429 {
79430 atomic_t *v = (atomic_t *)l;
79431@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79432 atomic_inc(v);
79433 }
79434
79435+#ifdef CONFIG_PAX_REFCOUNT
79436+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79437+{
79438+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79439+
79440+ atomic_inc_unchecked(v);
79441+}
79442+#endif
79443+
79444 static inline void atomic_long_dec(atomic_long_t *l)
79445 {
79446 atomic_t *v = (atomic_t *)l;
79447@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79448 atomic_dec(v);
79449 }
79450
79451+#ifdef CONFIG_PAX_REFCOUNT
79452+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79453+{
79454+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79455+
79456+ atomic_dec_unchecked(v);
79457+}
79458+#endif
79459+
79460 static inline void atomic_long_add(long i, atomic_long_t *l)
79461 {
79462 atomic_t *v = (atomic_t *)l;
79463@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79464 atomic_add(i, v);
79465 }
79466
79467+#ifdef CONFIG_PAX_REFCOUNT
79468+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79469+{
79470+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79471+
79472+ atomic_add_unchecked(i, v);
79473+}
79474+#endif
79475+
79476 static inline void atomic_long_sub(long i, atomic_long_t *l)
79477 {
79478 atomic_t *v = (atomic_t *)l;
79479@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79480 atomic_sub(i, v);
79481 }
79482
79483+#ifdef CONFIG_PAX_REFCOUNT
79484+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79485+{
79486+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79487+
79488+ atomic_sub_unchecked(i, v);
79489+}
79490+#endif
79491+
79492 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79493 {
79494 atomic_t *v = (atomic_t *)l;
79495@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79496 return atomic_add_negative(i, v);
79497 }
79498
79499-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79500+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79501 {
79502 atomic_t *v = (atomic_t *)l;
79503
79504 return (long)atomic_add_return(i, v);
79505 }
79506
79507+#ifdef CONFIG_PAX_REFCOUNT
79508+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79509+{
79510+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79511+
79512+ return (long)atomic_add_return_unchecked(i, v);
79513+}
79514+
79515+#endif
79516+
79517 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79518 {
79519 atomic_t *v = (atomic_t *)l;
79520@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79521 return (long)atomic_inc_return(v);
79522 }
79523
79524+#ifdef CONFIG_PAX_REFCOUNT
79525+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79526+{
79527+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79528+
79529+ return (long)atomic_inc_return_unchecked(v);
79530+}
79531+#endif
79532+
79533 static inline long atomic_long_dec_return(atomic_long_t *l)
79534 {
79535 atomic_t *v = (atomic_t *)l;
79536@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79537
79538 #endif /* BITS_PER_LONG == 64 */
79539
79540+#ifdef CONFIG_PAX_REFCOUNT
79541+static inline void pax_refcount_needs_these_functions(void)
79542+{
79543+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
79544+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
79545+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
79546+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
79547+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
79548+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
79549+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
79550+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
79551+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
79552+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
79553+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
79554+#ifdef CONFIG_X86
79555+ atomic_clear_mask_unchecked(0, NULL);
79556+ atomic_set_mask_unchecked(0, NULL);
79557+#endif
79558+
79559+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
79560+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
79561+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
79562+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
79563+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
79564+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
79565+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
79566+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
79567+}
79568+#else
79569+#define atomic_read_unchecked(v) atomic_read(v)
79570+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
79571+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
79572+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
79573+#define atomic_inc_unchecked(v) atomic_inc(v)
79574+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
79575+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
79576+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
79577+#define atomic_dec_unchecked(v) atomic_dec(v)
79578+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
79579+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
79580+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
79581+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
79582+
79583+#define atomic_long_read_unchecked(v) atomic_long_read(v)
79584+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
79585+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
79586+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
79587+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
79588+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
79589+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
79590+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
79591+#endif
79592+
79593 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
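The atomic-long.h changes above give every atomic_long operation an _unchecked twin: under CONFIG_PAX_REFCOUNT the checked forms trap on overflow (the REFCOUNT defense against reference-count wraps), while the _unchecked forms keep plain modular arithmetic for counters that may legitimately wrap. The pax_refcount_needs_these_functions() dummy is never called; it merely references the whole unchecked family so that an architecture enabling PAX_REFCOUNT without supplying one of them fails to compile. A minimal usage sketch, assuming a tree with this patch applied (the counter names are hypothetical):

	/* Reference count: must never wrap, so use the checked type. */
	static atomic_long_t obj_refs = ATOMIC_LONG_INIT(1);
	/* Statistics counter: wrap-around is harmless, so opt out. */
	static atomic_long_unchecked_t rx_bytes;

	static void obj_get_account(long n)
	{
		atomic_long_inc(&obj_refs);              /* traps on overflow under PAX_REFCOUNT */
		atomic_long_add_unchecked(n, &rx_bytes); /* ordinary two's-complement wrap */
	}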
79594diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
79595index 30ad9c8..c70c170 100644
79596--- a/include/asm-generic/atomic64.h
79597+++ b/include/asm-generic/atomic64.h
79598@@ -16,6 +16,8 @@ typedef struct {
79599 long long counter;
79600 } atomic64_t;
79601
79602+typedef atomic64_t atomic64_unchecked_t;
79603+
79604 #define ATOMIC64_INIT(i) { (i) }
79605
79606 extern long long atomic64_read(const atomic64_t *v);
79607@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
79608 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
79609 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
79610
79611+#define atomic64_read_unchecked(v) atomic64_read(v)
79612+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
79613+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
79614+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
79615+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
79616+#define atomic64_inc_unchecked(v) atomic64_inc(v)
79617+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
79618+#define atomic64_dec_unchecked(v) atomic64_dec(v)
79619+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
79620+
79621 #endif /* _ASM_GENERIC_ATOMIC64_H */
79622diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
79623index f5c40b0..e902f9d 100644
79624--- a/include/asm-generic/barrier.h
79625+++ b/include/asm-generic/barrier.h
79626@@ -82,7 +82,7 @@
79627 do { \
79628 compiletime_assert_atomic_type(*p); \
79629 smp_mb(); \
79630- ACCESS_ONCE(*p) = (v); \
79631+ ACCESS_ONCE_RW(*p) = (v); \
79632 } while (0)
79633
79634 #define smp_load_acquire(p) \
79635diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
79636index a60a7cc..0fe12f2 100644
79637--- a/include/asm-generic/bitops/__fls.h
79638+++ b/include/asm-generic/bitops/__fls.h
79639@@ -9,7 +9,7 @@
79640 *
79641 * Undefined if no set bit exists, so code should check against 0 first.
79642 */
79643-static __always_inline unsigned long __fls(unsigned long word)
79644+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
79645 {
79646 int num = BITS_PER_LONG - 1;
79647
79648diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
79649index 0576d1f..dad6c71 100644
79650--- a/include/asm-generic/bitops/fls.h
79651+++ b/include/asm-generic/bitops/fls.h
79652@@ -9,7 +9,7 @@
79653 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
79654 */
79655
79656-static __always_inline int fls(int x)
79657+static __always_inline int __intentional_overflow(-1) fls(int x)
79658 {
79659 int r = 32;
79660
79661diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
79662index b097cf8..3d40e14 100644
79663--- a/include/asm-generic/bitops/fls64.h
79664+++ b/include/asm-generic/bitops/fls64.h
79665@@ -15,7 +15,7 @@
79666 * at position 64.
79667 */
79668 #if BITS_PER_LONG == 32
79669-static __always_inline int fls64(__u64 x)
79670+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79671 {
79672 __u32 h = x >> 32;
79673 if (h)
79674@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
79675 return fls(x);
79676 }
79677 #elif BITS_PER_LONG == 64
79678-static __always_inline int fls64(__u64 x)
79679+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79680 {
79681 if (x == 0)
79682 return 0;
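The fls()/__fls()/fls64() annotations above, like the later ones on rol32(), ror32() and the completion helpers, are directives for the size_overflow GCC plugin (declared in the compiler-gcc hunks further down): they state that any arithmetic wrap inside or around these functions is deliberate, so instrumented callers are not reported as overflow bugs. A sketch of the kind of caller this whitelists, assuming the plugin is active (the helper name is hypothetical):

	/* Rounding up to a power of two deliberately relies on wrap:
	 * n == 0 makes the subtraction wrap to ULONG_MAX, and the shift
	 * then overflows the result type. Without the
	 * __intentional_overflow(-1) marking on fls_long(), the
	 * size_overflow plugin could flag this arithmetic. */
	static unsigned long roundup_pow2_sketch(unsigned long n)
	{
		return 1UL << fls_long(n - 1);
	}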
79683diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
79684index 1bfcfe5..e04c5c9 100644
79685--- a/include/asm-generic/cache.h
79686+++ b/include/asm-generic/cache.h
79687@@ -6,7 +6,7 @@
79688 * cache lines need to provide their own cache.h.
79689 */
79690
79691-#define L1_CACHE_SHIFT 5
79692-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
79693+#define L1_CACHE_SHIFT 5UL
79694+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
79695
79696 #endif /* __ASM_GENERIC_CACHE_H */
79697diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
79698index 0d68a1e..b74a761 100644
79699--- a/include/asm-generic/emergency-restart.h
79700+++ b/include/asm-generic/emergency-restart.h
79701@@ -1,7 +1,7 @@
79702 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
79703 #define _ASM_GENERIC_EMERGENCY_RESTART_H
79704
79705-static inline void machine_emergency_restart(void)
79706+static inline __noreturn void machine_emergency_restart(void)
79707 {
79708 machine_restart(NULL);
79709 }
79710diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
79711index 90f99c7..00ce236 100644
79712--- a/include/asm-generic/kmap_types.h
79713+++ b/include/asm-generic/kmap_types.h
79714@@ -2,9 +2,9 @@
79715 #define _ASM_GENERIC_KMAP_TYPES_H
79716
79717 #ifdef __WITH_KM_FENCE
79718-# define KM_TYPE_NR 41
79719+# define KM_TYPE_NR 42
79720 #else
79721-# define KM_TYPE_NR 20
79722+# define KM_TYPE_NR 21
79723 #endif
79724
79725 #endif
79726diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
79727index 9ceb03b..62b0b8f 100644
79728--- a/include/asm-generic/local.h
79729+++ b/include/asm-generic/local.h
79730@@ -23,24 +23,37 @@ typedef struct
79731 atomic_long_t a;
79732 } local_t;
79733
79734+typedef struct {
79735+ atomic_long_unchecked_t a;
79736+} local_unchecked_t;
79737+
79738 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
79739
79740 #define local_read(l) atomic_long_read(&(l)->a)
79741+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
79742 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
79743+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
79744 #define local_inc(l) atomic_long_inc(&(l)->a)
79745+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
79746 #define local_dec(l) atomic_long_dec(&(l)->a)
79747+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
79748 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
79749+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
79750 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
79751+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
79752
79753 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
79754 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
79755 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
79756 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
79757 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
79758+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
79759 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
79760 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
79761+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
79762
79763 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79764+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79765 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
79766 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
79767 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
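The local.h wrappers simply forward to the new atomic_long_unchecked_t family so local_t users get the same opt-out; note that local_cmpxchg_unchecked() maps to the plain atomic_long_cmpxchg(), since a compare-and-exchange cannot overflow and needs no unchecked variant. A brief sketch of a wrap-tolerant per-cpu event counter, assuming this patch (names are illustrative):

	#include <asm/local.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(local_unchecked_t, evt_count);

	static void note_event(void)
	{
		/* Cheap per-cpu increment, atomic with respect to the
		 * local CPU; wrap-around is acceptable here. */
		local_inc_unchecked(this_cpu_ptr(&evt_count));
	}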
79768diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
79769index 725612b..9cc513a 100644
79770--- a/include/asm-generic/pgtable-nopmd.h
79771+++ b/include/asm-generic/pgtable-nopmd.h
79772@@ -1,14 +1,19 @@
79773 #ifndef _PGTABLE_NOPMD_H
79774 #define _PGTABLE_NOPMD_H
79775
79776-#ifndef __ASSEMBLY__
79777-
79778 #include <asm-generic/pgtable-nopud.h>
79779
79780-struct mm_struct;
79781-
79782 #define __PAGETABLE_PMD_FOLDED
79783
79784+#define PMD_SHIFT PUD_SHIFT
79785+#define PTRS_PER_PMD 1
79786+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
79787+#define PMD_MASK (~(PMD_SIZE-1))
79788+
79789+#ifndef __ASSEMBLY__
79790+
79791+struct mm_struct;
79792+
79793 /*
79794 * Having the pmd type consist of a pud gets the size right, and allows
79795 * us to conceptually access the pud entry that this pmd is folded into
79796@@ -16,11 +21,6 @@ struct mm_struct;
79797 */
79798 typedef struct { pud_t pud; } pmd_t;
79799
79800-#define PMD_SHIFT PUD_SHIFT
79801-#define PTRS_PER_PMD 1
79802-#define PMD_SIZE (1UL << PMD_SHIFT)
79803-#define PMD_MASK (~(PMD_SIZE-1))
79804-
79805 /*
79806 * The "pud_xxx()" functions here are trivial for a folded two-level
79807 * setup: the pmd is never bad, and a pmd always exists (as it's folded
79808diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
79809index 810431d..0ec4804f 100644
79810--- a/include/asm-generic/pgtable-nopud.h
79811+++ b/include/asm-generic/pgtable-nopud.h
79812@@ -1,10 +1,15 @@
79813 #ifndef _PGTABLE_NOPUD_H
79814 #define _PGTABLE_NOPUD_H
79815
79816-#ifndef __ASSEMBLY__
79817-
79818 #define __PAGETABLE_PUD_FOLDED
79819
79820+#define PUD_SHIFT PGDIR_SHIFT
79821+#define PTRS_PER_PUD 1
79822+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
79823+#define PUD_MASK (~(PUD_SIZE-1))
79824+
79825+#ifndef __ASSEMBLY__
79826+
79827 /*
79828 * Having the pud type consist of a pgd gets the size right, and allows
79829 * us to conceptually access the pgd entry that this pud is folded into
79830@@ -12,11 +17,6 @@
79831 */
79832 typedef struct { pgd_t pgd; } pud_t;
79833
79834-#define PUD_SHIFT PGDIR_SHIFT
79835-#define PTRS_PER_PUD 1
79836-#define PUD_SIZE (1UL << PUD_SHIFT)
79837-#define PUD_MASK (~(PUD_SIZE-1))
79838-
79839 /*
79840 * The "pgd_xxx()" functions here are trivial for a folded two-level
79841 * setup: the pud is never bad, and a pud always exists (as it's folded
79842@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
79843 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
79844
79845 #define pgd_populate(mm, pgd, pud) do { } while (0)
79846+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
79847 /*
79848 * (puds are folded into pgds so this doesn't get actually called,
79849 * but the define is needed for a generic inline function.)
79850diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
79851index 177d597..2826237 100644
79852--- a/include/asm-generic/pgtable.h
79853+++ b/include/asm-generic/pgtable.h
79854@@ -839,6 +839,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
79855 }
79856 #endif /* CONFIG_NUMA_BALANCING */
79857
79858+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
79859+#ifdef CONFIG_PAX_KERNEXEC
79860+#error KERNEXEC requires pax_open_kernel
79861+#else
79862+static inline unsigned long pax_open_kernel(void) { return 0; }
79863+#endif
79864+#endif
79865+
79866+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
79867+#ifdef CONFIG_PAX_KERNEXEC
79868+#error KERNEXEC requires pax_close_kernel
79869+#else
79870+static inline unsigned long pax_close_kernel(void) { return 0; }
79871+#endif
79872+#endif
79873+
79874 #endif /* CONFIG_MMU */
79875
79876 #endif /* !__ASSEMBLY__ */
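The pgtable.h stubs establish the contract for CONFIG_PAX_KERNEXEC: an architecture either supplies real pax_open_kernel()/pax_close_kernel() (and defines __HAVE_ARCH_PAX_OPEN_KERNEL), or the #error refuses the configuration outright; without KERNEXEC they collapse to no-ops so common code can call them unconditionally. The canonical write pattern, as a sketch (the table is a hypothetical __read_only object):

	static void (*handlers[16])(void) __read_only;

	static void set_handler(int idx, void (*fn)(void))
	{
		pax_open_kernel();	/* temporarily lift the write protection */
		handlers[idx] = fn;
		pax_close_kernel();	/* restore read-only mapping */
	}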
79877diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
79878index 72d8803..cb9749c 100644
79879--- a/include/asm-generic/uaccess.h
79880+++ b/include/asm-generic/uaccess.h
79881@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
79882 return __clear_user(to, n);
79883 }
79884
79885+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
79886+#ifdef CONFIG_PAX_MEMORY_UDEREF
79887+#error UDEREF requires pax_open_userland
79888+#else
79889+static inline unsigned long pax_open_userland(void) { return 0; }
79890+#endif
79891+#endif
79892+
79893+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
79894+#ifdef CONFIG_PAX_MEMORY_UDEREF
79895+#error UDEREF requires pax_close_userland
79896+#else
79897+static inline unsigned long pax_close_userland(void) { return 0; }
79898+#endif
79899+#endif
79900+
79901 #endif /* __ASM_GENERIC_UACCESS_H */
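The uaccess.h stubs mirror the KERNEXEC pair for CONFIG_PAX_MEMORY_UDEREF: architectures with real UDEREF bracket legitimate userland accesses so that any stray kernel dereference of a user pointer faults. In the patched tree this bracketing lives inside the arch copy_*_user() primitives themselves; the sketch below only illustrates the shape of the protocol:

	static unsigned long fetch_from_user(void *kbuf, const void __user *ubuf,
					     unsigned long n)
	{
		unsigned long left;

		pax_open_userland();			/* make userland accessible */
		left = __copy_from_user(kbuf, ubuf, n);	/* the one sanctioned access */
		pax_close_userland();			/* seal userland off again */
		return left;
	}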
79902diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
79903index bee5d68..8d362d1 100644
79904--- a/include/asm-generic/vmlinux.lds.h
79905+++ b/include/asm-generic/vmlinux.lds.h
79906@@ -234,6 +234,7 @@
79907 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
79908 VMLINUX_SYMBOL(__start_rodata) = .; \
79909 *(.rodata) *(.rodata.*) \
79910+ *(.data..read_only) \
79911 *(__vermagic) /* Kernel version magic */ \
79912 . = ALIGN(8); \
79913 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
79914@@ -726,17 +727,18 @@
79915 * section in the linker script will go there too. @phdr should have
79916 * a leading colon.
79917 *
79918- * Note that this macros defines __per_cpu_load as an absolute symbol.
79919+ * Note that this macro defines per_cpu_load as an absolute symbol.
79920 * If there is no need to put the percpu section at a predetermined
79921 * address, use PERCPU_SECTION.
79922 */
79923 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
79924- VMLINUX_SYMBOL(__per_cpu_load) = .; \
79925- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
79926+ per_cpu_load = .; \
79927+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
79928 - LOAD_OFFSET) { \
79929+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
79930 PERCPU_INPUT(cacheline) \
79931 } phdr \
79932- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
79933+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
79934
79935 /**
79936 * PERCPU_SECTION - define output section for percpu area, simple version
79937diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
79938index 623a59c..1e79ab9 100644
79939--- a/include/crypto/algapi.h
79940+++ b/include/crypto/algapi.h
79941@@ -34,7 +34,7 @@ struct crypto_type {
79942 unsigned int maskclear;
79943 unsigned int maskset;
79944 unsigned int tfmsize;
79945-};
79946+} __do_const;
79947
79948 struct crypto_instance {
79949 struct crypto_alg alg;
79950diff --git a/include/drm/drmP.h b/include/drm/drmP.h
79951index e1b2e8b..2697bd2 100644
79952--- a/include/drm/drmP.h
79953+++ b/include/drm/drmP.h
79954@@ -59,6 +59,7 @@
79955
79956 #include <asm/mman.h>
79957 #include <asm/pgalloc.h>
79958+#include <asm/local.h>
79959 #include <asm/uaccess.h>
79960
79961 #include <uapi/drm/drm.h>
79962@@ -223,10 +224,12 @@ void drm_err(const char *format, ...);
79963 * \param cmd command.
79964 * \param arg argument.
79965 */
79966-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
79967+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
79968+ struct drm_file *file_priv);
79969+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
79970 struct drm_file *file_priv);
79971
79972-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
79973+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
79974 unsigned long arg);
79975
79976 #define DRM_IOCTL_NR(n) _IOC_NR(n)
79977@@ -242,10 +245,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
79978 struct drm_ioctl_desc {
79979 unsigned int cmd;
79980 int flags;
79981- drm_ioctl_t *func;
79982+ drm_ioctl_t func;
79983 unsigned int cmd_drv;
79984 const char *name;
79985-};
79986+} __do_const;
79987
79988 /**
79989 * Creates a driver or general drm_ioctl_desc array entry for the given
79990@@ -629,7 +632,8 @@ struct drm_info_list {
79991 int (*show)(struct seq_file*, void*); /** show callback */
79992 u32 driver_features; /**< Required driver features for this entry */
79993 void *data;
79994-};
79995+} __do_const;
79996+typedef struct drm_info_list __no_const drm_info_list_no_const;
79997
79998 /**
79999 * debugfs node structure. This structure represents a debugfs file.
80000@@ -713,7 +717,7 @@ struct drm_device {
80001
80002 /** \name Usage Counters */
80003 /*@{ */
80004- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80005+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80006 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
80007 int buf_use; /**< Buffers in use -- cannot alloc */
80008 atomic_t buf_alloc; /**< Buffer allocation in progress */
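The drmP.h changes are a typical constify conversion: drm_ioctl_t becomes a const function-pointer typedef and struct drm_ioctl_desc gains __do_const, so the large per-driver ioctl tables end up in read-only memory, while drm_ioctl_no_const_t and drm_info_list_no_const remain for the few sites that must build entries at runtime; open_count is also retyped from int to local_t. Roughly, an ioctl table then looks like this and its function pointers can no longer be overwritten at runtime (driver and handler names are illustrative):

	static const struct drm_ioctl_desc foo_ioctls[] = {
		DRM_IOCTL_DEF_DRV(FOO_GETPARAM, foo_getparam, DRM_AUTH),
		DRM_IOCTL_DEF_DRV(FOO_SETPARAM, foo_setparam, DRM_AUTH),
	};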
80009diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
80010index 7adbb65..2a1eb1f 100644
80011--- a/include/drm/drm_crtc_helper.h
80012+++ b/include/drm/drm_crtc_helper.h
80013@@ -116,7 +116,7 @@ struct drm_encoder_helper_funcs {
80014 struct drm_connector *connector);
80015 /* disable encoder when not in use - more explicit than dpms off */
80016 void (*disable)(struct drm_encoder *encoder);
80017-};
80018+} __no_const;
80019
80020 /**
80021 * drm_connector_helper_funcs - helper operations for connectors
80022diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
80023index 180ad0e..53cdacf 100644
80024--- a/include/drm/i915_pciids.h
80025+++ b/include/drm/i915_pciids.h
80026@@ -37,7 +37,7 @@
80027 */
80028 #define INTEL_VGA_DEVICE(id, info) { \
80029 0x8086, id, \
80030- ~0, ~0, \
80031+ PCI_ANY_ID, PCI_ANY_ID, \
80032 0x030000, 0xff0000, \
80033 (unsigned long) info }
80034
80035diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
80036index 72dcbe8..8db58d7 100644
80037--- a/include/drm/ttm/ttm_memory.h
80038+++ b/include/drm/ttm/ttm_memory.h
80039@@ -48,7 +48,7 @@
80040
80041 struct ttm_mem_shrink {
80042 int (*do_shrink) (struct ttm_mem_shrink *);
80043-};
80044+} __no_const;
80045
80046 /**
80047 * struct ttm_mem_global - Global memory accounting structure.
80048diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
80049index 49a8284..9643967 100644
80050--- a/include/drm/ttm/ttm_page_alloc.h
80051+++ b/include/drm/ttm/ttm_page_alloc.h
80052@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
80053 */
80054 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
80055
80056+struct device;
80057 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80058 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80059
80060diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
80061index 4b840e8..155d235 100644
80062--- a/include/keys/asymmetric-subtype.h
80063+++ b/include/keys/asymmetric-subtype.h
80064@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
80065 /* Verify the signature on a key of this subtype (optional) */
80066 int (*verify_signature)(const struct key *key,
80067 const struct public_key_signature *sig);
80068-};
80069+} __do_const;
80070
80071 /**
80072 * asymmetric_key_subtype - Get the subtype from an asymmetric key
80073diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
80074index c1da539..1dcec55 100644
80075--- a/include/linux/atmdev.h
80076+++ b/include/linux/atmdev.h
80077@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
80078 #endif
80079
80080 struct k_atm_aal_stats {
80081-#define __HANDLE_ITEM(i) atomic_t i
80082+#define __HANDLE_ITEM(i) atomic_unchecked_t i
80083 __AAL_STAT_ITEMS
80084 #undef __HANDLE_ITEM
80085 };
80086@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
80087 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
80088 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
80089 struct module *owner;
80090-};
80091+} __do_const;
80092
80093 struct atmphy_ops {
80094 int (*start)(struct atm_dev *dev);
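The k_atm_aal_stats change shows how cheaply an X-macro list converts: __AAL_STAT_ITEMS expands __HANDLE_ITEM once per counter, so redefining that one macro retypes every statistics field to atomic_unchecked_t (they are pure counters, free to wrap). Roughly, the struct now expands to:

	struct k_atm_aal_stats {
		atomic_unchecked_t tx;
		atomic_unchecked_t tx_err;
		atomic_unchecked_t rx;
		atomic_unchecked_t rx_err;
		atomic_unchecked_t rx_drop;
	};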
80095diff --git a/include/linux/atomic.h b/include/linux/atomic.h
80096index 5b08a85..60922fb 100644
80097--- a/include/linux/atomic.h
80098+++ b/include/linux/atomic.h
80099@@ -12,7 +12,7 @@
80100 * Atomically adds @a to @v, so long as @v was not already @u.
80101 * Returns non-zero if @v was not @u, and zero otherwise.
80102 */
80103-static inline int atomic_add_unless(atomic_t *v, int a, int u)
80104+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
80105 {
80106 return __atomic_add_unless(v, a, u) != u;
80107 }
80108diff --git a/include/linux/audit.h b/include/linux/audit.h
80109index af84234..4177a40 100644
80110--- a/include/linux/audit.h
80111+++ b/include/linux/audit.h
80112@@ -225,7 +225,7 @@ static inline void audit_ptrace(struct task_struct *t)
80113 extern unsigned int audit_serial(void);
80114 extern int auditsc_get_stamp(struct audit_context *ctx,
80115 struct timespec *t, unsigned int *serial);
80116-extern int audit_set_loginuid(kuid_t loginuid);
80117+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
80118
80119 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
80120 {
80121diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
80122index 576e463..28fd926 100644
80123--- a/include/linux/binfmts.h
80124+++ b/include/linux/binfmts.h
80125@@ -44,7 +44,7 @@ struct linux_binprm {
80126 unsigned interp_flags;
80127 unsigned interp_data;
80128 unsigned long loader, exec;
80129-};
80130+} __randomize_layout;
80131
80132 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
80133 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
80134@@ -77,8 +77,10 @@ struct linux_binfmt {
80135 int (*load_binary)(struct linux_binprm *);
80136 int (*load_shlib)(struct file *);
80137 int (*core_dump)(struct coredump_params *cprm);
80138+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
80139+ void (*handle_mmap)(struct file *);
80140 unsigned long min_coredump; /* minimal dump size */
80141-};
80142+} __do_const __randomize_layout;
80143
80144 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
80145
80146diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
80147index 202e403..16e6617 100644
80148--- a/include/linux/bitmap.h
80149+++ b/include/linux/bitmap.h
80150@@ -302,7 +302,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
80151 return __bitmap_full(src, nbits);
80152 }
80153
80154-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
80155+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
80156 {
80157 if (small_const_nbits(nbits))
80158 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
80159diff --git a/include/linux/bitops.h b/include/linux/bitops.h
80160index 5d858e0..336c1d9 100644
80161--- a/include/linux/bitops.h
80162+++ b/include/linux/bitops.h
80163@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
80164 * @word: value to rotate
80165 * @shift: bits to roll
80166 */
80167-static inline __u32 rol32(__u32 word, unsigned int shift)
80168+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
80169 {
80170 return (word << shift) | (word >> (32 - shift));
80171 }
80172@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
80173 * @word: value to rotate
80174 * @shift: bits to roll
80175 */
80176-static inline __u32 ror32(__u32 word, unsigned int shift)
80177+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
80178 {
80179 return (word >> shift) | (word << (32 - shift));
80180 }
80181@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
80182 return (__s32)(value << shift) >> shift;
80183 }
80184
80185-static inline unsigned fls_long(unsigned long l)
80186+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
80187 {
80188 if (sizeof(l) == 4)
80189 return fls(l);
80190diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
80191index 92f4b4b..483d537 100644
80192--- a/include/linux/blkdev.h
80193+++ b/include/linux/blkdev.h
80194@@ -1613,7 +1613,7 @@ struct block_device_operations {
80195 /* this callback is with swap_lock and sometimes page table lock held */
80196 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
80197 struct module *owner;
80198-};
80199+} __do_const;
80200
80201 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
80202 unsigned long);
80203diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
80204index afc1343..9735539 100644
80205--- a/include/linux/blktrace_api.h
80206+++ b/include/linux/blktrace_api.h
80207@@ -25,7 +25,7 @@ struct blk_trace {
80208 struct dentry *dropped_file;
80209 struct dentry *msg_file;
80210 struct list_head running_list;
80211- atomic_t dropped;
80212+ atomic_unchecked_t dropped;
80213 };
80214
80215 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
80216diff --git a/include/linux/cache.h b/include/linux/cache.h
80217index 17e7e82..1d7da26 100644
80218--- a/include/linux/cache.h
80219+++ b/include/linux/cache.h
80220@@ -16,6 +16,14 @@
80221 #define __read_mostly
80222 #endif
80223
80224+#ifndef __read_only
80225+#ifdef CONFIG_PAX_KERNEXEC
80226+#error KERNEXEC requires __read_only
80227+#else
80228+#define __read_only __read_mostly
80229+#endif
80230+#endif
80231+
80232 #ifndef ____cacheline_aligned
80233 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
80234 #endif
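__read_only follows the same contract style as the pax_open_kernel() stubs: a KERNEXEC configuration must define it (typically placing the object in .data..read_only, which the vmlinux.lds.h hunk earlier in this section folds into the protected .rodata output section), and everything else degrades to __read_mostly. A sketch of the intended use, with hypothetical names:

	static int feature_enabled __read_only;	/* written only inside an open/close window */

	static int __init feature_setup(char *str)
	{
		pax_open_kernel();
		feature_enabled = 1;
		pax_close_kernel();
		return 1;
	}
	__setup("feature=", feature_setup);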
80235diff --git a/include/linux/capability.h b/include/linux/capability.h
80236index aa93e5e..985a1b0 100644
80237--- a/include/linux/capability.h
80238+++ b/include/linux/capability.h
80239@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
80240 extern bool capable(int cap);
80241 extern bool ns_capable(struct user_namespace *ns, int cap);
80242 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
80243+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
80244 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
80245+extern bool capable_nolog(int cap);
80246+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
80247
80248 /* audit system wants to get cap info from files as well */
80249 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
80250
80251+extern int is_privileged_binary(const struct dentry *dentry);
80252+
80253 #endif /* !_LINUX_CAPABILITY_H */
80254diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
80255index 8609d57..86e4d79 100644
80256--- a/include/linux/cdrom.h
80257+++ b/include/linux/cdrom.h
80258@@ -87,7 +87,6 @@ struct cdrom_device_ops {
80259
80260 /* driver specifications */
80261 const int capability; /* capability flags */
80262- int n_minors; /* number of active minor devices */
80263 /* handle uniform packets for scsi type devices (scsi,atapi) */
80264 int (*generic_packet) (struct cdrom_device_info *,
80265 struct packet_command *);
80266diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
80267index 4ce9056..86caac6 100644
80268--- a/include/linux/cleancache.h
80269+++ b/include/linux/cleancache.h
80270@@ -31,7 +31,7 @@ struct cleancache_ops {
80271 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
80272 void (*invalidate_inode)(int, struct cleancache_filekey);
80273 void (*invalidate_fs)(int);
80274-};
80275+} __no_const;
80276
80277 extern struct cleancache_ops *
80278 cleancache_register_ops(struct cleancache_ops *ops);
80279diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
80280index d936409..ce9f842 100644
80281--- a/include/linux/clk-provider.h
80282+++ b/include/linux/clk-provider.h
80283@@ -191,6 +191,7 @@ struct clk_ops {
80284 void (*init)(struct clk_hw *hw);
80285 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
80286 };
80287+typedef struct clk_ops __no_const clk_ops_no_const;
80288
80289 /**
80290 * struct clk_init_data - holds init data that's common to all clocks and is
80291diff --git a/include/linux/compat.h b/include/linux/compat.h
80292index 7450ca2..a824b81 100644
80293--- a/include/linux/compat.h
80294+++ b/include/linux/compat.h
80295@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
80296 compat_size_t __user *len_ptr);
80297
80298 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
80299-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
80300+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
80301 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
80302 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
80303 compat_ssize_t msgsz, int msgflg);
80304@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
80305 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
80306 compat_ulong_t addr, compat_ulong_t data);
80307 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
80308- compat_long_t addr, compat_long_t data);
80309+ compat_ulong_t addr, compat_ulong_t data);
80310
80311 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
80312 /*
80313diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
80314index d1a5582..4424efa 100644
80315--- a/include/linux/compiler-gcc4.h
80316+++ b/include/linux/compiler-gcc4.h
80317@@ -39,9 +39,34 @@
80318 # define __compiletime_warning(message) __attribute__((warning(message)))
80319 # define __compiletime_error(message) __attribute__((error(message)))
80320 #endif /* __CHECKER__ */
80321+
80322+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80323+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80324+#define __bos0(ptr) __bos((ptr), 0)
80325+#define __bos1(ptr) __bos((ptr), 1)
80326 #endif /* GCC_VERSION >= 40300 */
80327
80328 #if GCC_VERSION >= 40500
80329+
80330+#ifdef RANDSTRUCT_PLUGIN
80331+#define __randomize_layout __attribute__((randomize_layout))
80332+#define __no_randomize_layout __attribute__((no_randomize_layout))
80333+#endif
80334+
80335+#ifdef CONSTIFY_PLUGIN
80336+#define __no_const __attribute__((no_const))
80337+#define __do_const __attribute__((do_const))
80338+#endif
80339+
80340+#ifdef SIZE_OVERFLOW_PLUGIN
80341+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80342+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80343+#endif
80344+
80345+#ifdef LATENT_ENTROPY_PLUGIN
80346+#define __latent_entropy __attribute__((latent_entropy))
80347+#endif
80348+
80349 /*
80350 * Mark a position in code as unreachable. This can be used to
80351 * suppress control flow warnings after asm blocks that transfer
80352diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
80353index c8c5659..d09f2ad 100644
80354--- a/include/linux/compiler-gcc5.h
80355+++ b/include/linux/compiler-gcc5.h
80356@@ -28,6 +28,28 @@
80357 # define __compiletime_error(message) __attribute__((error(message)))
80358 #endif /* __CHECKER__ */
80359
80360+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80361+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80362+#define __bos0(ptr) __bos((ptr), 0)
80363+#define __bos1(ptr) __bos((ptr), 1)
80364+
80365+#ifdef CONSTIFY_PLUGIN
80366+#error not yet
80367+#define __no_const __attribute__((no_const))
80368+#define __do_const __attribute__((do_const))
80369+#endif
80370+
80371+#ifdef SIZE_OVERFLOW_PLUGIN
80372+#error not yet
80373+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80374+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80375+#endif
80376+
80377+#ifdef LATENT_ENTROPY_PLUGIN
80378+#error not yet
80379+#define __latent_entropy __attribute__((latent_entropy))
80380+#endif
80381+
80382 /*
80383 * Mark a position in code as unreachable. This can be used to
80384 * suppress control flow warnings after asm blocks that transfer
80385diff --git a/include/linux/compiler.h b/include/linux/compiler.h
80386index fa6a314..752a6ef 100644
80387--- a/include/linux/compiler.h
80388+++ b/include/linux/compiler.h
80389@@ -5,11 +5,14 @@
80390
80391 #ifdef __CHECKER__
80392 # define __user __attribute__((noderef, address_space(1)))
80393+# define __force_user __force __user
80394 # define __kernel __attribute__((address_space(0)))
80395+# define __force_kernel __force __kernel
80396 # define __safe __attribute__((safe))
80397 # define __force __attribute__((force))
80398 # define __nocast __attribute__((nocast))
80399 # define __iomem __attribute__((noderef, address_space(2)))
80400+# define __force_iomem __force __iomem
80401 # define __must_hold(x) __attribute__((context(x,1,1)))
80402 # define __acquires(x) __attribute__((context(x,0,1)))
80403 # define __releases(x) __attribute__((context(x,1,0)))
80404@@ -17,20 +20,37 @@
80405 # define __release(x) __context__(x,-1)
80406 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
80407 # define __percpu __attribute__((noderef, address_space(3)))
80408+# define __force_percpu __force __percpu
80409 #ifdef CONFIG_SPARSE_RCU_POINTER
80410 # define __rcu __attribute__((noderef, address_space(4)))
80411+# define __force_rcu __force __rcu
80412 #else
80413 # define __rcu
80414+# define __force_rcu
80415 #endif
80416 extern void __chk_user_ptr(const volatile void __user *);
80417 extern void __chk_io_ptr(const volatile void __iomem *);
80418 #else
80419-# define __user
80420-# define __kernel
80421+# ifdef CHECKER_PLUGIN
80422+//# define __user
80423+//# define __force_user
80424+//# define __kernel
80425+//# define __force_kernel
80426+# else
80427+# ifdef STRUCTLEAK_PLUGIN
80428+# define __user __attribute__((user))
80429+# else
80430+# define __user
80431+# endif
80432+# define __force_user
80433+# define __kernel
80434+# define __force_kernel
80435+# endif
80436 # define __safe
80437 # define __force
80438 # define __nocast
80439 # define __iomem
80440+# define __force_iomem
80441 # define __chk_user_ptr(x) (void)0
80442 # define __chk_io_ptr(x) (void)0
80443 # define __builtin_warning(x, y...) (1)
80444@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
80445 # define __release(x) (void)0
80446 # define __cond_lock(x,c) (c)
80447 # define __percpu
80448+# define __force_percpu
80449 # define __rcu
80450+# define __force_rcu
80451 #endif
80452
80453 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
80454@@ -201,32 +223,32 @@ static __always_inline void data_access_exceeds_word_size(void)
80455 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
80456 {
80457 switch (size) {
80458- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
80459- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
80460- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
80461+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
80462+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
80463+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
80464 #ifdef CONFIG_64BIT
80465- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
80466+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
80467 #endif
80468 default:
80469 barrier();
80470- __builtin_memcpy((void *)res, (const void *)p, size);
80471+ __builtin_memcpy(res, (const void *)p, size);
80472 data_access_exceeds_word_size();
80473 barrier();
80474 }
80475 }
80476
80477-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
80478+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
80479 {
80480 switch (size) {
80481- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
80482- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
80483- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
80484+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
80485+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
80486+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
80487 #ifdef CONFIG_64BIT
80488- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
80489+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
80490 #endif
80491 default:
80492 barrier();
80493- __builtin_memcpy((void *)p, (const void *)res, size);
80494+ __builtin_memcpy((void *)p, res, size);
80495 data_access_exceeds_word_size();
80496 barrier();
80497 }
80498@@ -360,6 +382,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80499 # define __attribute_const__ /* unimplemented */
80500 #endif
80501
80502+#ifndef __randomize_layout
80503+# define __randomize_layout
80504+#endif
80505+
80506+#ifndef __no_randomize_layout
80507+# define __no_randomize_layout
80508+#endif
80509+
80510+#ifndef __no_const
80511+# define __no_const
80512+#endif
80513+
80514+#ifndef __do_const
80515+# define __do_const
80516+#endif
80517+
80518+#ifndef __size_overflow
80519+# define __size_overflow(...)
80520+#endif
80521+
80522+#ifndef __intentional_overflow
80523+# define __intentional_overflow(...)
80524+#endif
80525+
80526+#ifndef __latent_entropy
80527+# define __latent_entropy
80528+#endif
80529+
80530 /*
80531 * Tell gcc if a function is cold. The compiler will assume any path
80532 * directly leading to the call is unlikely.
80533@@ -369,6 +419,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80534 #define __cold
80535 #endif
80536
80537+#ifndef __alloc_size
80538+#define __alloc_size(...)
80539+#endif
80540+
80541+#ifndef __bos
80542+#define __bos(ptr, arg)
80543+#endif
80544+
80545+#ifndef __bos0
80546+#define __bos0(ptr)
80547+#endif
80548+
80549+#ifndef __bos1
80550+#define __bos1(ptr)
80551+#endif
80552+
80553 /* Simple shorthand for a section definition */
80554 #ifndef __section
80555 # define __section(S) __attribute__ ((__section__(#S)))
80556@@ -462,8 +528,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80557 */
80558 #define __ACCESS_ONCE(x) ({ \
80559 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
80560- (volatile typeof(x) *)&(x); })
80561+ (volatile const typeof(x) *)&(x); })
80562 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
80563+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
80564
80565 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
80566 #ifdef CONFIG_KPROBES
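compiler.h is where all of the plugin attributes acquire empty fallbacks, so code annotated with __do_const, __intentional_overflow() and friends still builds with a stock toolchain; only a gcc that actually loads the plugins gives the annotations teeth. The other notable change is the read/write split of ACCESS_ONCE(): the macro now goes through a const-qualified pointer, so a write through it no longer compiles, and write sites are converted to the new ACCESS_ONCE_RW() (as in the smp_store_release() hunk near the top of this section). In caller terms, a sketch:

	static void publish(int *shared_flag)
	{
		int v = ACCESS_ONCE(*shared_flag);	/* reads: unchanged */
		(void)v;
		ACCESS_ONCE_RW(*shared_flag) = 1;	/* writes: must opt in explicitly */
		/* ACCESS_ONCE(*shared_flag) = 1; no longer compiles: const lvalue */
	}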
80567diff --git a/include/linux/completion.h b/include/linux/completion.h
80568index 5d5aaae..0ea9b84 100644
80569--- a/include/linux/completion.h
80570+++ b/include/linux/completion.h
80571@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
80572
80573 extern void wait_for_completion(struct completion *);
80574 extern void wait_for_completion_io(struct completion *);
80575-extern int wait_for_completion_interruptible(struct completion *x);
80576-extern int wait_for_completion_killable(struct completion *x);
80577+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
80578+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
80579 extern unsigned long wait_for_completion_timeout(struct completion *x,
80580- unsigned long timeout);
80581+ unsigned long timeout) __intentional_overflow(-1);
80582 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
80583- unsigned long timeout);
80584+ unsigned long timeout) __intentional_overflow(-1);
80585 extern long wait_for_completion_interruptible_timeout(
80586- struct completion *x, unsigned long timeout);
80587+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80588 extern long wait_for_completion_killable_timeout(
80589- struct completion *x, unsigned long timeout);
80590+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80591 extern bool try_wait_for_completion(struct completion *x);
80592 extern bool completion_done(struct completion *x);
80593
80594diff --git a/include/linux/configfs.h b/include/linux/configfs.h
80595index 34025df..d94bbbc 100644
80596--- a/include/linux/configfs.h
80597+++ b/include/linux/configfs.h
80598@@ -125,7 +125,7 @@ struct configfs_attribute {
80599 const char *ca_name;
80600 struct module *ca_owner;
80601 umode_t ca_mode;
80602-};
80603+} __do_const;
80604
80605 /*
80606 * Users often need to create attribute structures for their configurable
80607diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
80608index 4d078ce..c970f4d 100644
80609--- a/include/linux/cpufreq.h
80610+++ b/include/linux/cpufreq.h
80611@@ -206,6 +206,7 @@ struct global_attr {
80612 ssize_t (*store)(struct kobject *a, struct attribute *b,
80613 const char *c, size_t count);
80614 };
80615+typedef struct global_attr __no_const global_attr_no_const;
80616
80617 #define define_one_global_ro(_name) \
80618 static struct global_attr _name = \
80619@@ -277,7 +278,7 @@ struct cpufreq_driver {
80620 bool boost_supported;
80621 bool boost_enabled;
80622 int (*set_boost)(int state);
80623-};
80624+} __do_const;
80625
80626 /* flags */
80627 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
80628diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
80629index ab70f3b..3ef7771 100644
80630--- a/include/linux/cpuidle.h
80631+++ b/include/linux/cpuidle.h
80632@@ -50,7 +50,8 @@ struct cpuidle_state {
80633 int index);
80634
80635 int (*enter_dead) (struct cpuidle_device *dev, int index);
80636-};
80637+} __do_const;
80638+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
80639
80640 /* Idle State Flags */
80641 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
80642@@ -206,7 +207,7 @@ struct cpuidle_governor {
80643 void (*reflect) (struct cpuidle_device *dev, int index);
80644
80645 struct module *owner;
80646-};
80647+} __do_const;
80648
80649 #ifdef CONFIG_CPU_IDLE
80650 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
80651diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
80652index b950e9d..63810aa 100644
80653--- a/include/linux/cpumask.h
80654+++ b/include/linux/cpumask.h
80655@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80656 }
80657
80658 /* Valid inputs for n are -1 and 0. */
80659-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80660+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80661 {
80662 return n+1;
80663 }
80664
80665-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80666+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80667 {
80668 return n+1;
80669 }
80670
80671-static inline unsigned int cpumask_next_and(int n,
80672+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
80673 const struct cpumask *srcp,
80674 const struct cpumask *andp)
80675 {
80676@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80677 *
80678 * Returns >= nr_cpu_ids if no further cpus set.
80679 */
80680-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80681+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80682 {
80683 /* -1 is a legal arg here. */
80684 if (n != -1)
80685@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80686 *
80687 * Returns >= nr_cpu_ids if no further cpus unset.
80688 */
80689-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80690+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80691 {
80692 /* -1 is a legal arg here. */
80693 if (n != -1)
80694@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80695 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
80696 }
80697
80698-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
80699+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
80700 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
80701 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
80702
80703@@ -464,7 +464,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
80704 * cpumask_weight - Count of bits in *srcp
80705 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
80706 */
80707-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
80708+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
80709 {
80710 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
80711 }
80712diff --git a/include/linux/cred.h b/include/linux/cred.h
80713index 2fb2ca2..d6a3340 100644
80714--- a/include/linux/cred.h
80715+++ b/include/linux/cred.h
80716@@ -35,7 +35,7 @@ struct group_info {
80717 int nblocks;
80718 kgid_t small_block[NGROUPS_SMALL];
80719 kgid_t *blocks[0];
80720-};
80721+} __randomize_layout;
80722
80723 /**
80724 * get_group_info - Get a reference to a group info structure
80725@@ -137,7 +137,7 @@ struct cred {
80726 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
80727 struct group_info *group_info; /* supplementary groups for euid/fsgid */
80728 struct rcu_head rcu; /* RCU deletion hook */
80729-};
80730+} __randomize_layout;
80731
80732 extern void __put_cred(struct cred *);
80733 extern void exit_creds(struct task_struct *);
80734@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
80735 static inline void validate_process_creds(void)
80736 {
80737 }
80738+static inline void validate_task_creds(struct task_struct *task)
80739+{
80740+}
80741 #endif
80742
80743 /**
80744@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
80745
80746 #define task_uid(task) (task_cred_xxx((task), uid))
80747 #define task_euid(task) (task_cred_xxx((task), euid))
80748+#define task_securebits(task) (task_cred_xxx((task), securebits))
80749
80750 #define current_cred_xxx(xxx) \
80751 ({ \
80752diff --git a/include/linux/crypto.h b/include/linux/crypto.h
80753index 9c8776d..8c526c2 100644
80754--- a/include/linux/crypto.h
80755+++ b/include/linux/crypto.h
80756@@ -626,7 +626,7 @@ struct cipher_tfm {
80757 const u8 *key, unsigned int keylen);
80758 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80759 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80760-};
80761+} __no_const;
80762
80763 struct hash_tfm {
80764 int (*init)(struct hash_desc *desc);
80765@@ -647,13 +647,13 @@ struct compress_tfm {
80766 int (*cot_decompress)(struct crypto_tfm *tfm,
80767 const u8 *src, unsigned int slen,
80768 u8 *dst, unsigned int *dlen);
80769-};
80770+} __no_const;
80771
80772 struct rng_tfm {
80773 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
80774 unsigned int dlen);
80775 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
80776-};
80777+} __no_const;
80778
80779 #define crt_ablkcipher crt_u.ablkcipher
80780 #define crt_aead crt_u.aead
80781diff --git a/include/linux/ctype.h b/include/linux/ctype.h
80782index 653589e..4ef254a 100644
80783--- a/include/linux/ctype.h
80784+++ b/include/linux/ctype.h
80785@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
80786 * Fast implementation of tolower() for internal usage. Do not use in your
80787 * code.
80788 */
80789-static inline char _tolower(const char c)
80790+static inline unsigned char _tolower(const unsigned char c)
80791 {
80792 return c | 0x20;
80793 }
80794diff --git a/include/linux/dcache.h b/include/linux/dcache.h
80795index 5a81398..6bbee30 100644
80796--- a/include/linux/dcache.h
80797+++ b/include/linux/dcache.h
80798@@ -123,6 +123,9 @@ struct dentry {
80799 unsigned long d_time; /* used by d_revalidate */
80800 void *d_fsdata; /* fs-specific data */
80801
80802+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
80803+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
80804+#endif
80805 struct list_head d_lru; /* LRU list */
80806 struct list_head d_child; /* child of parent list */
80807 struct list_head d_subdirs; /* our children */
80808@@ -133,7 +136,7 @@ struct dentry {
80809 struct hlist_node d_alias; /* inode alias list */
80810 struct rcu_head d_rcu;
80811 } d_u;
80812-};
80813+} __randomize_layout;
80814
80815 /*
80816 * dentry->d_lock spinlock nesting subclasses:
80817diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
80818index 7925bf0..d5143d2 100644
80819--- a/include/linux/decompress/mm.h
80820+++ b/include/linux/decompress/mm.h
80821@@ -77,7 +77,7 @@ static void free(void *where)
80822 * warnings when not needed (indeed large_malloc / large_free are not
80823 * needed by inflate */
80824
80825-#define malloc(a) kmalloc(a, GFP_KERNEL)
80826+#define malloc(a) kmalloc((a), GFP_KERNEL)
80827 #define free(a) kfree(a)
80828
80829 #define large_malloc(a) vmalloc(a)
80830diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
80831index ce447f0..83c66bd 100644
80832--- a/include/linux/devfreq.h
80833+++ b/include/linux/devfreq.h
80834@@ -114,7 +114,7 @@ struct devfreq_governor {
80835 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
80836 int (*event_handler)(struct devfreq *devfreq,
80837 unsigned int event, void *data);
80838-};
80839+} __do_const;
80840
80841 /**
80842 * struct devfreq - Device devfreq structure
80843diff --git a/include/linux/device.h b/include/linux/device.h
80844index fb50673..ec0b35b 100644
80845--- a/include/linux/device.h
80846+++ b/include/linux/device.h
80847@@ -311,7 +311,7 @@ struct subsys_interface {
80848 struct list_head node;
80849 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
80850 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
80851-};
80852+} __do_const;
80853
80854 int subsys_interface_register(struct subsys_interface *sif);
80855 void subsys_interface_unregister(struct subsys_interface *sif);
80856@@ -507,7 +507,7 @@ struct device_type {
80857 void (*release)(struct device *dev);
80858
80859 const struct dev_pm_ops *pm;
80860-};
80861+} __do_const;
80862
80863 /* interface for exporting device attributes */
80864 struct device_attribute {
80865@@ -517,11 +517,12 @@ struct device_attribute {
80866 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
80867 const char *buf, size_t count);
80868 };
80869+typedef struct device_attribute __no_const device_attribute_no_const;
80870
80871 struct dev_ext_attribute {
80872 struct device_attribute attr;
80873 void *var;
80874-};
80875+} __do_const;
80876
80877 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
80878 char *buf);
80879diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
80880index c3007cb..43efc8c 100644
80881--- a/include/linux/dma-mapping.h
80882+++ b/include/linux/dma-mapping.h
80883@@ -60,7 +60,7 @@ struct dma_map_ops {
80884 u64 (*get_required_mask)(struct device *dev);
80885 #endif
80886 int is_phys;
80887-};
80888+} __do_const;
80889
80890 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
80891
80892diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
80893index 40cd75e..38572a9 100644
80894--- a/include/linux/dmaengine.h
80895+++ b/include/linux/dmaengine.h
80896@@ -1137,9 +1137,9 @@ struct dma_pinned_list {
80897 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
80898 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
80899
80900-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80901+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80902 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
80903-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80904+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80905 struct dma_pinned_list *pinned_list, struct page *page,
80906 unsigned int offset, size_t len);
80907
80908diff --git a/include/linux/efi.h b/include/linux/efi.h
80909index 0238d61..34a758f 100644
80910--- a/include/linux/efi.h
80911+++ b/include/linux/efi.h
80912@@ -1054,6 +1054,7 @@ struct efivar_operations {
80913 efi_set_variable_nonblocking_t *set_variable_nonblocking;
80914 efi_query_variable_store_t *query_variable_store;
80915 };
80916+typedef struct efivar_operations __no_const efivar_operations_no_const;
80917
80918 struct efivars {
80919 /*
80920diff --git a/include/linux/elf.h b/include/linux/elf.h
80921index 20fa8d8..3d0dd18 100644
80922--- a/include/linux/elf.h
80923+++ b/include/linux/elf.h
80924@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
80925 #define elf_note elf32_note
80926 #define elf_addr_t Elf32_Off
80927 #define Elf_Half Elf32_Half
80928+#define elf_dyn Elf32_Dyn
80929
80930 #else
80931
80932@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
80933 #define elf_note elf64_note
80934 #define elf_addr_t Elf64_Off
80935 #define Elf_Half Elf64_Half
80936+#define elf_dyn Elf64_Dyn
80937
80938 #endif
80939
80940diff --git a/include/linux/err.h b/include/linux/err.h
80941index a729120..6ede2c9 100644
80942--- a/include/linux/err.h
80943+++ b/include/linux/err.h
80944@@ -20,12 +20,12 @@
80945
80946 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
80947
80948-static inline void * __must_check ERR_PTR(long error)
80949+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
80950 {
80951 return (void *) error;
80952 }
80953
80954-static inline long __must_check PTR_ERR(__force const void *ptr)
80955+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
80956 {
80957 return (long) ptr;
80958 }
80959diff --git a/include/linux/extcon.h b/include/linux/extcon.h
80960index 36f49c4..a2a1f4c 100644
80961--- a/include/linux/extcon.h
80962+++ b/include/linux/extcon.h
80963@@ -135,7 +135,7 @@ struct extcon_dev {
80964 /* /sys/class/extcon/.../mutually_exclusive/... */
80965 struct attribute_group attr_g_muex;
80966 struct attribute **attrs_muex;
80967- struct device_attribute *d_attrs_muex;
80968+ device_attribute_no_const *d_attrs_muex;
80969 };
80970
80971 /**
80972diff --git a/include/linux/fb.h b/include/linux/fb.h
80973index 09bb7a1..d98870a 100644
80974--- a/include/linux/fb.h
80975+++ b/include/linux/fb.h
80976@@ -305,7 +305,7 @@ struct fb_ops {
80977 /* called at KDB enter and leave time to prepare the console */
80978 int (*fb_debug_enter)(struct fb_info *info);
80979 int (*fb_debug_leave)(struct fb_info *info);
80980-};
80981+} __do_const;
80982
80983 #ifdef CONFIG_FB_TILEBLITTING
80984 #define FB_TILE_CURSOR_NONE 0
80985diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
80986index 230f87b..1fd0485 100644
80987--- a/include/linux/fdtable.h
80988+++ b/include/linux/fdtable.h
80989@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
80990 void put_files_struct(struct files_struct *fs);
80991 void reset_files_struct(struct files_struct *);
80992 int unshare_files(struct files_struct **);
80993-struct files_struct *dup_fd(struct files_struct *, int *);
80994+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
80995 void do_close_on_exec(struct files_struct *);
80996 int iterate_fd(struct files_struct *, unsigned,
80997 int (*)(const void *, struct file *, unsigned),
80998diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
80999index 8293262..2b3b8bd 100644
81000--- a/include/linux/frontswap.h
81001+++ b/include/linux/frontswap.h
81002@@ -11,7 +11,7 @@ struct frontswap_ops {
81003 int (*load)(unsigned, pgoff_t, struct page *);
81004 void (*invalidate_page)(unsigned, pgoff_t);
81005 void (*invalidate_area)(unsigned);
81006-};
81007+} __no_const;
81008
81009 extern bool frontswap_enabled;
81010 extern struct frontswap_ops *
81011diff --git a/include/linux/fs.h b/include/linux/fs.h
81012index 42efe13..72d42ee 100644
81013--- a/include/linux/fs.h
81014+++ b/include/linux/fs.h
81015@@ -413,7 +413,7 @@ struct address_space {
81016 spinlock_t private_lock; /* for use by the address_space */
81017 struct list_head private_list; /* ditto */
81018 void *private_data; /* ditto */
81019-} __attribute__((aligned(sizeof(long))));
81020+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
81021 /*
81022 * On most architectures that alignment is already the case; but
81023 * must be enforced here for CRIS, to let the least significant bit
81024@@ -456,7 +456,7 @@ struct block_device {
81025 int bd_fsfreeze_count;
81026 /* Mutex for freeze */
81027 struct mutex bd_fsfreeze_mutex;
81028-};
81029+} __randomize_layout;
81030
81031 /*
81032 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
81033@@ -642,7 +642,7 @@ struct inode {
81034 #endif
81035
81036 void *i_private; /* fs or device private pointer */
81037-};
81038+} __randomize_layout;
81039
81040 static inline int inode_unhashed(struct inode *inode)
81041 {
81042@@ -837,7 +837,7 @@ struct file {
81043 struct list_head f_tfile_llink;
81044 #endif /* #ifdef CONFIG_EPOLL */
81045 struct address_space *f_mapping;
81046-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
81047+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
81048
81049 struct file_handle {
81050 __u32 handle_bytes;
81051@@ -962,7 +962,7 @@ struct file_lock {
81052 int state; /* state of grant or error if -ve */
81053 } afs;
81054 } fl_u;
81055-};
81056+} __randomize_layout;
81057
81058 /* The following constant reflects the upper bound of the file/locking space */
81059 #ifndef OFFSET_MAX
81060@@ -1305,7 +1305,7 @@ struct super_block {
81061 * Indicates how deep in a filesystem stack this SB is
81062 */
81063 int s_stack_depth;
81064-};
81065+} __randomize_layout;
81066
81067 extern struct timespec current_fs_time(struct super_block *sb);
81068
81069@@ -1536,7 +1536,8 @@ struct file_operations {
81070 long (*fallocate)(struct file *file, int mode, loff_t offset,
81071 loff_t len);
81072 void (*show_fdinfo)(struct seq_file *m, struct file *f);
81073-};
81074+} __do_const __randomize_layout;
81075+typedef struct file_operations __no_const file_operations_no_const;
81076
81077 struct inode_operations {
81078 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
81079@@ -2854,4 +2855,14 @@ static inline bool dir_relax(struct inode *inode)
81080 return !IS_DEADDIR(inode);
81081 }
81082
81083+static inline bool is_sidechannel_device(const struct inode *inode)
81084+{
81085+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
81086+ umode_t mode = inode->i_mode;
81087+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
81088+#else
81089+ return false;
81090+#endif
81091+}
81092+
81093 #endif /* _LINUX_FS_H */
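/* [editor's note -- illustrative sketch, not part of the patch] The
 * is_sidechannel_device() helper added above flags world-readable or
 * world-writable character/block device nodes; the fsnotify hunks later in
 * this patch use it to suppress access/modify events on such nodes. The mode
 * test in isolation, as a self-contained user-space check (the function name
 * is a stand-in): */
#include <stdio.h>
#include <sys/stat.h>
static int is_sidechannel_mode(mode_t mode)
{
	return (S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH));
}
int main(void)
{
	printf("chr 0666: %d\n", is_sidechannel_mode(S_IFCHR | 0666));	/* 1: world-accessible device */
	printf("chr 0600: %d\n", is_sidechannel_mode(S_IFCHR | 0600));	/* 0: owner-only device */
	printf("reg 0666: %d\n", is_sidechannel_mode(S_IFREG | 0666));	/* 0: not a device node */
	return 0;
}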
81094diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
81095index 0efc3e6..fd23610 100644
81096--- a/include/linux/fs_struct.h
81097+++ b/include/linux/fs_struct.h
81098@@ -6,13 +6,13 @@
81099 #include <linux/seqlock.h>
81100
81101 struct fs_struct {
81102- int users;
81103+ atomic_t users;
81104 spinlock_t lock;
81105 seqcount_t seq;
81106 int umask;
81107 int in_exec;
81108 struct path root, pwd;
81109-};
81110+} __randomize_layout;
81111
81112 extern struct kmem_cache *fs_cachep;
81113
81114diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
81115index 7714849..a4a5c7a 100644
81116--- a/include/linux/fscache-cache.h
81117+++ b/include/linux/fscache-cache.h
81118@@ -113,7 +113,7 @@ struct fscache_operation {
81119 fscache_operation_release_t release;
81120 };
81121
81122-extern atomic_t fscache_op_debug_id;
81123+extern atomic_unchecked_t fscache_op_debug_id;
81124 extern void fscache_op_work_func(struct work_struct *work);
81125
81126 extern void fscache_enqueue_operation(struct fscache_operation *);
81127@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
81128 INIT_WORK(&op->work, fscache_op_work_func);
81129 atomic_set(&op->usage, 1);
81130 op->state = FSCACHE_OP_ST_INITIALISED;
81131- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
81132+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
81133 op->processor = processor;
81134 op->release = release;
81135 INIT_LIST_HEAD(&op->pend_link);
81136diff --git a/include/linux/fscache.h b/include/linux/fscache.h
81137index 115bb81..e7b812b 100644
81138--- a/include/linux/fscache.h
81139+++ b/include/linux/fscache.h
81140@@ -152,7 +152,7 @@ struct fscache_cookie_def {
81141 * - this is mandatory for any object that may have data
81142 */
81143 void (*now_uncached)(void *cookie_netfs_data);
81144-};
81145+} __do_const;
81146
81147 /*
81148 * fscache cached network filesystem type
81149diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
81150index 7ee1774..72505b8 100644
81151--- a/include/linux/fsnotify.h
81152+++ b/include/linux/fsnotify.h
81153@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
81154 struct inode *inode = file_inode(file);
81155 __u32 mask = FS_ACCESS;
81156
81157+ if (is_sidechannel_device(inode))
81158+ return;
81159+
81160 if (S_ISDIR(inode->i_mode))
81161 mask |= FS_ISDIR;
81162
81163@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
81164 struct inode *inode = file_inode(file);
81165 __u32 mask = FS_MODIFY;
81166
81167+ if (is_sidechannel_device(inode))
81168+ return;
81169+
81170 if (S_ISDIR(inode->i_mode))
81171 mask |= FS_ISDIR;
81172
81173@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
81174 */
81175 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
81176 {
81177- return kstrdup(name, GFP_KERNEL);
81178+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
81179 }
81180
81181 /*
81182diff --git a/include/linux/genhd.h b/include/linux/genhd.h
81183index ec274e0..e678159 100644
81184--- a/include/linux/genhd.h
81185+++ b/include/linux/genhd.h
81186@@ -194,7 +194,7 @@ struct gendisk {
81187 struct kobject *slave_dir;
81188
81189 struct timer_rand_state *random;
81190- atomic_t sync_io; /* RAID */
81191+ atomic_unchecked_t sync_io; /* RAID */
81192 struct disk_events *ev;
81193 #ifdef CONFIG_BLK_DEV_INTEGRITY
81194 struct blk_integrity *integrity;
81195@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
81196 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
81197
81198 /* drivers/char/random.c */
81199-extern void add_disk_randomness(struct gendisk *disk);
81200+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
81201 extern void rand_initialize_disk(struct gendisk *disk);
81202
81203 static inline sector_t get_start_sect(struct block_device *bdev)
81204diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
81205index 667c311..abac2a7 100644
81206--- a/include/linux/genl_magic_func.h
81207+++ b/include/linux/genl_magic_func.h
81208@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
81209 },
81210
81211 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
81212-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
81213+static struct genl_ops ZZZ_genl_ops[] = {
81214 #include GENL_MAGIC_INCLUDE_FILE
81215 };
81216
81217diff --git a/include/linux/gfp.h b/include/linux/gfp.h
81218index b840e3b..aeaeef9 100644
81219--- a/include/linux/gfp.h
81220+++ b/include/linux/gfp.h
81221@@ -34,6 +34,13 @@ struct vm_area_struct;
81222 #define ___GFP_NO_KSWAPD 0x400000u
81223 #define ___GFP_OTHER_NODE 0x800000u
81224 #define ___GFP_WRITE 0x1000000u
81225+
81226+#ifdef CONFIG_PAX_USERCOPY_SLABS
81227+#define ___GFP_USERCOPY 0x2000000u
81228+#else
81229+#define ___GFP_USERCOPY 0
81230+#endif
81231+
81232 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
81233
81234 /*
81235@@ -90,6 +97,7 @@ struct vm_area_struct;
81236 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
81237 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
81238 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
81239+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
81240
81241 /*
81242 * This may seem redundant, but it's a way of annotating false positives vs.
81243@@ -97,7 +105,7 @@ struct vm_area_struct;
81244 */
81245 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
81246
81247-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
81248+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
81249 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
81250
81251 /* This equals 0, but use constants in case they ever change */
81252@@ -152,6 +160,8 @@ struct vm_area_struct;
81253 /* 4GB DMA on some platforms */
81254 #define GFP_DMA32 __GFP_DMA32
81255
81256+#define GFP_USERCOPY __GFP_USERCOPY
81257+
81258 /* Convert GFP flags to their corresponding migrate type */
81259 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
81260 {
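/* [editor's note -- illustrative sketch, not part of the patch] The
 * __GFP_BITS_SHIFT bump above follows directly from the new flag value:
 * ___GFP_WRITE (0x1000000u) is bit 24, so 25 bits used to cover every flag;
 * the new ___GFP_USERCOPY (0x2000000u) is bit 25, which the old 25-bit mask
 * would have truncated. A quick user-space check of the constants: */
#include <stdio.h>
int main(void)
{
	unsigned int gfp_usercopy = 0x2000000u;		/* ___GFP_USERCOPY, bit 25 */
	unsigned int old_mask = (1u << 25) - 1;		/* __GFP_BITS_MASK before the patch */
	unsigned int new_mask = (1u << 26) - 1;		/* __GFP_BITS_MASK after the patch */
	printf("kept by old mask: %s\n", (gfp_usercopy & old_mask) ? "yes" : "no");	/* no */
	printf("kept by new mask: %s\n", (gfp_usercopy & new_mask) ? "yes" : "no");	/* yes */
	return 0;
}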
81261diff --git a/include/linux/gracl.h b/include/linux/gracl.h
81262new file mode 100644
81263index 0000000..91858e4
81264--- /dev/null
81265+++ b/include/linux/gracl.h
81266@@ -0,0 +1,342 @@
81267+#ifndef GR_ACL_H
81268+#define GR_ACL_H
81269+
81270+#include <linux/grdefs.h>
81271+#include <linux/resource.h>
81272+#include <linux/capability.h>
81273+#include <linux/dcache.h>
81274+#include <asm/resource.h>
81275+
81276+/* Major status information */
81277+
81278+#define GR_VERSION "grsecurity 3.1"
81279+#define GRSECURITY_VERSION 0x3100
81280+
81281+enum {
81282+ GR_SHUTDOWN = 0,
81283+ GR_ENABLE = 1,
81284+ GR_SPROLE = 2,
81285+ GR_OLDRELOAD = 3,
81286+ GR_SEGVMOD = 4,
81287+ GR_STATUS = 5,
81288+ GR_UNSPROLE = 6,
81289+ GR_PASSSET = 7,
81290+ GR_SPROLEPAM = 8,
81291+ GR_RELOAD = 9,
81292+};
81293+
81294+/* Password setup definitions
81295+ * kernel/grhash.c */
81296+enum {
81297+ GR_PW_LEN = 128,
81298+ GR_SALT_LEN = 16,
81299+ GR_SHA_LEN = 32,
81300+};
81301+
81302+enum {
81303+ GR_SPROLE_LEN = 64,
81304+};
81305+
81306+enum {
81307+ GR_NO_GLOB = 0,
81308+ GR_REG_GLOB,
81309+ GR_CREATE_GLOB
81310+};
81311+
81312+#define GR_NLIMITS 32
81313+
81314+/* Begin Data Structures */
81315+
81316+struct sprole_pw {
81317+ unsigned char *rolename;
81318+ unsigned char salt[GR_SALT_LEN];
81319+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
81320+};
81321+
81322+struct name_entry {
81323+ __u32 key;
81324+ u64 inode;
81325+ dev_t device;
81326+ char *name;
81327+ __u16 len;
81328+ __u8 deleted;
81329+ struct name_entry *prev;
81330+ struct name_entry *next;
81331+};
81332+
81333+struct inodev_entry {
81334+ struct name_entry *nentry;
81335+ struct inodev_entry *prev;
81336+ struct inodev_entry *next;
81337+};
81338+
81339+struct acl_role_db {
81340+ struct acl_role_label **r_hash;
81341+ __u32 r_size;
81342+};
81343+
81344+struct inodev_db {
81345+ struct inodev_entry **i_hash;
81346+ __u32 i_size;
81347+};
81348+
81349+struct name_db {
81350+ struct name_entry **n_hash;
81351+ __u32 n_size;
81352+};
81353+
81354+struct crash_uid {
81355+ uid_t uid;
81356+ unsigned long expires;
81357+};
81358+
81359+struct gr_hash_struct {
81360+ void **table;
81361+ void **nametable;
81362+ void *first;
81363+ __u32 table_size;
81364+ __u32 used_size;
81365+ int type;
81366+};
81367+
81368+/* Userspace Grsecurity ACL data structures */
81369+
81370+struct acl_subject_label {
81371+ char *filename;
81372+ u64 inode;
81373+ dev_t device;
81374+ __u32 mode;
81375+ kernel_cap_t cap_mask;
81376+ kernel_cap_t cap_lower;
81377+ kernel_cap_t cap_invert_audit;
81378+
81379+ struct rlimit res[GR_NLIMITS];
81380+ __u32 resmask;
81381+
81382+ __u8 user_trans_type;
81383+ __u8 group_trans_type;
81384+ uid_t *user_transitions;
81385+ gid_t *group_transitions;
81386+ __u16 user_trans_num;
81387+ __u16 group_trans_num;
81388+
81389+ __u32 sock_families[2];
81390+ __u32 ip_proto[8];
81391+ __u32 ip_type;
81392+ struct acl_ip_label **ips;
81393+ __u32 ip_num;
81394+ __u32 inaddr_any_override;
81395+
81396+ __u32 crashes;
81397+ unsigned long expires;
81398+
81399+ struct acl_subject_label *parent_subject;
81400+ struct gr_hash_struct *hash;
81401+ struct acl_subject_label *prev;
81402+ struct acl_subject_label *next;
81403+
81404+ struct acl_object_label **obj_hash;
81405+ __u32 obj_hash_size;
81406+ __u16 pax_flags;
81407+};
81408+
81409+struct role_allowed_ip {
81410+ __u32 addr;
81411+ __u32 netmask;
81412+
81413+ struct role_allowed_ip *prev;
81414+ struct role_allowed_ip *next;
81415+};
81416+
81417+struct role_transition {
81418+ char *rolename;
81419+
81420+ struct role_transition *prev;
81421+ struct role_transition *next;
81422+};
81423+
81424+struct acl_role_label {
81425+ char *rolename;
81426+ uid_t uidgid;
81427+ __u16 roletype;
81428+
81429+ __u16 auth_attempts;
81430+ unsigned long expires;
81431+
81432+ struct acl_subject_label *root_label;
81433+ struct gr_hash_struct *hash;
81434+
81435+ struct acl_role_label *prev;
81436+ struct acl_role_label *next;
81437+
81438+ struct role_transition *transitions;
81439+ struct role_allowed_ip *allowed_ips;
81440+ uid_t *domain_children;
81441+ __u16 domain_child_num;
81442+
81443+ umode_t umask;
81444+
81445+ struct acl_subject_label **subj_hash;
81446+ __u32 subj_hash_size;
81447+};
81448+
81449+struct user_acl_role_db {
81450+ struct acl_role_label **r_table;
81451+ __u32 num_pointers; /* Number of allocations to track */
81452+ __u32 num_roles; /* Number of roles */
81453+ __u32 num_domain_children; /* Number of domain children */
81454+ __u32 num_subjects; /* Number of subjects */
81455+ __u32 num_objects; /* Number of objects */
81456+};
81457+
81458+struct acl_object_label {
81459+ char *filename;
81460+ u64 inode;
81461+ dev_t device;
81462+ __u32 mode;
81463+
81464+ struct acl_subject_label *nested;
81465+ struct acl_object_label *globbed;
81466+
81467+	/* next two fields not used */
81468+
81469+ struct acl_object_label *prev;
81470+ struct acl_object_label *next;
81471+};
81472+
81473+struct acl_ip_label {
81474+ char *iface;
81475+ __u32 addr;
81476+ __u32 netmask;
81477+ __u16 low, high;
81478+ __u8 mode;
81479+ __u32 type;
81480+ __u32 proto[8];
81481+
81482+	/* next two fields not used */
81483+
81484+ struct acl_ip_label *prev;
81485+ struct acl_ip_label *next;
81486+};
81487+
81488+struct gr_arg {
81489+ struct user_acl_role_db role_db;
81490+ unsigned char pw[GR_PW_LEN];
81491+ unsigned char salt[GR_SALT_LEN];
81492+ unsigned char sum[GR_SHA_LEN];
81493+ unsigned char sp_role[GR_SPROLE_LEN];
81494+ struct sprole_pw *sprole_pws;
81495+ dev_t segv_device;
81496+ u64 segv_inode;
81497+ uid_t segv_uid;
81498+ __u16 num_sprole_pws;
81499+ __u16 mode;
81500+};
81501+
81502+struct gr_arg_wrapper {
81503+ struct gr_arg *arg;
81504+ __u32 version;
81505+ __u32 size;
81506+};
81507+
81508+struct subject_map {
81509+ struct acl_subject_label *user;
81510+ struct acl_subject_label *kernel;
81511+ struct subject_map *prev;
81512+ struct subject_map *next;
81513+};
81514+
81515+struct acl_subj_map_db {
81516+ struct subject_map **s_hash;
81517+ __u32 s_size;
81518+};
81519+
81520+struct gr_policy_state {
81521+ struct sprole_pw **acl_special_roles;
81522+ __u16 num_sprole_pws;
81523+ struct acl_role_label *kernel_role;
81524+ struct acl_role_label *role_list;
81525+ struct acl_role_label *default_role;
81526+ struct acl_role_db acl_role_set;
81527+ struct acl_subj_map_db subj_map_set;
81528+ struct name_db name_set;
81529+ struct inodev_db inodev_set;
81530+};
81531+
81532+struct gr_alloc_state {
81533+ unsigned long alloc_stack_next;
81534+ unsigned long alloc_stack_size;
81535+ void **alloc_stack;
81536+};
81537+
81538+struct gr_reload_state {
81539+ struct gr_policy_state oldpolicy;
81540+ struct gr_alloc_state oldalloc;
81541+ struct gr_policy_state newpolicy;
81542+ struct gr_alloc_state newalloc;
81543+ struct gr_policy_state *oldpolicy_ptr;
81544+ struct gr_alloc_state *oldalloc_ptr;
81545+ unsigned char oldmode;
81546+};
81547+
81548+/* End Data Structures Section */
81549+
81550+/* Hash functions generated by empirical testing by Brad Spengler.
81551+   Makes good use of the low bits of the inode.  Generally 0-1 loop
81552+   iterations for a successful match, 0-3 for an unsuccessful match.
81553+   Shift/add algorithm with modulus of table size and an XOR. */
81554+
81555+static __inline__ unsigned int
81556+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
81557+{
81558+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
81559+}
81560+
81561+static __inline__ unsigned int
81562+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
81563+{
81564+ return ((const unsigned long)userp % sz);
81565+}
81566+
81567+static __inline__ unsigned int
81568+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
81569+{
81570+ unsigned int rem;
81571+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
81572+ return rem;
81573+}
81574+
81575+static __inline__ unsigned int
81576+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
81577+{
81578+ return full_name_hash((const unsigned char *)name, len) % sz;
81579+}
81580+
81581+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
81582+ subj = NULL; \
81583+ iter = 0; \
81584+ while (iter < role->subj_hash_size) { \
81585+ if (subj == NULL) \
81586+ subj = role->subj_hash[iter]; \
81587+ if (subj == NULL) { \
81588+ iter++; \
81589+ continue; \
81590+ }
81591+
81592+#define FOR_EACH_SUBJECT_END(subj,iter) \
81593+ subj = subj->next; \
81594+ if (subj == NULL) \
81595+ iter++; \
81596+ }
81597+
81598+
81599+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
81600+ subj = role->hash->first; \
81601+ while (subj != NULL) {
81602+
81603+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
81604+ subj = subj->next; \
81605+ }
81606+
81607+#endif
81608+
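/* [editor's note -- illustrative sketch, not part of the patch] A standalone
 * rendering of the gr_rhash()/gr_fhash() arithmetic above, with the kernel's
 * div_u64_rem() replaced by a plain 64-bit '%' (div_u64_rem() exists so
 * 32-bit kernels avoid a libgcc 64-bit division; the remainder is identical).
 * The sample inputs and table size below are arbitrary. */
#include <stdio.h>
#include <stdint.h>
static unsigned int rhash(uint32_t uid, uint16_t type, unsigned int sz)
{
	/* valid for the small type values the RBAC code passes; larger types
	 * would shift past 31 bits */
	return ((((uid + type) << (16 + type)) ^ uid) % sz);
}
static unsigned int fhash(uint64_t ino, uint32_t dev, unsigned int sz)
{
	return (unsigned int)(((ino + dev) ^ ((ino << 13) + (ino << 23) + ((uint64_t)dev << 9))) % sz);
}
int main(void)
{
	printf("rhash(1000, 0, 256)      = %u\n", rhash(1000, 0, 256));
	printf("fhash(131077, 2049, 256) = %u\n", fhash(131077, 2049, 256));
	return 0;
}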
81609diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
81610new file mode 100644
81611index 0000000..af64092
81612--- /dev/null
81613+++ b/include/linux/gracl_compat.h
81614@@ -0,0 +1,156 @@
81615+#ifndef GR_ACL_COMPAT_H
81616+#define GR_ACL_COMPAT_H
81617+
81618+#include <linux/resource.h>
81619+#include <asm/resource.h>
81620+
81621+struct sprole_pw_compat {
81622+ compat_uptr_t rolename;
81623+ unsigned char salt[GR_SALT_LEN];
81624+ unsigned char sum[GR_SHA_LEN];
81625+};
81626+
81627+struct gr_hash_struct_compat {
81628+ compat_uptr_t table;
81629+ compat_uptr_t nametable;
81630+ compat_uptr_t first;
81631+ __u32 table_size;
81632+ __u32 used_size;
81633+ int type;
81634+};
81635+
81636+struct acl_subject_label_compat {
81637+ compat_uptr_t filename;
81638+ compat_u64 inode;
81639+ __u32 device;
81640+ __u32 mode;
81641+ kernel_cap_t cap_mask;
81642+ kernel_cap_t cap_lower;
81643+ kernel_cap_t cap_invert_audit;
81644+
81645+ struct compat_rlimit res[GR_NLIMITS];
81646+ __u32 resmask;
81647+
81648+ __u8 user_trans_type;
81649+ __u8 group_trans_type;
81650+ compat_uptr_t user_transitions;
81651+ compat_uptr_t group_transitions;
81652+ __u16 user_trans_num;
81653+ __u16 group_trans_num;
81654+
81655+ __u32 sock_families[2];
81656+ __u32 ip_proto[8];
81657+ __u32 ip_type;
81658+ compat_uptr_t ips;
81659+ __u32 ip_num;
81660+ __u32 inaddr_any_override;
81661+
81662+ __u32 crashes;
81663+ compat_ulong_t expires;
81664+
81665+ compat_uptr_t parent_subject;
81666+ compat_uptr_t hash;
81667+ compat_uptr_t prev;
81668+ compat_uptr_t next;
81669+
81670+ compat_uptr_t obj_hash;
81671+ __u32 obj_hash_size;
81672+ __u16 pax_flags;
81673+};
81674+
81675+struct role_allowed_ip_compat {
81676+ __u32 addr;
81677+ __u32 netmask;
81678+
81679+ compat_uptr_t prev;
81680+ compat_uptr_t next;
81681+};
81682+
81683+struct role_transition_compat {
81684+ compat_uptr_t rolename;
81685+
81686+ compat_uptr_t prev;
81687+ compat_uptr_t next;
81688+};
81689+
81690+struct acl_role_label_compat {
81691+ compat_uptr_t rolename;
81692+ uid_t uidgid;
81693+ __u16 roletype;
81694+
81695+ __u16 auth_attempts;
81696+ compat_ulong_t expires;
81697+
81698+ compat_uptr_t root_label;
81699+ compat_uptr_t hash;
81700+
81701+ compat_uptr_t prev;
81702+ compat_uptr_t next;
81703+
81704+ compat_uptr_t transitions;
81705+ compat_uptr_t allowed_ips;
81706+ compat_uptr_t domain_children;
81707+ __u16 domain_child_num;
81708+
81709+ umode_t umask;
81710+
81711+ compat_uptr_t subj_hash;
81712+ __u32 subj_hash_size;
81713+};
81714+
81715+struct user_acl_role_db_compat {
81716+ compat_uptr_t r_table;
81717+ __u32 num_pointers;
81718+ __u32 num_roles;
81719+ __u32 num_domain_children;
81720+ __u32 num_subjects;
81721+ __u32 num_objects;
81722+};
81723+
81724+struct acl_object_label_compat {
81725+ compat_uptr_t filename;
81726+ compat_u64 inode;
81727+ __u32 device;
81728+ __u32 mode;
81729+
81730+ compat_uptr_t nested;
81731+ compat_uptr_t globbed;
81732+
81733+ compat_uptr_t prev;
81734+ compat_uptr_t next;
81735+};
81736+
81737+struct acl_ip_label_compat {
81738+ compat_uptr_t iface;
81739+ __u32 addr;
81740+ __u32 netmask;
81741+ __u16 low, high;
81742+ __u8 mode;
81743+ __u32 type;
81744+ __u32 proto[8];
81745+
81746+ compat_uptr_t prev;
81747+ compat_uptr_t next;
81748+};
81749+
81750+struct gr_arg_compat {
81751+ struct user_acl_role_db_compat role_db;
81752+ unsigned char pw[GR_PW_LEN];
81753+ unsigned char salt[GR_SALT_LEN];
81754+ unsigned char sum[GR_SHA_LEN];
81755+ unsigned char sp_role[GR_SPROLE_LEN];
81756+ compat_uptr_t sprole_pws;
81757+ __u32 segv_device;
81758+ compat_u64 segv_inode;
81759+ uid_t segv_uid;
81760+ __u16 num_sprole_pws;
81761+ __u16 mode;
81762+};
81763+
81764+struct gr_arg_wrapper_compat {
81765+ compat_uptr_t arg;
81766+ __u32 version;
81767+ __u32 size;
81768+};
81769+
81770+#endif
81771diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
81772new file mode 100644
81773index 0000000..323ecf2
81774--- /dev/null
81775+++ b/include/linux/gralloc.h
81776@@ -0,0 +1,9 @@
81777+#ifndef __GRALLOC_H
81778+#define __GRALLOC_H
81779+
81780+void acl_free_all(void);
81781+int acl_alloc_stack_init(unsigned long size);
81782+void *acl_alloc(unsigned long len);
81783+void *acl_alloc_num(unsigned long num, unsigned long len);
81784+
81785+#endif
81786diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
81787new file mode 100644
81788index 0000000..be66033
81789--- /dev/null
81790+++ b/include/linux/grdefs.h
81791@@ -0,0 +1,140 @@
81792+#ifndef GRDEFS_H
81793+#define GRDEFS_H
81794+
81795+/* Begin grsecurity status declarations */
81796+
81797+enum {
81798+ GR_READY = 0x01,
81799+ GR_STATUS_INIT = 0x00 // disabled state
81800+};
81801+
81802+/* Begin ACL declarations */
81803+
81804+/* Role flags */
81805+
81806+enum {
81807+ GR_ROLE_USER = 0x0001,
81808+ GR_ROLE_GROUP = 0x0002,
81809+ GR_ROLE_DEFAULT = 0x0004,
81810+ GR_ROLE_SPECIAL = 0x0008,
81811+ GR_ROLE_AUTH = 0x0010,
81812+ GR_ROLE_NOPW = 0x0020,
81813+ GR_ROLE_GOD = 0x0040,
81814+ GR_ROLE_LEARN = 0x0080,
81815+ GR_ROLE_TPE = 0x0100,
81816+ GR_ROLE_DOMAIN = 0x0200,
81817+ GR_ROLE_PAM = 0x0400,
81818+ GR_ROLE_PERSIST = 0x0800
81819+};
81820+
81821+/* ACL Subject and Object mode flags */
81822+enum {
81823+ GR_DELETED = 0x80000000
81824+};
81825+
81826+/* ACL Object-only mode flags */
81827+enum {
81828+ GR_READ = 0x00000001,
81829+ GR_APPEND = 0x00000002,
81830+ GR_WRITE = 0x00000004,
81831+ GR_EXEC = 0x00000008,
81832+ GR_FIND = 0x00000010,
81833+ GR_INHERIT = 0x00000020,
81834+ GR_SETID = 0x00000040,
81835+ GR_CREATE = 0x00000080,
81836+ GR_DELETE = 0x00000100,
81837+ GR_LINK = 0x00000200,
81838+ GR_AUDIT_READ = 0x00000400,
81839+ GR_AUDIT_APPEND = 0x00000800,
81840+ GR_AUDIT_WRITE = 0x00001000,
81841+ GR_AUDIT_EXEC = 0x00002000,
81842+ GR_AUDIT_FIND = 0x00004000,
81843+ GR_AUDIT_INHERIT= 0x00008000,
81844+ GR_AUDIT_SETID = 0x00010000,
81845+ GR_AUDIT_CREATE = 0x00020000,
81846+ GR_AUDIT_DELETE = 0x00040000,
81847+ GR_AUDIT_LINK = 0x00080000,
81848+ GR_PTRACERD = 0x00100000,
81849+ GR_NOPTRACE = 0x00200000,
81850+ GR_SUPPRESS = 0x00400000,
81851+ GR_NOLEARN = 0x00800000,
81852+ GR_INIT_TRANSFER= 0x01000000
81853+};
81854+
81855+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
81856+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
81857+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
81858+
81859+/* ACL subject-only mode flags */
81860+enum {
81861+ GR_KILL = 0x00000001,
81862+ GR_VIEW = 0x00000002,
81863+ GR_PROTECTED = 0x00000004,
81864+ GR_LEARN = 0x00000008,
81865+ GR_OVERRIDE = 0x00000010,
81866+ /* just a placeholder, this mode is only used in userspace */
81867+ GR_DUMMY = 0x00000020,
81868+ GR_PROTSHM = 0x00000040,
81869+ GR_KILLPROC = 0x00000080,
81870+ GR_KILLIPPROC = 0x00000100,
81871+ /* just a placeholder, this mode is only used in userspace */
81872+ GR_NOTROJAN = 0x00000200,
81873+ GR_PROTPROCFD = 0x00000400,
81874+ GR_PROCACCT = 0x00000800,
81875+ GR_RELAXPTRACE = 0x00001000,
81876+ //GR_NESTED = 0x00002000,
81877+ GR_INHERITLEARN = 0x00004000,
81878+ GR_PROCFIND = 0x00008000,
81879+ GR_POVERRIDE = 0x00010000,
81880+ GR_KERNELAUTH = 0x00020000,
81881+ GR_ATSECURE = 0x00040000,
81882+ GR_SHMEXEC = 0x00080000
81883+};
81884+
81885+enum {
81886+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
81887+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
81888+ GR_PAX_ENABLE_MPROTECT = 0x0004,
81889+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
81890+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
81891+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
81892+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
81893+ GR_PAX_DISABLE_MPROTECT = 0x0400,
81894+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
81895+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
81896+};
81897+
81898+enum {
81899+ GR_ID_USER = 0x01,
81900+ GR_ID_GROUP = 0x02,
81901+};
81902+
81903+enum {
81904+ GR_ID_ALLOW = 0x01,
81905+ GR_ID_DENY = 0x02,
81906+};
81907+
81908+#define GR_CRASH_RES 31
81909+#define GR_UIDTABLE_MAX 500
81910+
81911+/* begin resource learning section */
81912+enum {
81913+ GR_RLIM_CPU_BUMP = 60,
81914+ GR_RLIM_FSIZE_BUMP = 50000,
81915+ GR_RLIM_DATA_BUMP = 10000,
81916+ GR_RLIM_STACK_BUMP = 1000,
81917+ GR_RLIM_CORE_BUMP = 10000,
81918+ GR_RLIM_RSS_BUMP = 500000,
81919+ GR_RLIM_NPROC_BUMP = 1,
81920+ GR_RLIM_NOFILE_BUMP = 5,
81921+ GR_RLIM_MEMLOCK_BUMP = 50000,
81922+ GR_RLIM_AS_BUMP = 500000,
81923+ GR_RLIM_LOCKS_BUMP = 2,
81924+ GR_RLIM_SIGPENDING_BUMP = 5,
81925+ GR_RLIM_MSGQUEUE_BUMP = 10000,
81926+ GR_RLIM_NICE_BUMP = 1,
81927+ GR_RLIM_RTPRIO_BUMP = 1,
81928+ GR_RLIM_RTTIME_BUMP = 1000000
81929+};
81930+
81931+#endif
81932diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
81933new file mode 100644
81934index 0000000..fb1de5d
81935--- /dev/null
81936+++ b/include/linux/grinternal.h
81937@@ -0,0 +1,230 @@
81938+#ifndef __GRINTERNAL_H
81939+#define __GRINTERNAL_H
81940+
81941+#ifdef CONFIG_GRKERNSEC
81942+
81943+#include <linux/fs.h>
81944+#include <linux/mnt_namespace.h>
81945+#include <linux/nsproxy.h>
81946+#include <linux/gracl.h>
81947+#include <linux/grdefs.h>
81948+#include <linux/grmsg.h>
81949+
81950+void gr_add_learn_entry(const char *fmt, ...)
81951+ __attribute__ ((format (printf, 1, 2)));
81952+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
81953+ const struct vfsmount *mnt);
81954+__u32 gr_check_create(const struct dentry *new_dentry,
81955+ const struct dentry *parent,
81956+ const struct vfsmount *mnt, const __u32 mode);
81957+int gr_check_protected_task(const struct task_struct *task);
81958+__u32 to_gr_audit(const __u32 reqmode);
81959+int gr_set_acls(const int type);
81960+int gr_acl_is_enabled(void);
81961+char gr_roletype_to_char(void);
81962+
81963+void gr_handle_alertkill(struct task_struct *task);
81964+char *gr_to_filename(const struct dentry *dentry,
81965+ const struct vfsmount *mnt);
81966+char *gr_to_filename1(const struct dentry *dentry,
81967+ const struct vfsmount *mnt);
81968+char *gr_to_filename2(const struct dentry *dentry,
81969+ const struct vfsmount *mnt);
81970+char *gr_to_filename3(const struct dentry *dentry,
81971+ const struct vfsmount *mnt);
81972+
81973+extern int grsec_enable_ptrace_readexec;
81974+extern int grsec_enable_harden_ptrace;
81975+extern int grsec_enable_link;
81976+extern int grsec_enable_fifo;
81977+extern int grsec_enable_execve;
81978+extern int grsec_enable_shm;
81979+extern int grsec_enable_execlog;
81980+extern int grsec_enable_signal;
81981+extern int grsec_enable_audit_ptrace;
81982+extern int grsec_enable_forkfail;
81983+extern int grsec_enable_time;
81984+extern int grsec_enable_rofs;
81985+extern int grsec_deny_new_usb;
81986+extern int grsec_enable_chroot_shmat;
81987+extern int grsec_enable_chroot_mount;
81988+extern int grsec_enable_chroot_double;
81989+extern int grsec_enable_chroot_pivot;
81990+extern int grsec_enable_chroot_chdir;
81991+extern int grsec_enable_chroot_chmod;
81992+extern int grsec_enable_chroot_mknod;
81993+extern int grsec_enable_chroot_fchdir;
81994+extern int grsec_enable_chroot_nice;
81995+extern int grsec_enable_chroot_execlog;
81996+extern int grsec_enable_chroot_caps;
81997+extern int grsec_enable_chroot_rename;
81998+extern int grsec_enable_chroot_sysctl;
81999+extern int grsec_enable_chroot_unix;
82000+extern int grsec_enable_symlinkown;
82001+extern kgid_t grsec_symlinkown_gid;
82002+extern int grsec_enable_tpe;
82003+extern kgid_t grsec_tpe_gid;
82004+extern int grsec_enable_tpe_all;
82005+extern int grsec_enable_tpe_invert;
82006+extern int grsec_enable_socket_all;
82007+extern kgid_t grsec_socket_all_gid;
82008+extern int grsec_enable_socket_client;
82009+extern kgid_t grsec_socket_client_gid;
82010+extern int grsec_enable_socket_server;
82011+extern kgid_t grsec_socket_server_gid;
82012+extern kgid_t grsec_audit_gid;
82013+extern int grsec_enable_group;
82014+extern int grsec_enable_log_rwxmaps;
82015+extern int grsec_enable_mount;
82016+extern int grsec_enable_chdir;
82017+extern int grsec_resource_logging;
82018+extern int grsec_enable_blackhole;
82019+extern int grsec_lastack_retries;
82020+extern int grsec_enable_brute;
82021+extern int grsec_enable_harden_ipc;
82022+extern int grsec_lock;
82023+
82024+extern spinlock_t grsec_alert_lock;
82025+extern unsigned long grsec_alert_wtime;
82026+extern unsigned long grsec_alert_fyet;
82027+
82028+extern spinlock_t grsec_audit_lock;
82029+
82030+extern rwlock_t grsec_exec_file_lock;
82031+
82032+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
82033+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
82034+ (tsk)->exec_file->f_path.mnt) : "/")
82035+
82036+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
82037+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
82038+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82039+
82040+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
82041+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
82042+ (tsk)->exec_file->f_path.mnt) : "/")
82043+
82044+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
82045+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
82046+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82047+
82048+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
82049+
82050+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
82051+
82052+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
82053+{
82054+ if (file1 && file2) {
82055+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
82056+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
82057+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
82058+ return true;
82059+ }
82060+
82061+ return false;
82062+}
82063+
82064+#define GR_CHROOT_CAPS {{ \
82065+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
82066+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
82067+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
82068+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
82069+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
82070+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
82071+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
82072+
82073+#define security_learn(normal_msg,args...) \
82074+({ \
82075+ read_lock(&grsec_exec_file_lock); \
82076+ gr_add_learn_entry(normal_msg "\n", ## args); \
82077+ read_unlock(&grsec_exec_file_lock); \
82078+})
82079+
82080+enum {
82081+ GR_DO_AUDIT,
82082+ GR_DONT_AUDIT,
82083+ /* used for non-audit messages that we shouldn't kill the task on */
82084+ GR_DONT_AUDIT_GOOD
82085+};
82086+
82087+enum {
82088+ GR_TTYSNIFF,
82089+ GR_RBAC,
82090+ GR_RBAC_STR,
82091+ GR_STR_RBAC,
82092+ GR_RBAC_MODE2,
82093+ GR_RBAC_MODE3,
82094+ GR_FILENAME,
82095+ GR_SYSCTL_HIDDEN,
82096+ GR_NOARGS,
82097+ GR_ONE_INT,
82098+ GR_ONE_INT_TWO_STR,
82099+ GR_ONE_STR,
82100+ GR_STR_INT,
82101+ GR_TWO_STR_INT,
82102+ GR_TWO_INT,
82103+ GR_TWO_U64,
82104+ GR_THREE_INT,
82105+ GR_FIVE_INT_TWO_STR,
82106+ GR_TWO_STR,
82107+ GR_THREE_STR,
82108+ GR_FOUR_STR,
82109+ GR_STR_FILENAME,
82110+ GR_FILENAME_STR,
82111+ GR_FILENAME_TWO_INT,
82112+ GR_FILENAME_TWO_INT_STR,
82113+ GR_TEXTREL,
82114+ GR_PTRACE,
82115+ GR_RESOURCE,
82116+ GR_CAP,
82117+ GR_SIG,
82118+ GR_SIG2,
82119+ GR_CRASH1,
82120+ GR_CRASH2,
82121+ GR_PSACCT,
82122+ GR_RWXMAP,
82123+ GR_RWXMAPVMA
82124+};
82125+
82126+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
82127+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
82128+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
82129+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
82130+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
82131+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
82132+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
82133+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
82134+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
82135+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
82136+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
82137+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
82138+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
82139+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
82140+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
82141+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
82142+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
82143+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
82144+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
82145+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
82146+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
82147+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
82148+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
82149+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
82150+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
82151+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
82152+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
82153+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
82154+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
82155+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
82156+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
82157+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
82158+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
82159+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
82160+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
82161+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
82162+
82163+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
82164+
82165+#endif
82166+
82167+#endif
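/* [editor's note -- illustrative sketch, not part of the patch]
 * GR_CHROOT_CAPS above is a two-word kernel_cap_t initializer: capabilities
 * 0..31 land in word 0 and 32..63 in word 1, with the kernel's CAP_TO_MASK(x)
 * expanding to 1 << ((x) & 31). A user-space rendering of the composition for
 * a few of the listed capabilities, using their numbers from
 * <linux/capability.h>: */
#include <stdio.h>
#define CAP_TO_MASK(x) (1u << ((x) & 31))
int main(void)
{
	unsigned int word0 = CAP_TO_MASK(9)  |	/* CAP_LINUX_IMMUTABLE */
			     CAP_TO_MASK(12) |	/* CAP_NET_ADMIN */
			     CAP_TO_MASK(16) |	/* CAP_SYS_MODULE */
			     CAP_TO_MASK(17);	/* CAP_SYS_RAWIO */
	unsigned int word1 = CAP_TO_MASK(33) |	/* CAP_MAC_ADMIN: 33 & 31 == 1 */
			     CAP_TO_MASK(34);	/* CAP_SYSLOG:    34 & 31 == 2 */
	printf("word0 = %#x, word1 = %#x\n", word0, word1);	/* word1 = 0x6 */
	return 0;
}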
82168diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
82169new file mode 100644
82170index 0000000..26ef560
82171--- /dev/null
82172+++ b/include/linux/grmsg.h
82173@@ -0,0 +1,118 @@
82174+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
82175+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
82176+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
82177+#define GR_STOPMOD_MSG "denied modification of module state by "
82178+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
82179+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
82180+#define GR_IOPERM_MSG "denied use of ioperm() by "
82181+#define GR_IOPL_MSG "denied use of iopl() by "
82182+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
82183+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
82184+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
82185+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
82186+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
82187+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
82188+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
82189+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
82190+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
82191+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
82192+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
82193+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
82194+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
82195+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
82196+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
82197+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
82198+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
82199+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
82200+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
82201+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
82202+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
82203+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
82204+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
82205+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
82206+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
82207+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
82208+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
82209+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
82210+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
82211+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
82212+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
82213+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
82214+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
82215+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
82216+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
82217+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
82218+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
82219+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
82220+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
82221+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
82222+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
82223+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
82224+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
82225+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
82226+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
82227+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
82228+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
82229+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
82230+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
82231+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
82232+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
82233+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
82234+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
82235+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
82236+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
82237+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
82238+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
82239+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
82240+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
82241+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
82242+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
82243+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
82244+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
82245+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
82246+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
82247+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
82248+#define GR_FAILFORK_MSG "failed fork with errno %s by "
82249+#define GR_NICE_CHROOT_MSG "denied priority change by "
82250+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
82251+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
82252+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
82253+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
82254+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
82255+#define GR_TIME_MSG "time set by "
82256+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
82257+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
82258+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
82259+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
82260+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
82261+#define GR_BIND_MSG "denied bind() by "
82262+#define GR_CONNECT_MSG "denied connect() by "
82263+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
82264+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
82265+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
82266+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
82267+#define GR_CAP_ACL_MSG "use of %s denied for "
82268+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
82269+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
82270+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
82271+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
82272+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
82273+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
82274+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
82275+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
82276+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
82277+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
82278+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
82279+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
82280+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
82281+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
82282+#define GR_VM86_MSG "denied use of vm86 by "
82283+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
82284+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
82285+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
82286+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
82287+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
82288+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
82289+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
82290+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
82291+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
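/* [editor's note -- illustrative sketch, not part of the patch] The
 * DEFAULTSECMSG-based macros above lean on C's adjacent-string-literal
 * concatenation: "signal %d sent to " DEFAULTSECMSG " by " folds into a
 * single format string at compile time, and the trailing "by " is completed
 * by the logger appending the acting task's details. A trimmed stand-in
 * macro (DEMOSECMSG is hypothetical) shows the mechanism: */
#include <stdio.h>
#define DEMOSECMSG "%.256s[%d] uid:%u"
#define DEMO_SIG_MSG "signal %d sent to " DEMOSECMSG " by "
int main(void)
{
	/* one printf consumes the merged format's specifiers in order */
	printf(DEMO_SIG_MSG, 9, "victim", 1234, 1000u);
	printf("%.256s[%d]\n", "sender", 4321);	/* the "by ..." tail */
	return 0;
}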
82292diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
82293new file mode 100644
82294index 0000000..63c1850
82295--- /dev/null
82296+++ b/include/linux/grsecurity.h
82297@@ -0,0 +1,250 @@
82298+#ifndef GR_SECURITY_H
82299+#define GR_SECURITY_H
82300+#include <linux/fs.h>
82301+#include <linux/fs_struct.h>
82302+#include <linux/binfmts.h>
82303+#include <linux/gracl.h>
82304+
82305+/* notify of brain-dead configs */
82306+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82307+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
82308+#endif
82309+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82310+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
82311+#endif
82312+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
82313+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
82314+#endif
82315+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
82316+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
82317+#endif
82318+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
82319+#error "CONFIG_PAX enabled, but no PaX options are enabled."
82320+#endif
82321+
82322+int gr_handle_new_usb(void);
82323+
82324+void gr_handle_brute_attach(int dumpable);
82325+void gr_handle_brute_check(void);
82326+void gr_handle_kernel_exploit(void);
82327+
82328+char gr_roletype_to_char(void);
82329+
82330+int gr_proc_is_restricted(void);
82331+
82332+int gr_acl_enable_at_secure(void);
82333+
82334+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
82335+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
82336+
82337+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
82338+
82339+void gr_del_task_from_ip_table(struct task_struct *p);
82340+
82341+int gr_pid_is_chrooted(struct task_struct *p);
82342+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
82343+int gr_handle_chroot_nice(void);
82344+int gr_handle_chroot_sysctl(const int op);
82345+int gr_handle_chroot_setpriority(struct task_struct *p,
82346+ const int niceval);
82347+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
82348+int gr_chroot_fhandle(void);
82349+int gr_handle_chroot_chroot(const struct dentry *dentry,
82350+ const struct vfsmount *mnt);
82351+void gr_handle_chroot_chdir(const struct path *path);
82352+int gr_handle_chroot_chmod(const struct dentry *dentry,
82353+ const struct vfsmount *mnt, const int mode);
82354+int gr_handle_chroot_mknod(const struct dentry *dentry,
82355+ const struct vfsmount *mnt, const int mode);
82356+int gr_handle_chroot_mount(const struct dentry *dentry,
82357+ const struct vfsmount *mnt,
82358+ const char *dev_name);
82359+int gr_handle_chroot_pivot(void);
82360+int gr_handle_chroot_unix(const pid_t pid);
82361+
82362+int gr_handle_rawio(const struct inode *inode);
82363+
82364+void gr_handle_ioperm(void);
82365+void gr_handle_iopl(void);
82366+void gr_handle_msr_write(void);
82367+
82368+umode_t gr_acl_umask(void);
82369+
82370+int gr_tpe_allow(const struct file *file);
82371+
82372+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
82373+void gr_clear_chroot_entries(struct task_struct *task);
82374+
82375+void gr_log_forkfail(const int retval);
82376+void gr_log_timechange(void);
82377+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
82378+void gr_log_chdir(const struct dentry *dentry,
82379+ const struct vfsmount *mnt);
82380+void gr_log_chroot_exec(const struct dentry *dentry,
82381+ const struct vfsmount *mnt);
82382+void gr_log_remount(const char *devname, const int retval);
82383+void gr_log_unmount(const char *devname, const int retval);
82384+void gr_log_mount(const char *from, struct path *to, const int retval);
82385+void gr_log_textrel(struct vm_area_struct *vma);
82386+void gr_log_ptgnustack(struct file *file);
82387+void gr_log_rwxmmap(struct file *file);
82388+void gr_log_rwxmprotect(struct vm_area_struct *vma);
82389+
82390+int gr_handle_follow_link(const struct inode *parent,
82391+ const struct inode *inode,
82392+ const struct dentry *dentry,
82393+ const struct vfsmount *mnt);
82394+int gr_handle_fifo(const struct dentry *dentry,
82395+ const struct vfsmount *mnt,
82396+ const struct dentry *dir, const int flag,
82397+ const int acc_mode);
82398+int gr_handle_hardlink(const struct dentry *dentry,
82399+ const struct vfsmount *mnt,
82400+ struct inode *inode,
82401+ const int mode, const struct filename *to);
82402+
82403+int gr_is_capable(const int cap);
82404+int gr_is_capable_nolog(const int cap);
82405+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
82406+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
82407+
82408+void gr_copy_label(struct task_struct *tsk);
82409+void gr_handle_crash(struct task_struct *task, const int sig);
82410+int gr_handle_signal(const struct task_struct *p, const int sig);
82411+int gr_check_crash_uid(const kuid_t uid);
82412+int gr_check_protected_task(const struct task_struct *task);
82413+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
82414+int gr_acl_handle_mmap(const struct file *file,
82415+ const unsigned long prot);
82416+int gr_acl_handle_mprotect(const struct file *file,
82417+ const unsigned long prot);
82418+int gr_check_hidden_task(const struct task_struct *tsk);
82419+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
82420+ const struct vfsmount *mnt);
82421+__u32 gr_acl_handle_utime(const struct dentry *dentry,
82422+ const struct vfsmount *mnt);
82423+__u32 gr_acl_handle_access(const struct dentry *dentry,
82424+ const struct vfsmount *mnt, const int fmode);
82425+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
82426+ const struct vfsmount *mnt, umode_t *mode);
82427+__u32 gr_acl_handle_chown(const struct dentry *dentry,
82428+ const struct vfsmount *mnt);
82429+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
82430+ const struct vfsmount *mnt);
82431+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
82432+ const struct vfsmount *mnt);
82433+int gr_handle_ptrace(struct task_struct *task, const long request);
82434+int gr_handle_proc_ptrace(struct task_struct *task);
82435+__u32 gr_acl_handle_execve(const struct dentry *dentry,
82436+ const struct vfsmount *mnt);
82437+int gr_check_crash_exec(const struct file *filp);
82438+int gr_acl_is_enabled(void);
82439+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
82440+ const kgid_t gid);
82441+int gr_set_proc_label(const struct dentry *dentry,
82442+ const struct vfsmount *mnt,
82443+ const int unsafe_flags);
82444+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
82445+ const struct vfsmount *mnt);
82446+__u32 gr_acl_handle_open(const struct dentry *dentry,
82447+ const struct vfsmount *mnt, int acc_mode);
82448+__u32 gr_acl_handle_creat(const struct dentry *dentry,
82449+ const struct dentry *p_dentry,
82450+ const struct vfsmount *p_mnt,
82451+ int open_flags, int acc_mode, const int imode);
82452+void gr_handle_create(const struct dentry *dentry,
82453+ const struct vfsmount *mnt);
82454+void gr_handle_proc_create(const struct dentry *dentry,
82455+ const struct inode *inode);
82456+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
82457+ const struct dentry *parent_dentry,
82458+ const struct vfsmount *parent_mnt,
82459+ const int mode);
82460+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
82461+ const struct dentry *parent_dentry,
82462+ const struct vfsmount *parent_mnt);
82463+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
82464+ const struct vfsmount *mnt);
82465+void gr_handle_delete(const u64 ino, const dev_t dev);
82466+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
82467+ const struct vfsmount *mnt);
82468+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
82469+ const struct dentry *parent_dentry,
82470+ const struct vfsmount *parent_mnt,
82471+ const struct filename *from);
82472+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
82473+ const struct dentry *parent_dentry,
82474+ const struct vfsmount *parent_mnt,
82475+ const struct dentry *old_dentry,
82476+ const struct vfsmount *old_mnt, const struct filename *to);
82477+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
82478+int gr_acl_handle_rename(struct dentry *new_dentry,
82479+ struct dentry *parent_dentry,
82480+ const struct vfsmount *parent_mnt,
82481+ struct dentry *old_dentry,
82482+ struct inode *old_parent_inode,
82483+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
82484+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
82485+ struct dentry *old_dentry,
82486+ struct dentry *new_dentry,
82487+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
82488+__u32 gr_check_link(const struct dentry *new_dentry,
82489+ const struct dentry *parent_dentry,
82490+ const struct vfsmount *parent_mnt,
82491+ const struct dentry *old_dentry,
82492+ const struct vfsmount *old_mnt);
82493+int gr_acl_handle_filldir(const struct file *file, const char *name,
82494+ const unsigned int namelen, const u64 ino);
82495+
82496+__u32 gr_acl_handle_unix(const struct dentry *dentry,
82497+ const struct vfsmount *mnt);
82498+void gr_acl_handle_exit(void);
82499+void gr_acl_handle_psacct(struct task_struct *task, const long code);
82500+int gr_acl_handle_procpidmem(const struct task_struct *task);
82501+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
82502+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
82503+void gr_audit_ptrace(struct task_struct *task);
82504+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
82505+u64 gr_get_ino_from_dentry(struct dentry *dentry);
82506+void gr_put_exec_file(struct task_struct *task);
82507+
82508+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
82509+
82510+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82511+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82512+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
82513+ struct dentry *newdentry, struct vfsmount *newmnt);
82514+
82515+#ifdef CONFIG_GRKERNSEC_RESLOG
82516+extern void gr_log_resource(const struct task_struct *task, const int res,
82517+ const unsigned long wanted, const int gt);
82518+#else
82519+static inline void gr_log_resource(const struct task_struct *task, const int res,
82520+ const unsigned long wanted, const int gt)
82521+{
82522+}
82523+#endif
82524+
82525+#ifdef CONFIG_GRKERNSEC
82526+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
82527+void gr_handle_vm86(void);
82528+void gr_handle_mem_readwrite(u64 from, u64 to);
82529+
82530+void gr_log_badprocpid(const char *entry);
82531+
82532+extern int grsec_enable_dmesg;
82533+extern int grsec_disable_privio;
82534+
82535+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
82536+extern kgid_t grsec_proc_gid;
82537+#endif
82538+
82539+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82540+extern int grsec_enable_chroot_findtask;
82541+#endif
82542+#ifdef CONFIG_GRKERNSEC_SETXID
82543+extern int grsec_enable_setxid;
82544+#endif
82545+#endif
82546+
82547+#endif
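
The CONFIG_GRKERNSEC_RESLOG block above uses a standard kernel idiom: declare the real function when the option is enabled, and provide an empty static inline otherwise, so call sites never need their own #ifdef guards and the stub compiles away to nothing. A minimal standalone C sketch of the idiom, assuming nothing beyond the pattern itself (FEATURE_LOGGING and log_event are illustrative names, not from the patch):

    #include <stdio.h>

    /* Compile with -DFEATURE_LOGGING to get the real implementation. */
    #ifdef FEATURE_LOGGING
    void log_event(const char *what, unsigned long value)
    {
        printf("event: %s = %lu\n", what, value);
    }
    #else
    /* Stub: calls vanish entirely when the feature is off. */
    static inline void log_event(const char *what, unsigned long value)
    {
        (void)what;
        (void)value;
    }
    #endif

    int main(void)
    {
        /* No #ifdef needed here -- the same call works either way. */
        log_event("resource", 42);
        return 0;
    }
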
82548diff --git a/include/linux/grsock.h b/include/linux/grsock.h
82549new file mode 100644
82550index 0000000..e7ffaaf
82551--- /dev/null
82552+++ b/include/linux/grsock.h
82553@@ -0,0 +1,19 @@
82554+#ifndef __GRSOCK_H
82555+#define __GRSOCK_H
82556+
82557+extern void gr_attach_curr_ip(const struct sock *sk);
82558+extern int gr_handle_sock_all(const int family, const int type,
82559+ const int protocol);
82560+extern int gr_handle_sock_server(const struct sockaddr *sck);
82561+extern int gr_handle_sock_server_other(const struct sock *sck);
82562+extern int gr_handle_sock_client(const struct sockaddr *sck);
82563+extern int gr_search_connect(struct socket * sock,
82564+ struct sockaddr_in * addr);
82565+extern int gr_search_bind(struct socket * sock,
82566+ struct sockaddr_in * addr);
82567+extern int gr_search_listen(struct socket * sock);
82568+extern int gr_search_accept(struct socket * sock);
82569+extern int gr_search_socket(const int domain, const int type,
82570+ const int protocol);
82571+
82572+#endif
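
These socket hooks all return int so a caller can veto an operation before any resources are committed. A standalone sketch of that gating shape, under the assumption (mine, not stated in the header) that a nonzero return means "allowed"; the caller, toy policy, and error path are illustrative:

    #include <errno.h>
    #include <stdio.h>

    /* Illustrative stand-in for a policy hook; returning nonzero to
     * allow is an assumed convention, not taken from the patch. */
    static int policy_allows_socket(int domain, int type, int protocol)
    {
        (void)domain;
        (void)protocol;
        return type == 1;   /* toy rule: permit only SOCK_STREAM */
    }

    static int create_socket(int domain, int type, int protocol)
    {
        if (!policy_allows_socket(domain, type, protocol))
            return -EACCES; /* veto before anything is allocated */
        printf("socket(%d, %d, %d) permitted\n", domain, type, protocol);
        return 0;           /* a real implementation would return an fd */
    }

    int main(void)
    {
        create_socket(2 /* AF_INET */, 1 /* SOCK_STREAM */, 0);
        return create_socket(2, 2 /* SOCK_DGRAM */, 0) == -EACCES ? 0 : 1;
    }
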
82573diff --git a/include/linux/highmem.h b/include/linux/highmem.h
82574index 9286a46..373f27f 100644
82575--- a/include/linux/highmem.h
82576+++ b/include/linux/highmem.h
82577@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
82578 kunmap_atomic(kaddr);
82579 }
82580
82581+static inline void sanitize_highpage(struct page *page)
82582+{
82583+ void *kaddr;
82584+ unsigned long flags;
82585+
82586+ local_irq_save(flags);
82587+ kaddr = kmap_atomic(page);
82588+ clear_page(kaddr);
82589+ kunmap_atomic(kaddr);
82590+ local_irq_restore(flags);
82591+}
82592+
82593 static inline void zero_user_segments(struct page *page,
82594 unsigned start1, unsigned end1,
82595 unsigned start2, unsigned end2)
82596diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
82597index 1c7b89a..7dda400 100644
82598--- a/include/linux/hwmon-sysfs.h
82599+++ b/include/linux/hwmon-sysfs.h
82600@@ -25,7 +25,8 @@
82601 struct sensor_device_attribute{
82602 struct device_attribute dev_attr;
82603 int index;
82604-};
82605+} __do_const;
82606+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
82607 #define to_sensor_dev_attr(_dev_attr) \
82608 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
82609
82610@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
82611 struct device_attribute dev_attr;
82612 u8 index;
82613 u8 nr;
82614-};
82615+} __do_const;
82616+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
82617 #define to_sensor_dev_attr_2(_dev_attr) \
82618 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
82619
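
The __do_const / _no_const pairing seen here recurs throughout this patch: __do_const is a PaX constify-plugin annotation that forces every instance of a function-pointer-heavy struct into read-only memory, and the paired typedef opts individual variables back out when they genuinely must be written at runtime. A minimal sketch of the pattern using plain C const as a stand-in for the plugin:

    #include <stdio.h>

    struct ops {
        void (*show)(int);
    };

    static void show_value(int v) { printf("value: %d\n", v); }

    /* With the constify plugin, every 'struct ops' instance would be
     * forced const; an explicit const stands in for that here. */
    static const struct ops fixed_ops = { .show = show_value };

    /* The _no_const escape hatch: a deliberately writable type for the
     * rare instance that is filled in at runtime. */
    typedef struct ops ops_no_const;

    int main(void)
    {
        ops_no_const runtime_ops;
        runtime_ops.show = show_value;  /* legal: not const */
        fixed_ops.show(1);
        runtime_ops.show(2);
        return 0;
    }
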
82620diff --git a/include/linux/i2c.h b/include/linux/i2c.h
82621index 7c76959..153e597 100644
82622--- a/include/linux/i2c.h
82623+++ b/include/linux/i2c.h
82624@@ -413,6 +413,7 @@ struct i2c_algorithm {
82625 int (*unreg_slave)(struct i2c_client *client);
82626 #endif
82627 };
82628+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
82629
82630 /**
82631 * struct i2c_bus_recovery_info - I2C bus recovery information
82632diff --git a/include/linux/i2o.h b/include/linux/i2o.h
82633index d23c3c2..eb63c81 100644
82634--- a/include/linux/i2o.h
82635+++ b/include/linux/i2o.h
82636@@ -565,7 +565,7 @@ struct i2o_controller {
82637 struct i2o_device *exec; /* Executive */
82638 #if BITS_PER_LONG == 64
82639 spinlock_t context_list_lock; /* lock for context_list */
82640- atomic_t context_list_counter; /* needed for unique contexts */
82641+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
82642 struct list_head context_list; /* list of context id's
82643 and pointers */
82644 #endif
82645diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
82646index aff7ad8..3942bbd 100644
82647--- a/include/linux/if_pppox.h
82648+++ b/include/linux/if_pppox.h
82649@@ -76,7 +76,7 @@ struct pppox_proto {
82650 int (*ioctl)(struct socket *sock, unsigned int cmd,
82651 unsigned long arg);
82652 struct module *owner;
82653-};
82654+} __do_const;
82655
82656 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
82657 extern void unregister_pppox_proto(int proto_num);
82658diff --git a/include/linux/init.h b/include/linux/init.h
82659index 2df8e8d..3e1280d 100644
82660--- a/include/linux/init.h
82661+++ b/include/linux/init.h
82662@@ -37,9 +37,17 @@
82663 * section.
82664 */
82665
82666+#define add_init_latent_entropy __latent_entropy
82667+
82668+#ifdef CONFIG_MEMORY_HOTPLUG
82669+#define add_meminit_latent_entropy
82670+#else
82671+#define add_meminit_latent_entropy __latent_entropy
82672+#endif
82673+
82674 /* These are for everybody (although not all archs will actually
82675 discard it in modules) */
82676-#define __init __section(.init.text) __cold notrace
82677+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
82678 #define __initdata __section(.init.data)
82679 #define __initconst __constsection(.init.rodata)
82680 #define __exitdata __section(.exit.data)
82681@@ -100,7 +108,7 @@
82682 #define __cpuexitconst
82683
82684 /* Used for MEMORY_HOTPLUG */
82685-#define __meminit __section(.meminit.text) __cold notrace
82686+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
82687 #define __meminitdata __section(.meminit.data)
82688 #define __meminitconst __constsection(.meminit.rodata)
82689 #define __memexit __section(.memexit.text) __exitused __cold notrace
82690diff --git a/include/linux/init_task.h b/include/linux/init_task.h
82691index 3037fc0..c6527ce 100644
82692--- a/include/linux/init_task.h
82693+++ b/include/linux/init_task.h
82694@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
82695
82696 #define INIT_TASK_COMM "swapper"
82697
82698+#ifdef CONFIG_X86
82699+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
82700+#else
82701+#define INIT_TASK_THREAD_INFO
82702+#endif
82703+
82704 #ifdef CONFIG_RT_MUTEXES
82705 # define INIT_RT_MUTEXES(tsk) \
82706 .pi_waiters = RB_ROOT, \
82707@@ -214,6 +220,7 @@ extern struct task_group root_task_group;
82708 RCU_POINTER_INITIALIZER(cred, &init_cred), \
82709 .comm = INIT_TASK_COMM, \
82710 .thread = INIT_THREAD, \
82711+ INIT_TASK_THREAD_INFO \
82712 .fs = &init_fs, \
82713 .files = &init_files, \
82714 .signal = &init_signals, \
82715diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
82716index d9b05b5..e5f5b7b 100644
82717--- a/include/linux/interrupt.h
82718+++ b/include/linux/interrupt.h
82719@@ -413,8 +413,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
82720
82721 struct softirq_action
82722 {
82723- void (*action)(struct softirq_action *);
82724-};
82725+ void (*action)(void);
82726+} __no_const;
82727
82728 asmlinkage void do_softirq(void);
82729 asmlinkage void __do_softirq(void);
82730@@ -428,7 +428,7 @@ static inline void do_softirq_own_stack(void)
82731 }
82732 #endif
82733
82734-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
82735+extern void open_softirq(int nr, void (*action)(void));
82736 extern void softirq_init(void);
82737 extern void __raise_softirq_irqoff(unsigned int nr);
82738
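
Two things happen in this hunk: the handler signature is narrowed to void (*)(void), since the softirq handlers can locate their own data without the struct pointer, and the struct is marked __no_const because the softirq table is populated at runtime through open_softirq() and so must stay writable even under constification. A small sketch of that runtime-registered dispatch table, with made-up action names:

    #include <stdio.h>

    #define NR_ACTIONS 2

    /* Handler type takes no argument, mirroring the narrowed signature. */
    typedef void (*action_fn)(void);

    /* Runtime-registered table: must stay writable (hence __no_const in
     * the patch; plain non-const here). */
    static action_fn actions[NR_ACTIONS];

    static void register_action(int nr, action_fn fn) { actions[nr] = fn; }

    static void timer_action(void) { puts("timer action"); }
    static void net_action(void)   { puts("net action"); }

    int main(void)
    {
        register_action(0, timer_action);
        register_action(1, net_action);
        for (int i = 0; i < NR_ACTIONS; i++)
            if (actions[i])
                actions[i]();   /* dispatch, no argument needed */
        return 0;
    }
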
82739diff --git a/include/linux/iommu.h b/include/linux/iommu.h
82740index 38daa45..4de4317 100644
82741--- a/include/linux/iommu.h
82742+++ b/include/linux/iommu.h
82743@@ -147,7 +147,7 @@ struct iommu_ops {
82744
82745 unsigned long pgsize_bitmap;
82746 void *priv;
82747-};
82748+} __do_const;
82749
82750 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
82751 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
82752diff --git a/include/linux/ioport.h b/include/linux/ioport.h
82753index 2c525022..345b106 100644
82754--- a/include/linux/ioport.h
82755+++ b/include/linux/ioport.h
82756@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
82757 int adjust_resource(struct resource *res, resource_size_t start,
82758 resource_size_t size);
82759 resource_size_t resource_alignment(struct resource *res);
82760-static inline resource_size_t resource_size(const struct resource *res)
82761+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
82762 {
82763 return res->end - res->start + 1;
82764 }
82765diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
82766index 1eee6bc..9cf4912 100644
82767--- a/include/linux/ipc_namespace.h
82768+++ b/include/linux/ipc_namespace.h
82769@@ -60,7 +60,7 @@ struct ipc_namespace {
82770 struct user_namespace *user_ns;
82771
82772 struct ns_common ns;
82773-};
82774+} __randomize_layout;
82775
82776 extern struct ipc_namespace init_ipc_ns;
82777 extern atomic_t nr_ipc_ns;
82778diff --git a/include/linux/irq.h b/include/linux/irq.h
82779index d09ec7a..f373eb5 100644
82780--- a/include/linux/irq.h
82781+++ b/include/linux/irq.h
82782@@ -364,7 +364,8 @@ struct irq_chip {
82783 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
82784
82785 unsigned long flags;
82786-};
82787+} __do_const;
82788+typedef struct irq_chip __no_const irq_chip_no_const;
82789
82790 /*
82791 * irq_chip specific flags
82792diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
82793index 71d706d..817cdec 100644
82794--- a/include/linux/irqchip/arm-gic.h
82795+++ b/include/linux/irqchip/arm-gic.h
82796@@ -95,7 +95,7 @@
82797
82798 struct device_node;
82799
82800-extern struct irq_chip gic_arch_extn;
82801+extern irq_chip_no_const gic_arch_extn;
82802
82803 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
82804 u32 offset, struct device_node *);
82805diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
82806index faf433a..7dcb186 100644
82807--- a/include/linux/irqdesc.h
82808+++ b/include/linux/irqdesc.h
82809@@ -61,7 +61,7 @@ struct irq_desc {
82810 unsigned int irq_count; /* For detecting broken IRQs */
82811 unsigned long last_unhandled; /* Aging timer for unhandled count */
82812 unsigned int irqs_unhandled;
82813- atomic_t threads_handled;
82814+ atomic_unchecked_t threads_handled;
82815 int threads_handled_last;
82816 raw_spinlock_t lock;
82817 struct cpumask *percpu_enabled;
82818diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
82819index c367cbd..c9b79e6 100644
82820--- a/include/linux/jiffies.h
82821+++ b/include/linux/jiffies.h
82822@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
82823 /*
82824 * Convert various time units to each other:
82825 */
82826-extern unsigned int jiffies_to_msecs(const unsigned long j);
82827-extern unsigned int jiffies_to_usecs(const unsigned long j);
82828+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
82829+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
82830
82831-static inline u64 jiffies_to_nsecs(const unsigned long j)
82832+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
82833 {
82834 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
82835 }
82836
82837-extern unsigned long msecs_to_jiffies(const unsigned int m);
82838-extern unsigned long usecs_to_jiffies(const unsigned int u);
82839+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
82840+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
82841 extern unsigned long timespec_to_jiffies(const struct timespec *value);
82842 extern void jiffies_to_timespec(const unsigned long jiffies,
82843- struct timespec *value);
82844-extern unsigned long timeval_to_jiffies(const struct timeval *value);
82845+ struct timespec *value) __intentional_overflow(-1);
82846+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
82847 extern void jiffies_to_timeval(const unsigned long jiffies,
82848 struct timeval *value);
82849
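
The __intentional_overflow(-1) annotations appear to exempt these conversion helpers from the size_overflow GCC plugin's instrumentation (with -1 selecting the return value): time-unit conversions legitimately wrap for large inputs, so instrumenting them would produce false positives. A standalone illustration of the kind of wraparound being tolerated; the HZ value, helper name, and numbers are illustrative, not the kernel's implementation:

    #include <stdio.h>

    #define HZ 250u   /* illustrative tick rate: 4 ms per jiffy */

    /* Simplified conversion in the spirit of jiffies_to_msecs():
     * multiplying a large jiffies count by 1000/HZ can exceed what an
     * unsigned int holds, and the result silently wraps. */
    static unsigned int to_msecs(unsigned long j)
    {
        return (unsigned int)(j * (1000u / HZ));
    }

    int main(void)
    {
        unsigned long huge = 0x40000000ul;  /* 2^30 jiffies */
        /* 2^30 * 4 == 2^32 wraps to 0 in 32 bits -- the kind of
         * deliberate wraparound the annotation exempts. */
        printf("to_msecs(%lu) = %u\n", huge, to_msecs(huge));
        return 0;
    }
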
82850diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
82851index 6883e19..e854fcb 100644
82852--- a/include/linux/kallsyms.h
82853+++ b/include/linux/kallsyms.h
82854@@ -15,7 +15,8 @@
82855
82856 struct module;
82857
82858-#ifdef CONFIG_KALLSYMS
82859+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
82860+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
82861 /* Lookup the address for a symbol. Returns 0 if not found. */
82862 unsigned long kallsyms_lookup_name(const char *name);
82863
82864@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
82865 /* Stupid that this does nothing, but I didn't create this mess. */
82866 #define __print_symbol(fmt, addr)
82867 #endif /*CONFIG_KALLSYMS*/
82868+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
82869+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
82870+extern unsigned long kallsyms_lookup_name(const char *name);
82871+extern void __print_symbol(const char *fmt, unsigned long address);
82872+extern int sprint_backtrace(char *buffer, unsigned long address);
82873+extern int sprint_symbol(char *buffer, unsigned long address);
82874+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
82875+const char *kallsyms_lookup(unsigned long addr,
82876+ unsigned long *symbolsize,
82877+ unsigned long *offset,
82878+ char **modname, char *namebuf);
82879+extern int kallsyms_lookup_size_offset(unsigned long addr,
82880+ unsigned long *symbolsize,
82881+ unsigned long *offset);
82882+#endif
82883
82884 /* This macro allows us to keep printk typechecking */
82885 static __printf(1, 2)
82886diff --git a/include/linux/kernel.h b/include/linux/kernel.h
82887index 64ce58b..6bcdbfa 100644
82888--- a/include/linux/kernel.h
82889+++ b/include/linux/kernel.h
82890@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
82891 /* Obsolete, do not use. Use kstrto<foo> instead */
82892
82893 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
82894-extern long simple_strtol(const char *,char **,unsigned int);
82895+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
82896 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
82897 extern long long simple_strtoll(const char *,char **,unsigned int);
82898
82899diff --git a/include/linux/key-type.h b/include/linux/key-type.h
82900index ff9f1d3..6712be5 100644
82901--- a/include/linux/key-type.h
82902+++ b/include/linux/key-type.h
82903@@ -152,7 +152,7 @@ struct key_type {
82904 /* internal fields */
82905 struct list_head link; /* link in types list */
82906 struct lock_class_key lock_class; /* key->sem lock class */
82907-};
82908+} __do_const;
82909
82910 extern struct key_type key_type_keyring;
82911
82912diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
82913index e465bb1..19f605fd 100644
82914--- a/include/linux/kgdb.h
82915+++ b/include/linux/kgdb.h
82916@@ -52,7 +52,7 @@ extern int kgdb_connected;
82917 extern int kgdb_io_module_registered;
82918
82919 extern atomic_t kgdb_setting_breakpoint;
82920-extern atomic_t kgdb_cpu_doing_single_step;
82921+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
82922
82923 extern struct task_struct *kgdb_usethread;
82924 extern struct task_struct *kgdb_contthread;
82925@@ -254,7 +254,7 @@ struct kgdb_arch {
82926 void (*correct_hw_break)(void);
82927
82928 void (*enable_nmi)(bool on);
82929-};
82930+} __do_const;
82931
82932 /**
82933 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
82934@@ -279,7 +279,7 @@ struct kgdb_io {
82935 void (*pre_exception) (void);
82936 void (*post_exception) (void);
82937 int is_console;
82938-};
82939+} __do_const;
82940
82941 extern struct kgdb_arch arch_kgdb_ops;
82942
82943diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
82944index e705467..a92471d 100644
82945--- a/include/linux/kmemleak.h
82946+++ b/include/linux/kmemleak.h
82947@@ -27,7 +27,7 @@
82948
82949 extern void kmemleak_init(void) __ref;
82950 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
82951- gfp_t gfp) __ref;
82952+ gfp_t gfp) __ref __size_overflow(2);
82953 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
82954 extern void kmemleak_free(const void *ptr) __ref;
82955 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
82956@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
82957 static inline void kmemleak_init(void)
82958 {
82959 }
82960-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
82961+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
82962 gfp_t gfp)
82963 {
82964 }
82965diff --git a/include/linux/kmod.h b/include/linux/kmod.h
82966index 0555cc6..40116ce 100644
82967--- a/include/linux/kmod.h
82968+++ b/include/linux/kmod.h
82969@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
82970 * usually useless though. */
82971 extern __printf(2, 3)
82972 int __request_module(bool wait, const char *name, ...);
82973+extern __printf(3, 4)
82974+int ___request_module(bool wait, char *param_name, const char *name, ...);
82975 #define request_module(mod...) __request_module(true, mod)
82976 #define request_module_nowait(mod...) __request_module(false, mod)
82977 #define try_then_request_module(x, mod...) \
82978@@ -57,6 +59,9 @@ struct subprocess_info {
82979 struct work_struct work;
82980 struct completion *complete;
82981 char *path;
82982+#ifdef CONFIG_GRKERNSEC
82983+ char *origpath;
82984+#endif
82985 char **argv;
82986 char **envp;
82987 int wait;
82988diff --git a/include/linux/kobject.h b/include/linux/kobject.h
82989index 2d61b90..a1d0a13 100644
82990--- a/include/linux/kobject.h
82991+++ b/include/linux/kobject.h
82992@@ -118,7 +118,7 @@ struct kobj_type {
82993 struct attribute **default_attrs;
82994 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
82995 const void *(*namespace)(struct kobject *kobj);
82996-};
82997+} __do_const;
82998
82999 struct kobj_uevent_env {
83000 char *argv[3];
83001@@ -142,6 +142,7 @@ struct kobj_attribute {
83002 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
83003 const char *buf, size_t count);
83004 };
83005+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
83006
83007 extern const struct sysfs_ops kobj_sysfs_ops;
83008
83009@@ -169,7 +170,7 @@ struct kset {
83010 spinlock_t list_lock;
83011 struct kobject kobj;
83012 const struct kset_uevent_ops *uevent_ops;
83013-};
83014+} __randomize_layout;
83015
83016 extern void kset_init(struct kset *kset);
83017 extern int __must_check kset_register(struct kset *kset);
83018diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
83019index df32d25..fb52e27 100644
83020--- a/include/linux/kobject_ns.h
83021+++ b/include/linux/kobject_ns.h
83022@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
83023 const void *(*netlink_ns)(struct sock *sk);
83024 const void *(*initial_ns)(void);
83025 void (*drop_ns)(void *);
83026-};
83027+} __do_const;
83028
83029 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
83030 int kobj_ns_type_registered(enum kobj_ns_type type);
83031diff --git a/include/linux/kref.h b/include/linux/kref.h
83032index 484604d..0f6c5b6 100644
83033--- a/include/linux/kref.h
83034+++ b/include/linux/kref.h
83035@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
83036 static inline int kref_sub(struct kref *kref, unsigned int count,
83037 void (*release)(struct kref *kref))
83038 {
83039- WARN_ON(release == NULL);
83040+ BUG_ON(release == NULL);
83041
83042 if (atomic_sub_and_test((int) count, &kref->refcount)) {
83043 release(kref);
83044diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
83045index 26f1060..bafc04a 100644
83046--- a/include/linux/kvm_host.h
83047+++ b/include/linux/kvm_host.h
83048@@ -470,7 +470,7 @@ static inline void kvm_irqfd_exit(void)
83049 {
83050 }
83051 #endif
83052-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83053+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83054 struct module *module);
83055 void kvm_exit(void);
83056
83057@@ -639,7 +639,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
83058 struct kvm_guest_debug *dbg);
83059 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
83060
83061-int kvm_arch_init(void *opaque);
83062+int kvm_arch_init(const void *opaque);
83063 void kvm_arch_exit(void);
83064
83065 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
83066diff --git a/include/linux/libata.h b/include/linux/libata.h
83067index 91f705d..24be831 100644
83068--- a/include/linux/libata.h
83069+++ b/include/linux/libata.h
83070@@ -979,7 +979,7 @@ struct ata_port_operations {
83071 * fields must be pointers.
83072 */
83073 const struct ata_port_operations *inherits;
83074-};
83075+} __do_const;
83076
83077 struct ata_port_info {
83078 unsigned long flags;
83079diff --git a/include/linux/linkage.h b/include/linux/linkage.h
83080index a6a42dd..6c5ebce 100644
83081--- a/include/linux/linkage.h
83082+++ b/include/linux/linkage.h
83083@@ -36,6 +36,7 @@
83084 #endif
83085
83086 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
83087+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
83088 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
83089
83090 /*
83091diff --git a/include/linux/list.h b/include/linux/list.h
83092index feb773c..98f3075 100644
83093--- a/include/linux/list.h
83094+++ b/include/linux/list.h
83095@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
83096 extern void list_del(struct list_head *entry);
83097 #endif
83098
83099+extern void __pax_list_add(struct list_head *new,
83100+ struct list_head *prev,
83101+ struct list_head *next);
83102+static inline void pax_list_add(struct list_head *new, struct list_head *head)
83103+{
83104+ __pax_list_add(new, head, head->next);
83105+}
83106+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
83107+{
83108+ __pax_list_add(new, head->prev, head);
83109+}
83110+extern void pax_list_del(struct list_head *entry);
83111+
83112 /**
83113 * list_replace - replace old entry by new one
83114 * @old : the element to be replaced
83115@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
83116 INIT_LIST_HEAD(entry);
83117 }
83118
83119+extern void pax_list_del_init(struct list_head *entry);
83120+
83121 /**
83122 * list_move - delete from one list and add as another's head
83123 * @list: the entry to move
83124diff --git a/include/linux/lockref.h b/include/linux/lockref.h
83125index 4bfde0e..d6e2e09 100644
83126--- a/include/linux/lockref.h
83127+++ b/include/linux/lockref.h
83128@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l)
83129 return ((int)l->count < 0);
83130 }
83131
83132+static inline unsigned int __lockref_read(struct lockref *lockref)
83133+{
83134+ return lockref->count;
83135+}
83136+
83137+static inline void __lockref_set(struct lockref *lockref, unsigned int count)
83138+{
83139+ lockref->count = count;
83140+}
83141+
83142+static inline void __lockref_inc(struct lockref *lockref)
83143+{
83144+
83145+#ifdef CONFIG_PAX_REFCOUNT
83146+ atomic_inc((atomic_t *)&lockref->count);
83147+#else
83148+ lockref->count++;
83149+#endif
83150+
83151+}
83152+
83153+static inline void __lockref_dec(struct lockref *lockref)
83154+{
83155+
83156+#ifdef CONFIG_PAX_REFCOUNT
83157+ atomic_dec((atomic_t *)&lockref->count);
83158+#else
83159+ lockref->count--;
83160+#endif
83161+
83162+}
83163+
83164 #endif /* __LINUX_LOCKREF_H */
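
Routing every count manipulation through __lockref_inc()/__lockref_dec() gives CONFIG_PAX_REFCOUNT a single place to swap the plain ++/-- for atomic, overflow-checkable operations, with callers elsewhere in the tree converted to the helpers. A standalone sketch of the same switch, using GCC/Clang atomic builtins as a stand-in (the PAX_REFCOUNT macro name is kept, but this is not the kernel implementation):

    #include <stdio.h>

    struct counted {
        unsigned int count;
    };

    static inline void counted_inc(struct counted *c)
    {
    #ifdef PAX_REFCOUNT
        /* hardened build: atomic RMW, one central place to hang
         * overflow detection */
        __atomic_fetch_add(&c->count, 1, __ATOMIC_SEQ_CST);
    #else
        c->count++;                 /* plain build */
    #endif
    }

    static inline void counted_dec(struct counted *c)
    {
    #ifdef PAX_REFCOUNT
        __atomic_fetch_sub(&c->count, 1, __ATOMIC_SEQ_CST);
    #else
        c->count--;
    #endif
    }

    int main(void)
    {
        struct counted c = { .count = 1 };
        counted_inc(&c);
        counted_dec(&c);
        printf("count = %u\n", c.count);  /* 1 */
        return 0;
    }
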
83165diff --git a/include/linux/math64.h b/include/linux/math64.h
83166index c45c089..298841c 100644
83167--- a/include/linux/math64.h
83168+++ b/include/linux/math64.h
83169@@ -15,7 +15,7 @@
83170 * This is commonly provided by 32bit archs to provide an optimized 64bit
83171 * divide.
83172 */
83173-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83174+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83175 {
83176 *remainder = dividend % divisor;
83177 return dividend / divisor;
83178@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
83179 /**
83180 * div64_u64 - unsigned 64bit divide with 64bit divisor
83181 */
83182-static inline u64 div64_u64(u64 dividend, u64 divisor)
83183+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83184 {
83185 return dividend / divisor;
83186 }
83187@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
83188 #define div64_ul(x, y) div_u64((x), (y))
83189
83190 #ifndef div_u64_rem
83191-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83192+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83193 {
83194 *remainder = do_div(dividend, divisor);
83195 return dividend;
83196@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
83197 #endif
83198
83199 #ifndef div64_u64
83200-extern u64 div64_u64(u64 dividend, u64 divisor);
83201+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
83202 #endif
83203
83204 #ifndef div64_s64
83205@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
83206 * divide.
83207 */
83208 #ifndef div_u64
83209-static inline u64 div_u64(u64 dividend, u32 divisor)
83210+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
83211 {
83212 u32 remainder;
83213 return div_u64_rem(dividend, divisor, &remainder);
83214diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
83215index 3d385c8..deacb6a 100644
83216--- a/include/linux/mempolicy.h
83217+++ b/include/linux/mempolicy.h
83218@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
83219 }
83220
83221 #define vma_policy(vma) ((vma)->vm_policy)
83222+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83223+{
83224+ vma->vm_policy = pol;
83225+}
83226
83227 static inline void mpol_get(struct mempolicy *pol)
83228 {
83229@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
83230 }
83231
83232 #define vma_policy(vma) NULL
83233+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83234+{
83235+}
83236
83237 static inline int
83238 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
83239diff --git a/include/linux/mm.h b/include/linux/mm.h
83240index dd5ea30..cf81cd1 100644
83241--- a/include/linux/mm.h
83242+++ b/include/linux/mm.h
83243@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
83244
83245 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
83246 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
83247+
83248+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
83249+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
83250+#endif
83251+
83252 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
83253 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
83254 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
83255@@ -256,8 +261,8 @@ struct vm_operations_struct {
83256 /* called by access_process_vm when get_user_pages() fails, typically
83257 * for use by special VMAs that can switch between memory and hardware
83258 */
83259- int (*access)(struct vm_area_struct *vma, unsigned long addr,
83260- void *buf, int len, int write);
83261+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
83262+ void *buf, size_t len, int write);
83263
83264 /* Called by the /proc/PID/maps code to ask the vma whether it
83265 * has a special name. Returning non-NULL will also cause this
83266@@ -291,6 +296,7 @@ struct vm_operations_struct {
83267 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
83268 unsigned long size, pgoff_t pgoff);
83269 };
83270+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
83271
83272 struct mmu_gather;
83273 struct inode;
83274@@ -1183,8 +1189,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
83275 unsigned long *pfn);
83276 int follow_phys(struct vm_area_struct *vma, unsigned long address,
83277 unsigned int flags, unsigned long *prot, resource_size_t *phys);
83278-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83279- void *buf, int len, int write);
83280+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83281+ void *buf, size_t len, int write);
83282
83283 static inline void unmap_shared_mapping_range(struct address_space *mapping,
83284 loff_t const holebegin, loff_t const holelen)
83285@@ -1224,9 +1230,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
83286 }
83287 #endif
83288
83289-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
83290-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83291- void *buf, int len, int write);
83292+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
83293+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83294+ void *buf, size_t len, int write);
83295
83296 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
83297 unsigned long start, unsigned long nr_pages,
83298@@ -1258,34 +1264,6 @@ int set_page_dirty_lock(struct page *page);
83299 int clear_page_dirty_for_io(struct page *page);
83300 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
83301
83302-/* Is the vma a continuation of the stack vma above it? */
83303-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
83304-{
83305- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
83306-}
83307-
83308-static inline int stack_guard_page_start(struct vm_area_struct *vma,
83309- unsigned long addr)
83310-{
83311- return (vma->vm_flags & VM_GROWSDOWN) &&
83312- (vma->vm_start == addr) &&
83313- !vma_growsdown(vma->vm_prev, addr);
83314-}
83315-
83316-/* Is the vma a continuation of the stack vma below it? */
83317-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
83318-{
83319- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
83320-}
83321-
83322-static inline int stack_guard_page_end(struct vm_area_struct *vma,
83323- unsigned long addr)
83324-{
83325- return (vma->vm_flags & VM_GROWSUP) &&
83326- (vma->vm_end == addr) &&
83327- !vma_growsup(vma->vm_next, addr);
83328-}
83329-
83330 extern struct task_struct *task_of_stack(struct task_struct *task,
83331 struct vm_area_struct *vma, bool in_group);
83332
83333@@ -1403,8 +1381,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
83334 {
83335 return 0;
83336 }
83337+
83338+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
83339+ unsigned long address)
83340+{
83341+ return 0;
83342+}
83343 #else
83344 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83345+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83346 #endif
83347
83348 #ifdef __PAGETABLE_PMD_FOLDED
83349@@ -1413,8 +1398,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
83350 {
83351 return 0;
83352 }
83353+
83354+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
83355+ unsigned long address)
83356+{
83357+ return 0;
83358+}
83359 #else
83360 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
83361+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
83362 #endif
83363
83364 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
83365@@ -1432,11 +1424,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
83366 NULL: pud_offset(pgd, address);
83367 }
83368
83369+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
83370+{
83371+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
83372+ NULL: pud_offset(pgd, address);
83373+}
83374+
83375 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
83376 {
83377 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
83378 NULL: pmd_offset(pud, address);
83379 }
83380+
83381+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
83382+{
83383+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
83384+ NULL: pmd_offset(pud, address);
83385+}
83386 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
83387
83388 #if USE_SPLIT_PTE_PTLOCKS
83389@@ -1819,12 +1823,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
83390 bool *need_rmap_locks);
83391 extern void exit_mmap(struct mm_struct *);
83392
83393+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
83394+extern void gr_learn_resource(const struct task_struct *task, const int res,
83395+ const unsigned long wanted, const int gt);
83396+#else
83397+static inline void gr_learn_resource(const struct task_struct *task, const int res,
83398+ const unsigned long wanted, const int gt)
83399+{
83400+}
83401+#endif
83402+
83403 static inline int check_data_rlimit(unsigned long rlim,
83404 unsigned long new,
83405 unsigned long start,
83406 unsigned long end_data,
83407 unsigned long start_data)
83408 {
83409+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
83410 if (rlim < RLIM_INFINITY) {
83411 if (((new - start) + (end_data - start_data)) > rlim)
83412 return -ENOSPC;
83413@@ -1849,7 +1864,7 @@ extern int install_special_mapping(struct mm_struct *mm,
83414 unsigned long addr, unsigned long len,
83415 unsigned long flags, struct page **pages);
83416
83417-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
83418+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
83419
83420 extern unsigned long mmap_region(struct file *file, unsigned long addr,
83421 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
83422@@ -1857,6 +1872,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
83423 unsigned long len, unsigned long prot, unsigned long flags,
83424 unsigned long pgoff, unsigned long *populate);
83425 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
83426+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
83427
83428 #ifdef CONFIG_MMU
83429 extern int __mm_populate(unsigned long addr, unsigned long len,
83430@@ -1885,10 +1901,11 @@ struct vm_unmapped_area_info {
83431 unsigned long high_limit;
83432 unsigned long align_mask;
83433 unsigned long align_offset;
83434+ unsigned long threadstack_offset;
83435 };
83436
83437-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
83438-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83439+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
83440+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
83441
83442 /*
83443 * Search for an unmapped address range.
83444@@ -1900,7 +1917,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83445 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
83446 */
83447 static inline unsigned long
83448-vm_unmapped_area(struct vm_unmapped_area_info *info)
83449+vm_unmapped_area(const struct vm_unmapped_area_info *info)
83450 {
83451 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
83452 return unmapped_area(info);
83453@@ -1962,6 +1979,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
83454 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
83455 struct vm_area_struct **pprev);
83456
83457+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
83458+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
83459+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
83460+
83461 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
83462 NULL if none. Assume start_addr < end_addr. */
83463 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
83464@@ -1991,10 +2012,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
83465 }
83466
83467 #ifdef CONFIG_MMU
83468-pgprot_t vm_get_page_prot(unsigned long vm_flags);
83469+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
83470 void vma_set_page_prot(struct vm_area_struct *vma);
83471 #else
83472-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
83473+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
83474 {
83475 return __pgprot(0);
83476 }
83477@@ -2056,6 +2077,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
83478 static inline void vm_stat_account(struct mm_struct *mm,
83479 unsigned long flags, struct file *file, long pages)
83480 {
83481+
83482+#ifdef CONFIG_PAX_RANDMMAP
83483+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
83484+#endif
83485+
83486 mm->total_vm += pages;
83487 }
83488 #endif /* CONFIG_PROC_FS */
83489@@ -2159,7 +2185,7 @@ extern int unpoison_memory(unsigned long pfn);
83490 extern int sysctl_memory_failure_early_kill;
83491 extern int sysctl_memory_failure_recovery;
83492 extern void shake_page(struct page *p, int access);
83493-extern atomic_long_t num_poisoned_pages;
83494+extern atomic_long_unchecked_t num_poisoned_pages;
83495 extern int soft_offline_page(struct page *page, int flags);
83496
83497 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
83498@@ -2210,5 +2236,11 @@ void __init setup_nr_node_ids(void);
83499 static inline void setup_nr_node_ids(void) {}
83500 #endif
83501
83502+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83503+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
83504+#else
83505+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
83506+#endif
83507+
83508 #endif /* __KERNEL__ */
83509 #endif /* _LINUX_MM_H */
83510diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
83511index 6d34aa2..d73d848 100644
83512--- a/include/linux/mm_types.h
83513+++ b/include/linux/mm_types.h
83514@@ -309,7 +309,9 @@ struct vm_area_struct {
83515 #ifdef CONFIG_NUMA
83516 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
83517 #endif
83518-};
83519+
83520+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
83521+} __randomize_layout;
83522
83523 struct core_thread {
83524 struct task_struct *task;
83525@@ -459,7 +461,25 @@ struct mm_struct {
83526 /* address of the bounds directory */
83527 void __user *bd_addr;
83528 #endif
83529-};
83530+
83531+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
83532+ unsigned long pax_flags;
83533+#endif
83534+
83535+#ifdef CONFIG_PAX_DLRESOLVE
83536+ unsigned long call_dl_resolve;
83537+#endif
83538+
83539+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
83540+ unsigned long call_syscall;
83541+#endif
83542+
83543+#ifdef CONFIG_PAX_ASLR
83544+ unsigned long delta_mmap; /* randomized offset */
83545+ unsigned long delta_stack; /* randomized offset */
83546+#endif
83547+
83548+} __randomize_layout;
83549
83550 static inline void mm_init_cpumask(struct mm_struct *mm)
83551 {
83552diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
83553index c5d5278..f0b68c8 100644
83554--- a/include/linux/mmiotrace.h
83555+++ b/include/linux/mmiotrace.h
83556@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
83557 /* Called from ioremap.c */
83558 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
83559 void __iomem *addr);
83560-extern void mmiotrace_iounmap(volatile void __iomem *addr);
83561+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
83562
83563 /* For anyone to insert markers. Remember trailing newline. */
83564 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
83565@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
83566 {
83567 }
83568
83569-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
83570+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
83571 {
83572 }
83573
83574diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
83575index 2f0856d..5a4bc1e 100644
83576--- a/include/linux/mmzone.h
83577+++ b/include/linux/mmzone.h
83578@@ -527,7 +527,7 @@ struct zone {
83579
83580 ZONE_PADDING(_pad3_)
83581 /* Zone statistics */
83582- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83583+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83584 } ____cacheline_internodealigned_in_smp;
83585
83586 enum zone_flags {
83587diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
83588index 745def8..08a820b 100644
83589--- a/include/linux/mod_devicetable.h
83590+++ b/include/linux/mod_devicetable.h
83591@@ -139,7 +139,7 @@ struct usb_device_id {
83592 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
83593 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
83594
83595-#define HID_ANY_ID (~0)
83596+#define HID_ANY_ID (~0U)
83597 #define HID_BUS_ANY 0xffff
83598 #define HID_GROUP_ANY 0x0000
83599
83600@@ -475,7 +475,7 @@ struct dmi_system_id {
83601 const char *ident;
83602 struct dmi_strmatch matches[4];
83603 void *driver_data;
83604-};
83605+} __do_const;
83606 /*
83607 * struct dmi_device_id appears during expansion of
83608 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
83609diff --git a/include/linux/module.h b/include/linux/module.h
83610index b653d7c..22a238f 100644
83611--- a/include/linux/module.h
83612+++ b/include/linux/module.h
83613@@ -17,9 +17,11 @@
83614 #include <linux/moduleparam.h>
83615 #include <linux/jump_label.h>
83616 #include <linux/export.h>
83617+#include <linux/fs.h>
83618
83619 #include <linux/percpu.h>
83620 #include <asm/module.h>
83621+#include <asm/pgtable.h>
83622
83623 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
83624 #define MODULE_SIG_STRING "~Module signature appended~\n"
83625@@ -42,7 +44,7 @@ struct module_kobject {
83626 struct kobject *drivers_dir;
83627 struct module_param_attrs *mp;
83628 struct completion *kobj_completion;
83629-};
83630+} __randomize_layout;
83631
83632 struct module_attribute {
83633 struct attribute attr;
83634@@ -54,12 +56,13 @@ struct module_attribute {
83635 int (*test)(struct module *);
83636 void (*free)(struct module *);
83637 };
83638+typedef struct module_attribute __no_const module_attribute_no_const;
83639
83640 struct module_version_attribute {
83641 struct module_attribute mattr;
83642 const char *module_name;
83643 const char *version;
83644-} __attribute__ ((__aligned__(sizeof(void *))));
83645+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
83646
83647 extern ssize_t __modver_version_show(struct module_attribute *,
83648 struct module_kobject *, char *);
83649@@ -221,7 +224,7 @@ struct module {
83650
83651 /* Sysfs stuff. */
83652 struct module_kobject mkobj;
83653- struct module_attribute *modinfo_attrs;
83654+ module_attribute_no_const *modinfo_attrs;
83655 const char *version;
83656 const char *srcversion;
83657 struct kobject *holders_dir;
83658@@ -270,19 +273,16 @@ struct module {
83659 int (*init)(void);
83660
83661 /* If this is non-NULL, vfree after init() returns */
83662- void *module_init;
83663+ void *module_init_rx, *module_init_rw;
83664
83665 /* Here is the actual code + data, vfree'd on unload. */
83666- void *module_core;
83667+ void *module_core_rx, *module_core_rw;
83668
83669 /* Here are the sizes of the init and core sections */
83670- unsigned int init_size, core_size;
83671+ unsigned int init_size_rw, core_size_rw;
83672
83673 /* The size of the executable code in each section. */
83674- unsigned int init_text_size, core_text_size;
83675-
83676- /* Size of RO sections of the module (text+rodata) */
83677- unsigned int init_ro_size, core_ro_size;
83678+ unsigned int init_size_rx, core_size_rx;
83679
83680 /* Arch-specific module values */
83681 struct mod_arch_specific arch;
83682@@ -338,6 +338,10 @@ struct module {
83683 #ifdef CONFIG_EVENT_TRACING
83684 struct ftrace_event_call **trace_events;
83685 unsigned int num_trace_events;
83686+ struct file_operations trace_id;
83687+ struct file_operations trace_enable;
83688+ struct file_operations trace_format;
83689+ struct file_operations trace_filter;
83690 #endif
83691 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
83692 unsigned int num_ftrace_callsites;
83693@@ -361,7 +365,7 @@ struct module {
83694 ctor_fn_t *ctors;
83695 unsigned int num_ctors;
83696 #endif
83697-};
83698+} __randomize_layout;
83699 #ifndef MODULE_ARCH_INIT
83700 #define MODULE_ARCH_INIT {}
83701 #endif
83702@@ -382,18 +386,48 @@ bool is_module_address(unsigned long addr);
83703 bool is_module_percpu_address(unsigned long addr);
83704 bool is_module_text_address(unsigned long addr);
83705
83706+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
83707+{
83708+
83709+#ifdef CONFIG_PAX_KERNEXEC
83710+ if (ktla_ktva(addr) >= (unsigned long)start &&
83711+ ktla_ktva(addr) < (unsigned long)start + size)
83712+ return 1;
83713+#endif
83714+
83715+ return ((void *)addr >= start && (void *)addr < start + size);
83716+}
83717+
83718+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
83719+{
83720+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
83721+}
83722+
83723+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
83724+{
83725+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
83726+}
83727+
83728+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
83729+{
83730+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
83731+}
83732+
83733+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
83734+{
83735+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
83736+}
83737+
83738 static inline bool within_module_core(unsigned long addr,
83739 const struct module *mod)
83740 {
83741- return (unsigned long)mod->module_core <= addr &&
83742- addr < (unsigned long)mod->module_core + mod->core_size;
83743+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
83744 }
83745
83746 static inline bool within_module_init(unsigned long addr,
83747 const struct module *mod)
83748 {
83749- return (unsigned long)mod->module_init <= addr &&
83750- addr < (unsigned long)mod->module_init + mod->init_size;
83751+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
83752 }
83753
83754 static inline bool within_module(unsigned long addr, const struct module *mod)
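
Under KERNEXEC a module's memory is split into an executable read-only half (module_core_rx/init_size_rx) and a writable non-executable half (module_core_rw/init_size_rw), so the old single-range within_module_core() becomes an OR over two ranges; within_module_range() additionally tests the ktla_ktva()-translated address to account for KERNEXEC's separately mapped kernel text. A sketch of the two-range membership test, with a made-up layout:

    #include <stdio.h>
    #include <stdint.h>

    struct region {
        uintptr_t start;
        size_t    size;
    };

    static int within(uintptr_t addr, const struct region *r)
    {
        return addr >= r->start && addr < r->start + r->size;
    }

    int main(void)
    {
        /* made-up layout: rx (code) and rw (data) halves of one module */
        struct region core_rx = { 0xffff1000, 0x2000 };
        struct region core_rw = { 0xffff4000, 0x1000 };
        uintptr_t addr = 0xffff4800;

        /* the patched within_module_core(): membership in either half */
        int in_core = within(addr, &core_rx) || within(addr, &core_rw);
        printf("0x%lx in module core: %s\n",
               (unsigned long)addr, in_core ? "yes" : "no");
        return 0;
    }
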
83755diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
83756index f755626..641f822 100644
83757--- a/include/linux/moduleloader.h
83758+++ b/include/linux/moduleloader.h
83759@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
83760 sections. Returns NULL on failure. */
83761 void *module_alloc(unsigned long size);
83762
83763+#ifdef CONFIG_PAX_KERNEXEC
83764+void *module_alloc_exec(unsigned long size);
83765+#else
83766+#define module_alloc_exec(x) module_alloc(x)
83767+#endif
83768+
83769 /* Free memory returned from module_alloc. */
83770 void module_memfree(void *module_region);
83771
83772+#ifdef CONFIG_PAX_KERNEXEC
83773+void module_memfree_exec(void *module_region);
83774+#else
83775+#define module_memfree_exec(x) module_memfree((x))
83776+#endif
83777+
83778 /*
83779 * Apply the given relocation to the (simplified) ELF. Return -error
83780 * or 0.
83781@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
83782 unsigned int relsec,
83783 struct module *me)
83784 {
83785+#ifdef CONFIG_MODULES
83786 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83787 module_name(me));
83788+#endif
83789 return -ENOEXEC;
83790 }
83791 #endif
83792@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
83793 unsigned int relsec,
83794 struct module *me)
83795 {
83796+#ifdef CONFIG_MODULES
83797 	printk(KERN_ERR "module %s: RELA relocation unsupported\n",
83798 module_name(me));
83799+#endif
83800 return -ENOEXEC;
83801 }
83802 #endif
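moduleloader.h gains executable-pool variants of the allocator: with CONFIG_PAX_KERNEXEC enabled, module_alloc_exec()/module_memfree_exec() are real functions backed by a separate RX mapping, and without it they collapse into the plain allocators via macros, so arch module loaders can call the _exec names unconditionally. The added CONFIG_MODULES guards appear intended to keep the stub relocation handlers from touching module_name() on modules-disabled builds. A sketch of the fallback-alias pattern, with names invented for illustration:

#include <stdlib.h>

void *buf_alloc(unsigned long size) { return malloc(size); }
void  buf_free(void *region)        { free(region); }

#ifdef FEATURE_SPLIT_POOLS		/* stands in for CONFIG_PAX_KERNEXEC */
void *buf_alloc_exec(unsigned long size);
void  buf_free_exec(void *region);
#else
/* feature compiled out: the _exec names alias the plain allocator,
 * so callers need no #ifdefs of their own */
#define buf_alloc_exec(x) buf_alloc(x)
#define buf_free_exec(x)  buf_free((x))
#endif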
83803diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
83804index 1c9effa..1160bdd 100644
83805--- a/include/linux/moduleparam.h
83806+++ b/include/linux/moduleparam.h
83807@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
83808 * @len is usually just sizeof(string).
83809 */
83810 #define module_param_string(name, string, len, perm) \
83811- static const struct kparam_string __param_string_##name \
83812+ static const struct kparam_string __param_string_##name __used \
83813 = { len, string }; \
83814 __module_param_call(MODULE_PARAM_PREFIX, name, \
83815 &param_ops_string, \
83816@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
83817 */
83818 #define module_param_array_named(name, array, type, nump, perm) \
83819 param_check_##type(name, &(array)[0]); \
83820- static const struct kparam_array __param_arr_##name \
83821+ static const struct kparam_array __param_arr_##name __used \
83822 = { .max = ARRAY_SIZE(array), .num = nump, \
83823 .ops = &param_ops_##type, \
83824 .elemsize = sizeof(array[0]), .elem = array }; \
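The two moduleparam.h changes tag the generated kparam_string/kparam_array objects __used; these statics are only reached through the linker section emitted by __module_param_call(), so without the attribute a garbage-collecting build (or, presumably, the stricter analysis done by the gcc plugins this patch adds) could discard them. A minimal sketch of what the attribute buys:

/* __used expands to the GCC attribute below in the kernel; it pins a
 * static object even when no code references it by name, e.g. when it
 * is only reachable through a linker section. */
#define __used __attribute__((__used__))

static const char keep_me[] __used = "kept even without any references";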
83825diff --git a/include/linux/mount.h b/include/linux/mount.h
83826index c2c561d..a5f2a8c 100644
83827--- a/include/linux/mount.h
83828+++ b/include/linux/mount.h
83829@@ -66,7 +66,7 @@ struct vfsmount {
83830 struct dentry *mnt_root; /* root of the mounted tree */
83831 struct super_block *mnt_sb; /* pointer to superblock */
83832 int mnt_flags;
83833-};
83834+} __randomize_layout;
83835
83836 struct file; /* forward dec */
83837 struct path;
83838diff --git a/include/linux/namei.h b/include/linux/namei.h
83839index c899077..b9a2010 100644
83840--- a/include/linux/namei.h
83841+++ b/include/linux/namei.h
83842@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
83843 extern void unlock_rename(struct dentry *, struct dentry *);
83844
83845 extern void nd_jump_link(struct nameidata *nd, struct path *path);
83846-extern void nd_set_link(struct nameidata *nd, char *path);
83847-extern char *nd_get_link(struct nameidata *nd);
83848+extern void nd_set_link(struct nameidata *nd, const char *path);
83849+extern const char *nd_get_link(const struct nameidata *nd);
83850
83851 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
83852 {
83853diff --git a/include/linux/net.h b/include/linux/net.h
83854index 17d8339..81656c0 100644
83855--- a/include/linux/net.h
83856+++ b/include/linux/net.h
83857@@ -192,7 +192,7 @@ struct net_proto_family {
83858 int (*create)(struct net *net, struct socket *sock,
83859 int protocol, int kern);
83860 struct module *owner;
83861-};
83862+} __do_const;
83863
83864 struct iovec;
83865 struct kvec;
83866diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
83867index 52fd8e8..19430a1 100644
83868--- a/include/linux/netdevice.h
83869+++ b/include/linux/netdevice.h
83870@@ -1191,6 +1191,7 @@ struct net_device_ops {
83871 u8 state);
83872 #endif
83873 };
83874+typedef struct net_device_ops __no_const net_device_ops_no_const;
83875
83876 /**
83877 * enum net_device_priv_flags - &struct net_device priv_flags
83878@@ -1537,10 +1538,10 @@ struct net_device {
83879
83880 struct net_device_stats stats;
83881
83882- atomic_long_t rx_dropped;
83883- atomic_long_t tx_dropped;
83884+ atomic_long_unchecked_t rx_dropped;
83885+ atomic_long_unchecked_t tx_dropped;
83886
83887- atomic_t carrier_changes;
83888+ atomic_unchecked_t carrier_changes;
83889
83890 #ifdef CONFIG_WIRELESS_EXT
83891 const struct iw_handler_def * wireless_handlers;
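The netdevice.h counters rx_dropped, tx_dropped and carrier_changes are statistics rather than reference counts, so they are converted to the *_unchecked_t types, which PaX's REFCOUNT hardening exempts from overflow trapping. A userspace analogue of the distinction:

#include <limits.h>
#include <stdio.h>

/* A "checked" increment refuses to wrap, which is what REFCOUNT
 * enforces for atomic_t; the unchecked variant keeps plain wraparound
 * semantics, acceptable for statistics. */
static int checked_inc(int *v)
{
	if (*v == INT_MAX)
		return -1;	/* would overflow: refuse, as a REFCOUNT trap would */
	return ++*v;
}

int main(void)
{
	int refcount = INT_MAX;
	unsigned int dropped = UINT_MAX;

	printf("%d\n", checked_inc(&refcount));	/* -1: increment refused */
	printf("%u\n", ++dropped);		/* 0: counter wrapped harmlessly */
	return 0;
}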
83892diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
83893index 2517ece..0bbfcfb 100644
83894--- a/include/linux/netfilter.h
83895+++ b/include/linux/netfilter.h
83896@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
83897 #endif
83898 /* Use the module struct to lock set/get code in place */
83899 struct module *owner;
83900-};
83901+} __do_const;
83902
83903 /* Function to register/unregister hook points. */
83904 int nf_register_hook(struct nf_hook_ops *reg);
83905diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
83906index e955d47..04a5338 100644
83907--- a/include/linux/netfilter/nfnetlink.h
83908+++ b/include/linux/netfilter/nfnetlink.h
83909@@ -19,7 +19,7 @@ struct nfnl_callback {
83910 const struct nlattr * const cda[]);
83911 const struct nla_policy *policy; /* netlink attribute policy */
83912 const u_int16_t attr_count; /* number of nlattr's */
83913-};
83914+} __do_const;
83915
83916 struct nfnetlink_subsystem {
83917 const char *name;
83918diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
83919new file mode 100644
83920index 0000000..33f4af8
83921--- /dev/null
83922+++ b/include/linux/netfilter/xt_gradm.h
83923@@ -0,0 +1,9 @@
83924+#ifndef _LINUX_NETFILTER_XT_GRADM_H
83925+#define _LINUX_NETFILTER_XT_GRADM_H 1
83926+
83927+struct xt_gradm_mtinfo {
83928+ __u16 flags;
83929+ __u16 invflags;
83930+};
83931+
83932+#endif
83933diff --git a/include/linux/nls.h b/include/linux/nls.h
83934index 520681b..2b7fabb 100644
83935--- a/include/linux/nls.h
83936+++ b/include/linux/nls.h
83937@@ -31,7 +31,7 @@ struct nls_table {
83938 const unsigned char *charset2upper;
83939 struct module *owner;
83940 struct nls_table *next;
83941-};
83942+} __do_const;
83943
83944 /* this value hold the maximum octet of charset */
83945 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
83946@@ -46,7 +46,7 @@ enum utf16_endian {
83947 /* nls_base.c */
83948 extern int __register_nls(struct nls_table *, struct module *);
83949 extern int unregister_nls(struct nls_table *);
83950-extern struct nls_table *load_nls(char *);
83951+extern struct nls_table *load_nls(const char *);
83952 extern void unload_nls(struct nls_table *);
83953 extern struct nls_table *load_nls_default(void);
83954 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
83955diff --git a/include/linux/notifier.h b/include/linux/notifier.h
83956index d14a4c3..a078786 100644
83957--- a/include/linux/notifier.h
83958+++ b/include/linux/notifier.h
83959@@ -54,7 +54,8 @@ struct notifier_block {
83960 notifier_fn_t notifier_call;
83961 struct notifier_block __rcu *next;
83962 int priority;
83963-};
83964+} __do_const;
83965+typedef struct notifier_block __no_const notifier_block_no_const;
83966
83967 struct atomic_notifier_head {
83968 spinlock_t lock;
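notifier.h shows the pairing used throughout these headers: __do_const lets the constify plugin move the structure to read-only memory, while the __no_const typedef re-opens the few instances that must be written at runtime. A plain-C sketch with const standing in for the plugin annotations:

/* Default-const ops table plus an explicitly writable typedef for
 * runtime-patched copies. */
struct demo_ops {
	int (*call)(int);
};
typedef struct demo_ops demo_ops_no_const;	/* writable variant */

static int twice(int x) { return 2 * x; }

static const struct demo_ops fixed_ops = { .call = twice };

int demo(void)
{
	demo_ops_no_const patched = fixed_ops;	/* this copy may be edited */
	return patched.call(21);
}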
83969diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
83970index b2a0f15..4d7da32 100644
83971--- a/include/linux/oprofile.h
83972+++ b/include/linux/oprofile.h
83973@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
83974 int oprofilefs_create_ro_ulong(struct dentry * root,
83975 char const * name, ulong * val);
83976
83977-/** Create a file for read-only access to an atomic_t. */
83978+/** Create a file for read-only access to an atomic_unchecked_t. */
83979 int oprofilefs_create_ro_atomic(struct dentry * root,
83980- char const * name, atomic_t * val);
83981+ char const * name, atomic_unchecked_t * val);
83982
83983 /** create a directory */
83984 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
83985diff --git a/include/linux/padata.h b/include/linux/padata.h
83986index 4386946..f50c615 100644
83987--- a/include/linux/padata.h
83988+++ b/include/linux/padata.h
83989@@ -129,7 +129,7 @@ struct parallel_data {
83990 struct padata_serial_queue __percpu *squeue;
83991 atomic_t reorder_objects;
83992 atomic_t refcnt;
83993- atomic_t seq_nr;
83994+ atomic_unchecked_t seq_nr;
83995 struct padata_cpumask cpumask;
83996 spinlock_t lock ____cacheline_aligned;
83997 unsigned int processed;
83998diff --git a/include/linux/path.h b/include/linux/path.h
83999index d137218..be0c176 100644
84000--- a/include/linux/path.h
84001+++ b/include/linux/path.h
84002@@ -1,13 +1,15 @@
84003 #ifndef _LINUX_PATH_H
84004 #define _LINUX_PATH_H
84005
84006+#include <linux/compiler.h>
84007+
84008 struct dentry;
84009 struct vfsmount;
84010
84011 struct path {
84012 struct vfsmount *mnt;
84013 struct dentry *dentry;
84014-};
84015+} __randomize_layout;
84016
84017 extern void path_get(const struct path *);
84018 extern void path_put(const struct path *);
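struct path is one of many structures in this patch tagged __randomize_layout, which asks the RANDSTRUCT plugin to shuffle member order with a per-build seed so structure offsets stop being a stable target. One knock-on effect shows up in the rbtree_augmented.h hunk below: initializers for plugin-touched structures must be designated, since positional initializers silently break once fields move. A conceptual sketch with a no-op stand-in for the attribute:

/* Conceptual only: in the patched tree __randomize_layout expands to a
 * plugin-recognised attribute; here it is a no-op stand-in. With the
 * plugin active, offsetof() results vary per build, so initializers
 * must name their fields. */
#define __randomize_layout

struct demo_path {
	void *mnt;
	void *dentry;
} __randomize_layout;

static struct demo_path p = { .mnt = 0, .dentry = 0 };	/* designated, order-proof */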
84019diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
84020index 8c78950..0d74ed9 100644
84021--- a/include/linux/pci_hotplug.h
84022+++ b/include/linux/pci_hotplug.h
84023@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
84024 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
84025 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
84026 int (*reset_slot) (struct hotplug_slot *slot, int probe);
84027-};
84028+} __do_const;
84029+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
84030
84031 /**
84032 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
84033diff --git a/include/linux/percpu.h b/include/linux/percpu.h
84034index caebf2a..4c3ae9d 100644
84035--- a/include/linux/percpu.h
84036+++ b/include/linux/percpu.h
84037@@ -34,7 +34,7 @@
84038 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
84039 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
84040 */
84041-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
84042+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
84043 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
84044
84045 /*
84046diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
84047index 664de5a..b3e1bf4 100644
84048--- a/include/linux/perf_event.h
84049+++ b/include/linux/perf_event.h
84050@@ -336,8 +336,8 @@ struct perf_event {
84051
84052 enum perf_event_active_state state;
84053 unsigned int attach_state;
84054- local64_t count;
84055- atomic64_t child_count;
84056+ local64_t count; /* PaX: fix it one day */
84057+ atomic64_unchecked_t child_count;
84058
84059 /*
84060 * These are the total time in nanoseconds that the event
84061@@ -388,8 +388,8 @@ struct perf_event {
84062 * These accumulate total time (in nanoseconds) that children
84063 * events have been enabled and running, respectively.
84064 */
84065- atomic64_t child_total_time_enabled;
84066- atomic64_t child_total_time_running;
84067+ atomic64_unchecked_t child_total_time_enabled;
84068+ atomic64_unchecked_t child_total_time_running;
84069
84070 /*
84071 * Protect attach/detach and child_list:
84072@@ -733,7 +733,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
84073 entry->ip[entry->nr++] = ip;
84074 }
84075
84076-extern int sysctl_perf_event_paranoid;
84077+extern int sysctl_perf_event_legitimately_concerned;
84078 extern int sysctl_perf_event_mlock;
84079 extern int sysctl_perf_event_sample_rate;
84080 extern int sysctl_perf_cpu_time_max_percent;
84081@@ -748,19 +748,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
84082 loff_t *ppos);
84083
84084
84085+static inline bool perf_paranoid_any(void)
84086+{
84087+ return sysctl_perf_event_legitimately_concerned > 2;
84088+}
84089+
84090 static inline bool perf_paranoid_tracepoint_raw(void)
84091 {
84092- return sysctl_perf_event_paranoid > -1;
84093+ return sysctl_perf_event_legitimately_concerned > -1;
84094 }
84095
84096 static inline bool perf_paranoid_cpu(void)
84097 {
84098- return sysctl_perf_event_paranoid > 0;
84099+ return sysctl_perf_event_legitimately_concerned > 0;
84100 }
84101
84102 static inline bool perf_paranoid_kernel(void)
84103 {
84104- return sysctl_perf_event_paranoid > 1;
84105+ return sysctl_perf_event_legitimately_concerned > 1;
84106 }
84107
84108 extern void perf_event_init(void);
84109@@ -891,7 +896,7 @@ struct perf_pmu_events_attr {
84110 struct device_attribute attr;
84111 u64 id;
84112 const char *event_str;
84113-};
84114+} __do_const;
84115
84116 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
84117 static struct perf_pmu_events_attr _var = { \
84118diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
84119index b9cf6c5..5462472 100644
84120--- a/include/linux/pid_namespace.h
84121+++ b/include/linux/pid_namespace.h
84122@@ -45,7 +45,7 @@ struct pid_namespace {
84123 int hide_pid;
84124 int reboot; /* group exit code if this pidns was rebooted */
84125 struct ns_common ns;
84126-};
84127+} __randomize_layout;
84128
84129 extern struct pid_namespace init_pid_ns;
84130
84131diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
84132index eb8b8ac..62649e1 100644
84133--- a/include/linux/pipe_fs_i.h
84134+++ b/include/linux/pipe_fs_i.h
84135@@ -47,10 +47,10 @@ struct pipe_inode_info {
84136 struct mutex mutex;
84137 wait_queue_head_t wait;
84138 unsigned int nrbufs, curbuf, buffers;
84139- unsigned int readers;
84140- unsigned int writers;
84141- unsigned int files;
84142- unsigned int waiting_writers;
84143+ atomic_t readers;
84144+ atomic_t writers;
84145+ atomic_t files;
84146+ atomic_t waiting_writers;
84147 unsigned int r_counter;
84148 unsigned int w_counter;
84149 struct page *tmp_page;
84150diff --git a/include/linux/pm.h b/include/linux/pm.h
84151index 8b59763..8a05939 100644
84152--- a/include/linux/pm.h
84153+++ b/include/linux/pm.h
84154@@ -608,6 +608,7 @@ struct dev_pm_domain {
84155 struct dev_pm_ops ops;
84156 void (*detach)(struct device *dev, bool power_off);
84157 };
84158+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
84159
84160 /*
84161 * The PM_EVENT_ messages are also used by drivers implementing the legacy
84162diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
84163index a9edab2..8bada56 100644
84164--- a/include/linux/pm_domain.h
84165+++ b/include/linux/pm_domain.h
84166@@ -39,11 +39,11 @@ struct gpd_dev_ops {
84167 int (*save_state)(struct device *dev);
84168 int (*restore_state)(struct device *dev);
84169 bool (*active_wakeup)(struct device *dev);
84170-};
84171+} __no_const;
84172
84173 struct gpd_cpuidle_data {
84174 unsigned int saved_exit_latency;
84175- struct cpuidle_state *idle_state;
84176+ cpuidle_state_no_const *idle_state;
84177 };
84178
84179 struct generic_pm_domain {
84180diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
84181index 30e84d4..22278b4 100644
84182--- a/include/linux/pm_runtime.h
84183+++ b/include/linux/pm_runtime.h
84184@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
84185
84186 static inline void pm_runtime_mark_last_busy(struct device *dev)
84187 {
84188- ACCESS_ONCE(dev->power.last_busy) = jiffies;
84189+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
84190 }
84191
84192 static inline bool pm_runtime_is_irq_safe(struct device *dev)
84193diff --git a/include/linux/pnp.h b/include/linux/pnp.h
84194index 195aafc..49a7bc2 100644
84195--- a/include/linux/pnp.h
84196+++ b/include/linux/pnp.h
84197@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
84198 struct pnp_fixup {
84199 char id[7];
84200 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
84201-};
84202+} __do_const;
84203
84204 /* config parameters */
84205 #define PNP_CONFIG_NORMAL 0x0001
84206diff --git a/include/linux/poison.h b/include/linux/poison.h
84207index 2110a81..13a11bb 100644
84208--- a/include/linux/poison.h
84209+++ b/include/linux/poison.h
84210@@ -19,8 +19,8 @@
84211 * under normal circumstances, used to verify that nobody uses
84212 * non-initialized list entries.
84213 */
84214-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
84215-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
84216+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
84217+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
84218
84219 /********** include/linux/timer.h **********/
84220 /*
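The stock LIST_POISON values sit at low addresses that userland can often map (subject only to mmap_min_addr), so dereferencing a poisoned list pointer after a use-after-free could land in attacker-controlled memory; the replacement constants sit just below the 4 GiB boundary, which the usual 32-bit kernel layout leaves unmapped, turning such a dereference into a fault. A quick check of what the casts evaluate to:

#include <stdio.h>

int main(void)
{
	/* The literal is an unsigned int; the (long) cast is
	 * value-preserving on LP64 (and wraps to the same bit pattern
	 * with GCC on ILP32), so the poison pointers land just below
	 * the 4 GiB boundary. */
	void *p1 = (void *)(long)0xFFFFFF01;
	void *p2 = (void *)(long)0xFFFFFF02;

	printf("%p %p\n", p1, p2);	/* 0xffffff01 0xffffff02 */
	return 0;
}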
84221diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
84222index d8b187c3..9a9257a 100644
84223--- a/include/linux/power/smartreflex.h
84224+++ b/include/linux/power/smartreflex.h
84225@@ -238,7 +238,7 @@ struct omap_sr_class_data {
84226 int (*notify)(struct omap_sr *sr, u32 status);
84227 u8 notify_flags;
84228 u8 class_type;
84229-};
84230+} __do_const;
84231
84232 /**
84233 * struct omap_sr_nvalue_table - Smartreflex n-target value info
84234diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
84235index 4ea1d37..80f4b33 100644
84236--- a/include/linux/ppp-comp.h
84237+++ b/include/linux/ppp-comp.h
84238@@ -84,7 +84,7 @@ struct compressor {
84239 struct module *owner;
84240 /* Extra skb space needed by the compressor algorithm */
84241 unsigned int comp_extra;
84242-};
84243+} __do_const;
84244
84245 /*
84246 * The return value from decompress routine is the length of the
84247diff --git a/include/linux/preempt.h b/include/linux/preempt.h
84248index de83b4e..c4b997d 100644
84249--- a/include/linux/preempt.h
84250+++ b/include/linux/preempt.h
84251@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
84252 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
84253 #endif
84254
84255+#define raw_preempt_count_add(val) __preempt_count_add(val)
84256+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
84257+
84258 #define __preempt_count_inc() __preempt_count_add(1)
84259 #define __preempt_count_dec() __preempt_count_sub(1)
84260
84261 #define preempt_count_inc() preempt_count_add(1)
84262+#define raw_preempt_count_inc() raw_preempt_count_add(1)
84263 #define preempt_count_dec() preempt_count_sub(1)
84264+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
84265
84266 #ifdef CONFIG_PREEMPT_COUNT
84267
84268@@ -41,6 +46,12 @@ do { \
84269 barrier(); \
84270 } while (0)
84271
84272+#define raw_preempt_disable() \
84273+do { \
84274+ raw_preempt_count_inc(); \
84275+ barrier(); \
84276+} while (0)
84277+
84278 #define sched_preempt_enable_no_resched() \
84279 do { \
84280 barrier(); \
84281@@ -49,6 +60,12 @@ do { \
84282
84283 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
84284
84285+#define raw_preempt_enable_no_resched() \
84286+do { \
84287+ barrier(); \
84288+ raw_preempt_count_dec(); \
84289+} while (0)
84290+
84291 #ifdef CONFIG_PREEMPT
84292 #define preempt_enable() \
84293 do { \
84294@@ -113,8 +130,10 @@ do { \
84295 * region.
84296 */
84297 #define preempt_disable() barrier()
84298+#define raw_preempt_disable() barrier()
84299 #define sched_preempt_enable_no_resched() barrier()
84300 #define preempt_enable_no_resched() barrier()
84301+#define raw_preempt_enable_no_resched() barrier()
84302 #define preempt_enable() barrier()
84303 #define preempt_check_resched() do { } while (0)
84304
84305@@ -128,11 +147,13 @@ do { \
84306 /*
84307 * Modules have no business playing preemption tricks.
84308 */
84309+#ifndef CONFIG_PAX_KERNEXEC
84310 #undef sched_preempt_enable_no_resched
84311 #undef preempt_enable_no_resched
84312 #undef preempt_enable_no_resched_notrace
84313 #undef preempt_check_resched
84314 #endif
84315+#endif
84316
84317 #define preempt_set_need_resched() \
84318 do { \
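preempt.h adds raw_ counterparts that manipulate the preempt count through __preempt_count_add()/__preempt_count_sub() directly, bypassing the tracing and debugging hooks, and the new #ifndef CONFIG_PAX_KERNEXEC keeps the "modules must not use these" #undefs from firing, since KERNEXEC's own module-handling code needs the no-resched variants. The pairing the raw_ macros follow, sketched in plain C:

/* Minimal sketch of the disable/enable pairing: bump the count,
 * compiler barrier, critical section, barrier, drop the count. */
#define barrier() __asm__ __volatile__("" ::: "memory")

static volatile int preempt_count;

#define raw_disable()           do { preempt_count++; barrier(); } while (0)
#define raw_enable_no_resched() do { barrier(); preempt_count--; } while (0)

int critical(int x)
{
	int y;

	raw_disable();
	y = x + 1;	/* no preemption accounting hooks run here */
	raw_enable_no_resched();
	return y;
}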
84319diff --git a/include/linux/printk.h b/include/linux/printk.h
84320index 4d5bf57..d94eccf 100644
84321--- a/include/linux/printk.h
84322+++ b/include/linux/printk.h
84323@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
84324 #endif
84325
84326 typedef int(*printk_func_t)(const char *fmt, va_list args);
84327+extern int kptr_restrict;
84328
84329 #ifdef CONFIG_PRINTK
84330 asmlinkage __printf(5, 0)
84331@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
84332
84333 extern int printk_delay_msec;
84334 extern int dmesg_restrict;
84335-extern int kptr_restrict;
84336
84337 extern void wake_up_klogd(void);
84338
84339diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
84340index b97bf2e..f14c92d4 100644
84341--- a/include/linux/proc_fs.h
84342+++ b/include/linux/proc_fs.h
84343@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
84344 extern struct proc_dir_entry *proc_symlink(const char *,
84345 struct proc_dir_entry *, const char *);
84346 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
84347+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
84348 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
84349 struct proc_dir_entry *, void *);
84350+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
84351+ struct proc_dir_entry *, void *);
84352 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
84353 struct proc_dir_entry *);
84354
84355@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
84356 return proc_create_data(name, mode, parent, proc_fops, NULL);
84357 }
84358
84359+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
84360+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
84361+{
84362+#ifdef CONFIG_GRKERNSEC_PROC_USER
84363+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
84364+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84365+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
84366+#else
84367+ return proc_create_data(name, mode, parent, proc_fops, NULL);
84368+#endif
84369+}
84370+
84371+
84372 extern void proc_set_size(struct proc_dir_entry *, loff_t);
84373 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
84374 extern void *PDE_DATA(const struct inode *);
84375@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
84376 struct proc_dir_entry *parent,const char *dest) { return NULL;}
84377 static inline struct proc_dir_entry *proc_mkdir(const char *name,
84378 struct proc_dir_entry *parent) {return NULL;}
84379+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
84380+ struct proc_dir_entry *parent) { return NULL; }
84381 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
84382 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84383+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
84384+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84385 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
84386 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
84387 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
84388@@ -79,7 +99,7 @@ struct net;
84389 static inline struct proc_dir_entry *proc_net_mkdir(
84390 struct net *net, const char *name, struct proc_dir_entry *parent)
84391 {
84392- return proc_mkdir_data(name, 0, parent, net);
84393+ return proc_mkdir_data_restrict(name, 0, parent, net);
84394 }
84395
84396 #endif /* _LINUX_PROC_FS_H */
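proc_create_grsec() is a drop-in for proc_create() that clamps the entry's mode to owner-read (or owner-and-group-read) when the GRKERNSEC_PROC_USER(GROUP) options are set, and proc_net_mkdir() is rerouted through the new proc_mkdir_data_restrict() so per-netns /proc directories inherit the same policy. A hypothetical module using it, assuming the patched 3.19-era tree:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	/* Plain proc_create() semantics without the grsec options;
	 * with them, the 0444 request is clamped to root(-and-group). */
	return proc_create_grsec("demo", 0444, NULL, &demo_fops) ? 0 : -ENOMEM;
}
module_init(demo_init);
MODULE_LICENSE("GPL");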
84397diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
84398index 42dfc61..8113a99 100644
84399--- a/include/linux/proc_ns.h
84400+++ b/include/linux/proc_ns.h
84401@@ -16,7 +16,7 @@ struct proc_ns_operations {
84402 struct ns_common *(*get)(struct task_struct *task);
84403 void (*put)(struct ns_common *ns);
84404 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
84405-};
84406+} __do_const __randomize_layout;
84407
84408 extern const struct proc_ns_operations netns_operations;
84409 extern const struct proc_ns_operations utsns_operations;
84410diff --git a/include/linux/quota.h b/include/linux/quota.h
84411index b86df49..8002997 100644
84412--- a/include/linux/quota.h
84413+++ b/include/linux/quota.h
84414@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
84415
84416 extern bool qid_eq(struct kqid left, struct kqid right);
84417 extern bool qid_lt(struct kqid left, struct kqid right);
84418-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
84419+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
84420 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
84421 extern bool qid_valid(struct kqid qid);
84422
84423diff --git a/include/linux/random.h b/include/linux/random.h
84424index b05856e..0a9f14e 100644
84425--- a/include/linux/random.h
84426+++ b/include/linux/random.h
84427@@ -9,9 +9,19 @@
84428 #include <uapi/linux/random.h>
84429
84430 extern void add_device_randomness(const void *, unsigned int);
84431+
84432+static inline void add_latent_entropy(void)
84433+{
84434+
84435+#ifdef LATENT_ENTROPY_PLUGIN
84436+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84437+#endif
84438+
84439+}
84440+
84441 extern void add_input_randomness(unsigned int type, unsigned int code,
84442- unsigned int value);
84443-extern void add_interrupt_randomness(int irq, int irq_flags);
84444+ unsigned int value) __latent_entropy;
84445+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
84446
84447 extern void get_random_bytes(void *buf, int nbytes);
84448 extern void get_random_bytes_arch(void *buf, int nbytes);
84449@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
84450 extern const struct file_operations random_fops, urandom_fops;
84451 #endif
84452
84453-unsigned int get_random_int(void);
84454+unsigned int __intentional_overflow(-1) get_random_int(void);
84455 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
84456
84457-u32 prandom_u32(void);
84458+u32 prandom_u32(void) __intentional_overflow(-1);
84459 void prandom_bytes(void *buf, size_t nbytes);
84460 void prandom_seed(u32 seed);
84461 void prandom_reseed_late(void);
84462@@ -37,6 +47,11 @@ struct rnd_state {
84463 u32 prandom_u32_state(struct rnd_state *state);
84464 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84465
84466+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
84467+{
84468+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
84469+}
84470+
84471 /**
84472 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
84473 * @ep_ro: right open interval endpoint
84474@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84475 *
84476 * Returns: pseudo-random number in interval [0, ep_ro)
84477 */
84478-static inline u32 prandom_u32_max(u32 ep_ro)
84479+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
84480 {
84481 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
84482 }
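random.h wires in two PaX plugin hooks: add_latent_entropy() mixes the plugin-maintained latent_entropy value into the pool when LATENT_ENTROPY_PLUGIN is active, and the __intentional_overflow(-1) annotations tell the size-overflow plugin that wraparound in these PRNG helpers is expected. pax_get_random_long() widens prandom_u32() to a full long by drawing twice on 64-bit; a userspace analogue with rand32() standing in for prandom_u32():

#include <limits.h>
#include <stdint.h>
#include <stdlib.h>

/* rand32() is a stand-in for prandom_u32(). */
static uint32_t rand32(void)
{
	return (uint32_t)rand() ^ ((uint32_t)rand() << 16);
}

static unsigned long get_random_long(void)
{
	unsigned long r = rand32();

#if ULONG_MAX > 0xffffffffUL	/* mirrors the sizeof(long) > 4 test */
	r |= (unsigned long)rand32() << 32;
#endif
	return r;
}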
84483diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
84484index 378c5ee..aa84a47 100644
84485--- a/include/linux/rbtree_augmented.h
84486+++ b/include/linux/rbtree_augmented.h
84487@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
84488 old->rbaugmented = rbcompute(old); \
84489 } \
84490 rbstatic const struct rb_augment_callbacks rbname = { \
84491- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
84492+ .propagate = rbname ## _propagate, \
84493+ .copy = rbname ## _copy, \
84494+ .rotate = rbname ## _rotate \
84495 };
84496
84497
84498diff --git a/include/linux/rculist.h b/include/linux/rculist.h
84499index 529bc94..82ce778 100644
84500--- a/include/linux/rculist.h
84501+++ b/include/linux/rculist.h
84502@@ -29,8 +29,8 @@
84503 */
84504 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
84505 {
84506- ACCESS_ONCE(list->next) = list;
84507- ACCESS_ONCE(list->prev) = list;
84508+ ACCESS_ONCE_RW(list->next) = list;
84509+ ACCESS_ONCE_RW(list->prev) = list;
84510 }
84511
84512 /*
84513@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
84514 struct list_head *prev, struct list_head *next);
84515 #endif
84516
84517+void __pax_list_add_rcu(struct list_head *new,
84518+ struct list_head *prev, struct list_head *next);
84519+
84520 /**
84521 * list_add_rcu - add a new entry to rcu-protected list
84522 * @new: new entry to be added
84523@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
84524 __list_add_rcu(new, head, head->next);
84525 }
84526
84527+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
84528+{
84529+ __pax_list_add_rcu(new, head, head->next);
84530+}
84531+
84532 /**
84533 * list_add_tail_rcu - add a new entry to rcu-protected list
84534 * @new: new entry to be added
84535@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
84536 __list_add_rcu(new, head->prev, head);
84537 }
84538
84539+static inline void pax_list_add_tail_rcu(struct list_head *new,
84540+ struct list_head *head)
84541+{
84542+ __pax_list_add_rcu(new, head->prev, head);
84543+}
84544+
84545 /**
84546 * list_del_rcu - deletes entry from list without re-initialization
84547 * @entry: the element to delete from the list.
84548@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
84549 entry->prev = LIST_POISON2;
84550 }
84551
84552+extern void pax_list_del_rcu(struct list_head *entry);
84553+
84554 /**
84555 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
84556 * @n: the element to delete from the hash list.
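The pax_list_*_rcu() helpers declared here are the RCU-list writers for constified data: their out-of-line implementations elsewhere in the patch are assumed to open a temporary KERNEXEC write window around the pointer updates, so lists whose nodes live in read-only memory can still be edited. A hypothetical caller, assuming the patched tree:

#include <linux/rculist.h>

/* Nodes on a list whose links live in read-only (constified) memory
 * must be edited via the pax_ helpers rather than
 * list_add_rcu()/list_del_rcu(). */
struct demo_node {
	struct list_head list;
};

static LIST_HEAD(demo_head);

static void demo_insert(struct demo_node *n)
{
	pax_list_add_rcu(&n->list, &demo_head);
}

static void demo_remove(struct demo_node *n)
{
	pax_list_del_rcu(&n->list);
}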
84557diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
84558index ed4f593..8a51501 100644
84559--- a/include/linux/rcupdate.h
84560+++ b/include/linux/rcupdate.h
84561@@ -332,7 +332,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
84562 #define rcu_note_voluntary_context_switch(t) \
84563 do { \
84564 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
84565- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
84566+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
84567 } while (0)
84568 #else /* #ifdef CONFIG_TASKS_RCU */
84569 #define TASKS_RCU(x) do { } while (0)
84570diff --git a/include/linux/reboot.h b/include/linux/reboot.h
84571index 67fc8fc..a90f7d8 100644
84572--- a/include/linux/reboot.h
84573+++ b/include/linux/reboot.h
84574@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
84575 */
84576
84577 extern void migrate_to_reboot_cpu(void);
84578-extern void machine_restart(char *cmd);
84579-extern void machine_halt(void);
84580-extern void machine_power_off(void);
84581+extern void machine_restart(char *cmd) __noreturn;
84582+extern void machine_halt(void) __noreturn;
84583+extern void machine_power_off(void) __noreturn;
84584
84585 extern void machine_shutdown(void);
84586 struct pt_regs;
84587@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
84588 */
84589
84590 extern void kernel_restart_prepare(char *cmd);
84591-extern void kernel_restart(char *cmd);
84592-extern void kernel_halt(void);
84593-extern void kernel_power_off(void);
84594+extern void kernel_restart(char *cmd) __noreturn;
84595+extern void kernel_halt(void) __noreturn;
84596+extern void kernel_power_off(void) __noreturn;
84597
84598 extern int C_A_D; /* for sysctl */
84599 void ctrl_alt_del(void);
84600@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
84601 * Emergency restart, callable from an interrupt handler.
84602 */
84603
84604-extern void emergency_restart(void);
84605+extern void emergency_restart(void) __noreturn;
84606 #include <asm/emergency-restart.h>
84607
84608 #endif /* _LINUX_REBOOT_H */
84609diff --git a/include/linux/regset.h b/include/linux/regset.h
84610index 8e0c9fe..ac4d221 100644
84611--- a/include/linux/regset.h
84612+++ b/include/linux/regset.h
84613@@ -161,7 +161,8 @@ struct user_regset {
84614 unsigned int align;
84615 unsigned int bias;
84616 unsigned int core_note_type;
84617-};
84618+} __do_const;
84619+typedef struct user_regset __no_const user_regset_no_const;
84620
84621 /**
84622 * struct user_regset_view - available regsets
84623diff --git a/include/linux/relay.h b/include/linux/relay.h
84624index d7c8359..818daf5 100644
84625--- a/include/linux/relay.h
84626+++ b/include/linux/relay.h
84627@@ -157,7 +157,7 @@ struct rchan_callbacks
84628 * The callback should return 0 if successful, negative if not.
84629 */
84630 int (*remove_buf_file)(struct dentry *dentry);
84631-};
84632+} __no_const;
84633
84634 /*
84635 * CONFIG_RELAY kernel API, kernel/relay.c
84636diff --git a/include/linux/rio.h b/include/linux/rio.h
84637index 6bda06f..bf39a9b 100644
84638--- a/include/linux/rio.h
84639+++ b/include/linux/rio.h
84640@@ -358,7 +358,7 @@ struct rio_ops {
84641 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
84642 u64 rstart, u32 size, u32 flags);
84643 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
84644-};
84645+} __no_const;
84646
84647 #define RIO_RESOURCE_MEM 0x00000100
84648 #define RIO_RESOURCE_DOORBELL 0x00000200
84649diff --git a/include/linux/rmap.h b/include/linux/rmap.h
84650index d9d7e7e..86f47ac 100644
84651--- a/include/linux/rmap.h
84652+++ b/include/linux/rmap.h
84653@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
84654 void anon_vma_init(void); /* create anon_vma_cachep */
84655 int anon_vma_prepare(struct vm_area_struct *);
84656 void unlink_anon_vmas(struct vm_area_struct *);
84657-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
84658-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
84659+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
84660+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
84661
84662 static inline void anon_vma_merge(struct vm_area_struct *vma,
84663 struct vm_area_struct *next)
84664diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
84665index ed8f9e7..999bc96 100644
84666--- a/include/linux/scatterlist.h
84667+++ b/include/linux/scatterlist.h
84668@@ -1,6 +1,7 @@
84669 #ifndef _LINUX_SCATTERLIST_H
84670 #define _LINUX_SCATTERLIST_H
84671
84672+#include <linux/sched.h>
84673 #include <linux/string.h>
84674 #include <linux/bug.h>
84675 #include <linux/mm.h>
84676@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
84677 #ifdef CONFIG_DEBUG_SG
84678 BUG_ON(!virt_addr_valid(buf));
84679 #endif
84680+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84681+ if (object_starts_on_stack(buf)) {
84682+ void *adjbuf = buf - current->stack + current->lowmem_stack;
84683+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
84684+ } else
84685+#endif
84686 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
84687 }
84688
84689diff --git a/include/linux/sched.h b/include/linux/sched.h
84690index 8db31ef..0af1f81 100644
84691--- a/include/linux/sched.h
84692+++ b/include/linux/sched.h
84693@@ -133,6 +133,7 @@ struct fs_struct;
84694 struct perf_event_context;
84695 struct blk_plug;
84696 struct filename;
84697+struct linux_binprm;
84698
84699 #define VMACACHE_BITS 2
84700 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
84701@@ -415,7 +416,7 @@ extern char __sched_text_start[], __sched_text_end[];
84702 extern int in_sched_functions(unsigned long addr);
84703
84704 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
84705-extern signed long schedule_timeout(signed long timeout);
84706+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
84707 extern signed long schedule_timeout_interruptible(signed long timeout);
84708 extern signed long schedule_timeout_killable(signed long timeout);
84709 extern signed long schedule_timeout_uninterruptible(signed long timeout);
84710@@ -426,6 +427,19 @@ struct nsproxy;
84711 struct user_namespace;
84712
84713 #ifdef CONFIG_MMU
84714+
84715+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
84716+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
84717+#else
84718+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
84719+{
84720+ return 0;
84721+}
84722+#endif
84723+
84724+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
84725+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
84726+
84727 extern void arch_pick_mmap_layout(struct mm_struct *mm);
84728 extern unsigned long
84729 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
84730@@ -724,6 +738,17 @@ struct signal_struct {
84731 #ifdef CONFIG_TASKSTATS
84732 struct taskstats *stats;
84733 #endif
84734+
84735+#ifdef CONFIG_GRKERNSEC
84736+ u32 curr_ip;
84737+ u32 saved_ip;
84738+ u32 gr_saddr;
84739+ u32 gr_daddr;
84740+ u16 gr_sport;
84741+ u16 gr_dport;
84742+ u8 used_accept:1;
84743+#endif
84744+
84745 #ifdef CONFIG_AUDIT
84746 unsigned audit_tty;
84747 unsigned audit_tty_log_passwd;
84748@@ -750,7 +775,7 @@ struct signal_struct {
84749 struct mutex cred_guard_mutex; /* guard against foreign influences on
84750 * credential calculations
84751 * (notably. ptrace) */
84752-};
84753+} __randomize_layout;
84754
84755 /*
84756 * Bits in flags field of signal_struct.
84757@@ -803,6 +828,14 @@ struct user_struct {
84758 struct key *session_keyring; /* UID's default session keyring */
84759 #endif
84760
84761+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
84762+ unsigned char kernel_banned;
84763+#endif
84764+#ifdef CONFIG_GRKERNSEC_BRUTE
84765+ unsigned char suid_banned;
84766+ unsigned long suid_ban_expires;
84767+#endif
84768+
84769 /* Hash table maintenance information */
84770 struct hlist_node uidhash_node;
84771 kuid_t uid;
84772@@ -810,7 +843,7 @@ struct user_struct {
84773 #ifdef CONFIG_PERF_EVENTS
84774 atomic_long_t locked_vm;
84775 #endif
84776-};
84777+} __randomize_layout;
84778
84779 extern int uids_sysfs_init(void);
84780
84781@@ -1274,6 +1307,9 @@ enum perf_event_task_context {
84782 struct task_struct {
84783 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
84784 void *stack;
84785+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84786+ void *lowmem_stack;
84787+#endif
84788 atomic_t usage;
84789 unsigned int flags; /* per process flags, defined below */
84790 unsigned int ptrace;
84791@@ -1405,8 +1441,8 @@ struct task_struct {
84792 struct list_head thread_node;
84793
84794 struct completion *vfork_done; /* for vfork() */
84795- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
84796- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84797+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
84798+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84799
84800 cputime_t utime, stime, utimescaled, stimescaled;
84801 cputime_t gtime;
84802@@ -1431,11 +1467,6 @@ struct task_struct {
84803 struct task_cputime cputime_expires;
84804 struct list_head cpu_timers[3];
84805
84806-/* process credentials */
84807- const struct cred __rcu *real_cred; /* objective and real subjective task
84808- * credentials (COW) */
84809- const struct cred __rcu *cred; /* effective (overridable) subjective task
84810- * credentials (COW) */
84811 char comm[TASK_COMM_LEN]; /* executable name excluding path
84812 - access with [gs]et_task_comm (which lock
84813 it with task_lock())
84814@@ -1453,6 +1484,10 @@ struct task_struct {
84815 #endif
84816 /* CPU-specific state of this task */
84817 struct thread_struct thread;
84818+/* thread_info moved to task_struct */
84819+#ifdef CONFIG_X86
84820+ struct thread_info tinfo;
84821+#endif
84822 /* filesystem information */
84823 struct fs_struct *fs;
84824 /* open file information */
84825@@ -1527,6 +1562,10 @@ struct task_struct {
84826 gfp_t lockdep_reclaim_gfp;
84827 #endif
84828
84829+/* process credentials */
84830+ const struct cred __rcu *real_cred; /* objective and real subjective task
84831+ * credentials (COW) */
84832+
84833 /* journalling filesystem info */
84834 void *journal_info;
84835
84836@@ -1565,6 +1604,10 @@ struct task_struct {
84837 /* cg_list protected by css_set_lock and tsk->alloc_lock */
84838 struct list_head cg_list;
84839 #endif
84840+
84841+ const struct cred __rcu *cred; /* effective (overridable) subjective task
84842+ * credentials (COW) */
84843+
84844 #ifdef CONFIG_FUTEX
84845 struct robust_list_head __user *robust_list;
84846 #ifdef CONFIG_COMPAT
84847@@ -1673,7 +1716,7 @@ struct task_struct {
84848 * Number of functions that haven't been traced
84849 * because of depth overrun.
84850 */
84851- atomic_t trace_overrun;
84852+ atomic_unchecked_t trace_overrun;
84853 /* Pause for the tracing */
84854 atomic_t tracing_graph_pause;
84855 #endif
84856@@ -1701,7 +1744,78 @@ struct task_struct {
84857 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
84858 unsigned long task_state_change;
84859 #endif
84860-};
84861+
84862+#ifdef CONFIG_GRKERNSEC
84863+ /* grsecurity */
84864+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
84865+ u64 exec_id;
84866+#endif
84867+#ifdef CONFIG_GRKERNSEC_SETXID
84868+ const struct cred *delayed_cred;
84869+#endif
84870+ struct dentry *gr_chroot_dentry;
84871+ struct acl_subject_label *acl;
84872+ struct acl_subject_label *tmpacl;
84873+ struct acl_role_label *role;
84874+ struct file *exec_file;
84875+ unsigned long brute_expires;
84876+ u16 acl_role_id;
84877+ u8 inherited;
84878+ /* is this the task that authenticated to the special role */
84879+ u8 acl_sp_role;
84880+ u8 is_writable;
84881+ u8 brute;
84882+ u8 gr_is_chrooted;
84883+#endif
84884+
84885+} __randomize_layout;
84886+
84887+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
84888+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
84889+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
84890+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
84891+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
84892+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
84893+
84894+#ifdef CONFIG_PAX_SOFTMODE
84895+extern int pax_softmode;
84896+#endif
84897+
84898+extern int pax_check_flags(unsigned long *);
84899+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
84900+
84901+/* if tsk != current then task_lock must be held on it */
84902+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
84903+static inline unsigned long pax_get_flags(struct task_struct *tsk)
84904+{
84905+ if (likely(tsk->mm))
84906+ return tsk->mm->pax_flags;
84907+ else
84908+ return 0UL;
84909+}
84910+
84911+/* if tsk != current then task_lock must be held on it */
84912+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
84913+{
84914+ if (likely(tsk->mm)) {
84915+ tsk->mm->pax_flags = flags;
84916+ return 0;
84917+ }
84918+ return -EINVAL;
84919+}
84920+#endif
84921+
84922+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
84923+extern void pax_set_initial_flags(struct linux_binprm *bprm);
84924+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
84925+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
84926+#endif
84927+
84928+struct path;
84929+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
84930+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
84931+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
84932+extern void pax_report_refcount_overflow(struct pt_regs *regs);
84933
84934 /* Future-safe accessor for struct task_struct's cpus_allowed. */
84935 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
84936@@ -1783,7 +1897,7 @@ struct pid_namespace;
84937 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
84938 struct pid_namespace *ns);
84939
84940-static inline pid_t task_pid_nr(struct task_struct *tsk)
84941+static inline pid_t task_pid_nr(const struct task_struct *tsk)
84942 {
84943 return tsk->pid;
84944 }
84945@@ -2150,6 +2264,25 @@ extern u64 sched_clock_cpu(int cpu);
84946
84947 extern void sched_clock_init(void);
84948
84949+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84950+static inline void populate_stack(void)
84951+{
84952+ struct task_struct *curtask = current;
84953+ int c;
84954+ int *ptr = curtask->stack;
84955+ int *end = curtask->stack + THREAD_SIZE;
84956+
84957+ while (ptr < end) {
84958+ c = *(volatile int *)ptr;
84959+ ptr += PAGE_SIZE/sizeof(int);
84960+ }
84961+}
84962+#else
84963+static inline void populate_stack(void)
84964+{
84965+}
84966+#endif
84967+
84968 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
84969 static inline void sched_clock_tick(void)
84970 {
84971@@ -2283,7 +2416,9 @@ void yield(void);
84972 extern struct exec_domain default_exec_domain;
84973
84974 union thread_union {
84975+#ifndef CONFIG_X86
84976 struct thread_info thread_info;
84977+#endif
84978 unsigned long stack[THREAD_SIZE/sizeof(long)];
84979 };
84980
84981@@ -2316,6 +2451,7 @@ extern struct pid_namespace init_pid_ns;
84982 */
84983
84984 extern struct task_struct *find_task_by_vpid(pid_t nr);
84985+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
84986 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
84987 struct pid_namespace *ns);
84988
84989@@ -2480,7 +2616,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
84990 extern void exit_itimers(struct signal_struct *);
84991 extern void flush_itimer_signals(void);
84992
84993-extern void do_group_exit(int);
84994+extern __noreturn void do_group_exit(int);
84995
84996 extern int do_execve(struct filename *,
84997 const char __user * const __user *,
84998@@ -2701,9 +2837,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
84999 #define task_stack_end_corrupted(task) \
85000 (*(end_of_stack(task)) != STACK_END_MAGIC)
85001
85002-static inline int object_is_on_stack(void *obj)
85003+static inline int object_starts_on_stack(const void *obj)
85004 {
85005- void *stack = task_stack_page(current);
85006+ const void *stack = task_stack_page(current);
85007
85008 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
85009 }
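populate_stack() in the sched.h hunk above pre-faults the whole (vmalloc-backed) kernel stack by reading one int per page through a volatile pointer, so code paths that must not fault later find every stack page present. A userspace analogue over an arbitrary readable region:

#define PAGE_SIZE 4096UL	/* stand-in for the kernel constant */

/* Touch one word per page so the entire region is faulted in up
 * front; the volatile read keeps the compiler from eliding it. */
static void populate(void *region, unsigned long size)
{
	volatile int *ptr = region;
	volatile int *end = (volatile int *)((char *)region + size);

	while (ptr < end) {
		(void)*ptr;
		ptr += PAGE_SIZE / sizeof(int);
	}
}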
85010diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
85011index 596a0e0..bea77ec 100644
85012--- a/include/linux/sched/sysctl.h
85013+++ b/include/linux/sched/sysctl.h
85014@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
85015 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
85016
85017 extern int sysctl_max_map_count;
85018+extern unsigned long sysctl_heap_stack_gap;
85019
85020 extern unsigned int sysctl_sched_latency;
85021 extern unsigned int sysctl_sched_min_granularity;
85022diff --git a/include/linux/security.h b/include/linux/security.h
85023index ba96471..74fb3f6 100644
85024--- a/include/linux/security.h
85025+++ b/include/linux/security.h
85026@@ -27,6 +27,7 @@
85027 #include <linux/slab.h>
85028 #include <linux/err.h>
85029 #include <linux/string.h>
85030+#include <linux/grsecurity.h>
85031
85032 struct linux_binprm;
85033 struct cred;
85034@@ -116,8 +117,6 @@ struct seq_file;
85035
85036 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
85037
85038-void reset_security_ops(void);
85039-
85040 #ifdef CONFIG_MMU
85041 extern unsigned long mmap_min_addr;
85042 extern unsigned long dac_mmap_min_addr;
85043@@ -1729,7 +1728,7 @@ struct security_operations {
85044 struct audit_context *actx);
85045 void (*audit_rule_free) (void *lsmrule);
85046 #endif /* CONFIG_AUDIT */
85047-};
85048+} __randomize_layout;
85049
85050 /* prototypes */
85051 extern int security_init(void);
85052diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
85053index dc368b8..e895209 100644
85054--- a/include/linux/semaphore.h
85055+++ b/include/linux/semaphore.h
85056@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
85057 }
85058
85059 extern void down(struct semaphore *sem);
85060-extern int __must_check down_interruptible(struct semaphore *sem);
85061+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
85062 extern int __must_check down_killable(struct semaphore *sem);
85063 extern int __must_check down_trylock(struct semaphore *sem);
85064 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
85065diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
85066index cf6a9da..bd86b1f 100644
85067--- a/include/linux/seq_file.h
85068+++ b/include/linux/seq_file.h
85069@@ -27,6 +27,9 @@ struct seq_file {
85070 struct mutex lock;
85071 const struct seq_operations *op;
85072 int poll_event;
85073+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85074+ u64 exec_id;
85075+#endif
85076 #ifdef CONFIG_USER_NS
85077 struct user_namespace *user_ns;
85078 #endif
85079@@ -39,6 +42,7 @@ struct seq_operations {
85080 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
85081 int (*show) (struct seq_file *m, void *v);
85082 };
85083+typedef struct seq_operations __no_const seq_operations_no_const;
85084
85085 #define SEQ_SKIP 1
85086
85087@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
85088
85089 char *mangle_path(char *s, const char *p, const char *esc);
85090 int seq_open(struct file *, const struct seq_operations *);
85091+int seq_open_restrict(struct file *, const struct seq_operations *);
85092 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
85093 loff_t seq_lseek(struct file *, loff_t, int);
85094 int seq_release(struct inode *, struct file *);
85095@@ -153,6 +158,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
85096 }
85097
85098 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
85099+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
85100 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
85101 int single_release(struct inode *, struct file *);
85102 void *__seq_open_private(struct file *, const struct seq_operations *, int);
85103diff --git a/include/linux/shm.h b/include/linux/shm.h
85104index 6fb8016..ab4465e 100644
85105--- a/include/linux/shm.h
85106+++ b/include/linux/shm.h
85107@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
85108 /* The task created the shm object. NULL if the task is dead. */
85109 struct task_struct *shm_creator;
85110 struct list_head shm_clist; /* list by creator */
85111+#ifdef CONFIG_GRKERNSEC
85112+ u64 shm_createtime;
85113+ pid_t shm_lapid;
85114+#endif
85115 };
85116
85117 /* shm_mode upper byte flags */
85118diff --git a/include/linux/signal.h b/include/linux/signal.h
85119index ab1e039..ad4229e 100644
85120--- a/include/linux/signal.h
85121+++ b/include/linux/signal.h
85122@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
85123 * know it'll be handled, so that they don't get converted to
85124 * SIGKILL or just silently dropped.
85125 */
85126- kernel_sigaction(sig, (__force __sighandler_t)2);
85127+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
85128 }
85129
85130 static inline void disallow_signal(int sig)
85131diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
85132index 85ab7d7..eb1585a 100644
85133--- a/include/linux/skbuff.h
85134+++ b/include/linux/skbuff.h
85135@@ -763,7 +763,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
85136 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
85137 int node);
85138 struct sk_buff *build_skb(void *data, unsigned int frag_size);
85139-static inline struct sk_buff *alloc_skb(unsigned int size,
85140+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
85141 gfp_t priority)
85142 {
85143 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
85144@@ -1952,7 +1952,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
85145 return skb->inner_transport_header - skb->inner_network_header;
85146 }
85147
85148-static inline int skb_network_offset(const struct sk_buff *skb)
85149+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
85150 {
85151 return skb_network_header(skb) - skb->data;
85152 }
85153@@ -2012,7 +2012,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
85154 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
85155 */
85156 #ifndef NET_SKB_PAD
85157-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
85158+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
85159 #endif
85160
85161 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
85162@@ -2655,9 +2655,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
85163 int *err);
85164 unsigned int datagram_poll(struct file *file, struct socket *sock,
85165 struct poll_table_struct *wait);
85166-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85167+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85168 struct iov_iter *to, int size);
85169-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85170+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85171 struct msghdr *msg, int size)
85172 {
85173 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
85174@@ -3131,6 +3131,9 @@ static inline void nf_reset(struct sk_buff *skb)
85175 nf_bridge_put(skb->nf_bridge);
85176 skb->nf_bridge = NULL;
85177 #endif
85178+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
85179+ skb->nf_trace = 0;
85180+#endif
85181 }
85182
85183 static inline void nf_reset_trace(struct sk_buff *skb)
85184diff --git a/include/linux/slab.h b/include/linux/slab.h
85185index 9a139b6..aab37b4 100644
85186--- a/include/linux/slab.h
85187+++ b/include/linux/slab.h
85188@@ -14,15 +14,29 @@
85189 #include <linux/gfp.h>
85190 #include <linux/types.h>
85191 #include <linux/workqueue.h>
85192-
85193+#include <linux/err.h>
85194
85195 /*
85196 * Flags to pass to kmem_cache_create().
85197 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
85198 */
85199 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
85200+
85201+#ifdef CONFIG_PAX_USERCOPY_SLABS
85202+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
85203+#else
85204+#define SLAB_USERCOPY 0x00000000UL
85205+#endif
85206+
85207 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
85208 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
85209+
85210+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85211+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
85212+#else
85213+#define SLAB_NO_SANITIZE 0x00000000UL
85214+#endif
85215+
85216 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
85217 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
85218 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
85219@@ -98,10 +112,13 @@
85220 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
85221 * Both make kfree a no-op.
85222 */
85223-#define ZERO_SIZE_PTR ((void *)16)
85224+#define ZERO_SIZE_PTR \
85225+({ \
85226+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
85227+ (void *)(-MAX_ERRNO-1L); \
85228+})
85229
85230-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
85231- (unsigned long)ZERO_SIZE_PTR)
85232+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
85233
85234 #include <linux/kmemleak.h>
85235
85236@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
85237 void kfree(const void *);
85238 void kzfree(const void *);
85239 size_t ksize(const void *);
85240+const char *check_heap_object(const void *ptr, unsigned long n);
85241+bool is_usercopy_object(const void *ptr);
85242
85243 /*
85244 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
85245@@ -236,6 +255,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
85246 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85247 #endif
85248
85249+#ifdef CONFIG_PAX_USERCOPY_SLABS
85250+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
85251+#endif
85252+
85253 /*
85254 * Figure out which kmalloc slab an allocation of a certain size
85255 * belongs to.
85256@@ -244,7 +267,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85257 * 2 = 120 .. 192 bytes
85258 * n = 2^(n-1) .. 2^n -1
85259 */
85260-static __always_inline int kmalloc_index(size_t size)
85261+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
85262 {
85263 if (!size)
85264 return 0;
85265@@ -287,14 +310,14 @@ static __always_inline int kmalloc_index(size_t size)
85266 }
85267 #endif /* !CONFIG_SLOB */
85268
85269-void *__kmalloc(size_t size, gfp_t flags);
85270+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
85271 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
85272
85273 #ifdef CONFIG_NUMA
85274-void *__kmalloc_node(size_t size, gfp_t flags, int node);
85275+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
85276 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
85277 #else
85278-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
85279+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
85280 {
85281 return __kmalloc(size, flags);
85282 }
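/*
 * Reviewer note: relocating ZERO_SIZE_PTR from (void *)16 to
 * (void *)(-MAX_ERRNO-1) lets one unsigned comparison cover NULL, the
 * zero-size cookie and the entire ERR_PTR range. A standalone model of
 * the new check (assumes MAX_ERRNO == 4095 as in <linux/err.h>):
 */
#include <stdio.h>

#define MAX_ERRNO	4095L
#define ZERO_SIZE_PTR	((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	printf("%d\n", ZERO_OR_NULL_PTR((void *)0));	 /* 1: NULL         */
	printf("%d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR)); /* 1: zero-size    */
	printf("%d\n", ZERO_OR_NULL_PTR((void *)-22L));	 /* 1: ERR_PTR(-22) */
	printf("%d\n", ZERO_OR_NULL_PTR((void *)0x1000));/* 0: real object  */
	return 0;
}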
85283diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
85284index b869d16..1453c73 100644
85285--- a/include/linux/slab_def.h
85286+++ b/include/linux/slab_def.h
85287@@ -40,7 +40,7 @@ struct kmem_cache {
85288 /* 4) cache creation/removal */
85289 const char *name;
85290 struct list_head list;
85291- int refcount;
85292+ atomic_t refcount;
85293 int object_size;
85294 int align;
85295
85296@@ -56,10 +56,14 @@ struct kmem_cache {
85297 unsigned long node_allocs;
85298 unsigned long node_frees;
85299 unsigned long node_overflow;
85300- atomic_t allochit;
85301- atomic_t allocmiss;
85302- atomic_t freehit;
85303- atomic_t freemiss;
85304+ atomic_unchecked_t allochit;
85305+ atomic_unchecked_t allocmiss;
85306+ atomic_unchecked_t freehit;
85307+ atomic_unchecked_t freemiss;
85308+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85309+ atomic_unchecked_t sanitized;
85310+ atomic_unchecked_t not_sanitized;
85311+#endif
85312
85313 /*
85314 * If debugging is enabled, then the allocator can add additional
85315diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
85316index d82abd4..408c3a0 100644
85317--- a/include/linux/slub_def.h
85318+++ b/include/linux/slub_def.h
85319@@ -74,7 +74,7 @@ struct kmem_cache {
85320 struct kmem_cache_order_objects max;
85321 struct kmem_cache_order_objects min;
85322 gfp_t allocflags; /* gfp flags to use on each alloc */
85323- int refcount; /* Refcount for slab cache destroy */
85324+ atomic_t refcount; /* Refcount for slab cache destroy */
85325 void (*ctor)(void *);
85326 int inuse; /* Offset to metadata */
85327 int align; /* Alignment */
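/*
 * Reviewer note: making the cache refcount atomic_t obliges every
 * access site in mm/slab_common.c, mm/slab.c and mm/slub.c to convert
 * with it; the expected shape (assumed, not quoted from the patch):
 *
 *	s->refcount++;        ->  atomic_inc(&s->refcount);
 *	if (!--s->refcount)   ->  if (atomic_dec_and_test(&s->refcount))
 *		do_cache_release(s);
 */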
85328diff --git a/include/linux/smp.h b/include/linux/smp.h
85329index 93dff5f..933c561 100644
85330--- a/include/linux/smp.h
85331+++ b/include/linux/smp.h
85332@@ -176,7 +176,9 @@ static inline void wake_up_all_idle_cpus(void) { }
85333 #endif
85334
85335 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
85336+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
85337 #define put_cpu() preempt_enable()
85338+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
85339
85340 /*
85341 * Callback to arch code if there's nosmp or maxcpus=0 on the
85342diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
85343index 46cca4c..3323536 100644
85344--- a/include/linux/sock_diag.h
85345+++ b/include/linux/sock_diag.h
85346@@ -11,7 +11,7 @@ struct sock;
85347 struct sock_diag_handler {
85348 __u8 family;
85349 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
85350-};
85351+} __do_const;
85352
85353 int sock_diag_register(const struct sock_diag_handler *h);
85354 void sock_diag_unregister(const struct sock_diag_handler *h);
85355diff --git a/include/linux/sonet.h b/include/linux/sonet.h
85356index 680f9a3..f13aeb0 100644
85357--- a/include/linux/sonet.h
85358+++ b/include/linux/sonet.h
85359@@ -7,7 +7,7 @@
85360 #include <uapi/linux/sonet.h>
85361
85362 struct k_sonet_stats {
85363-#define __HANDLE_ITEM(i) atomic_t i
85364+#define __HANDLE_ITEM(i) atomic_unchecked_t i
85365 __SONET_ITEMS
85366 #undef __HANDLE_ITEM
85367 };
85368diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
85369index 07d8e53..dc934c9 100644
85370--- a/include/linux/sunrpc/addr.h
85371+++ b/include/linux/sunrpc/addr.h
85372@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
85373 {
85374 switch (sap->sa_family) {
85375 case AF_INET:
85376- return ntohs(((struct sockaddr_in *)sap)->sin_port);
85377+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
85378 case AF_INET6:
85379- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
85380+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
85381 }
85382 return 0;
85383 }
85384@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
85385 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
85386 const struct sockaddr *src)
85387 {
85388- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
85389+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
85390 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
85391
85392 dsin->sin_family = ssin->sin_family;
85393@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
85394 if (sa->sa_family != AF_INET6)
85395 return 0;
85396
85397- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
85398+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
85399 }
85400
85401 #endif /* _LINUX_SUNRPC_ADDR_H */
85402diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
85403index 598ba80..d90cba6 100644
85404--- a/include/linux/sunrpc/clnt.h
85405+++ b/include/linux/sunrpc/clnt.h
85406@@ -100,7 +100,7 @@ struct rpc_procinfo {
85407 unsigned int p_timer; /* Which RTT timer to use */
85408 u32 p_statidx; /* Which procedure to account */
85409 const char * p_name; /* name of procedure */
85410-};
85411+} __do_const;
85412
85413 #ifdef __KERNEL__
85414
85415diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
85416index 6f22cfe..9fd0909 100644
85417--- a/include/linux/sunrpc/svc.h
85418+++ b/include/linux/sunrpc/svc.h
85419@@ -420,7 +420,7 @@ struct svc_procedure {
85420 unsigned int pc_count; /* call count */
85421 unsigned int pc_cachetype; /* cache info (NFS) */
85422 unsigned int pc_xdrressize; /* maximum size of XDR reply */
85423-};
85424+} __do_const;
85425
85426 /*
85427 * Function prototypes.
85428diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
85429index 975da75..318c083 100644
85430--- a/include/linux/sunrpc/svc_rdma.h
85431+++ b/include/linux/sunrpc/svc_rdma.h
85432@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
85433 extern unsigned int svcrdma_max_requests;
85434 extern unsigned int svcrdma_max_req_size;
85435
85436-extern atomic_t rdma_stat_recv;
85437-extern atomic_t rdma_stat_read;
85438-extern atomic_t rdma_stat_write;
85439-extern atomic_t rdma_stat_sq_starve;
85440-extern atomic_t rdma_stat_rq_starve;
85441-extern atomic_t rdma_stat_rq_poll;
85442-extern atomic_t rdma_stat_rq_prod;
85443-extern atomic_t rdma_stat_sq_poll;
85444-extern atomic_t rdma_stat_sq_prod;
85445+extern atomic_unchecked_t rdma_stat_recv;
85446+extern atomic_unchecked_t rdma_stat_read;
85447+extern atomic_unchecked_t rdma_stat_write;
85448+extern atomic_unchecked_t rdma_stat_sq_starve;
85449+extern atomic_unchecked_t rdma_stat_rq_starve;
85450+extern atomic_unchecked_t rdma_stat_rq_poll;
85451+extern atomic_unchecked_t rdma_stat_rq_prod;
85452+extern atomic_unchecked_t rdma_stat_sq_poll;
85453+extern atomic_unchecked_t rdma_stat_sq_prod;
85454
85455 #define RPCRDMA_VERSION 1
85456
85457diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
85458index 8d71d65..f79586e 100644
85459--- a/include/linux/sunrpc/svcauth.h
85460+++ b/include/linux/sunrpc/svcauth.h
85461@@ -120,7 +120,7 @@ struct auth_ops {
85462 int (*release)(struct svc_rqst *rq);
85463 void (*domain_release)(struct auth_domain *);
85464 int (*set_client)(struct svc_rqst *rq);
85465-};
85466+} __do_const;
85467
85468 #define SVC_GARBAGE 1
85469 #define SVC_SYSERR 2
85470diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
85471index e7a018e..49f8b17 100644
85472--- a/include/linux/swiotlb.h
85473+++ b/include/linux/swiotlb.h
85474@@ -60,7 +60,8 @@ extern void
85475
85476 extern void
85477 swiotlb_free_coherent(struct device *hwdev, size_t size,
85478- void *vaddr, dma_addr_t dma_handle);
85479+ void *vaddr, dma_addr_t dma_handle,
85480+ struct dma_attrs *attrs);
85481
85482 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
85483 unsigned long offset, size_t size,
85484diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
85485index 85893d7..4923581 100644
85486--- a/include/linux/syscalls.h
85487+++ b/include/linux/syscalls.h
85488@@ -99,10 +99,16 @@ union bpf_attr;
85489 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
85490
85491 #define __SC_DECL(t, a) t a
85492+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
85493 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
85494 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
85495 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
85496-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
85497+#define __SC_LONG(t, a) __typeof( \
85498+ __builtin_choose_expr( \
85499+ sizeof(t) > sizeof(int), \
85500+ (t) 0, \
85501+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
85502+ )) a
85503 #define __SC_CAST(t, a) (t) a
85504 #define __SC_ARGS(t, a) a
85505 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
85506@@ -384,11 +390,11 @@ asmlinkage long sys_sync(void);
85507 asmlinkage long sys_fsync(unsigned int fd);
85508 asmlinkage long sys_fdatasync(unsigned int fd);
85509 asmlinkage long sys_bdflush(int func, long data);
85510-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
85511- char __user *type, unsigned long flags,
85512+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
85513+ const char __user *type, unsigned long flags,
85514 void __user *data);
85515-asmlinkage long sys_umount(char __user *name, int flags);
85516-asmlinkage long sys_oldumount(char __user *name);
85517+asmlinkage long sys_umount(const char __user *name, int flags);
85518+asmlinkage long sys_oldumount(const char __user *name);
85519 asmlinkage long sys_truncate(const char __user *path, long length);
85520 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
85521 asmlinkage long sys_stat(const char __user *filename,
85522@@ -600,7 +606,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
85523 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
85524 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
85525 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
85526- struct sockaddr __user *, int);
85527+ struct sockaddr __user *, int) __intentional_overflow(0);
85528 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
85529 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
85530 unsigned int vlen, unsigned flags);
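/*
 * Reviewer note: the stock __SC_LONG widens every sub-long argument
 * through signed long, which sign-extends unsigned 32-bit values in
 * the 64-bit register slot; the replacement picks 0UL for unsigned
 * types. A userspace model of the difference (assumes 64-bit long):
 */
#include <stdio.h>

int main(void)
{
	unsigned int n = 0x80000000u;
	long as_signed = (int)n;	/* old behaviour: sign-extend */
	unsigned long as_unsigned = n;	/* new behaviour: zero-extend */

	printf("%lx vs %lx\n", (unsigned long)as_signed, as_unsigned);
	/* prints: ffffffff80000000 vs 80000000 */
	return 0;
}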
85531diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
85532index 27b3b0b..e093dd9 100644
85533--- a/include/linux/syscore_ops.h
85534+++ b/include/linux/syscore_ops.h
85535@@ -16,7 +16,7 @@ struct syscore_ops {
85536 int (*suspend)(void);
85537 void (*resume)(void);
85538 void (*shutdown)(void);
85539-};
85540+} __do_const;
85541
85542 extern void register_syscore_ops(struct syscore_ops *ops);
85543 extern void unregister_syscore_ops(struct syscore_ops *ops);
85544diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
85545index b7361f8..341a15a 100644
85546--- a/include/linux/sysctl.h
85547+++ b/include/linux/sysctl.h
85548@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
85549
85550 extern int proc_dostring(struct ctl_table *, int,
85551 void __user *, size_t *, loff_t *);
85552+extern int proc_dostring_modpriv(struct ctl_table *, int,
85553+ void __user *, size_t *, loff_t *);
85554 extern int proc_dointvec(struct ctl_table *, int,
85555 void __user *, size_t *, loff_t *);
85556 extern int proc_dointvec_minmax(struct ctl_table *, int,
85557@@ -113,7 +115,8 @@ struct ctl_table
85558 struct ctl_table_poll *poll;
85559 void *extra1;
85560 void *extra2;
85561-};
85562+} __do_const __randomize_layout;
85563+typedef struct ctl_table __no_const ctl_table_no_const;
85564
85565 struct ctl_node {
85566 struct rb_node node;
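/*
 * Reviewer note: under the constify plugin every __do_const instance
 * lands in .rodata, so code that builds sysctl tables at runtime must
 * use the mutable ctl_table_no_const alias for its writable copy. A
 * sketch of the intended pattern (demo_template and demo_knob are
 * illustrative, not from the patch):
 */
static int demo_knob;

static struct ctl_table_header *demo_register(struct net *net)
{
	ctl_table_no_const *tbl;

	tbl = kmemdup(demo_template, sizeof(demo_template), GFP_KERNEL);
	if (!tbl)
		return NULL;
	tbl[0].data = &demo_knob;	/* legal: the copy is not constified */
	return register_net_sysctl(net, "net/demo", tbl);
}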
85567diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
85568index ddad161..a3efd26 100644
85569--- a/include/linux/sysfs.h
85570+++ b/include/linux/sysfs.h
85571@@ -34,7 +34,8 @@ struct attribute {
85572 struct lock_class_key *key;
85573 struct lock_class_key skey;
85574 #endif
85575-};
85576+} __do_const;
85577+typedef struct attribute __no_const attribute_no_const;
85578
85579 /**
85580 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
85581@@ -63,7 +64,8 @@ struct attribute_group {
85582 struct attribute *, int);
85583 struct attribute **attrs;
85584 struct bin_attribute **bin_attrs;
85585-};
85586+} __do_const;
85587+typedef struct attribute_group __no_const attribute_group_no_const;
85588
85589 /**
85590 * Use these macros to make defining attributes easier. See include/linux/device.h
85591@@ -137,7 +139,8 @@ struct bin_attribute {
85592 char *, loff_t, size_t);
85593 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
85594 struct vm_area_struct *vma);
85595-};
85596+} __do_const;
85597+typedef struct bin_attribute __no_const bin_attribute_no_const;
85598
85599 /**
85600 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
85601diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
85602index 387fa7d..3fcde6b 100644
85603--- a/include/linux/sysrq.h
85604+++ b/include/linux/sysrq.h
85605@@ -16,6 +16,7 @@
85606
85607 #include <linux/errno.h>
85608 #include <linux/types.h>
85609+#include <linux/compiler.h>
85610
85611 /* Possible values of bitmask for enabling sysrq functions */
85612 /* 0x0001 is reserved for enable everything */
85613@@ -33,7 +34,7 @@ struct sysrq_key_op {
85614 char *help_msg;
85615 char *action_msg;
85616 int enable_mask;
85617-};
85618+} __do_const;
85619
85620 #ifdef CONFIG_MAGIC_SYSRQ
85621
85622diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
85623index ff307b5..f1a4468 100644
85624--- a/include/linux/thread_info.h
85625+++ b/include/linux/thread_info.h
85626@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
85627 #error "no set_restore_sigmask() provided and default one won't work"
85628 #endif
85629
85630+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
85631+
85632+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
85633+{
85634+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
85635+}
85636+
85637 #endif /* __KERNEL__ */
85638
85639 #endif /* _LINUX_THREAD_INFO_H */
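/*
 * Reviewer note: the hook is intended to run on the kernel side of
 * every user copy, letting the USERCOPY logic verify that (ptr, n)
 * stays within a single whitelisted slab object or the current stack
 * frame. A sketch of the call site an architecture would add
 * (__arch_copy_from_user is an illustrative stand-in):
 */
static inline unsigned long
demo_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	check_object_size(to, n, false);	/* false: copying *from* user */
	return __arch_copy_from_user(to, from, n);
}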
85640diff --git a/include/linux/tty.h b/include/linux/tty.h
85641index 7d66ae5..0327149 100644
85642--- a/include/linux/tty.h
85643+++ b/include/linux/tty.h
85644@@ -202,7 +202,7 @@ struct tty_port {
85645 const struct tty_port_operations *ops; /* Port operations */
85646 spinlock_t lock; /* Lock protecting tty field */
85647 int blocked_open; /* Waiting to open */
85648- int count; /* Usage count */
85649+ atomic_t count; /* Usage count */
85650 wait_queue_head_t open_wait; /* Open waiters */
85651 wait_queue_head_t close_wait; /* Close waiters */
85652 wait_queue_head_t delta_msr_wait; /* Modem status change */
85653@@ -290,7 +290,7 @@ struct tty_struct {
85654 /* If the tty has a pending do_SAK, queue it here - akpm */
85655 struct work_struct SAK_work;
85656 struct tty_port *port;
85657-};
85658+} __randomize_layout;
85659
85660 /* Each of a tty's open files has private_data pointing to tty_file_private */
85661 struct tty_file_private {
85662@@ -549,7 +549,7 @@ extern int tty_port_open(struct tty_port *port,
85663 struct tty_struct *tty, struct file *filp);
85664 static inline int tty_port_users(struct tty_port *port)
85665 {
85666- return port->count + port->blocked_open;
85667+ return atomic_read(&port->count) + port->blocked_open;
85668 }
85669
85670 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
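/*
 * Reviewer note: __randomize_layout (tty_struct above, and more
 * structs below) asks the RANDSTRUCT plugin to shuffle field order at
 * build time from a per-build seed, so initializers of such structs
 * have to be designated. An illustrative sketch:
 */
struct demo {
	int a;
	void *b;
} __randomize_layout;

static struct demo d = {
	.a = 1,		/* designated: safe under RANDSTRUCT */
	.b = NULL,
};
/* static struct demo d2 = { 1, NULL };  -- positional init would break
 * once the field order is shuffled. */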
85671diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
85672index 92e337c..f46757b 100644
85673--- a/include/linux/tty_driver.h
85674+++ b/include/linux/tty_driver.h
85675@@ -291,7 +291,7 @@ struct tty_operations {
85676 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
85677 #endif
85678 const struct file_operations *proc_fops;
85679-};
85680+} __do_const __randomize_layout;
85681
85682 struct tty_driver {
85683 int magic; /* magic number for this structure */
85684@@ -325,7 +325,7 @@ struct tty_driver {
85685
85686 const struct tty_operations *ops;
85687 struct list_head tty_drivers;
85688-};
85689+} __randomize_layout;
85690
85691 extern struct list_head tty_drivers;
85692
85693diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
85694index 00c9d68..bc0188b 100644
85695--- a/include/linux/tty_ldisc.h
85696+++ b/include/linux/tty_ldisc.h
85697@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
85698
85699 struct module *owner;
85700
85701- int refcount;
85702+ atomic_t refcount;
85703 };
85704
85705 struct tty_ldisc {
85706diff --git a/include/linux/types.h b/include/linux/types.h
85707index a0bb704..f511c77 100644
85708--- a/include/linux/types.h
85709+++ b/include/linux/types.h
85710@@ -177,10 +177,26 @@ typedef struct {
85711 int counter;
85712 } atomic_t;
85713
85714+#ifdef CONFIG_PAX_REFCOUNT
85715+typedef struct {
85716+ int counter;
85717+} atomic_unchecked_t;
85718+#else
85719+typedef atomic_t atomic_unchecked_t;
85720+#endif
85721+
85722 #ifdef CONFIG_64BIT
85723 typedef struct {
85724 long counter;
85725 } atomic64_t;
85726+
85727+#ifdef CONFIG_PAX_REFCOUNT
85728+typedef struct {
85729+ long counter;
85730+} atomic64_unchecked_t;
85731+#else
85732+typedef atomic64_t atomic64_unchecked_t;
85733+#endif
85734 #endif
85735
85736 struct list_head {
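/*
 * Reviewer note: atomic_unchecked_t shares atomic_t's layout but opts
 * out of PAX_REFCOUNT overflow trapping, which is why statistics and
 * generation counters get converted while genuine reference counts
 * stay atomic_t. A sketch of the x86 pair under the assumed scheme:
 */
static inline void atomic_inc(atomic_t *v)
{
	asm volatile("lock incl %0\n"
		     "jno 0f\n"
		     "lock decl %0\n"	/* undo the overflow...  */
		     "int $4\n0:\n"	/* ...and raise the trap */
		     : "+m" (v->counter));
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	asm volatile("lock incl %0"	/* plain wrap, no check  */
		     : "+m" (v->counter));
}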
85737diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
85738index ecd3319..8a36ded 100644
85739--- a/include/linux/uaccess.h
85740+++ b/include/linux/uaccess.h
85741@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
85742 long ret; \
85743 mm_segment_t old_fs = get_fs(); \
85744 \
85745- set_fs(KERNEL_DS); \
85746 pagefault_disable(); \
85747- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
85748- pagefault_enable(); \
85749+ set_fs(KERNEL_DS); \
85750+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
85751 set_fs(old_fs); \
85752+ pagefault_enable(); \
85753 ret; \
85754 })
85755
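/*
 * Reviewer note: the reorder keeps the KERNEL_DS window strictly
 * inside the pagefault-disabled region, so no fault can be serviced
 * (nor the task rescheduled from the fault path) while the address
 * limit is still widened. The resulting shape:
 *
 *	old_fs = get_fs();
 *	pagefault_disable();	 (1) faults only get fixups from here
 *	set_fs(KERNEL_DS);	 (2) widen the limit inside the window
 *	__copy_from_user_inatomic(...);
 *	set_fs(old_fs);		 (3) restore the limit first
 *	pagefault_enable();	 (4) only then allow faults again
 */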
85756diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
85757index 2d1f9b6..d7a9fce 100644
85758--- a/include/linux/uidgid.h
85759+++ b/include/linux/uidgid.h
85760@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
85761
85762 #endif /* CONFIG_USER_NS */
85763
85764+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
85765+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
85766+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
85767+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
85768+
85769 #endif /* _LINUX_UIDGID_H */
85770diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
85771index 32c0e83..671eb35 100644
85772--- a/include/linux/uio_driver.h
85773+++ b/include/linux/uio_driver.h
85774@@ -67,7 +67,7 @@ struct uio_device {
85775 struct module *owner;
85776 struct device *dev;
85777 int minor;
85778- atomic_t event;
85779+ atomic_unchecked_t event;
85780 struct fasync_struct *async_queue;
85781 wait_queue_head_t wait;
85782 struct uio_info *info;
85783diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
85784index 99c1b4d..562e6f3 100644
85785--- a/include/linux/unaligned/access_ok.h
85786+++ b/include/linux/unaligned/access_ok.h
85787@@ -4,34 +4,34 @@
85788 #include <linux/kernel.h>
85789 #include <asm/byteorder.h>
85790
85791-static inline u16 get_unaligned_le16(const void *p)
85792+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
85793 {
85794- return le16_to_cpup((__le16 *)p);
85795+ return le16_to_cpup((const __le16 *)p);
85796 }
85797
85798-static inline u32 get_unaligned_le32(const void *p)
85799+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
85800 {
85801- return le32_to_cpup((__le32 *)p);
85802+ return le32_to_cpup((const __le32 *)p);
85803 }
85804
85805-static inline u64 get_unaligned_le64(const void *p)
85806+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
85807 {
85808- return le64_to_cpup((__le64 *)p);
85809+ return le64_to_cpup((const __le64 *)p);
85810 }
85811
85812-static inline u16 get_unaligned_be16(const void *p)
85813+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
85814 {
85815- return be16_to_cpup((__be16 *)p);
85816+ return be16_to_cpup((const __be16 *)p);
85817 }
85818
85819-static inline u32 get_unaligned_be32(const void *p)
85820+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
85821 {
85822- return be32_to_cpup((__be32 *)p);
85823+ return be32_to_cpup((const __be32 *)p);
85824 }
85825
85826-static inline u64 get_unaligned_be64(const void *p)
85827+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
85828 {
85829- return be64_to_cpup((__be64 *)p);
85830+ return be64_to_cpup((const __be64 *)p);
85831 }
85832
85833 static inline void put_unaligned_le16(u16 val, void *p)
85834diff --git a/include/linux/usb.h b/include/linux/usb.h
85835index 058a769..c17a1c2c 100644
85836--- a/include/linux/usb.h
85837+++ b/include/linux/usb.h
85838@@ -566,7 +566,7 @@ struct usb_device {
85839 int maxchild;
85840
85841 u32 quirks;
85842- atomic_t urbnum;
85843+ atomic_unchecked_t urbnum;
85844
85845 unsigned long active_duration;
85846
85847@@ -1650,7 +1650,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
85848
85849 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
85850 __u8 request, __u8 requesttype, __u16 value, __u16 index,
85851- void *data, __u16 size, int timeout);
85852+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
85853 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
85854 void *data, int len, int *actual_length, int timeout);
85855 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
85856diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
85857index 9fd9e48..e2c5f35 100644
85858--- a/include/linux/usb/renesas_usbhs.h
85859+++ b/include/linux/usb/renesas_usbhs.h
85860@@ -39,7 +39,7 @@ enum {
85861 */
85862 struct renesas_usbhs_driver_callback {
85863 int (*notify_hotplug)(struct platform_device *pdev);
85864-};
85865+} __no_const;
85866
85867 /*
85868 * callback functions for platform
85869diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
85870index 8297e5b..0dfae27 100644
85871--- a/include/linux/user_namespace.h
85872+++ b/include/linux/user_namespace.h
85873@@ -39,7 +39,7 @@ struct user_namespace {
85874 struct key *persistent_keyring_register;
85875 struct rw_semaphore persistent_keyring_register_sem;
85876 #endif
85877-};
85878+} __randomize_layout;
85879
85880 extern struct user_namespace init_user_ns;
85881
85882diff --git a/include/linux/utsname.h b/include/linux/utsname.h
85883index 5093f58..c103e58 100644
85884--- a/include/linux/utsname.h
85885+++ b/include/linux/utsname.h
85886@@ -25,7 +25,7 @@ struct uts_namespace {
85887 struct new_utsname name;
85888 struct user_namespace *user_ns;
85889 struct ns_common ns;
85890-};
85891+} __randomize_layout;
85892 extern struct uts_namespace init_uts_ns;
85893
85894 #ifdef CONFIG_UTS_NS
85895diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
85896index 6f8fbcf..4efc177 100644
85897--- a/include/linux/vermagic.h
85898+++ b/include/linux/vermagic.h
85899@@ -25,9 +25,42 @@
85900 #define MODULE_ARCH_VERMAGIC ""
85901 #endif
85902
85903+#ifdef CONFIG_PAX_REFCOUNT
85904+#define MODULE_PAX_REFCOUNT "REFCOUNT "
85905+#else
85906+#define MODULE_PAX_REFCOUNT ""
85907+#endif
85908+
85909+#ifdef CONSTIFY_PLUGIN
85910+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
85911+#else
85912+#define MODULE_CONSTIFY_PLUGIN ""
85913+#endif
85914+
85915+#ifdef STACKLEAK_PLUGIN
85916+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
85917+#else
85918+#define MODULE_STACKLEAK_PLUGIN ""
85919+#endif
85920+
85921+#ifdef RANDSTRUCT_PLUGIN
85922+#include <generated/randomize_layout_hash.h>
85923+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
85924+#else
85925+#define MODULE_RANDSTRUCT_PLUGIN
85926+#endif
85927+
85928+#ifdef CONFIG_GRKERNSEC
85929+#define MODULE_GRSEC "GRSEC "
85930+#else
85931+#define MODULE_GRSEC ""
85932+#endif
85933+
85934 #define VERMAGIC_STRING \
85935 UTS_RELEASE " " \
85936 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
85937 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
85938- MODULE_ARCH_VERMAGIC
85939+ MODULE_ARCH_VERMAGIC \
85940+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
85941+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
85942
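/*
 * Reviewer note: with, say, CONFIG_PAX_REFCOUNT and CONFIG_GRKERNSEC
 * enabled, VERMAGIC_STRING would expand to something like
 * (illustrative values):
 *
 *	"3.19.1-grsec SMP mod_unload REFCOUNT GRSEC "
 *
 * so module loading rejects objects built without the matching
 * hardening options rather than running them with mismatched structure
 * layouts or missing checks.
 */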
85943diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
85944index b483abd..af305ad 100644
85945--- a/include/linux/vga_switcheroo.h
85946+++ b/include/linux/vga_switcheroo.h
85947@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
85948
85949 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
85950
85951-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
85952+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
85953 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
85954-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
85955+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
85956 #else
85957
85958 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
85959@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
85960
85961 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
85962
85963-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
85964+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
85965 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
85966-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
85967+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
85968
85969 #endif
85970 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
85971diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
85972index b87696f..1d11de7 100644
85973--- a/include/linux/vmalloc.h
85974+++ b/include/linux/vmalloc.h
85975@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
85976 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
85977 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
85978 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
85979+
85980+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85981+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
85982+#endif
85983+
85984 /* bits [20..32] reserved for arch specific ioremap internals */
85985
85986 /*
85987@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
85988 unsigned long flags, pgprot_t prot);
85989 extern void vunmap(const void *addr);
85990
85991+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85992+extern void unmap_process_stacks(struct task_struct *task);
85993+#endif
85994+
85995 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
85996 unsigned long uaddr, void *kaddr,
85997 unsigned long size);
85998@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
85999
86000 /* for /dev/kmem */
86001 extern long vread(char *buf, char *addr, unsigned long count);
86002-extern long vwrite(char *buf, char *addr, unsigned long count);
86003+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
86004
86005 /*
86006 * Internals. Don't use..
86007diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
86008index 82e7db7..f8ce3d0 100644
86009--- a/include/linux/vmstat.h
86010+++ b/include/linux/vmstat.h
86011@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
86012 /*
86013 * Zone based page accounting with per cpu differentials.
86014 */
86015-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86016+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86017
86018 static inline void zone_page_state_add(long x, struct zone *zone,
86019 enum zone_stat_item item)
86020 {
86021- atomic_long_add(x, &zone->vm_stat[item]);
86022- atomic_long_add(x, &vm_stat[item]);
86023+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
86024+ atomic_long_add_unchecked(x, &vm_stat[item]);
86025 }
86026
86027-static inline unsigned long global_page_state(enum zone_stat_item item)
86028+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
86029 {
86030- long x = atomic_long_read(&vm_stat[item]);
86031+ long x = atomic_long_read_unchecked(&vm_stat[item]);
86032 #ifdef CONFIG_SMP
86033 if (x < 0)
86034 x = 0;
86035@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
86036 return x;
86037 }
86038
86039-static inline unsigned long zone_page_state(struct zone *zone,
86040+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
86041 enum zone_stat_item item)
86042 {
86043- long x = atomic_long_read(&zone->vm_stat[item]);
86044+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86045 #ifdef CONFIG_SMP
86046 if (x < 0)
86047 x = 0;
86048@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
86049 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
86050 enum zone_stat_item item)
86051 {
86052- long x = atomic_long_read(&zone->vm_stat[item]);
86053+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86054
86055 #ifdef CONFIG_SMP
86056 int cpu;
86057@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
86058
86059 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
86060 {
86061- atomic_long_inc(&zone->vm_stat[item]);
86062- atomic_long_inc(&vm_stat[item]);
86063+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
86064+ atomic_long_inc_unchecked(&vm_stat[item]);
86065 }
86066
86067 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
86068 {
86069- atomic_long_dec(&zone->vm_stat[item]);
86070- atomic_long_dec(&vm_stat[item]);
86071+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
86072+ atomic_long_dec_unchecked(&vm_stat[item]);
86073 }
86074
86075 static inline void __inc_zone_page_state(struct page *page,
86076diff --git a/include/linux/xattr.h b/include/linux/xattr.h
86077index 91b0a68..0e9adf6 100644
86078--- a/include/linux/xattr.h
86079+++ b/include/linux/xattr.h
86080@@ -28,7 +28,7 @@ struct xattr_handler {
86081 size_t size, int handler_flags);
86082 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
86083 size_t size, int flags, int handler_flags);
86084-};
86085+} __do_const;
86086
86087 struct xattr {
86088 const char *name;
86089@@ -37,6 +37,9 @@ struct xattr {
86090 };
86091
86092 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
86093+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
86094+ssize_t pax_getxattr(struct dentry *, void *, size_t);
86095+#endif
86096 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
86097 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
86098 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
86099diff --git a/include/linux/zlib.h b/include/linux/zlib.h
86100index 92dbbd3..13ab0b3 100644
86101--- a/include/linux/zlib.h
86102+++ b/include/linux/zlib.h
86103@@ -31,6 +31,7 @@
86104 #define _ZLIB_H
86105
86106 #include <linux/zconf.h>
86107+#include <linux/compiler.h>
86108
86109 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
86110 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
86111@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
86112
86113 /* basic functions */
86114
86115-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
86116+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
86117 /*
86118 Returns the number of bytes that needs to be allocated for a per-
86119 stream workspace with the specified parameters. A pointer to this
86120diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
86121index eb76cfd..9fd0e7c 100644
86122--- a/include/media/v4l2-dev.h
86123+++ b/include/media/v4l2-dev.h
86124@@ -75,7 +75,7 @@ struct v4l2_file_operations {
86125 int (*mmap) (struct file *, struct vm_area_struct *);
86126 int (*open) (struct file *);
86127 int (*release) (struct file *);
86128-};
86129+} __do_const;
86130
86131 /*
86132 * Newer version of video_device, handled by videodev2.c
86133diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
86134index ffb69da..040393e 100644
86135--- a/include/media/v4l2-device.h
86136+++ b/include/media/v4l2-device.h
86137@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
86138 this function returns 0. If the name ends with a digit (e.g. cx18),
86139 then the name will be set to cx18-0 since cx180 looks really odd. */
86140 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
86141- atomic_t *instance);
86142+ atomic_unchecked_t *instance);
86143
86144 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
86145 Since the parent disappears this ensures that v4l2_dev doesn't have an
86146diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
86147index 2a25dec..bf6dd8a 100644
86148--- a/include/net/9p/transport.h
86149+++ b/include/net/9p/transport.h
86150@@ -62,7 +62,7 @@ struct p9_trans_module {
86151 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
86152 int (*zc_request)(struct p9_client *, struct p9_req_t *,
86153 char *, char *, int , int, int, int);
86154-};
86155+} __do_const;
86156
86157 void v9fs_register_trans(struct p9_trans_module *m);
86158 void v9fs_unregister_trans(struct p9_trans_module *m);
86159diff --git a/include/net/af_unix.h b/include/net/af_unix.h
86160index a175ba4..196eb8242 100644
86161--- a/include/net/af_unix.h
86162+++ b/include/net/af_unix.h
86163@@ -36,7 +36,7 @@ struct unix_skb_parms {
86164 u32 secid; /* Security ID */
86165 #endif
86166 u32 consumed;
86167-};
86168+} __randomize_layout;
86169
86170 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
86171 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
86172diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
86173index d1bb342..e12f7d2 100644
86174--- a/include/net/bluetooth/l2cap.h
86175+++ b/include/net/bluetooth/l2cap.h
86176@@ -608,7 +608,7 @@ struct l2cap_ops {
86177 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
86178 unsigned long hdr_len,
86179 unsigned long len, int nb);
86180-};
86181+} __do_const;
86182
86183 struct l2cap_conn {
86184 struct hci_conn *hcon;
86185diff --git a/include/net/bonding.h b/include/net/bonding.h
86186index 983a94b..7aa9b16 100644
86187--- a/include/net/bonding.h
86188+++ b/include/net/bonding.h
86189@@ -647,7 +647,7 @@ extern struct rtnl_link_ops bond_link_ops;
86190
86191 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
86192 {
86193- atomic_long_inc(&dev->tx_dropped);
86194+ atomic_long_inc_unchecked(&dev->tx_dropped);
86195 dev_kfree_skb_any(skb);
86196 }
86197
86198diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
86199index f2ae33d..c457cf0 100644
86200--- a/include/net/caif/cfctrl.h
86201+++ b/include/net/caif/cfctrl.h
86202@@ -52,7 +52,7 @@ struct cfctrl_rsp {
86203 void (*radioset_rsp)(void);
86204 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
86205 struct cflayer *client_layer);
86206-};
86207+} __no_const;
86208
86209 /* Link Setup Parameters for CAIF-Links. */
86210 struct cfctrl_link_param {
86211@@ -101,8 +101,8 @@ struct cfctrl_request_info {
86212 struct cfctrl {
86213 struct cfsrvl serv;
86214 struct cfctrl_rsp res;
86215- atomic_t req_seq_no;
86216- atomic_t rsp_seq_no;
86217+ atomic_unchecked_t req_seq_no;
86218+ atomic_unchecked_t rsp_seq_no;
86219 struct list_head list;
86220 /* Protects from simultaneous access to first_req list */
86221 spinlock_t info_list_lock;
86222diff --git a/include/net/flow.h b/include/net/flow.h
86223index 8109a15..504466d 100644
86224--- a/include/net/flow.h
86225+++ b/include/net/flow.h
86226@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
86227
86228 void flow_cache_flush(struct net *net);
86229 void flow_cache_flush_deferred(struct net *net);
86230-extern atomic_t flow_cache_genid;
86231+extern atomic_unchecked_t flow_cache_genid;
86232
86233 #endif
86234diff --git a/include/net/genetlink.h b/include/net/genetlink.h
86235index 6c92415..3a352d8 100644
86236--- a/include/net/genetlink.h
86237+++ b/include/net/genetlink.h
86238@@ -130,7 +130,7 @@ struct genl_ops {
86239 u8 cmd;
86240 u8 internal_flags;
86241 u8 flags;
86242-};
86243+} __do_const;
86244
86245 int __genl_register_family(struct genl_family *family);
86246
86247diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
86248index 734d9b5..48a9a4b 100644
86249--- a/include/net/gro_cells.h
86250+++ b/include/net/gro_cells.h
86251@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
86252 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
86253
86254 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
86255- atomic_long_inc(&dev->rx_dropped);
86256+ atomic_long_inc_unchecked(&dev->rx_dropped);
86257 kfree_skb(skb);
86258 return;
86259 }
86260diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
86261index 848e85c..051c7de 100644
86262--- a/include/net/inet_connection_sock.h
86263+++ b/include/net/inet_connection_sock.h
86264@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
86265 int (*bind_conflict)(const struct sock *sk,
86266 const struct inet_bind_bucket *tb, bool relax);
86267 void (*mtu_reduced)(struct sock *sk);
86268-};
86269+} __do_const;
86270
86271 /** inet_connection_sock - INET connection oriented sock
86272 *
86273diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
86274index 80479ab..0c3f647 100644
86275--- a/include/net/inetpeer.h
86276+++ b/include/net/inetpeer.h
86277@@ -47,7 +47,7 @@ struct inet_peer {
86278 */
86279 union {
86280 struct {
86281- atomic_t rid; /* Frag reception counter */
86282+ atomic_unchecked_t rid; /* Frag reception counter */
86283 };
86284 struct rcu_head rcu;
86285 struct inet_peer *gc_next;
86286diff --git a/include/net/ip.h b/include/net/ip.h
86287index 09cf5ae..ab62fcf 100644
86288--- a/include/net/ip.h
86289+++ b/include/net/ip.h
86290@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
86291 }
86292 }
86293
86294-u32 ip_idents_reserve(u32 hash, int segs);
86295+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
86296 void __ip_select_ident(struct iphdr *iph, int segs);
86297
86298 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
86299diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
86300index 09a819e..3ab9e14 100644
86301--- a/include/net/ip_fib.h
86302+++ b/include/net/ip_fib.h
86303@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
86304
86305 #define FIB_RES_SADDR(net, res) \
86306 ((FIB_RES_NH(res).nh_saddr_genid == \
86307- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
86308+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
86309 FIB_RES_NH(res).nh_saddr : \
86310 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
86311 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
86312diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
86313index 615b20b..fd4cbd8 100644
86314--- a/include/net/ip_vs.h
86315+++ b/include/net/ip_vs.h
86316@@ -534,7 +534,7 @@ struct ip_vs_conn {
86317 struct ip_vs_conn *control; /* Master control connection */
86318 atomic_t n_control; /* Number of controlled ones */
86319 struct ip_vs_dest *dest; /* real server */
86320- atomic_t in_pkts; /* incoming packet counter */
86321+ atomic_unchecked_t in_pkts; /* incoming packet counter */
86322
86323 /* Packet transmitter for different forwarding methods. If it
86324 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
86325@@ -682,7 +682,7 @@ struct ip_vs_dest {
86326 __be16 port; /* port number of the server */
86327 union nf_inet_addr addr; /* IP address of the server */
86328 volatile unsigned int flags; /* dest status flags */
86329- atomic_t conn_flags; /* flags to copy to conn */
86330+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
86331 atomic_t weight; /* server weight */
86332
86333 atomic_t refcnt; /* reference counter */
86334@@ -928,11 +928,11 @@ struct netns_ipvs {
86335 /* ip_vs_lblc */
86336 int sysctl_lblc_expiration;
86337 struct ctl_table_header *lblc_ctl_header;
86338- struct ctl_table *lblc_ctl_table;
86339+ ctl_table_no_const *lblc_ctl_table;
86340 /* ip_vs_lblcr */
86341 int sysctl_lblcr_expiration;
86342 struct ctl_table_header *lblcr_ctl_header;
86343- struct ctl_table *lblcr_ctl_table;
86344+ ctl_table_no_const *lblcr_ctl_table;
86345 /* ip_vs_est */
86346 struct list_head est_list; /* estimator list */
86347 spinlock_t est_lock;
86348diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
86349index 8d4f588..2e37ad2 100644
86350--- a/include/net/irda/ircomm_tty.h
86351+++ b/include/net/irda/ircomm_tty.h
86352@@ -33,6 +33,7 @@
86353 #include <linux/termios.h>
86354 #include <linux/timer.h>
86355 #include <linux/tty.h> /* struct tty_struct */
86356+#include <asm/local.h>
86357
86358 #include <net/irda/irias_object.h>
86359 #include <net/irda/ircomm_core.h>
86360diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
86361index 714cc9a..ea05f3e 100644
86362--- a/include/net/iucv/af_iucv.h
86363+++ b/include/net/iucv/af_iucv.h
86364@@ -149,7 +149,7 @@ struct iucv_skb_cb {
86365 struct iucv_sock_list {
86366 struct hlist_head head;
86367 rwlock_t lock;
86368- atomic_t autobind_name;
86369+ atomic_unchecked_t autobind_name;
86370 };
86371
86372 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
86373diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
86374index f3be818..bf46196 100644
86375--- a/include/net/llc_c_ac.h
86376+++ b/include/net/llc_c_ac.h
86377@@ -87,7 +87,7 @@
86378 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
86379 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
86380
86381-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86382+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86383
86384 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
86385 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
86386diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
86387index 3948cf1..83b28c4 100644
86388--- a/include/net/llc_c_ev.h
86389+++ b/include/net/llc_c_ev.h
86390@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
86391 return (struct llc_conn_state_ev *)skb->cb;
86392 }
86393
86394-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86395-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86396+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86397+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86398
86399 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
86400 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
86401diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
86402index 48f3f89..0e92c50 100644
86403--- a/include/net/llc_c_st.h
86404+++ b/include/net/llc_c_st.h
86405@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
86406 u8 next_state;
86407 const llc_conn_ev_qfyr_t *ev_qualifiers;
86408 const llc_conn_action_t *ev_actions;
86409-};
86410+} __do_const;
86411
86412 struct llc_conn_state {
86413 u8 current_state;
86414diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
86415index a61b98c..aade1eb 100644
86416--- a/include/net/llc_s_ac.h
86417+++ b/include/net/llc_s_ac.h
86418@@ -23,7 +23,7 @@
86419 #define SAP_ACT_TEST_IND 9
86420
86421 /* All action functions must look like this */
86422-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86423+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86424
86425 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
86426 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
86427diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
86428index c4359e2..76dbc4a 100644
86429--- a/include/net/llc_s_st.h
86430+++ b/include/net/llc_s_st.h
86431@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
86432 llc_sap_ev_t ev;
86433 u8 next_state;
86434 const llc_sap_action_t *ev_actions;
86435-};
86436+} __do_const;
86437
86438 struct llc_sap_state {
86439 u8 curr_state;
86440diff --git a/include/net/mac80211.h b/include/net/mac80211.h
86441index 29c7be8..746bd73 100644
86442--- a/include/net/mac80211.h
86443+++ b/include/net/mac80211.h
86444@@ -4869,7 +4869,7 @@ struct rate_control_ops {
86445 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
86446
86447 u32 (*get_expected_throughput)(void *priv_sta);
86448-};
86449+} __do_const;
86450
86451 static inline int rate_supported(struct ieee80211_sta *sta,
86452 enum ieee80211_band band,
86453diff --git a/include/net/neighbour.h b/include/net/neighbour.h
86454index 76f7084..8f36e39 100644
86455--- a/include/net/neighbour.h
86456+++ b/include/net/neighbour.h
86457@@ -163,7 +163,7 @@ struct neigh_ops {
86458 void (*error_report)(struct neighbour *, struct sk_buff *);
86459 int (*output)(struct neighbour *, struct sk_buff *);
86460 int (*connected_output)(struct neighbour *, struct sk_buff *);
86461-};
86462+} __do_const;
86463
86464 struct pneigh_entry {
86465 struct pneigh_entry *next;
86466@@ -217,7 +217,7 @@ struct neigh_table {
86467 struct neigh_statistics __percpu *stats;
86468 struct neigh_hash_table __rcu *nht;
86469 struct pneigh_entry **phash_buckets;
86470-};
86471+} __randomize_layout;
86472
86473 enum {
86474 NEIGH_ARP_TABLE = 0,
86475diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
86476index 2e8756b8..0bd0083 100644
86477--- a/include/net/net_namespace.h
86478+++ b/include/net/net_namespace.h
86479@@ -130,8 +130,8 @@ struct net {
86480 struct netns_ipvs *ipvs;
86481 #endif
86482 struct sock *diag_nlsk;
86483- atomic_t fnhe_genid;
86484-};
86485+ atomic_unchecked_t fnhe_genid;
86486+} __randomize_layout;
86487
86488 #include <linux/seq_file_net.h>
86489
86490@@ -287,7 +287,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
86491 #define __net_init __init
86492 #define __net_exit __exit_refok
86493 #define __net_initdata __initdata
86494+#ifdef CONSTIFY_PLUGIN
86495 #define __net_initconst __initconst
86496+#else
86497+#define __net_initconst __initdata
86498+#endif
86499 #endif
86500
86501 struct pernet_operations {
86502@@ -297,7 +301,7 @@ struct pernet_operations {
86503 void (*exit_batch)(struct list_head *net_exit_list);
86504 int *id;
86505 size_t size;
86506-};
86507+} __do_const;
86508
86509 /*
86510 * Use these carefully. If you implement a network device and it
86511@@ -345,12 +349,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
86512
86513 static inline int rt_genid_ipv4(struct net *net)
86514 {
86515- return atomic_read(&net->ipv4.rt_genid);
86516+ return atomic_read_unchecked(&net->ipv4.rt_genid);
86517 }
86518
86519 static inline void rt_genid_bump_ipv4(struct net *net)
86520 {
86521- atomic_inc(&net->ipv4.rt_genid);
86522+ atomic_inc_unchecked(&net->ipv4.rt_genid);
86523 }
86524
86525 extern void (*__fib6_flush_trees)(struct net *net);
86526@@ -377,12 +381,12 @@ static inline void rt_genid_bump_all(struct net *net)
86527
86528 static inline int fnhe_genid(struct net *net)
86529 {
86530- return atomic_read(&net->fnhe_genid);
86531+ return atomic_read_unchecked(&net->fnhe_genid);
86532 }
86533
86534 static inline void fnhe_genid_bump(struct net *net)
86535 {
86536- atomic_inc(&net->fnhe_genid);
86537+ atomic_inc_unchecked(&net->fnhe_genid);
86538 }
86539
86540 #endif /* __NET_NET_NAMESPACE_H */
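/*
 * Reviewer note: rt_genid/fnhe_genid are generation counters; bumping
 * one only invalidates cached entries, and wrapping is harmless, hence
 * atomic_unchecked_t. The consumer pattern, sketched after ipv4's
 * rt_is_expired():
 */
static bool demo_rt_is_stale(const struct rtable *rt, struct net *net)
{
	return rt->rt_genid != rt_genid_ipv4(net);	/* differ => stale */
}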
86541diff --git a/include/net/netlink.h b/include/net/netlink.h
86542index 6415835..ab96d87 100644
86543--- a/include/net/netlink.h
86544+++ b/include/net/netlink.h
86545@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
86546 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
86547 {
86548 if (mark)
86549- skb_trim(skb, (unsigned char *) mark - skb->data);
86550+ skb_trim(skb, (const unsigned char *) mark - skb->data);
86551 }
86552
86553 /**
86554diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
86555index 29d6a94..235d3d84 100644
86556--- a/include/net/netns/conntrack.h
86557+++ b/include/net/netns/conntrack.h
86558@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
86559 struct nf_proto_net {
86560 #ifdef CONFIG_SYSCTL
86561 struct ctl_table_header *ctl_table_header;
86562- struct ctl_table *ctl_table;
86563+ ctl_table_no_const *ctl_table;
86564 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
86565 struct ctl_table_header *ctl_compat_header;
86566- struct ctl_table *ctl_compat_table;
86567+ ctl_table_no_const *ctl_compat_table;
86568 #endif
86569 #endif
86570 unsigned int users;
86571@@ -60,7 +60,7 @@ struct nf_ip_net {
86572 struct nf_icmp_net icmpv6;
86573 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
86574 struct ctl_table_header *ctl_table_header;
86575- struct ctl_table *ctl_table;
86576+ ctl_table_no_const *ctl_table;
86577 #endif
86578 };
86579
86580diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
86581index 0ffef1a..2ce1ceb 100644
86582--- a/include/net/netns/ipv4.h
86583+++ b/include/net/netns/ipv4.h
86584@@ -84,7 +84,7 @@ struct netns_ipv4 {
86585
86586 struct ping_group_range ping_group_range;
86587
86588- atomic_t dev_addr_genid;
86589+ atomic_unchecked_t dev_addr_genid;
86590
86591 #ifdef CONFIG_SYSCTL
86592 unsigned long *sysctl_local_reserved_ports;
86593@@ -98,6 +98,6 @@ struct netns_ipv4 {
86594 struct fib_rules_ops *mr_rules_ops;
86595 #endif
86596 #endif
86597- atomic_t rt_genid;
86598+ atomic_unchecked_t rt_genid;
86599 };
86600 #endif
86601diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
86602index 69ae41f..4f94868 100644
86603--- a/include/net/netns/ipv6.h
86604+++ b/include/net/netns/ipv6.h
86605@@ -75,8 +75,8 @@ struct netns_ipv6 {
86606 struct fib_rules_ops *mr6_rules_ops;
86607 #endif
86608 #endif
86609- atomic_t dev_addr_genid;
86610- atomic_t fib6_sernum;
86611+ atomic_unchecked_t dev_addr_genid;
86612+ atomic_unchecked_t fib6_sernum;
86613 };
86614
86615 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
86616diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
86617index 730d82a..045f2c4 100644
86618--- a/include/net/netns/xfrm.h
86619+++ b/include/net/netns/xfrm.h
86620@@ -78,7 +78,7 @@ struct netns_xfrm {
86621
86622 /* flow cache part */
86623 struct flow_cache flow_cache_global;
86624- atomic_t flow_cache_genid;
86625+ atomic_unchecked_t flow_cache_genid;
86626 struct list_head flow_cache_gc_list;
86627 spinlock_t flow_cache_gc_lock;
86628 struct work_struct flow_cache_gc_work;
86629diff --git a/include/net/ping.h b/include/net/ping.h
86630index f074060..830fba0 100644
86631--- a/include/net/ping.h
86632+++ b/include/net/ping.h
86633@@ -54,7 +54,7 @@ struct ping_iter_state {
86634
86635 extern struct proto ping_prot;
86636 #if IS_ENABLED(CONFIG_IPV6)
86637-extern struct pingv6_ops pingv6_ops;
86638+extern struct pingv6_ops *pingv6_ops;
86639 #endif
86640
86641 struct pingfakehdr {
86642diff --git a/include/net/protocol.h b/include/net/protocol.h
86643index d6fcc1f..ca277058 100644
86644--- a/include/net/protocol.h
86645+++ b/include/net/protocol.h
86646@@ -49,7 +49,7 @@ struct net_protocol {
86647 * socket lookup?
86648 */
86649 icmp_strict_tag_validation:1;
86650-};
86651+} __do_const;
86652
86653 #if IS_ENABLED(CONFIG_IPV6)
86654 struct inet6_protocol {
86655@@ -62,7 +62,7 @@ struct inet6_protocol {
86656 u8 type, u8 code, int offset,
86657 __be32 info);
86658 unsigned int flags; /* INET6_PROTO_xxx */
86659-};
86660+} __do_const;
86661
86662 #define INET6_PROTO_NOPOLICY 0x1
86663 #define INET6_PROTO_FINAL 0x2
86664diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
86665index e21b9f9..0191ef0 100644
86666--- a/include/net/rtnetlink.h
86667+++ b/include/net/rtnetlink.h
86668@@ -93,7 +93,7 @@ struct rtnl_link_ops {
86669 int (*fill_slave_info)(struct sk_buff *skb,
86670 const struct net_device *dev,
86671 const struct net_device *slave_dev);
86672-};
86673+} __do_const;
86674
86675 int __rtnl_link_register(struct rtnl_link_ops *ops);
86676 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
86677diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
86678index 4a5b9a3..ca27d73 100644
86679--- a/include/net/sctp/checksum.h
86680+++ b/include/net/sctp/checksum.h
86681@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
86682 unsigned int offset)
86683 {
86684 struct sctphdr *sh = sctp_hdr(skb);
86685- __le32 ret, old = sh->checksum;
86686- const struct skb_checksum_ops ops = {
86687+ __le32 ret, old = sh->checksum;
86688+ static const struct skb_checksum_ops ops = {
86689 .update = sctp_csum_update,
86690 .combine = sctp_csum_combine,
86691 };
86692diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
86693index 487ef34..d457f98 100644
86694--- a/include/net/sctp/sm.h
86695+++ b/include/net/sctp/sm.h
86696@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
86697 typedef struct {
86698 sctp_state_fn_t *fn;
86699 const char *name;
86700-} sctp_sm_table_entry_t;
86701+} __do_const sctp_sm_table_entry_t;
86702
86703 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
86704 * currently in use.
86705@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
86706 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
86707
86708 /* Extern declarations for major data structures. */
86709-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86710+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86711
86712
86713 /* Get the size of a DATA chunk payload. */
86714diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
86715index 2bb2fcf..d17c291 100644
86716--- a/include/net/sctp/structs.h
86717+++ b/include/net/sctp/structs.h
86718@@ -509,7 +509,7 @@ struct sctp_pf {
86719 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
86720 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
86721 struct sctp_af *af;
86722-};
86723+} __do_const;
86724
86725
86726 /* Structure to track chunk fragments that have been acked, but peer
86727diff --git a/include/net/sock.h b/include/net/sock.h
86728index 2210fec..2249ad0 100644
86729--- a/include/net/sock.h
86730+++ b/include/net/sock.h
86731@@ -362,7 +362,7 @@ struct sock {
86732 unsigned int sk_napi_id;
86733 unsigned int sk_ll_usec;
86734 #endif
86735- atomic_t sk_drops;
86736+ atomic_unchecked_t sk_drops;
86737 int sk_rcvbuf;
86738
86739 struct sk_filter __rcu *sk_filter;
86740@@ -1061,7 +1061,7 @@ struct proto {
86741 void (*destroy_cgroup)(struct mem_cgroup *memcg);
86742 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
86743 #endif
86744-};
86745+} __randomize_layout;
86746
86747 /*
86748 * Bits in struct cg_proto.flags
86749@@ -1239,7 +1239,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
86750 page_counter_uncharge(&prot->memory_allocated, amt);
86751 }
86752
86753-static inline long
86754+static inline long __intentional_overflow(-1)
86755 sk_memory_allocated(const struct sock *sk)
86756 {
86757 struct proto *prot = sk->sk_prot;
86758@@ -1385,7 +1385,7 @@ struct sock_iocb {
86759 struct scm_cookie *scm;
86760 struct msghdr *msg, async_msg;
86761 struct kiocb *kiocb;
86762-};
86763+} __randomize_layout;
86764
86765 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
86766 {
86767@@ -1826,7 +1826,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
86768 }
86769
86770 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
86771- char __user *from, char *to,
86772+ char __user *from, unsigned char *to,
86773 int copy, int offset)
86774 {
86775 if (skb->ip_summed == CHECKSUM_NONE) {
86776@@ -2075,7 +2075,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
86777 }
86778 }
86779
86780-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86781+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86782
86783 /**
86784 * sk_page_frag - return an appropriate page_frag
86785diff --git a/include/net/tcp.h b/include/net/tcp.h
86786index 9d9111e..349c847 100644
86787--- a/include/net/tcp.h
86788+++ b/include/net/tcp.h
86789@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
86790 void tcp_xmit_retransmit_queue(struct sock *);
86791 void tcp_simple_retransmit(struct sock *);
86792 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
86793-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86794+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86795
86796 void tcp_send_probe0(struct sock *);
86797 void tcp_send_partial(struct sock *);
86798@@ -689,8 +689,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
86799 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
86800 */
86801 struct tcp_skb_cb {
86802- __u32 seq; /* Starting sequence number */
86803- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
86804+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
86805+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
86806 union {
86807 /* Note : tcp_tw_isn is used in input path only
86808 * (isn chosen by tcp_timewait_state_process())
86809@@ -715,7 +715,7 @@ struct tcp_skb_cb {
86810
86811 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
86812 /* 1 byte hole */
86813- __u32 ack_seq; /* Sequence number ACK'd */
86814+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
86815 union {
86816 struct inet_skb_parm h4;
86817 #if IS_ENABLED(CONFIG_IPV6)
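The __intentional_overflow() markers above and throughout this patch are consumed by the size_overflow GCC plugin, which otherwise instruments integer arithmetic and traps on unexpected wrap. The numeric argument selects what is exempted: by convention -1 covers the function's return value, a non-negative index names a parameter (so tcp_fragment() exempts its third argument), and on a struct field it exempts arithmetic on that field, which is why the deliberately wrapping TCP sequence numbers seq, end_seq and ack_seq are annotated. A sketch of the fallback definition when the plugin is absent (an assumption, not taken from this hunk):

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...)	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif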
86818diff --git a/include/net/xfrm.h b/include/net/xfrm.h
86819index dc4865e..152ee4c 100644
86820--- a/include/net/xfrm.h
86821+++ b/include/net/xfrm.h
86822@@ -285,7 +285,6 @@ struct xfrm_dst;
86823 struct xfrm_policy_afinfo {
86824 unsigned short family;
86825 struct dst_ops *dst_ops;
86826- void (*garbage_collect)(struct net *net);
86827 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
86828 const xfrm_address_t *saddr,
86829 const xfrm_address_t *daddr);
86830@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
86831 struct net_device *dev,
86832 const struct flowi *fl);
86833 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
86834-};
86835+} __do_const;
86836
86837 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
86838 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
86839@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
86840 int (*transport_finish)(struct sk_buff *skb,
86841 int async);
86842 void (*local_error)(struct sk_buff *skb, u32 mtu);
86843-};
86844+} __do_const;
86845
86846 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
86847 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
86848@@ -437,7 +436,7 @@ struct xfrm_mode {
86849 struct module *owner;
86850 unsigned int encap;
86851 int flags;
86852-};
86853+} __do_const;
86854
86855 /* Flags for xfrm_mode. */
86856 enum {
86857@@ -534,7 +533,7 @@ struct xfrm_policy {
86858 struct timer_list timer;
86859
86860 struct flow_cache_object flo;
86861- atomic_t genid;
86862+ atomic_unchecked_t genid;
86863 u32 priority;
86864 u32 index;
86865 struct xfrm_mark mark;
86866@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
86867 }
86868
86869 void xfrm_garbage_collect(struct net *net);
86870+void xfrm_garbage_collect_deferred(struct net *net);
86871
86872 #else
86873
86874@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
86875 static inline void xfrm_garbage_collect(struct net *net)
86876 {
86877 }
86878+static inline void xfrm_garbage_collect_deferred(struct net *net)
86879+{
86880+}
86881 #endif
86882
86883 static __inline__
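The new xfrm_garbage_collect_deferred() above follows the kernel's usual pattern for optional subsystems: one real declaration inside the config-enabled branch and an empty static inline in the #else branch, so every caller compiles unconditionally and the stub folds away. The same shape, with a hypothetical feature name:

#ifdef CONFIG_FEATURE_X
void feature_x_run(struct net *net);		/* real implementation elsewhere */
#else
static inline void feature_x_run(struct net *net)
{
	/* compiled out: the optimizer removes the call entirely */
}
#endif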
86884diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
86885index 1017e0b..227aa4d 100644
86886--- a/include/rdma/iw_cm.h
86887+++ b/include/rdma/iw_cm.h
86888@@ -122,7 +122,7 @@ struct iw_cm_verbs {
86889 int backlog);
86890
86891 int (*destroy_listen)(struct iw_cm_id *cm_id);
86892-};
86893+} __no_const;
86894
86895 /**
86896 * iw_create_cm_id - Create an IW CM identifier.
86897diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
86898index 93d14da..734b3d8 100644
86899--- a/include/scsi/libfc.h
86900+++ b/include/scsi/libfc.h
86901@@ -771,6 +771,7 @@ struct libfc_function_template {
86902 */
86903 void (*disc_stop_final) (struct fc_lport *);
86904 };
86905+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
86906
86907 /**
86908 * struct fc_disc - Discovery context
86909@@ -875,7 +876,7 @@ struct fc_lport {
86910 struct fc_vport *vport;
86911
86912 /* Operational Information */
86913- struct libfc_function_template tt;
86914+ libfc_function_template_no_const tt;
86915 u8 link_up;
86916 u8 qfull;
86917 enum fc_lport_state state;
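struct libfc_function_template gets constified as a whole, but struct fc_lport embeds a copy (tt) that libfc populates at runtime, so the patch introduces a __no_const typedef as a per-instance opt-out; scsi_transport_fc.h below applies the same trick to struct fc_function_template. A sketch of the complementary attribute (the plugin wiring is an assumption; the typedef line is from this hunk):

#ifdef CONSTIFY_PLUGIN
#define __no_const	__attribute__((no_const))
#else
#define __no_const
#endif

/* one writable alias of an otherwise read-only ops structure */
typedef struct libfc_function_template __no_const libfc_function_template_no_const;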
86918diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
86919index 3a4edd1..feb2e3e 100644
86920--- a/include/scsi/scsi_device.h
86921+++ b/include/scsi/scsi_device.h
86922@@ -185,9 +185,9 @@ struct scsi_device {
86923 unsigned int max_device_blocked; /* what device_blocked counts down from */
86924 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
86925
86926- atomic_t iorequest_cnt;
86927- atomic_t iodone_cnt;
86928- atomic_t ioerr_cnt;
86929+ atomic_unchecked_t iorequest_cnt;
86930+ atomic_unchecked_t iodone_cnt;
86931+ atomic_unchecked_t ioerr_cnt;
86932
86933 struct device sdev_gendev,
86934 sdev_dev;
86935diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
86936index 007a0bc..7188db8 100644
86937--- a/include/scsi/scsi_transport_fc.h
86938+++ b/include/scsi/scsi_transport_fc.h
86939@@ -756,7 +756,8 @@ struct fc_function_template {
86940 unsigned long show_host_system_hostname:1;
86941
86942 unsigned long disable_target_scan:1;
86943-};
86944+} __do_const;
86945+typedef struct fc_function_template __no_const fc_function_template_no_const;
86946
86947
86948 /**
86949diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
86950index 396e8f7..b037e89 100644
86951--- a/include/sound/compress_driver.h
86952+++ b/include/sound/compress_driver.h
86953@@ -129,7 +129,7 @@ struct snd_compr_ops {
86954 struct snd_compr_caps *caps);
86955 int (*get_codec_caps) (struct snd_compr_stream *stream,
86956 struct snd_compr_codec_caps *codec);
86957-};
86958+} __no_const;
86959
86960 /**
86961 * struct snd_compr: Compressed device
86962diff --git a/include/sound/soc.h b/include/sound/soc.h
86963index ac8b333..59c3692 100644
86964--- a/include/sound/soc.h
86965+++ b/include/sound/soc.h
86966@@ -853,7 +853,7 @@ struct snd_soc_codec_driver {
86967 enum snd_soc_dapm_type, int);
86968
86969 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
86970-};
86971+} __do_const;
86972
86973 /* SoC platform interface */
86974 struct snd_soc_platform_driver {
86975@@ -880,7 +880,7 @@ struct snd_soc_platform_driver {
86976 const struct snd_compr_ops *compr_ops;
86977
86978 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
86979-};
86980+} __do_const;
86981
86982 struct snd_soc_dai_link_component {
86983 const char *name;
86984diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
86985index 4a8795a..53d8119 100644
86986--- a/include/target/target_core_base.h
86987+++ b/include/target/target_core_base.h
86988@@ -767,7 +767,7 @@ struct se_device {
86989 atomic_long_t write_bytes;
86990 /* Active commands on this virtual SE device */
86991 atomic_t simple_cmds;
86992- atomic_t dev_ordered_id;
86993+ atomic_unchecked_t dev_ordered_id;
86994 atomic_t dev_ordered_sync;
86995 atomic_t dev_qf_count;
86996 int export_count;
86997diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
86998new file mode 100644
86999index 0000000..fb634b7
87000--- /dev/null
87001+++ b/include/trace/events/fs.h
87002@@ -0,0 +1,53 @@
87003+#undef TRACE_SYSTEM
87004+#define TRACE_SYSTEM fs
87005+
87006+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
87007+#define _TRACE_FS_H
87008+
87009+#include <linux/fs.h>
87010+#include <linux/tracepoint.h>
87011+
87012+TRACE_EVENT(do_sys_open,
87013+
87014+ TP_PROTO(const char *filename, int flags, int mode),
87015+
87016+ TP_ARGS(filename, flags, mode),
87017+
87018+ TP_STRUCT__entry(
87019+ __string( filename, filename )
87020+ __field( int, flags )
87021+ __field( int, mode )
87022+ ),
87023+
87024+ TP_fast_assign(
87025+ __assign_str(filename, filename);
87026+ __entry->flags = flags;
87027+ __entry->mode = mode;
87028+ ),
87029+
87030+ TP_printk("\"%s\" %x %o",
87031+ __get_str(filename), __entry->flags, __entry->mode)
87032+);
87033+
87034+TRACE_EVENT(open_exec,
87035+
87036+ TP_PROTO(const char *filename),
87037+
87038+ TP_ARGS(filename),
87039+
87040+ TP_STRUCT__entry(
87041+ __string( filename, filename )
87042+ ),
87043+
87044+ TP_fast_assign(
87045+ __assign_str(filename, filename);
87046+ ),
87047+
87048+ TP_printk("\"%s\"",
87049+ __get_str(filename))
87050+);
87051+
87052+#endif /* _TRACE_FS_H */
87053+
87054+/* This part must be outside protection */
87055+#include <trace/define_trace.h>
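The new include/trace/events/fs.h is a self-contained TRACE_EVENT definition: TP_PROTO gives the tracepoint's C prototype, TP_ARGS names its arguments, TP_STRUCT__entry lays out the ring-buffer record (__string reserves a variable-length string slot, __field a fixed-size slot), TP_fast_assign copies the arguments into that record, and TP_printk renders it in the trace output. Elsewhere the patch still has to instantiate and fire the events; a sketch of the usual call-site shape (placement hypothetical):

/* in exactly one .c file, to emit the event definitions */
#define CREATE_TRACE_POINTS
#include <trace/events/fs.h>

	/* ... inside do_sys_open(), once the name is resolved ... */
	trace_do_sys_open(tmp->name, flags, mode);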
87056diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
87057index 3608beb..df39d8a 100644
87058--- a/include/trace/events/irq.h
87059+++ b/include/trace/events/irq.h
87060@@ -36,7 +36,7 @@ struct softirq_action;
87061 */
87062 TRACE_EVENT(irq_handler_entry,
87063
87064- TP_PROTO(int irq, struct irqaction *action),
87065+ TP_PROTO(int irq, const struct irqaction *action),
87066
87067 TP_ARGS(irq, action),
87068
87069@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
87070 */
87071 TRACE_EVENT(irq_handler_exit,
87072
87073- TP_PROTO(int irq, struct irqaction *action, int ret),
87074+ TP_PROTO(int irq, const struct irqaction *action, int ret),
87075
87076 TP_ARGS(irq, action, ret),
87077
87078diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
87079index 7caf44c..23c6f27 100644
87080--- a/include/uapi/linux/a.out.h
87081+++ b/include/uapi/linux/a.out.h
87082@@ -39,6 +39,14 @@ enum machine_type {
87083 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
87084 };
87085
87086+/* Constants for the N_FLAGS field */
87087+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87088+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
87089+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
87090+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
87091+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87092+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87093+
87094 #if !defined (N_MAGIC)
87095 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
87096 #endif
87097diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
87098index 22b6ad3..aeba37e 100644
87099--- a/include/uapi/linux/bcache.h
87100+++ b/include/uapi/linux/bcache.h
87101@@ -5,6 +5,7 @@
87102 * Bcache on disk data structures
87103 */
87104
87105+#include <linux/compiler.h>
87106 #include <asm/types.h>
87107
87108 #define BITMASK(name, type, field, offset, size) \
87109@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
87110 /* Btree keys - all units are in sectors */
87111
87112 struct bkey {
87113- __u64 high;
87114- __u64 low;
87115+ __u64 high __intentional_overflow(-1);
87116+ __u64 low __intentional_overflow(-1);
87117 __u64 ptr[];
87118 };
87119
87120diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
87121index d876736..ccce5c0 100644
87122--- a/include/uapi/linux/byteorder/little_endian.h
87123+++ b/include/uapi/linux/byteorder/little_endian.h
87124@@ -42,51 +42,51 @@
87125
87126 static inline __le64 __cpu_to_le64p(const __u64 *p)
87127 {
87128- return (__force __le64)*p;
87129+ return (__force const __le64)*p;
87130 }
87131-static inline __u64 __le64_to_cpup(const __le64 *p)
87132+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
87133 {
87134- return (__force __u64)*p;
87135+ return (__force const __u64)*p;
87136 }
87137 static inline __le32 __cpu_to_le32p(const __u32 *p)
87138 {
87139- return (__force __le32)*p;
87140+ return (__force const __le32)*p;
87141 }
87142 static inline __u32 __le32_to_cpup(const __le32 *p)
87143 {
87144- return (__force __u32)*p;
87145+ return (__force const __u32)*p;
87146 }
87147 static inline __le16 __cpu_to_le16p(const __u16 *p)
87148 {
87149- return (__force __le16)*p;
87150+ return (__force const __le16)*p;
87151 }
87152 static inline __u16 __le16_to_cpup(const __le16 *p)
87153 {
87154- return (__force __u16)*p;
87155+ return (__force const __u16)*p;
87156 }
87157 static inline __be64 __cpu_to_be64p(const __u64 *p)
87158 {
87159- return (__force __be64)__swab64p(p);
87160+ return (__force const __be64)__swab64p(p);
87161 }
87162 static inline __u64 __be64_to_cpup(const __be64 *p)
87163 {
87164- return __swab64p((__u64 *)p);
87165+ return __swab64p((const __u64 *)p);
87166 }
87167 static inline __be32 __cpu_to_be32p(const __u32 *p)
87168 {
87169- return (__force __be32)__swab32p(p);
87170+ return (__force const __be32)__swab32p(p);
87171 }
87172-static inline __u32 __be32_to_cpup(const __be32 *p)
87173+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
87174 {
87175- return __swab32p((__u32 *)p);
87176+ return __swab32p((const __u32 *)p);
87177 }
87178 static inline __be16 __cpu_to_be16p(const __u16 *p)
87179 {
87180- return (__force __be16)__swab16p(p);
87181+ return (__force const __be16)__swab16p(p);
87182 }
87183 static inline __u16 __be16_to_cpup(const __be16 *p)
87184 {
87185- return __swab16p((__u16 *)p);
87186+ return __swab16p((const __u16 *)p);
87187 }
87188 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
87189 #define __le64_to_cpus(x) do { (void)(x); } while (0)
87190diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
87191index 71e1d0e..6cc9caf 100644
87192--- a/include/uapi/linux/elf.h
87193+++ b/include/uapi/linux/elf.h
87194@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
87195 #define PT_GNU_EH_FRAME 0x6474e550
87196
87197 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
87198+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
87199+
87200+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
87201+
87202+/* Constants for the e_flags field */
87203+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87204+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
87205+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
87206+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
87207+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87208+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87209
87210 /*
87211 * Extended Numbering
87212@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
87213 #define DT_DEBUG 21
87214 #define DT_TEXTREL 22
87215 #define DT_JMPREL 23
87216+#define DT_FLAGS 30
87217+ #define DF_TEXTREL 0x00000004
87218 #define DT_ENCODING 32
87219 #define OLD_DT_LOOS 0x60000000
87220 #define DT_LOOS 0x6000000d
87221@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
87222 #define PF_W 0x2
87223 #define PF_X 0x1
87224
87225+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
87226+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
87227+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
87228+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
87229+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
87230+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
87231+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
87232+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
87233+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
87234+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
87235+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
87236+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
87237+
87238 typedef struct elf32_phdr{
87239 Elf32_Word p_type;
87240 Elf32_Off p_offset;
87241@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
87242 #define EI_OSABI 7
87243 #define EI_PAD 8
87244
87245+#define EI_PAX 14
87246+
87247 #define ELFMAG0 0x7f /* EI_MAG */
87248 #define ELFMAG1 'E'
87249 #define ELFMAG2 'L'
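The EF_PAX_* bits above are the legacy per-binary PaX markings in the ELF header's e_flags, EI_PAX names a byte of e_ident used by older tools, and PT_PAX_FLAGS is a dedicated program header whose p_flags carry the enable/disable pairs (PF_MPROTECT/PF_NOMPROTECT and friends) consulted by the kernel's ELF loader. A userspace sketch of inspecting that header, mirroring what paxctl-style tools do (constants restated because a stock elf.h lacks them; error handling omitted):

#include <elf.h>
#include <stdio.h>

#define PT_PAX_FLAGS	0x65041580	/* PT_LOOS + 0x5041580 */
#define PF_MPROTECT	(1U << 8)
#define PF_NOMPROTECT	(1U << 9)

static void print_pax_mprotect(const Elf32_Ehdr *eh, const Elf32_Phdr *ph)
{
	int i;

	for (i = 0; i < eh->e_phnum; i++) {
		if (ph[i].p_type != PT_PAX_FLAGS)
			continue;
		printf("MPROTECT: %s\n",
		       (ph[i].p_flags & PF_NOMPROTECT) ? "disabled" :
		       (ph[i].p_flags & PF_MPROTECT)   ? "enabled" : "default");
	}
}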
87250diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
87251index aa169c4..6a2771d 100644
87252--- a/include/uapi/linux/personality.h
87253+++ b/include/uapi/linux/personality.h
87254@@ -30,6 +30,7 @@ enum {
87255 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87256 ADDR_NO_RANDOMIZE | \
87257 ADDR_COMPAT_LAYOUT | \
87258+ ADDR_LIMIT_3GB | \
87259 MMAP_PAGE_ZERO)
87260
87261 /*
87262diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
87263index 7530e74..e714828 100644
87264--- a/include/uapi/linux/screen_info.h
87265+++ b/include/uapi/linux/screen_info.h
87266@@ -43,7 +43,8 @@ struct screen_info {
87267 __u16 pages; /* 0x32 */
87268 __u16 vesa_attributes; /* 0x34 */
87269 __u32 capabilities; /* 0x36 */
87270- __u8 _reserved[6]; /* 0x3a */
87271+ __u16 vesapm_size; /* 0x3a */
87272+ __u8 _reserved[4]; /* 0x3c */
87273 } __attribute__((packed));
87274
87275 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
87276diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
87277index 0e011eb..82681b1 100644
87278--- a/include/uapi/linux/swab.h
87279+++ b/include/uapi/linux/swab.h
87280@@ -43,7 +43,7 @@
87281 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
87282 */
87283
87284-static inline __attribute_const__ __u16 __fswab16(__u16 val)
87285+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
87286 {
87287 #ifdef __HAVE_BUILTIN_BSWAP16__
87288 return __builtin_bswap16(val);
87289@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
87290 #endif
87291 }
87292
87293-static inline __attribute_const__ __u32 __fswab32(__u32 val)
87294+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
87295 {
87296 #ifdef __HAVE_BUILTIN_BSWAP32__
87297 return __builtin_bswap32(val);
87298@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
87299 #endif
87300 }
87301
87302-static inline __attribute_const__ __u64 __fswab64(__u64 val)
87303+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
87304 {
87305 #ifdef __HAVE_BUILTIN_BSWAP64__
87306 return __builtin_bswap64(val);
87307diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
87308index 1590c49..5eab462 100644
87309--- a/include/uapi/linux/xattr.h
87310+++ b/include/uapi/linux/xattr.h
87311@@ -73,5 +73,9 @@
87312 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
87313 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
87314
87315+/* User namespace */
87316+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
87317+#define XATTR_PAX_FLAGS_SUFFIX "flags"
87318+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
87319
87320 #endif /* _UAPI_LINUX_XATTR_H */
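The concatenation above yields the attribute name user.pax.flags, a third per-binary marking channel alongside e_flags and PT_PAX_FLAGS, one that survives tools which rewrite or strip program headers. A userspace sketch of writing it (the single-letter value encoding follows paxctl conventions, lowercase meaning disable, which is an assumption about the parser rather than something shown in this hunk):

#include <sys/xattr.h>

/* mark a binary so the loader disables MPROTECT for it */
int pax_disable_mprotect(const char *path)
{
	return setxattr(path, "user.pax.flags", "m", 1, 0);
}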
87321diff --git a/include/video/udlfb.h b/include/video/udlfb.h
87322index f9466fa..f4e2b81 100644
87323--- a/include/video/udlfb.h
87324+++ b/include/video/udlfb.h
87325@@ -53,10 +53,10 @@ struct dlfb_data {
87326 u32 pseudo_palette[256];
87327 int blank_mode; /*one of FB_BLANK_ */
87328 /* blit-only rendering path metrics, exposed through sysfs */
87329- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87330- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
87331- atomic_t bytes_sent; /* to usb, after compression including overhead */
87332- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
87333+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87334+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
87335+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
87336+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
87337 };
87338
87339 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
87340diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
87341index 30f5362..8ed8ac9 100644
87342--- a/include/video/uvesafb.h
87343+++ b/include/video/uvesafb.h
87344@@ -122,6 +122,7 @@ struct uvesafb_par {
87345 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
87346 u8 pmi_setpal; /* PMI for palette changes */
87347 u16 *pmi_base; /* protected mode interface location */
87348+ u8 *pmi_code; /* protected mode code location */
87349 void *pmi_start;
87350 void *pmi_pal;
87351 u8 *vbe_state_orig; /*
87352diff --git a/init/Kconfig b/init/Kconfig
87353index 9afb971..27d6fca 100644
87354--- a/init/Kconfig
87355+++ b/init/Kconfig
87356@@ -1129,6 +1129,7 @@ endif # CGROUPS
87357
87358 config CHECKPOINT_RESTORE
87359 bool "Checkpoint/restore support" if EXPERT
87360+ depends on !GRKERNSEC
87361 default n
87362 help
87363 Enables additional kernel features in a sake of checkpoint/restore.
87364@@ -1654,7 +1655,7 @@ config SLUB_DEBUG
87365
87366 config COMPAT_BRK
87367 bool "Disable heap randomization"
87368- default y
87369+ default n
87370 help
87371 Randomizing heap placement makes heap exploits harder, but it
87372 also breaks ancient binaries (including anything libc5 based).
87373@@ -1985,7 +1986,7 @@ config INIT_ALL_POSSIBLE
87374 config STOP_MACHINE
87375 bool
87376 default y
87377- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
87378+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
87379 help
87380 Need stop_machine() primitive.
87381
87382diff --git a/init/Makefile b/init/Makefile
87383index 7bc47ee..6da2dc7 100644
87384--- a/init/Makefile
87385+++ b/init/Makefile
87386@@ -2,6 +2,9 @@
87387 # Makefile for the linux kernel.
87388 #
87389
87390+ccflags-y := $(GCC_PLUGINS_CFLAGS)
87391+asflags-y := $(GCC_PLUGINS_AFLAGS)
87392+
87393 obj-y := main.o version.o mounts.o
87394 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
87395 obj-y += noinitramfs.o
87396diff --git a/init/do_mounts.c b/init/do_mounts.c
87397index eb41008..f5dbbf9 100644
87398--- a/init/do_mounts.c
87399+++ b/init/do_mounts.c
87400@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
87401 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
87402 {
87403 struct super_block *s;
87404- int err = sys_mount(name, "/root", fs, flags, data);
87405+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
87406 if (err)
87407 return err;
87408
87409- sys_chdir("/root");
87410+ sys_chdir((const char __force_user *)"/root");
87411 s = current->fs->pwd.dentry->d_sb;
87412 ROOT_DEV = s->s_dev;
87413 printk(KERN_INFO
87414@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
87415 va_start(args, fmt);
87416 vsprintf(buf, fmt, args);
87417 va_end(args);
87418- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
87419+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
87420 if (fd >= 0) {
87421 sys_ioctl(fd, FDEJECT, 0);
87422 sys_close(fd);
87423 }
87424 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
87425- fd = sys_open("/dev/console", O_RDWR, 0);
87426+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
87427 if (fd >= 0) {
87428 sys_ioctl(fd, TCGETS, (long)&termios);
87429 termios.c_lflag &= ~ICANON;
87430 sys_ioctl(fd, TCSETSF, (long)&termios);
87431- sys_read(fd, &c, 1);
87432+ sys_read(fd, (char __user *)&c, 1);
87433 termios.c_lflag |= ICANON;
87434 sys_ioctl(fd, TCSETSF, (long)&termios);
87435 sys_close(fd);
87436@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
87437 mount_root();
87438 out:
87439 devtmpfs_mount("dev");
87440- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87441- sys_chroot(".");
87442+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87443+ sys_chroot((const char __force_user *)".");
87444 }
87445
87446 static bool is_tmpfs;
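The wall of casts in do_mounts.c exists because, with the checker plugin and UDEREF, __user pointers live in a distinct address space and syscalls like sys_mount() refuse plain kernel strings; __force_user acknowledges the deliberate crossing, which is safe in this early-boot context where the kernel invokes its own syscalls. A sketch of the annotation machinery, modeled on the existing sparse __force/__user pair (the exact definitions are an assumption):

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
# define __force_user	__force __user
#else
# define __user
# define __force
# define __force_user
#endif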
87447diff --git a/init/do_mounts.h b/init/do_mounts.h
87448index f5b978a..69dbfe8 100644
87449--- a/init/do_mounts.h
87450+++ b/init/do_mounts.h
87451@@ -15,15 +15,15 @@ extern int root_mountflags;
87452
87453 static inline int create_dev(char *name, dev_t dev)
87454 {
87455- sys_unlink(name);
87456- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
87457+ sys_unlink((char __force_user *)name);
87458+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
87459 }
87460
87461 #if BITS_PER_LONG == 32
87462 static inline u32 bstat(char *name)
87463 {
87464 struct stat64 stat;
87465- if (sys_stat64(name, &stat) != 0)
87466+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
87467 return 0;
87468 if (!S_ISBLK(stat.st_mode))
87469 return 0;
87470@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
87471 static inline u32 bstat(char *name)
87472 {
87473 struct stat stat;
87474- if (sys_newstat(name, &stat) != 0)
87475+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
87476 return 0;
87477 if (!S_ISBLK(stat.st_mode))
87478 return 0;
87479diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
87480index 3e0878e..8a9d7a0 100644
87481--- a/init/do_mounts_initrd.c
87482+++ b/init/do_mounts_initrd.c
87483@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
87484 {
87485 sys_unshare(CLONE_FS | CLONE_FILES);
87486 /* stdin/stdout/stderr for /linuxrc */
87487- sys_open("/dev/console", O_RDWR, 0);
87488+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
87489 sys_dup(0);
87490 sys_dup(0);
87491 /* move initrd over / and chdir/chroot in initrd root */
87492- sys_chdir("/root");
87493- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87494- sys_chroot(".");
87495+ sys_chdir((const char __force_user *)"/root");
87496+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87497+ sys_chroot((const char __force_user *)".");
87498 sys_setsid();
87499 return 0;
87500 }
87501@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
87502 create_dev("/dev/root.old", Root_RAM0);
87503 /* mount initrd on rootfs' /root */
87504 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
87505- sys_mkdir("/old", 0700);
87506- sys_chdir("/old");
87507+ sys_mkdir((const char __force_user *)"/old", 0700);
87508+ sys_chdir((const char __force_user *)"/old");
87509
87510 /* try loading default modules from initrd */
87511 load_default_modules();
87512@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
87513 current->flags &= ~PF_FREEZER_SKIP;
87514
87515 /* move initrd to rootfs' /old */
87516- sys_mount("..", ".", NULL, MS_MOVE, NULL);
87517+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
87518 /* switch root and cwd back to / of rootfs */
87519- sys_chroot("..");
87520+ sys_chroot((const char __force_user *)"..");
87521
87522 if (new_decode_dev(real_root_dev) == Root_RAM0) {
87523- sys_chdir("/old");
87524+ sys_chdir((const char __force_user *)"/old");
87525 return;
87526 }
87527
87528- sys_chdir("/");
87529+ sys_chdir((const char __force_user *)"/");
87530 ROOT_DEV = new_decode_dev(real_root_dev);
87531 mount_root();
87532
87533 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
87534- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
87535+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
87536 if (!error)
87537 printk("okay\n");
87538 else {
87539- int fd = sys_open("/dev/root.old", O_RDWR, 0);
87540+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
87541 if (error == -ENOENT)
87542 printk("/initrd does not exist. Ignored.\n");
87543 else
87544 printk("failed\n");
87545 printk(KERN_NOTICE "Unmounting old root\n");
87546- sys_umount("/old", MNT_DETACH);
87547+ sys_umount((char __force_user *)"/old", MNT_DETACH);
87548 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
87549 if (fd < 0) {
87550 error = fd;
87551@@ -127,11 +127,11 @@ int __init initrd_load(void)
87552 * mounted in the normal path.
87553 */
87554 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
87555- sys_unlink("/initrd.image");
87556+ sys_unlink((const char __force_user *)"/initrd.image");
87557 handle_initrd();
87558 return 1;
87559 }
87560 }
87561- sys_unlink("/initrd.image");
87562+ sys_unlink((const char __force_user *)"/initrd.image");
87563 return 0;
87564 }
87565diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
87566index 8cb6db5..d729f50 100644
87567--- a/init/do_mounts_md.c
87568+++ b/init/do_mounts_md.c
87569@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
87570 partitioned ? "_d" : "", minor,
87571 md_setup_args[ent].device_names);
87572
87573- fd = sys_open(name, 0, 0);
87574+ fd = sys_open((char __force_user *)name, 0, 0);
87575 if (fd < 0) {
87576 printk(KERN_ERR "md: open failed - cannot start "
87577 "array %s\n", name);
87578@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
87579 * array without it
87580 */
87581 sys_close(fd);
87582- fd = sys_open(name, 0, 0);
87583+ fd = sys_open((char __force_user *)name, 0, 0);
87584 sys_ioctl(fd, BLKRRPART, 0);
87585 }
87586 sys_close(fd);
87587@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
87588
87589 wait_for_device_probe();
87590
87591- fd = sys_open("/dev/md0", 0, 0);
87592+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
87593 if (fd >= 0) {
87594 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
87595 sys_close(fd);
87596diff --git a/init/init_task.c b/init/init_task.c
87597index ba0a7f36..2bcf1d5 100644
87598--- a/init/init_task.c
87599+++ b/init/init_task.c
87600@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
87601 * Initial thread structure. Alignment of this is handled by a special
87602 * linker map entry.
87603 */
87604+#ifdef CONFIG_X86
87605+union thread_union init_thread_union __init_task_data;
87606+#else
87607 union thread_union init_thread_union __init_task_data =
87608 { INIT_THREAD_INFO(init_task) };
87609+#endif
87610diff --git a/init/initramfs.c b/init/initramfs.c
87611index ad1bd77..dca2c1b 100644
87612--- a/init/initramfs.c
87613+++ b/init/initramfs.c
87614@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
87615
87616 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
87617 while (count) {
87618- ssize_t rv = sys_write(fd, p, count);
87619+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
87620
87621 if (rv < 0) {
87622 if (rv == -EINTR || rv == -EAGAIN)
87623@@ -107,7 +107,7 @@ static void __init free_hash(void)
87624 }
87625 }
87626
87627-static long __init do_utime(char *filename, time_t mtime)
87628+static long __init do_utime(char __force_user *filename, time_t mtime)
87629 {
87630 struct timespec t[2];
87631
87632@@ -142,7 +142,7 @@ static void __init dir_utime(void)
87633 struct dir_entry *de, *tmp;
87634 list_for_each_entry_safe(de, tmp, &dir_list, list) {
87635 list_del(&de->list);
87636- do_utime(de->name, de->mtime);
87637+ do_utime((char __force_user *)de->name, de->mtime);
87638 kfree(de->name);
87639 kfree(de);
87640 }
87641@@ -304,7 +304,7 @@ static int __init maybe_link(void)
87642 if (nlink >= 2) {
87643 char *old = find_link(major, minor, ino, mode, collected);
87644 if (old)
87645- return (sys_link(old, collected) < 0) ? -1 : 1;
87646+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
87647 }
87648 return 0;
87649 }
87650@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
87651 {
87652 struct stat st;
87653
87654- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
87655+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
87656 if (S_ISDIR(st.st_mode))
87657- sys_rmdir(path);
87658+ sys_rmdir((char __force_user *)path);
87659 else
87660- sys_unlink(path);
87661+ sys_unlink((char __force_user *)path);
87662 }
87663 }
87664
87665@@ -338,7 +338,7 @@ static int __init do_name(void)
87666 int openflags = O_WRONLY|O_CREAT;
87667 if (ml != 1)
87668 openflags |= O_TRUNC;
87669- wfd = sys_open(collected, openflags, mode);
87670+ wfd = sys_open((char __force_user *)collected, openflags, mode);
87671
87672 if (wfd >= 0) {
87673 sys_fchown(wfd, uid, gid);
87674@@ -350,17 +350,17 @@ static int __init do_name(void)
87675 }
87676 }
87677 } else if (S_ISDIR(mode)) {
87678- sys_mkdir(collected, mode);
87679- sys_chown(collected, uid, gid);
87680- sys_chmod(collected, mode);
87681+ sys_mkdir((char __force_user *)collected, mode);
87682+ sys_chown((char __force_user *)collected, uid, gid);
87683+ sys_chmod((char __force_user *)collected, mode);
87684 dir_add(collected, mtime);
87685 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
87686 S_ISFIFO(mode) || S_ISSOCK(mode)) {
87687 if (maybe_link() == 0) {
87688- sys_mknod(collected, mode, rdev);
87689- sys_chown(collected, uid, gid);
87690- sys_chmod(collected, mode);
87691- do_utime(collected, mtime);
87692+ sys_mknod((char __force_user *)collected, mode, rdev);
87693+ sys_chown((char __force_user *)collected, uid, gid);
87694+ sys_chmod((char __force_user *)collected, mode);
87695+ do_utime((char __force_user *)collected, mtime);
87696 }
87697 }
87698 return 0;
87699@@ -372,7 +372,7 @@ static int __init do_copy(void)
87700 if (xwrite(wfd, victim, body_len) != body_len)
87701 error("write error");
87702 sys_close(wfd);
87703- do_utime(vcollected, mtime);
87704+ do_utime((char __force_user *)vcollected, mtime);
87705 kfree(vcollected);
87706 eat(body_len);
87707 state = SkipIt;
87708@@ -390,9 +390,9 @@ static int __init do_symlink(void)
87709 {
87710 collected[N_ALIGN(name_len) + body_len] = '\0';
87711 clean_path(collected, 0);
87712- sys_symlink(collected + N_ALIGN(name_len), collected);
87713- sys_lchown(collected, uid, gid);
87714- do_utime(collected, mtime);
87715+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
87716+ sys_lchown((char __force_user *)collected, uid, gid);
87717+ do_utime((char __force_user *)collected, mtime);
87718 state = SkipIt;
87719 next_state = Reset;
87720 return 0;
87721diff --git a/init/main.c b/init/main.c
87722index 61b99376..85893612d 100644
87723--- a/init/main.c
87724+++ b/init/main.c
87725@@ -100,6 +100,8 @@ extern void radix_tree_init(void);
87726 static inline void mark_rodata_ro(void) { }
87727 #endif
87728
87729+extern void grsecurity_init(void);
87730+
87731 /*
87732 * Debug helper: via this flag we know that we are in 'early bootup code'
87733 * where only the boot processor is running with IRQ disabled. This means
87734@@ -161,6 +163,75 @@ static int __init set_reset_devices(char *str)
87735
87736 __setup("reset_devices", set_reset_devices);
87737
87738+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
87739+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
87740+static int __init setup_grsec_proc_gid(char *str)
87741+{
87742+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
87743+ return 1;
87744+}
87745+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
87746+#endif
87747+
87748+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
87749+unsigned long pax_user_shadow_base __read_only;
87750+EXPORT_SYMBOL(pax_user_shadow_base);
87751+extern char pax_enter_kernel_user[];
87752+extern char pax_exit_kernel_user[];
87753+#endif
87754+
87755+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
87756+static int __init setup_pax_nouderef(char *str)
87757+{
87758+#ifdef CONFIG_X86_32
87759+ unsigned int cpu;
87760+ struct desc_struct *gdt;
87761+
87762+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
87763+ gdt = get_cpu_gdt_table(cpu);
87764+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
87765+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
87766+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
87767+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
87768+ }
87769+ loadsegment(ds, __KERNEL_DS);
87770+ loadsegment(es, __KERNEL_DS);
87771+ loadsegment(ss, __KERNEL_DS);
87772+#else
87773+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
87774+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
87775+ clone_pgd_mask = ~(pgdval_t)0UL;
87776+ pax_user_shadow_base = 0UL;
87777+ setup_clear_cpu_cap(X86_FEATURE_PCID);
87778+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
87779+#endif
87780+
87781+ return 0;
87782+}
87783+early_param("pax_nouderef", setup_pax_nouderef);
87784+
87785+#ifdef CONFIG_X86_64
87786+static int __init setup_pax_weakuderef(char *str)
87787+{
87788+ if (clone_pgd_mask != ~(pgdval_t)0UL)
87789+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
87790+ return 1;
87791+}
87792+__setup("pax_weakuderef", setup_pax_weakuderef);
87793+#endif
87794+#endif
87795+
87796+#ifdef CONFIG_PAX_SOFTMODE
87797+int pax_softmode;
87798+
87799+static int __init setup_pax_softmode(char *str)
87800+{
87801+ get_option(&str, &pax_softmode);
87802+ return 1;
87803+}
87804+__setup("pax_softmode=", setup_pax_softmode);
87805+#endif
87806+
87807 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
87808 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
87809 static const char *panic_later, *panic_param;
87810@@ -735,7 +806,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
87811 struct blacklist_entry *entry;
87812 char *fn_name;
87813
87814- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
87815+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
87816 if (!fn_name)
87817 return false;
87818
87819@@ -787,7 +858,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
87820 {
87821 int count = preempt_count();
87822 int ret;
87823- char msgbuf[64];
87824+ const char *msg1 = "", *msg2 = "";
87825
87826 if (initcall_blacklisted(fn))
87827 return -EPERM;
87828@@ -797,18 +868,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
87829 else
87830 ret = fn();
87831
87832- msgbuf[0] = 0;
87833-
87834 if (preempt_count() != count) {
87835- sprintf(msgbuf, "preemption imbalance ");
87836+ msg1 = " preemption imbalance";
87837 preempt_count_set(count);
87838 }
87839 if (irqs_disabled()) {
87840- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
87841+ msg2 = " disabled interrupts";
87842 local_irq_enable();
87843 }
87844- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
87845+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
87846
87847+ add_latent_entropy();
87848 return ret;
87849 }
87850
87851@@ -914,8 +984,8 @@ static int run_init_process(const char *init_filename)
87852 {
87853 argv_init[0] = init_filename;
87854 return do_execve(getname_kernel(init_filename),
87855- (const char __user *const __user *)argv_init,
87856- (const char __user *const __user *)envp_init);
87857+ (const char __user *const __force_user *)argv_init,
87858+ (const char __user *const __force_user *)envp_init);
87859 }
87860
87861 static int try_to_run_init_process(const char *init_filename)
87862@@ -932,6 +1002,10 @@ static int try_to_run_init_process(const char *init_filename)
87863 return ret;
87864 }
87865
87866+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87867+extern int gr_init_ran;
87868+#endif
87869+
87870 static noinline void __init kernel_init_freeable(void);
87871
87872 static int __ref kernel_init(void *unused)
87873@@ -956,6 +1030,11 @@ static int __ref kernel_init(void *unused)
87874 ramdisk_execute_command, ret);
87875 }
87876
87877+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87878+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
87879+ gr_init_ran = 1;
87880+#endif
87881+
87882 /*
87883 * We try each of these until one succeeds.
87884 *
87885@@ -1016,7 +1095,7 @@ static noinline void __init kernel_init_freeable(void)
87886 do_basic_setup();
87887
87888 /* Open the /dev/console on the rootfs, this should never fail */
87889- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
87890+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
87891 pr_err("Warning: unable to open an initial console.\n");
87892
87893 (void) sys_dup(0);
87894@@ -1029,11 +1108,13 @@ static noinline void __init kernel_init_freeable(void)
87895 if (!ramdisk_execute_command)
87896 ramdisk_execute_command = "/init";
87897
87898- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
87899+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
87900 ramdisk_execute_command = NULL;
87901 prepare_namespace();
87902 }
87903
87904+ grsecurity_init();
87905+
87906 /*
87907 * Ok, we have completed the initial bootup, and
87908 * we're essentially up and running. Get rid of the
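The PaX knobs added to init/main.c in the hunks above use the two standard command-line hooks: early_param() handlers run during early setup, which is why pax_nouderef can still rewrite each CPU's GDT before userspace exists, while __setup() handlers run later from the boot-option parser and return 1 to mark the option consumed. The registration shape, with a hypothetical knob name:

static int pax_example_mode __read_mostly;

/* parses "pax_example=<int>" from the kernel command line */
static int __init setup_pax_example(char *str)
{
	get_option(&str, &pax_example_mode);
	return 1;	/* handled, do not pass on to init */
}
__setup("pax_example=", setup_pax_example);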
87909diff --git a/ipc/compat.c b/ipc/compat.c
87910index 9b3c85f..1c4d897 100644
87911--- a/ipc/compat.c
87912+++ b/ipc/compat.c
87913@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
87914 COMPAT_SHMLBA);
87915 if (err < 0)
87916 return err;
87917- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
87918+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
87919 }
87920 case SHMDT:
87921 return sys_shmdt(compat_ptr(ptr));
87922diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
87923index 8ad93c2..efd80f8 100644
87924--- a/ipc/ipc_sysctl.c
87925+++ b/ipc/ipc_sysctl.c
87926@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
87927 static int proc_ipc_dointvec(struct ctl_table *table, int write,
87928 void __user *buffer, size_t *lenp, loff_t *ppos)
87929 {
87930- struct ctl_table ipc_table;
87931+ ctl_table_no_const ipc_table;
87932
87933 memcpy(&ipc_table, table, sizeof(ipc_table));
87934 ipc_table.data = get_ipc(table);
87935@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
87936 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
87937 void __user *buffer, size_t *lenp, loff_t *ppos)
87938 {
87939- struct ctl_table ipc_table;
87940+ ctl_table_no_const ipc_table;
87941
87942 memcpy(&ipc_table, table, sizeof(ipc_table));
87943 ipc_table.data = get_ipc(table);
87944@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
87945 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
87946 void __user *buffer, size_t *lenp, loff_t *ppos)
87947 {
87948- struct ctl_table ipc_table;
87949+ ctl_table_no_const ipc_table;
87950 memcpy(&ipc_table, table, sizeof(ipc_table));
87951 ipc_table.data = get_ipc(table);
87952
87953@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
87954 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
87955 void __user *buffer, size_t *lenp, loff_t *ppos)
87956 {
87957- struct ctl_table ipc_table;
87958+ ctl_table_no_const ipc_table;
87959 int dummy = 0;
87960
87961 memcpy(&ipc_table, table, sizeof(ipc_table));
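These sysctl handlers clone the (now read-only, via constification) ctl_table onto the stack so they can repoint .data at per-namespace storage; since the clone must stay writable, the patch swaps its type for ctl_table_no_const. The pattern, with the typedef definition being an assumption about how it is declared elsewhere in the patch:

typedef struct ctl_table __no_const ctl_table_no_const;

static int proc_example_dointvec(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	ctl_table_no_const tmp = *table;	/* writable stack clone */

	tmp.data = get_ipc(table);		/* redirect to per-ns value */
	return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}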
87962diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
87963index 68d4e95..1477ded 100644
87964--- a/ipc/mq_sysctl.c
87965+++ b/ipc/mq_sysctl.c
87966@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
87967 static int proc_mq_dointvec(struct ctl_table *table, int write,
87968 void __user *buffer, size_t *lenp, loff_t *ppos)
87969 {
87970- struct ctl_table mq_table;
87971+ ctl_table_no_const mq_table;
87972 memcpy(&mq_table, table, sizeof(mq_table));
87973 mq_table.data = get_mq(table);
87974
87975@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
87976 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
87977 void __user *buffer, size_t *lenp, loff_t *ppos)
87978 {
87979- struct ctl_table mq_table;
87980+ ctl_table_no_const mq_table;
87981 memcpy(&mq_table, table, sizeof(mq_table));
87982 mq_table.data = get_mq(table);
87983
87984diff --git a/ipc/mqueue.c b/ipc/mqueue.c
87985index 7635a1c..7432cb6 100644
87986--- a/ipc/mqueue.c
87987+++ b/ipc/mqueue.c
87988@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
87989 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
87990 info->attr.mq_msgsize);
87991
87992+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
87993 spin_lock(&mq_lock);
87994 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
87995 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
87996diff --git a/ipc/shm.c b/ipc/shm.c
87997index 19633b4..d454904 100644
87998--- a/ipc/shm.c
87999+++ b/ipc/shm.c
88000@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
88001 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
88002 #endif
88003
88004+#ifdef CONFIG_GRKERNSEC
88005+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88006+ const u64 shm_createtime, const kuid_t cuid,
88007+ const int shmid);
88008+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88009+ const u64 shm_createtime);
88010+#endif
88011+
88012 void shm_init_ns(struct ipc_namespace *ns)
88013 {
88014 ns->shm_ctlmax = SHMMAX;
88015@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
88016 shp->shm_lprid = 0;
88017 shp->shm_atim = shp->shm_dtim = 0;
88018 shp->shm_ctim = get_seconds();
88019+#ifdef CONFIG_GRKERNSEC
88020+ shp->shm_createtime = ktime_get_ns();
88021+#endif
88022 shp->shm_segsz = size;
88023 shp->shm_nattch = 0;
88024 shp->shm_file = file;
88025@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88026 f_mode = FMODE_READ | FMODE_WRITE;
88027 }
88028 if (shmflg & SHM_EXEC) {
88029+
88030+#ifdef CONFIG_PAX_MPROTECT
88031+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
88032+ goto out;
88033+#endif
88034+
88035 prot |= PROT_EXEC;
88036 acc_mode |= S_IXUGO;
88037 }
88038@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88039 if (err)
88040 goto out_unlock;
88041
88042+#ifdef CONFIG_GRKERNSEC
88043+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
88044+ shp->shm_perm.cuid, shmid) ||
88045+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
88046+ err = -EACCES;
88047+ goto out_unlock;
88048+ }
88049+#endif
88050+
88051 ipc_lock_object(&shp->shm_perm);
88052
88053 /* check if shm_destroy() is tearing down shp */
88054@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88055 path = shp->shm_file->f_path;
88056 path_get(&path);
88057 shp->shm_nattch++;
88058+#ifdef CONFIG_GRKERNSEC
88059+ shp->shm_lapid = current->pid;
88060+#endif
88061 size = i_size_read(path.dentry->d_inode);
88062 ipc_unlock_object(&shp->shm_perm);
88063 rcu_read_unlock();
88064diff --git a/ipc/util.c b/ipc/util.c
88065index 106bed0..f851429 100644
88066--- a/ipc/util.c
88067+++ b/ipc/util.c
88068@@ -71,6 +71,8 @@ struct ipc_proc_iface {
88069 int (*show)(struct seq_file *, void *);
88070 };
88071
88072+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
88073+
88074 /**
88075 * ipc_init - initialise ipc subsystem
88076 *
88077@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
88078 granted_mode >>= 6;
88079 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
88080 granted_mode >>= 3;
88081+
88082+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
88083+ return -1;
88084+
88085 /* is there some bit set in requested_mode but not in granted_mode? */
88086 if ((requested_mode & ~granted_mode & 0007) &&
88087 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
88088diff --git a/kernel/audit.c b/kernel/audit.c
88089index 72ab759..757deba 100644
88090--- a/kernel/audit.c
88091+++ b/kernel/audit.c
88092@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
88093 3) suppressed due to audit_rate_limit
88094 4) suppressed due to audit_backlog_limit
88095 */
88096-static atomic_t audit_lost = ATOMIC_INIT(0);
88097+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
88098
88099 /* The netlink socket. */
88100 static struct sock *audit_sock;
88101@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
88102 unsigned long now;
88103 int print;
88104
88105- atomic_inc(&audit_lost);
88106+ atomic_inc_unchecked(&audit_lost);
88107
88108 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
88109
88110@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
88111 if (print) {
88112 if (printk_ratelimit())
88113 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
88114- atomic_read(&audit_lost),
88115+ atomic_read_unchecked(&audit_lost),
88116 audit_rate_limit,
88117 audit_backlog_limit);
88118 audit_panic(message);
88119@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
88120 s.pid = audit_pid;
88121 s.rate_limit = audit_rate_limit;
88122 s.backlog_limit = audit_backlog_limit;
88123- s.lost = atomic_read(&audit_lost);
88124+ s.lost = atomic_read_unchecked(&audit_lost);
88125 s.backlog = skb_queue_len(&audit_skb_queue);
88126 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
88127 s.backlog_wait_time = audit_backlog_wait_time;
88128diff --git a/kernel/auditsc.c b/kernel/auditsc.c
88129index 072566d..1190489 100644
88130--- a/kernel/auditsc.c
88131+++ b/kernel/auditsc.c
88132@@ -2056,7 +2056,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
88133 }
88134
88135 /* global counter which is incremented every time something logs in */
88136-static atomic_t session_id = ATOMIC_INIT(0);
88137+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
88138
88139 static int audit_set_loginuid_perm(kuid_t loginuid)
88140 {
88141@@ -2123,7 +2123,7 @@ int audit_set_loginuid(kuid_t loginuid)
88142
88143 /* are we setting or clearing? */
88144 if (uid_valid(loginuid))
88145- sessionid = (unsigned int)atomic_inc_return(&session_id);
88146+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
88147
88148 task->sessionid = sessionid;
88149 task->loginuid = loginuid;
88150diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
88151index a64e7a2..2e69448 100644
88152--- a/kernel/bpf/core.c
88153+++ b/kernel/bpf/core.c
88154@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88155 * random section of illegal instructions.
88156 */
88157 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
88158- hdr = module_alloc(size);
88159+ hdr = module_alloc_exec(size);
88160 if (hdr == NULL)
88161 return NULL;
88162
88163 /* Fill space with illegal/arch-dep instructions. */
88164 bpf_fill_ill_insns(hdr, size);
88165
88166+ pax_open_kernel();
88167 hdr->pages = size / PAGE_SIZE;
88168+ pax_close_kernel();
88169+
88170 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
88171 PAGE_SIZE - sizeof(*hdr));
88172 start = (prandom_u32() % hole) & ~(alignment - 1);
88173@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88174
88175 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
88176 {
88177- module_memfree(hdr);
88178+ module_memfree_exec(hdr);
88179 }
88180 #endif /* CONFIG_BPF_JIT */
88181
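module_alloc_exec() hands back memory that is read-only under KERNEXEC, so the single store to hdr->pages above is bracketed with pax_open_kernel()/pax_close_kernel(), which briefly lift write protection for the current CPU (on x86 by clearing CR0.WP). A simplified sketch of that mechanism; the real helpers also handle preemption and per-arch details beyond what is shown:

static inline unsigned long pax_open_kernel_sketch(void)
{
	unsigned long cr0;

	preempt_disable();
	cr0 = read_cr0();
	write_cr0(cr0 & ~X86_CR0_WP);	/* writes to RO pages now allowed */
	barrier();
	return cr0;
}

static inline void pax_close_kernel_sketch(unsigned long cr0)
{
	barrier();
	write_cr0(cr0 | X86_CR0_WP);	/* restore write protection */
	preempt_enable();
}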
88182diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
88183index 536edc2..d28c85d 100644
88184--- a/kernel/bpf/syscall.c
88185+++ b/kernel/bpf/syscall.c
88186@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
88187 int err;
88188
88189 /* the syscall is limited to root temporarily. This restriction will be
88190- * lifted when security audit is clean. Note that eBPF+tracing must have
88191- * this restriction, since it may pass kernel data to user space
88192+ * lifted by upstream when a half-assed security audit is clean. Note
88193+ * that eBPF+tracing must have this restriction, since it may pass
88194+ * kernel data to user space
88195 */
88196 if (!capable(CAP_SYS_ADMIN))
88197 return -EPERM;
88198+#ifdef CONFIG_GRKERNSEC
88199+ return -EPERM;
88200+#endif
88201
88202 if (!access_ok(VERIFY_READ, uattr, 1))
88203 return -EFAULT;
88204diff --git a/kernel/capability.c b/kernel/capability.c
88205index 989f5bf..d317ca0 100644
88206--- a/kernel/capability.c
88207+++ b/kernel/capability.c
88208@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
88209 * before modification is attempted and the application
88210 * fails.
88211 */
88212+ if (tocopy > ARRAY_SIZE(kdata))
88213+ return -EFAULT;
88214+
88215 if (copy_to_user(dataptr, kdata, tocopy
88216 * sizeof(struct __user_cap_data_struct))) {
88217 return -EFAULT;
88218@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
88219 int ret;
88220
88221 rcu_read_lock();
88222- ret = security_capable(__task_cred(t), ns, cap);
88223+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
88224+ gr_task_is_capable(t, __task_cred(t), cap);
88225 rcu_read_unlock();
88226
88227- return (ret == 0);
88228+ return ret;
88229 }
88230
88231 /**
88232@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
88233 int ret;
88234
88235 rcu_read_lock();
88236- ret = security_capable_noaudit(__task_cred(t), ns, cap);
88237+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
88238 rcu_read_unlock();
88239
88240- return (ret == 0);
88241+ return ret;
88242 }
88243
88244 /**
88245@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
88246 BUG();
88247 }
88248
88249- if (security_capable(current_cred(), ns, cap) == 0) {
88250+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
88251 current->flags |= PF_SUPERPRIV;
88252 return true;
88253 }
88254@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
88255 }
88256 EXPORT_SYMBOL(ns_capable);
88257
88258+bool ns_capable_nolog(struct user_namespace *ns, int cap)
88259+{
88260+ if (unlikely(!cap_valid(cap))) {
88261+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
88262+ BUG();
88263+ }
88264+
88265+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
88266+ current->flags |= PF_SUPERPRIV;
88267+ return true;
88268+ }
88269+ return false;
88270+}
88271+EXPORT_SYMBOL(ns_capable_nolog);
88272+
88273 /**
88274 * file_ns_capable - Determine if the file's opener had a capability in effect
88275 * @file: The file we want to check
88276@@ -427,6 +446,12 @@ bool capable(int cap)
88277 }
88278 EXPORT_SYMBOL(capable);
88279
88280+bool capable_nolog(int cap)
88281+{
88282+ return ns_capable_nolog(&init_user_ns, cap);
88283+}
88284+EXPORT_SYMBOL(capable_nolog);
88285+
88286 /**
88287 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
88288 * @inode: The inode in question
88289@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
88290 kgid_has_mapping(ns, inode->i_gid);
88291 }
88292 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
88293+
88294+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
88295+{
88296+ struct user_namespace *ns = current_user_ns();
88297+
88298+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
88299+ kgid_has_mapping(ns, inode->i_gid);
88300+}
88301+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
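
The recurring pattern in the capability.c hunks is worth spelling out: security_capable() is an error-code API that returns 0 on success, while the gr_* hooks are booleans, so the patch rewrites `ret == 0` into a conjunction evaluated under the same RCU read lock — both layers must agree before the capability is granted. A minimal sketch of the combined-predicate shape, with stand-in stubs for both checks (the stubs are placeholders, not the real LSM or RBAC hooks):

/* Hedged sketch of the combined LSM + RBAC check: a 0-on-success
 * error-code API joined with a boolean one. */
#include <stdbool.h>
#include <stdio.h>

static int security_capable_stub(int cap)  { (void)cap; return 0; } /* 0 == allowed */
static bool gr_is_capable_stub(int cap)    { return cap != 21; }    /* deny cap 21 */

static bool has_capability(int cap)
{
	/* both layers must agree before the capability is granted */
	return security_capable_stub(cap) == 0 && gr_is_capable_stub(cap);
}

int main(void)
{
	printf("cap 12: %d\n", has_capability(12));
	printf("cap 21: %d\n", has_capability(21));
	return 0;
}
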
88302diff --git a/kernel/cgroup.c b/kernel/cgroup.c
88303index 04cfe8a..adadcc0 100644
88304--- a/kernel/cgroup.c
88305+++ b/kernel/cgroup.c
88306@@ -5343,6 +5343,9 @@ static void cgroup_release_agent(struct work_struct *work)
88307 if (!pathbuf || !agentbuf)
88308 goto out;
88309
88310+ if (agentbuf[0] == '\0')
88311+ goto out;
88312+
88313 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
88314 if (!path)
88315 goto out;
88316@@ -5528,7 +5531,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
88317 struct task_struct *task;
88318 int count = 0;
88319
88320- seq_printf(seq, "css_set %p\n", cset);
88321+ seq_printf(seq, "css_set %pK\n", cset);
88322
88323 list_for_each_entry(task, &cset->tasks, cg_list) {
88324 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
88325diff --git a/kernel/compat.c b/kernel/compat.c
88326index ebb3c36..1df606e 100644
88327--- a/kernel/compat.c
88328+++ b/kernel/compat.c
88329@@ -13,6 +13,7 @@
88330
88331 #include <linux/linkage.h>
88332 #include <linux/compat.h>
88333+#include <linux/module.h>
88334 #include <linux/errno.h>
88335 #include <linux/time.h>
88336 #include <linux/signal.h>
88337@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
88338 mm_segment_t oldfs;
88339 long ret;
88340
88341- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
88342+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
88343 oldfs = get_fs();
88344 set_fs(KERNEL_DS);
88345 ret = hrtimer_nanosleep_restart(restart);
88346@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
88347 oldfs = get_fs();
88348 set_fs(KERNEL_DS);
88349 ret = hrtimer_nanosleep(&tu,
88350- rmtp ? (struct timespec __user *)&rmt : NULL,
88351+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
88352 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
88353 set_fs(oldfs);
88354
88355@@ -379,7 +380,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
88356 mm_segment_t old_fs = get_fs();
88357
88358 set_fs(KERNEL_DS);
88359- ret = sys_sigpending((old_sigset_t __user *) &s);
88360+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
88361 set_fs(old_fs);
88362 if (ret == 0)
88363 ret = put_user(s, set);
88364@@ -469,7 +470,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
88365 mm_segment_t old_fs = get_fs();
88366
88367 set_fs(KERNEL_DS);
88368- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
88369+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
88370 set_fs(old_fs);
88371
88372 if (!ret) {
88373@@ -551,8 +552,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
88374 set_fs (KERNEL_DS);
88375 ret = sys_wait4(pid,
88376 (stat_addr ?
88377- (unsigned int __user *) &status : NULL),
88378- options, (struct rusage __user *) &r);
88379+ (unsigned int __force_user *) &status : NULL),
88380+ options, (struct rusage __force_user *) &r);
88381 set_fs (old_fs);
88382
88383 if (ret > 0) {
88384@@ -578,8 +579,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
88385 memset(&info, 0, sizeof(info));
88386
88387 set_fs(KERNEL_DS);
88388- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
88389- uru ? (struct rusage __user *)&ru : NULL);
88390+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
88391+ uru ? (struct rusage __force_user *)&ru : NULL);
88392 set_fs(old_fs);
88393
88394 if ((ret < 0) || (info.si_signo == 0))
88395@@ -713,8 +714,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
88396 oldfs = get_fs();
88397 set_fs(KERNEL_DS);
88398 err = sys_timer_settime(timer_id, flags,
88399- (struct itimerspec __user *) &newts,
88400- (struct itimerspec __user *) &oldts);
88401+ (struct itimerspec __force_user *) &newts,
88402+ (struct itimerspec __force_user *) &oldts);
88403 set_fs(oldfs);
88404 if (!err && old && put_compat_itimerspec(old, &oldts))
88405 return -EFAULT;
88406@@ -731,7 +732,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
88407 oldfs = get_fs();
88408 set_fs(KERNEL_DS);
88409 err = sys_timer_gettime(timer_id,
88410- (struct itimerspec __user *) &ts);
88411+ (struct itimerspec __force_user *) &ts);
88412 set_fs(oldfs);
88413 if (!err && put_compat_itimerspec(setting, &ts))
88414 return -EFAULT;
88415@@ -750,7 +751,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
88416 oldfs = get_fs();
88417 set_fs(KERNEL_DS);
88418 err = sys_clock_settime(which_clock,
88419- (struct timespec __user *) &ts);
88420+ (struct timespec __force_user *) &ts);
88421 set_fs(oldfs);
88422 return err;
88423 }
88424@@ -765,7 +766,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
88425 oldfs = get_fs();
88426 set_fs(KERNEL_DS);
88427 err = sys_clock_gettime(which_clock,
88428- (struct timespec __user *) &ts);
88429+ (struct timespec __force_user *) &ts);
88430 set_fs(oldfs);
88431 if (!err && compat_put_timespec(&ts, tp))
88432 return -EFAULT;
88433@@ -785,7 +786,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
88434
88435 oldfs = get_fs();
88436 set_fs(KERNEL_DS);
88437- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
88438+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
88439 set_fs(oldfs);
88440
88441 err = compat_put_timex(utp, &txc);
88442@@ -805,7 +806,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
88443 oldfs = get_fs();
88444 set_fs(KERNEL_DS);
88445 err = sys_clock_getres(which_clock,
88446- (struct timespec __user *) &ts);
88447+ (struct timespec __force_user *) &ts);
88448 set_fs(oldfs);
88449 if (!err && tp && compat_put_timespec(&ts, tp))
88450 return -EFAULT;
88451@@ -819,7 +820,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
88452 struct timespec tu;
88453 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
88454
88455- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
88456+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
88457 oldfs = get_fs();
88458 set_fs(KERNEL_DS);
88459 err = clock_nanosleep_restart(restart);
88460@@ -851,8 +852,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
88461 oldfs = get_fs();
88462 set_fs(KERNEL_DS);
88463 err = sys_clock_nanosleep(which_clock, flags,
88464- (struct timespec __user *) &in,
88465- (struct timespec __user *) &out);
88466+ (struct timespec __force_user *) &in,
88467+ (struct timespec __force_user *) &out);
88468 set_fs(oldfs);
88469
88470 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
88471@@ -1146,7 +1147,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
88472 mm_segment_t old_fs = get_fs();
88473
88474 set_fs(KERNEL_DS);
88475- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
88476+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
88477 set_fs(old_fs);
88478 if (compat_put_timespec(&t, interval))
88479 return -EFAULT;
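
The compat.c churn is all one idea: these helpers run under set_fs(KERNEL_DS) and then re-enter a syscall that expects a __user pointer, so each cast of a kernel-stack buffer needs sparse's __force to mark the address-space change as deliberate. A rough sketch of how such annotations are wired up (the exact PaX definition of __force_user may differ; this follows the usual sparse idiom, and the stub names are invented for illustration):

/* Hedged sketch of sparse address-space annotations. Only the static
 * checker sees them; for the compiler they expand to nothing. */
#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif

/* assumption: __force_user is a deliberate cross-space cast, roughly: */
#define __force_user	__force __user

struct timespec_stub { long tv_sec, tv_nsec; };

static long takes_user_ptr(struct timespec_stub __user *p) { (void)p; return 0; }

long compat_helper(void)
{
	struct timespec_stub ts;	/* kernel-stack buffer */

	/* set_fs(KERNEL_DS) would make this safe in-kernel; the cast
	 * tells sparse the address-space violation is intentional. */
	return takes_user_ptr((struct timespec_stub __force_user *)&ts);
}
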
88480diff --git a/kernel/configs.c b/kernel/configs.c
88481index c18b1f1..b9a0132 100644
88482--- a/kernel/configs.c
88483+++ b/kernel/configs.c
88484@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
88485 struct proc_dir_entry *entry;
88486
88487 /* create the current config file */
88488+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
88489+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
88490+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
88491+ &ikconfig_file_ops);
88492+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
88493+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
88494+ &ikconfig_file_ops);
88495+#endif
88496+#else
88497 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
88498 &ikconfig_file_ops);
88499+#endif
88500+
88501 if (!entry)
88502 return -ENOMEM;
88503
88504diff --git a/kernel/cred.c b/kernel/cred.c
88505index e0573a4..26c0fd3 100644
88506--- a/kernel/cred.c
88507+++ b/kernel/cred.c
88508@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
88509 validate_creds(cred);
88510 alter_cred_subscribers(cred, -1);
88511 put_cred(cred);
88512+
88513+#ifdef CONFIG_GRKERNSEC_SETXID
88514+ cred = (struct cred *) tsk->delayed_cred;
88515+ if (cred != NULL) {
88516+ tsk->delayed_cred = NULL;
88517+ validate_creds(cred);
88518+ alter_cred_subscribers(cred, -1);
88519+ put_cred(cred);
88520+ }
88521+#endif
88522 }
88523
88524 /**
88525@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
88526 * Always returns 0 thus allowing this function to be tail-called at the end
88527 * of, say, sys_setgid().
88528 */
88529-int commit_creds(struct cred *new)
88530+static int __commit_creds(struct cred *new)
88531 {
88532 struct task_struct *task = current;
88533 const struct cred *old = task->real_cred;
88534@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
88535
88536 get_cred(new); /* we will require a ref for the subj creds too */
88537
88538+ gr_set_role_label(task, new->uid, new->gid);
88539+
88540 /* dumpability changes */
88541 if (!uid_eq(old->euid, new->euid) ||
88542 !gid_eq(old->egid, new->egid) ||
88543@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
88544 put_cred(old);
88545 return 0;
88546 }
88547+#ifdef CONFIG_GRKERNSEC_SETXID
88548+extern int set_user(struct cred *new);
88549+
88550+void gr_delayed_cred_worker(void)
88551+{
88552+ const struct cred *new = current->delayed_cred;
88553+ struct cred *ncred;
88554+
88555+ current->delayed_cred = NULL;
88556+
88557+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
88558+		// drop the reference taken via get_cred() when this was queued
88559+ put_cred(new);
88560+ return;
88561+ } else if (new == NULL)
88562+ return;
88563+
88564+ ncred = prepare_creds();
88565+ if (!ncred)
88566+ goto die;
88567+ // uids
88568+ ncred->uid = new->uid;
88569+ ncred->euid = new->euid;
88570+ ncred->suid = new->suid;
88571+ ncred->fsuid = new->fsuid;
88572+ // gids
88573+ ncred->gid = new->gid;
88574+ ncred->egid = new->egid;
88575+ ncred->sgid = new->sgid;
88576+ ncred->fsgid = new->fsgid;
88577+ // groups
88578+ set_groups(ncred, new->group_info);
88579+ // caps
88580+ ncred->securebits = new->securebits;
88581+ ncred->cap_inheritable = new->cap_inheritable;
88582+ ncred->cap_permitted = new->cap_permitted;
88583+ ncred->cap_effective = new->cap_effective;
88584+ ncred->cap_bset = new->cap_bset;
88585+
88586+ if (set_user(ncred)) {
88587+ abort_creds(ncred);
88588+ goto die;
88589+ }
88590+
88591+	// drop the reference taken via get_cred() when this was queued
88592+ put_cred(new);
88593+
88594+ __commit_creds(ncred);
88595+ return;
88596+die:
88597+	// drop the reference taken via get_cred() when this was queued
88598+ put_cred(new);
88599+ do_group_exit(SIGKILL);
88600+}
88601+#endif
88602+
88603+int commit_creds(struct cred *new)
88604+{
88605+#ifdef CONFIG_GRKERNSEC_SETXID
88606+ int ret;
88607+ int schedule_it = 0;
88608+ struct task_struct *t;
88609+ unsigned oldsecurebits = current_cred()->securebits;
88610+
88611+	/* we won't be called with tasklist_lock held for writing
88612+	   and interrupts disabled, since the cred struct in that
88613+	   case would be init_cred
88614+	*/
88615+ if (grsec_enable_setxid && !current_is_single_threaded() &&
88616+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
88617+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
88618+ schedule_it = 1;
88619+ }
88620+ ret = __commit_creds(new);
88621+ if (schedule_it) {
88622+ rcu_read_lock();
88623+ read_lock(&tasklist_lock);
88624+ for (t = next_thread(current); t != current;
88625+ t = next_thread(t)) {
88626+ /* we'll check if the thread has uid 0 in
88627+ * the delayed worker routine
88628+ */
88629+ if (task_securebits(t) == oldsecurebits &&
88630+ t->delayed_cred == NULL) {
88631+ t->delayed_cred = get_cred(new);
88632+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
88633+ set_tsk_need_resched(t);
88634+ }
88635+ }
88636+ read_unlock(&tasklist_lock);
88637+ rcu_read_unlock();
88638+ }
88639+
88640+ return ret;
88641+#else
88642+ return __commit_creds(new);
88643+#endif
88644+}
88645+
88646 EXPORT_SYMBOL(commit_creds);
88647
88648 /**
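
The delayed-cred machinery above exists because kernel credentials are per-task: a raw setuid syscall in one thread leaves sibling threads running as root, a gap glibc normally papers over by signalling every thread. The userspace demo below shows the underlying problem the TIF_GRSEC_SETXID worker closes for code that bypasses glibc's wrappers; it must run as root to show the effect, and 65534 is just an assumed unprivileged uid (build with -lpthread).

/* Hedged demo of per-task credentials: the raw setuid syscall affects
 * only the calling thread, so the worker keeps uid 0. */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static void *worker(void *arg)
{
	(void)arg;
	sleep(2);	/* give main() time to drop privileges */
	printf("worker thread uid: %ld\n", syscall(SYS_getuid));
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	sleep(1);
	syscall(SYS_setuid, 65534);	/* raw syscall: this task only */
	printf("main thread uid:   %ld\n", syscall(SYS_getuid));
	pthread_join(t, NULL);
	return 0;
}
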
88649diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
88650index ac5c0f9..4b1c6c2 100644
88651--- a/kernel/debug/debug_core.c
88652+++ b/kernel/debug/debug_core.c
88653@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
88654 */
88655 static atomic_t masters_in_kgdb;
88656 static atomic_t slaves_in_kgdb;
88657-static atomic_t kgdb_break_tasklet_var;
88658+static atomic_unchecked_t kgdb_break_tasklet_var;
88659 atomic_t kgdb_setting_breakpoint;
88660
88661 struct task_struct *kgdb_usethread;
88662@@ -137,7 +137,7 @@ int kgdb_single_step;
88663 static pid_t kgdb_sstep_pid;
88664
88665 /* to keep track of the CPU which is doing the single stepping*/
88666-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88667+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88668
88669 /*
88670 * If you are debugging a problem where roundup (the collection of
88671@@ -552,7 +552,7 @@ return_normal:
88672 * kernel will only try for the value of sstep_tries before
88673 * giving up and continuing on.
88674 */
88675- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
88676+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
88677 (kgdb_info[cpu].task &&
88678 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
88679 atomic_set(&kgdb_active, -1);
88680@@ -654,8 +654,8 @@ cpu_master_loop:
88681 }
88682
88683 kgdb_restore:
88684- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
88685- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
88686+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
88687+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
88688 if (kgdb_info[sstep_cpu].task)
88689 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
88690 else
88691@@ -932,18 +932,18 @@ static void kgdb_unregister_callbacks(void)
88692 static void kgdb_tasklet_bpt(unsigned long ing)
88693 {
88694 kgdb_breakpoint();
88695- atomic_set(&kgdb_break_tasklet_var, 0);
88696+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
88697 }
88698
88699 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
88700
88701 void kgdb_schedule_breakpoint(void)
88702 {
88703- if (atomic_read(&kgdb_break_tasklet_var) ||
88704+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
88705 atomic_read(&kgdb_active) != -1 ||
88706 atomic_read(&kgdb_setting_breakpoint))
88707 return;
88708- atomic_inc(&kgdb_break_tasklet_var);
88709+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
88710 tasklet_schedule(&kgdb_tasklet_breakpoint);
88711 }
88712 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
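
The atomic_t to atomic_unchecked_t conversions here (and in the later irq/perf hunks) are the opt-out side of PaX's REFCOUNT hardening: checked atomics trap on signed overflow, so counters that may legitimately wrap — a tasklet guard, IRQ handling tallies — are moved to the unchecked type. A plain-C, non-atomic sketch of the distinction, using __builtin_add_overflow in place of the real inline-asm instrumentation:

/* Hedged, non-atomic illustration of checked vs. unchecked counters.
 * The real feature instruments the atomic ops themselves in asm. */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { int counter; } atomic_checked_t;    /* overflow traps */
typedef struct { int counter; } atomic_unchecked_t;  /* wrap tolerated */

static void atomic_inc_checked(atomic_checked_t *v)
{
	if (__builtin_add_overflow(v->counter, 1, &v->counter)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();	/* kernel analogue: kill the offender */
	}
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* wrap via unsigned arithmetic; the kernel gets the same
	 * effect from building with -fno-strict-overflow */
	v->counter = (int)((unsigned int)v->counter + 1u);
}

int main(void)
{
	atomic_unchecked_t u = { INT_MAX };
	atomic_checked_t   c = { INT_MAX };

	atomic_inc_unchecked(&u);	/* wraps quietly */
	printf("unchecked wrapped to %d\n", u.counter);
	atomic_inc_checked(&c);		/* aborts */
	return 0;
}
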
88713diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
88714index 60f6bb8..104bb07 100644
88715--- a/kernel/debug/kdb/kdb_main.c
88716+++ b/kernel/debug/kdb/kdb_main.c
88717@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
88718 continue;
88719
88720 kdb_printf("%-20s%8u 0x%p ", mod->name,
88721- mod->core_size, (void *)mod);
88722+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
88723 #ifdef CONFIG_MODULE_UNLOAD
88724 kdb_printf("%4d ", module_refcount(mod));
88725 #endif
88726@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
88727 kdb_printf(" (Loading)");
88728 else
88729 kdb_printf(" (Live)");
88730- kdb_printf(" 0x%p", mod->module_core);
88731+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
88732
88733 #ifdef CONFIG_MODULE_UNLOAD
88734 {
88735diff --git a/kernel/events/core.c b/kernel/events/core.c
88736index 19efcf133..7c05c93 100644
88737--- a/kernel/events/core.c
88738+++ b/kernel/events/core.c
88739@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
88740 * 0 - disallow raw tracepoint access for unpriv
88741 * 1 - disallow cpu events for unpriv
88742 * 2 - disallow kernel profiling for unpriv
88743+ * 3 - disallow all unpriv perf event use
88744 */
88745-int sysctl_perf_event_paranoid __read_mostly = 1;
88746+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88747+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
88748+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
88749+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
88750+#else
88751+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
88752+#endif
88753
88754 /* Minimum for 512 kiB + 1 user control page */
88755 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
88756@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
88757
88758 tmp *= sysctl_perf_cpu_time_max_percent;
88759 do_div(tmp, 100);
88760- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
88761+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
88762 }
88763
88764 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
88765@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
88766 }
88767 }
88768
88769-static atomic64_t perf_event_id;
88770+static atomic64_unchecked_t perf_event_id;
88771
88772 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
88773 enum event_type_t event_type);
88774@@ -3102,7 +3109,7 @@ static void __perf_event_read(void *info)
88775
88776 static inline u64 perf_event_count(struct perf_event *event)
88777 {
88778- return local64_read(&event->count) + atomic64_read(&event->child_count);
88779+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
88780 }
88781
88782 static u64 perf_event_read(struct perf_event *event)
88783@@ -3528,9 +3535,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
88784 mutex_lock(&event->child_mutex);
88785 total += perf_event_read(event);
88786 *enabled += event->total_time_enabled +
88787- atomic64_read(&event->child_total_time_enabled);
88788+ atomic64_read_unchecked(&event->child_total_time_enabled);
88789 *running += event->total_time_running +
88790- atomic64_read(&event->child_total_time_running);
88791+ atomic64_read_unchecked(&event->child_total_time_running);
88792
88793 list_for_each_entry(child, &event->child_list, child_list) {
88794 total += perf_event_read(child);
88795@@ -3994,10 +4001,10 @@ void perf_event_update_userpage(struct perf_event *event)
88796 userpg->offset -= local64_read(&event->hw.prev_count);
88797
88798 userpg->time_enabled = enabled +
88799- atomic64_read(&event->child_total_time_enabled);
88800+ atomic64_read_unchecked(&event->child_total_time_enabled);
88801
88802 userpg->time_running = running +
88803- atomic64_read(&event->child_total_time_running);
88804+ atomic64_read_unchecked(&event->child_total_time_running);
88805
88806 arch_perf_update_userpage(userpg, now);
88807
88808@@ -4568,7 +4575,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
88809
88810 /* Data. */
88811 sp = perf_user_stack_pointer(regs);
88812- rem = __output_copy_user(handle, (void *) sp, dump_size);
88813+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
88814 dyn_size = dump_size - rem;
88815
88816 perf_output_skip(handle, rem);
88817@@ -4659,11 +4666,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
88818 values[n++] = perf_event_count(event);
88819 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
88820 values[n++] = enabled +
88821- atomic64_read(&event->child_total_time_enabled);
88822+ atomic64_read_unchecked(&event->child_total_time_enabled);
88823 }
88824 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
88825 values[n++] = running +
88826- atomic64_read(&event->child_total_time_running);
88827+ atomic64_read_unchecked(&event->child_total_time_running);
88828 }
88829 if (read_format & PERF_FORMAT_ID)
88830 values[n++] = primary_event_id(event);
88831@@ -6994,7 +7001,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
88832 event->parent = parent_event;
88833
88834 event->ns = get_pid_ns(task_active_pid_ns(current));
88835- event->id = atomic64_inc_return(&perf_event_id);
88836+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
88837
88838 event->state = PERF_EVENT_STATE_INACTIVE;
88839
88840@@ -7275,6 +7282,11 @@ SYSCALL_DEFINE5(perf_event_open,
88841 if (flags & ~PERF_FLAG_ALL)
88842 return -EINVAL;
88843
88844+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88845+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
88846+ return -EACCES;
88847+#endif
88848+
88849 err = perf_copy_attr(attr_uptr, &attr);
88850 if (err)
88851 return err;
88852@@ -7642,10 +7654,10 @@ static void sync_child_event(struct perf_event *child_event,
88853 /*
88854 * Add back the child's count to the parent's count:
88855 */
88856- atomic64_add(child_val, &parent_event->child_count);
88857- atomic64_add(child_event->total_time_enabled,
88858+ atomic64_add_unchecked(child_val, &parent_event->child_count);
88859+ atomic64_add_unchecked(child_event->total_time_enabled,
88860 &parent_event->child_total_time_enabled);
88861- atomic64_add(child_event->total_time_running,
88862+ atomic64_add_unchecked(child_event->total_time_running,
88863 &parent_event->child_total_time_running);
88864
88865 /*
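
perf_event_paranoid (renamed above, with a new maximum of 3 under PERF_HARDEN) gates what unprivileged users may ask of perf_event_open(). A quick userspace check of the effective policy: it assumes a libc exposing SYS_perf_event_open, excludes kernel/hypervisor sampling so that levels 1-2 would still allow it, and expects EACCES at level 3 without CAP_SYS_ADMIN.

/* Hedged demo: a minimal software counter on the current task. Under
 * the patch's paranoid level 3 this fails with EACCES for non-root. */
#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type           = PERF_TYPE_SOFTWARE;
	attr.size           = sizeof(attr);
	attr.config         = PERF_COUNT_SW_TASK_CLOCK;
	attr.exclude_kernel = 1;	/* stay within level-2 territory */
	attr.exclude_hv     = 1;

	long fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		printf("perf_event_open: %s\n", strerror(errno));
	else
		printf("perf_event_open: ok, fd=%ld\n", fd);
	return 0;
}
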
88866diff --git a/kernel/events/internal.h b/kernel/events/internal.h
88867index 569b2187..19940d9 100644
88868--- a/kernel/events/internal.h
88869+++ b/kernel/events/internal.h
88870@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
88871 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
88872 }
88873
88874-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
88875+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
88876 static inline unsigned long \
88877 func_name(struct perf_output_handle *handle, \
88878- const void *buf, unsigned long len) \
88879+ const void user *buf, unsigned long len) \
88880 { \
88881 unsigned long size, written; \
88882 \
88883@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
88884 return 0;
88885 }
88886
88887-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
88888+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
88889
88890 static inline unsigned long
88891 memcpy_skip(void *dst, const void *src, unsigned long n)
88892@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
88893 return 0;
88894 }
88895
88896-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
88897+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
88898
88899 #ifndef arch_perf_out_copy_user
88900 #define arch_perf_out_copy_user arch_perf_out_copy_user
88901@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
88902 }
88903 #endif
88904
88905-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
88906+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
88907
88908 /* Callchain handling */
88909 extern struct perf_callchain_entry *
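
The DEFINE_OUTPUT_COPY change threads an extra macro parameter through so the same template can stamp out both a kernel-pointer and a __user-pointer copier with the right sparse address space on each; passing an empty macro argument is legal C99 and simply drops the qualifier. A stripped-down sketch of the trick (names suffixed _stub are invented here):

/* Hedged sketch: one template, two address spaces, selected by an
 * (optionally empty) macro argument. */
#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

#define DEFINE_OUTPUT_COPY_STUB(func_name, user)		\
static unsigned long						\
func_name(const void user *buf, unsigned long len)		\
{								\
	(void)buf;						\
	return len;	/* stand-in for the shared copy loop */	\
}

DEFINE_OUTPUT_COPY_STUB(output_copy_stub, )            /* kernel pointers */
DEFINE_OUTPUT_COPY_STUB(output_copy_user_stub, __user) /* user pointers   */
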
88910diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
88911index cb346f2..e4dc317 100644
88912--- a/kernel/events/uprobes.c
88913+++ b/kernel/events/uprobes.c
88914@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
88915 {
88916 struct page *page;
88917 uprobe_opcode_t opcode;
88918- int result;
88919+ long result;
88920
88921 pagefault_disable();
88922 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
88923diff --git a/kernel/exit.c b/kernel/exit.c
88924index 6806c55..a5fb128 100644
88925--- a/kernel/exit.c
88926+++ b/kernel/exit.c
88927@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
88928 struct task_struct *leader;
88929 int zap_leader;
88930 repeat:
88931+#ifdef CONFIG_NET
88932+ gr_del_task_from_ip_table(p);
88933+#endif
88934+
88935 /* don't need to get the RCU readlock here - the process is dead and
88936 * can't be modifying its own credentials. But shut RCU-lockdep up */
88937 rcu_read_lock();
88938@@ -655,6 +659,8 @@ void do_exit(long code)
88939 int group_dead;
88940 TASKS_RCU(int tasks_rcu_i);
88941
88942+ set_fs(USER_DS);
88943+
88944 profile_task_exit(tsk);
88945
88946 WARN_ON(blk_needs_flush_plug(tsk));
88947@@ -671,7 +677,6 @@ void do_exit(long code)
88948 * mm_release()->clear_child_tid() from writing to a user-controlled
88949 * kernel address.
88950 */
88951- set_fs(USER_DS);
88952
88953 ptrace_event(PTRACE_EVENT_EXIT, code);
88954
88955@@ -729,6 +734,9 @@ void do_exit(long code)
88956 tsk->exit_code = code;
88957 taskstats_exit(tsk, group_dead);
88958
88959+ gr_acl_handle_psacct(tsk, code);
88960+ gr_acl_handle_exit();
88961+
88962 exit_mm(tsk);
88963
88964 if (group_dead)
88965@@ -848,7 +856,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
88966 * Take down every thread in the group. This is called by fatal signals
88967 * as well as by sys_exit_group (below).
88968 */
88969-void
88970+__noreturn void
88971 do_group_exit(int exit_code)
88972 {
88973 struct signal_struct *sig = current->signal;
88974diff --git a/kernel/fork.c b/kernel/fork.c
88975index 4dc2dda..651add0 100644
88976--- a/kernel/fork.c
88977+++ b/kernel/fork.c
88978@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
88979 void thread_info_cache_init(void)
88980 {
88981 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
88982- THREAD_SIZE, 0, NULL);
88983+ THREAD_SIZE, SLAB_USERCOPY, NULL);
88984 BUG_ON(thread_info_cache == NULL);
88985 }
88986 # endif
88987 #endif
88988
88989+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88990+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
88991+ int node, void **lowmem_stack)
88992+{
88993+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
88994+ void *ret = NULL;
88995+ unsigned int i;
88996+
88997+ *lowmem_stack = alloc_thread_info_node(tsk, node);
88998+ if (*lowmem_stack == NULL)
88999+ goto out;
89000+
89001+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
89002+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
89003+
89004+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
89005+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
89006+ if (ret == NULL) {
89007+ free_thread_info(*lowmem_stack);
89008+ *lowmem_stack = NULL;
89009+ }
89010+
89011+out:
89012+ return ret;
89013+}
89014+
89015+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89016+{
89017+ unmap_process_stacks(tsk);
89018+}
89019+#else
89020+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89021+ int node, void **lowmem_stack)
89022+{
89023+ return alloc_thread_info_node(tsk, node);
89024+}
89025+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89026+{
89027+ free_thread_info(ti);
89028+}
89029+#endif
89030+
89031 /* SLAB cache for signal_struct structures (tsk->signal) */
89032 static struct kmem_cache *signal_cachep;
89033
89034@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
89035 /* SLAB cache for mm_struct structures (tsk->mm) */
89036 static struct kmem_cache *mm_cachep;
89037
89038-static void account_kernel_stack(struct thread_info *ti, int account)
89039+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
89040 {
89041+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89042+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
89043+#else
89044 struct zone *zone = page_zone(virt_to_page(ti));
89045+#endif
89046
89047 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
89048 }
89049
89050 void free_task(struct task_struct *tsk)
89051 {
89052- account_kernel_stack(tsk->stack, -1);
89053+ account_kernel_stack(tsk, tsk->stack, -1);
89054 arch_release_thread_info(tsk->stack);
89055- free_thread_info(tsk->stack);
89056+ gr_free_thread_info(tsk, tsk->stack);
89057 rt_mutex_debug_task_free(tsk);
89058 ftrace_graph_exit_task(tsk);
89059 put_seccomp_filter(tsk);
89060@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89061 {
89062 struct task_struct *tsk;
89063 struct thread_info *ti;
89064+ void *lowmem_stack;
89065 int node = tsk_fork_get_node(orig);
89066 int err;
89067
89068@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89069 if (!tsk)
89070 return NULL;
89071
89072- ti = alloc_thread_info_node(tsk, node);
89073+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
89074 if (!ti)
89075 goto free_tsk;
89076
89077@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89078 goto free_ti;
89079
89080 tsk->stack = ti;
89081+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89082+ tsk->lowmem_stack = lowmem_stack;
89083+#endif
89084 #ifdef CONFIG_SECCOMP
89085 /*
89086 * We must handle setting up seccomp filters once we're under
89087@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89088 set_task_stack_end_magic(tsk);
89089
89090 #ifdef CONFIG_CC_STACKPROTECTOR
89091- tsk->stack_canary = get_random_int();
89092+ tsk->stack_canary = pax_get_random_long();
89093 #endif
89094
89095 /*
89096@@ -352,24 +402,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89097 tsk->splice_pipe = NULL;
89098 tsk->task_frag.page = NULL;
89099
89100- account_kernel_stack(ti, 1);
89101+ account_kernel_stack(tsk, ti, 1);
89102
89103 return tsk;
89104
89105 free_ti:
89106- free_thread_info(ti);
89107+ gr_free_thread_info(tsk, ti);
89108 free_tsk:
89109 free_task_struct(tsk);
89110 return NULL;
89111 }
89112
89113 #ifdef CONFIG_MMU
89114-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89115+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
89116+{
89117+ struct vm_area_struct *tmp;
89118+ unsigned long charge;
89119+ struct file *file;
89120+ int retval;
89121+
89122+ charge = 0;
89123+ if (mpnt->vm_flags & VM_ACCOUNT) {
89124+ unsigned long len = vma_pages(mpnt);
89125+
89126+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89127+ goto fail_nomem;
89128+ charge = len;
89129+ }
89130+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89131+ if (!tmp)
89132+ goto fail_nomem;
89133+ *tmp = *mpnt;
89134+ tmp->vm_mm = mm;
89135+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
89136+ retval = vma_dup_policy(mpnt, tmp);
89137+ if (retval)
89138+ goto fail_nomem_policy;
89139+ if (anon_vma_fork(tmp, mpnt))
89140+ goto fail_nomem_anon_vma_fork;
89141+ tmp->vm_flags &= ~VM_LOCKED;
89142+ tmp->vm_next = tmp->vm_prev = NULL;
89143+ tmp->vm_mirror = NULL;
89144+ file = tmp->vm_file;
89145+ if (file) {
89146+ struct inode *inode = file_inode(file);
89147+ struct address_space *mapping = file->f_mapping;
89148+
89149+ get_file(file);
89150+ if (tmp->vm_flags & VM_DENYWRITE)
89151+ atomic_dec(&inode->i_writecount);
89152+ i_mmap_lock_write(mapping);
89153+ if (tmp->vm_flags & VM_SHARED)
89154+ atomic_inc(&mapping->i_mmap_writable);
89155+ flush_dcache_mmap_lock(mapping);
89156+ /* insert tmp into the share list, just after mpnt */
89157+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89158+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
89159+ else
89160+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
89161+ flush_dcache_mmap_unlock(mapping);
89162+ i_mmap_unlock_write(mapping);
89163+ }
89164+
89165+ /*
89166+ * Clear hugetlb-related page reserves for children. This only
89167+ * affects MAP_PRIVATE mappings. Faults generated by the child
89168+ * are not guaranteed to succeed, even if read-only
89169+ */
89170+ if (is_vm_hugetlb_page(tmp))
89171+ reset_vma_resv_huge_pages(tmp);
89172+
89173+ return tmp;
89174+
89175+fail_nomem_anon_vma_fork:
89176+ mpol_put(vma_policy(tmp));
89177+fail_nomem_policy:
89178+ kmem_cache_free(vm_area_cachep, tmp);
89179+fail_nomem:
89180+ vm_unacct_memory(charge);
89181+ return NULL;
89182+}
89183+
89184+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89185 {
89186 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
89187 struct rb_node **rb_link, *rb_parent;
89188 int retval;
89189- unsigned long charge;
89190
89191 uprobe_start_dup_mmap();
89192 down_write(&oldmm->mmap_sem);
89193@@ -397,55 +515,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89194
89195 prev = NULL;
89196 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
89197- struct file *file;
89198-
89199 if (mpnt->vm_flags & VM_DONTCOPY) {
89200 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
89201 -vma_pages(mpnt));
89202 continue;
89203 }
89204- charge = 0;
89205- if (mpnt->vm_flags & VM_ACCOUNT) {
89206- unsigned long len = vma_pages(mpnt);
89207-
89208- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89209- goto fail_nomem;
89210- charge = len;
89211- }
89212- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89213- if (!tmp)
89214- goto fail_nomem;
89215- *tmp = *mpnt;
89216- INIT_LIST_HEAD(&tmp->anon_vma_chain);
89217- retval = vma_dup_policy(mpnt, tmp);
89218- if (retval)
89219- goto fail_nomem_policy;
89220- tmp->vm_mm = mm;
89221- if (anon_vma_fork(tmp, mpnt))
89222- goto fail_nomem_anon_vma_fork;
89223- tmp->vm_flags &= ~VM_LOCKED;
89224- tmp->vm_next = tmp->vm_prev = NULL;
89225- file = tmp->vm_file;
89226- if (file) {
89227- struct inode *inode = file_inode(file);
89228- struct address_space *mapping = file->f_mapping;
89229-
89230- get_file(file);
89231- if (tmp->vm_flags & VM_DENYWRITE)
89232- atomic_dec(&inode->i_writecount);
89233- i_mmap_lock_write(mapping);
89234- if (tmp->vm_flags & VM_SHARED)
89235- atomic_inc(&mapping->i_mmap_writable);
89236- flush_dcache_mmap_lock(mapping);
89237- /* insert tmp into the share list, just after mpnt */
89238- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89239- vma_nonlinear_insert(tmp,
89240- &mapping->i_mmap_nonlinear);
89241- else
89242- vma_interval_tree_insert_after(tmp, mpnt,
89243- &mapping->i_mmap);
89244- flush_dcache_mmap_unlock(mapping);
89245- i_mmap_unlock_write(mapping);
89246+ tmp = dup_vma(mm, oldmm, mpnt);
89247+ if (!tmp) {
89248+ retval = -ENOMEM;
89249+ goto out;
89250 }
89251
89252 /*
89253@@ -477,6 +555,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89254 if (retval)
89255 goto out;
89256 }
89257+
89258+#ifdef CONFIG_PAX_SEGMEXEC
89259+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
89260+ struct vm_area_struct *mpnt_m;
89261+
89262+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
89263+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
89264+
89265+ if (!mpnt->vm_mirror)
89266+ continue;
89267+
89268+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
89269+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
89270+ mpnt->vm_mirror = mpnt_m;
89271+ } else {
89272+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
89273+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
89274+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
89275+ mpnt->vm_mirror->vm_mirror = mpnt;
89276+ }
89277+ }
89278+ BUG_ON(mpnt_m);
89279+ }
89280+#endif
89281+
89282 /* a new mm has just been created */
89283 arch_dup_mmap(oldmm, mm);
89284 retval = 0;
89285@@ -486,14 +589,6 @@ out:
89286 up_write(&oldmm->mmap_sem);
89287 uprobe_end_dup_mmap();
89288 return retval;
89289-fail_nomem_anon_vma_fork:
89290- mpol_put(vma_policy(tmp));
89291-fail_nomem_policy:
89292- kmem_cache_free(vm_area_cachep, tmp);
89293-fail_nomem:
89294- retval = -ENOMEM;
89295- vm_unacct_memory(charge);
89296- goto out;
89297 }
89298
89299 static inline int mm_alloc_pgd(struct mm_struct *mm)
89300@@ -734,8 +829,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
89301 return ERR_PTR(err);
89302
89303 mm = get_task_mm(task);
89304- if (mm && mm != current->mm &&
89305- !ptrace_may_access(task, mode)) {
89306+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
89307+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
89308 mmput(mm);
89309 mm = ERR_PTR(-EACCES);
89310 }
89311@@ -938,13 +1033,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
89312 spin_unlock(&fs->lock);
89313 return -EAGAIN;
89314 }
89315- fs->users++;
89316+ atomic_inc(&fs->users);
89317 spin_unlock(&fs->lock);
89318 return 0;
89319 }
89320 tsk->fs = copy_fs_struct(fs);
89321 if (!tsk->fs)
89322 return -ENOMEM;
89323+	/* Carry through gr_chroot_dentry and is_chrooted instead
89324+	   of recomputing them here; they were already copied when
89325+	   the task struct was duplicated.  This keeps pivot_root
89326+	   from being treated as a chroot
89327+	*/
89328+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
89329+
89330 return 0;
89331 }
89332
89333@@ -1182,7 +1284,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
89334 * parts of the process environment (as per the clone
89335 * flags). The actual kick-off is left to the caller.
89336 */
89337-static struct task_struct *copy_process(unsigned long clone_flags,
89338+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
89339 unsigned long stack_start,
89340 unsigned long stack_size,
89341 int __user *child_tidptr,
89342@@ -1253,6 +1355,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89343 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
89344 #endif
89345 retval = -EAGAIN;
89346+
89347+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
89348+
89349 if (atomic_read(&p->real_cred->user->processes) >=
89350 task_rlimit(p, RLIMIT_NPROC)) {
89351 if (p->real_cred->user != INIT_USER &&
89352@@ -1502,6 +1607,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89353 goto bad_fork_free_pid;
89354 }
89355
89356+ /* synchronizes with gr_set_acls()
89357+ we need to call this past the point of no return for fork()
89358+ */
89359+ gr_copy_label(p);
89360+
89361 if (likely(p->pid)) {
89362 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
89363
89364@@ -1592,6 +1702,8 @@ bad_fork_cleanup_count:
89365 bad_fork_free:
89366 free_task(p);
89367 fork_out:
89368+ gr_log_forkfail(retval);
89369+
89370 return ERR_PTR(retval);
89371 }
89372
89373@@ -1653,6 +1765,7 @@ long do_fork(unsigned long clone_flags,
89374
89375 p = copy_process(clone_flags, stack_start, stack_size,
89376 child_tidptr, NULL, trace);
89377+ add_latent_entropy();
89378 /*
89379 * Do this prior waking up the new thread - the thread pointer
89380 * might get invalid after that point, if the thread exits quickly.
89381@@ -1669,6 +1782,8 @@ long do_fork(unsigned long clone_flags,
89382 if (clone_flags & CLONE_PARENT_SETTID)
89383 put_user(nr, parent_tidptr);
89384
89385+ gr_handle_brute_check();
89386+
89387 if (clone_flags & CLONE_VFORK) {
89388 p->vfork_done = &vfork;
89389 init_completion(&vfork);
89390@@ -1787,7 +1902,7 @@ void __init proc_caches_init(void)
89391 mm_cachep = kmem_cache_create("mm_struct",
89392 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
89393 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
89394- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
89395+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
89396 mmap_init();
89397 nsproxy_cache_init();
89398 }
89399@@ -1827,7 +1942,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
89400 return 0;
89401
89402 /* don't need lock here; in the worst case we'll do useless copy */
89403- if (fs->users == 1)
89404+ if (atomic_read(&fs->users) == 1)
89405 return 0;
89406
89407 *new_fsp = copy_fs_struct(fs);
89408@@ -1939,7 +2054,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
89409 fs = current->fs;
89410 spin_lock(&fs->lock);
89411 current->fs = new_fs;
89412- if (--fs->users)
89413+ gr_set_chroot_entries(current, &current->fs->root);
89414+ if (atomic_dec_return(&fs->users))
89415 new_fs = NULL;
89416 else
89417 new_fs = fs;
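
gr_alloc_thread_info_node() above is the KSTACKOVERFLOW scheme: the stack's physical pages are remapped through vmap(), so the task runs on a vmalloc-space alias whose neighbouring ranges are unmapped, turning a stack overrun into a clean fault instead of silent corruption (mainline later adopted the same idea as CONFIG_VMAP_STACK). A userspace analogue of the guard-page effect, using mprotect() to stand in for the unmapped vmalloc gap — the final write deliberately faults:

/* Hedged userspace analogue: an inaccessible page next to a region
 * makes an overflow fault instead of corrupting the neighbour. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	/* two pages: the first is the "stack", the second a guard */
	char *region = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (region == MAP_FAILED)
		return 1;
	mprotect(region + page, page, PROT_NONE);	/* guard page */

	memset(region, 0, page);	/* fine: inside the region */
	printf("writing into the guard page will now SIGSEGV\n");
	region[page] = 1;		/* faults instead of corrupting */
	return 0;
}
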
89418diff --git a/kernel/futex.c b/kernel/futex.c
89419index 63678b5..512f9af 100644
89420--- a/kernel/futex.c
89421+++ b/kernel/futex.c
89422@@ -201,7 +201,7 @@ struct futex_pi_state {
89423 atomic_t refcount;
89424
89425 union futex_key key;
89426-};
89427+} __randomize_layout;
89428
89429 /**
89430 * struct futex_q - The hashed futex queue entry, one per waiting task
89431@@ -235,7 +235,7 @@ struct futex_q {
89432 struct rt_mutex_waiter *rt_waiter;
89433 union futex_key *requeue_pi_key;
89434 u32 bitset;
89435-};
89436+} __randomize_layout;
89437
89438 static const struct futex_q futex_q_init = {
89439 /* list gets initialized in queue_me()*/
89440@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
89441 struct page *page, *page_head;
89442 int err, ro = 0;
89443
89444+#ifdef CONFIG_PAX_SEGMEXEC
89445+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
89446+ return -EFAULT;
89447+#endif
89448+
89449 /*
89450 * The futex address must be "naturally" aligned.
89451 */
89452@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
89453
89454 static int get_futex_value_locked(u32 *dest, u32 __user *from)
89455 {
89456- int ret;
89457+ unsigned long ret;
89458
89459 pagefault_disable();
89460 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
89461@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
89462 {
89463 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
89464 u32 curval;
89465+ mm_segment_t oldfs;
89466
89467 /*
89468 * This will fail and we want it. Some arch implementations do
89469@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
89470 * implementation, the non-functional ones will return
89471 * -ENOSYS.
89472 */
89473+ oldfs = get_fs();
89474+ set_fs(USER_DS);
89475 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
89476 futex_cmpxchg_enabled = 1;
89477+ set_fs(oldfs);
89478 #endif
89479 }
89480
89481diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
89482index 55c8c93..9ba7ad6 100644
89483--- a/kernel/futex_compat.c
89484+++ b/kernel/futex_compat.c
89485@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
89486 return 0;
89487 }
89488
89489-static void __user *futex_uaddr(struct robust_list __user *entry,
89490+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
89491 compat_long_t futex_offset)
89492 {
89493 compat_uptr_t base = ptr_to_compat(entry);
89494diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
89495index b358a80..fc25240 100644
89496--- a/kernel/gcov/base.c
89497+++ b/kernel/gcov/base.c
89498@@ -114,11 +114,6 @@ void gcov_enable_events(void)
89499 }
89500
89501 #ifdef CONFIG_MODULES
89502-static inline int within(void *addr, void *start, unsigned long size)
89503-{
89504- return ((addr >= start) && (addr < start + size));
89505-}
89506-
89507 /* Update list and generate events when modules are unloaded. */
89508 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89509 void *data)
89510@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89511
89512 /* Remove entries located in module from linked list. */
89513 while ((info = gcov_info_next(info))) {
89514- if (within(info, mod->module_core, mod->core_size)) {
89515+ if (within_module_core_rw((unsigned long)info, mod)) {
89516 gcov_info_unlink(prev, info);
89517 if (gcov_events_enabled)
89518 gcov_event(GCOV_REMOVE, info);
89519diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
89520index 8069237..fe712d0 100644
89521--- a/kernel/irq/manage.c
89522+++ b/kernel/irq/manage.c
89523@@ -871,7 +871,7 @@ static int irq_thread(void *data)
89524
89525 action_ret = handler_fn(desc, action);
89526 if (action_ret == IRQ_HANDLED)
89527- atomic_inc(&desc->threads_handled);
89528+ atomic_inc_unchecked(&desc->threads_handled);
89529
89530 wake_threads_waitq(desc);
89531 }
89532diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
89533index e2514b0..de3dfe0 100644
89534--- a/kernel/irq/spurious.c
89535+++ b/kernel/irq/spurious.c
89536@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
89537 * count. We just care about the count being
89538 * different than the one we saw before.
89539 */
89540- handled = atomic_read(&desc->threads_handled);
89541+ handled = atomic_read_unchecked(&desc->threads_handled);
89542 handled |= SPURIOUS_DEFERRED;
89543 if (handled != desc->threads_handled_last) {
89544 action_ret = IRQ_HANDLED;
89545diff --git a/kernel/jump_label.c b/kernel/jump_label.c
89546index 9019f15..9a3c42e 100644
89547--- a/kernel/jump_label.c
89548+++ b/kernel/jump_label.c
89549@@ -14,6 +14,7 @@
89550 #include <linux/err.h>
89551 #include <linux/static_key.h>
89552 #include <linux/jump_label_ratelimit.h>
89553+#include <linux/mm.h>
89554
89555 #ifdef HAVE_JUMP_LABEL
89556
89557@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
89558
89559 size = (((unsigned long)stop - (unsigned long)start)
89560 / sizeof(struct jump_entry));
89561+ pax_open_kernel();
89562 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
89563+ pax_close_kernel();
89564 }
89565
89566 static void jump_label_update(struct static_key *key, int enable);
89567@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
89568 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
89569 struct jump_entry *iter;
89570
89571+ pax_open_kernel();
89572 for (iter = iter_start; iter < iter_stop; iter++) {
89573 if (within_module_init(iter->code, mod))
89574 iter->code = 0;
89575 }
89576+ pax_close_kernel();
89577 }
89578
89579 static int
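
pax_open_kernel()/pax_close_kernel() bracket the jump-entry writes because KERNEXEC keeps such tables read-only once booted; the pair briefly re-enables kernel writes (e.g. by toggling CR0.WP) for the sort and the init-entry invalidation. The same open-patch-close discipline, sketched with mprotect() on a page-aligned userspace buffer standing in for the sealed table:

/* Hedged userspace analogue of pax_open_kernel()/pax_close_kernel():
 * unseal a read-only region only for the duration of a patch. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *tbl = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (tbl == MAP_FAILED)
		return 1;
	strcpy(tbl, "jump table v1");
	mprotect(tbl, page, PROT_READ);			/* sealed, like KERNEXEC */

	mprotect(tbl, page, PROT_READ | PROT_WRITE);	/* "open kernel"  */
	strcpy(tbl, "jump table v2");			/* the one write  */
	mprotect(tbl, page, PROT_READ);			/* "close kernel" */

	puts(tbl);
	return 0;
}
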
89580diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
89581index 5c5987f..bc502b0 100644
89582--- a/kernel/kallsyms.c
89583+++ b/kernel/kallsyms.c
89584@@ -11,6 +11,9 @@
89585 * Changed the compression method from stem compression to "table lookup"
89586 * compression (see scripts/kallsyms.c for a more complete description)
89587 */
89588+#ifdef CONFIG_GRKERNSEC_HIDESYM
89589+#define __INCLUDED_BY_HIDESYM 1
89590+#endif
89591 #include <linux/kallsyms.h>
89592 #include <linux/module.h>
89593 #include <linux/init.h>
89594@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
89595
89596 static inline int is_kernel_inittext(unsigned long addr)
89597 {
89598+ if (system_state != SYSTEM_BOOTING)
89599+ return 0;
89600+
89601 if (addr >= (unsigned long)_sinittext
89602 && addr <= (unsigned long)_einittext)
89603 return 1;
89604 return 0;
89605 }
89606
89607+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89608+#ifdef CONFIG_MODULES
89609+static inline int is_module_text(unsigned long addr)
89610+{
89611+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
89612+ return 1;
89613+
89614+ addr = ktla_ktva(addr);
89615+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
89616+}
89617+#else
89618+static inline int is_module_text(unsigned long addr)
89619+{
89620+ return 0;
89621+}
89622+#endif
89623+#endif
89624+
89625 static inline int is_kernel_text(unsigned long addr)
89626 {
89627 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
89628@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
89629
89630 static inline int is_kernel(unsigned long addr)
89631 {
89632+
89633+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89634+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
89635+ return 1;
89636+
89637+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
89638+#else
89639 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
89640+#endif
89641+
89642 return 1;
89643 return in_gate_area_no_mm(addr);
89644 }
89645
89646 static int is_ksym_addr(unsigned long addr)
89647 {
89648+
89649+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89650+ if (is_module_text(addr))
89651+ return 0;
89652+#endif
89653+
89654 if (all_var)
89655 return is_kernel(addr);
89656
89657@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
89658
89659 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
89660 {
89661- iter->name[0] = '\0';
89662 iter->nameoff = get_symbol_offset(new_pos);
89663 iter->pos = new_pos;
89664 }
89665@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
89666 {
89667 struct kallsym_iter *iter = m->private;
89668
89669+#ifdef CONFIG_GRKERNSEC_HIDESYM
89670+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
89671+ return 0;
89672+#endif
89673+
89674 /* Some debugging symbols have no name. Ignore them. */
89675 if (!iter->name[0])
89676 return 0;
89677@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
89678 */
89679 type = iter->exported ? toupper(iter->type) :
89680 tolower(iter->type);
89681+
89682 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
89683 type, iter->name, iter->module_name);
89684 } else
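
The net effect of the HIDESYM changes is easy to observe from userspace: s_show() returns early for non-root, and %pK (here and in the cgroup/kprobes hunks) zeroes printed pointers when kptr_restrict denies them. A small reader to see which behaviour a given kernel exhibits:

/* Hedged probe: print the first few /proc/kallsyms lines. Expect real
 * addresses as root, all-zero addresses under %pK restrictions, and an
 * empty view for non-root under this patch's HIDESYM. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/kallsyms", "r");
	int i;

	if (!f) {
		perror("/proc/kallsyms");
		return 1;
	}
	for (i = 0; i < 5 && fgets(line, sizeof(line), f); i++)
		fputs(line, stdout);
	if (i == 0)
		puts("(no symbols visible to this uid)");
	fclose(f);
	return 0;
}
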
89685diff --git a/kernel/kcmp.c b/kernel/kcmp.c
89686index 0aa69ea..a7fcafb 100644
89687--- a/kernel/kcmp.c
89688+++ b/kernel/kcmp.c
89689@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
89690 struct task_struct *task1, *task2;
89691 int ret;
89692
89693+#ifdef CONFIG_GRKERNSEC
89694+ return -ENOSYS;
89695+#endif
89696+
89697 rcu_read_lock();
89698
89699 /*
89700diff --git a/kernel/kexec.c b/kernel/kexec.c
89701index 9a8a01a..3c35dd6 100644
89702--- a/kernel/kexec.c
89703+++ b/kernel/kexec.c
89704@@ -1349,7 +1349,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
89705 compat_ulong_t, flags)
89706 {
89707 struct compat_kexec_segment in;
89708- struct kexec_segment out, __user *ksegments;
89709+ struct kexec_segment out;
89710+ struct kexec_segment __user *ksegments;
89711 unsigned long i, result;
89712
89713 /* Don't allow clients that don't understand the native
89714diff --git a/kernel/kmod.c b/kernel/kmod.c
89715index 2777f40..6cf5e70 100644
89716--- a/kernel/kmod.c
89717+++ b/kernel/kmod.c
89718@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
89719 kfree(info->argv);
89720 }
89721
89722-static int call_modprobe(char *module_name, int wait)
89723+static int call_modprobe(char *module_name, char *module_param, int wait)
89724 {
89725 struct subprocess_info *info;
89726 static char *envp[] = {
89727@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
89728 NULL
89729 };
89730
89731- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
89732+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
89733 if (!argv)
89734 goto out;
89735
89736@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
89737 argv[1] = "-q";
89738 argv[2] = "--";
89739 argv[3] = module_name; /* check free_modprobe_argv() */
89740- argv[4] = NULL;
89741+ argv[4] = module_param;
89742+ argv[5] = NULL;
89743
89744 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
89745 NULL, free_modprobe_argv, NULL);
89746@@ -122,9 +123,8 @@ out:
89747 * If module auto-loading support is disabled then this function
89748 * becomes a no-operation.
89749 */
89750-int __request_module(bool wait, const char *fmt, ...)
89751+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
89752 {
89753- va_list args;
89754 char module_name[MODULE_NAME_LEN];
89755 unsigned int max_modprobes;
89756 int ret;
89757@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
89758 if (!modprobe_path[0])
89759 return 0;
89760
89761- va_start(args, fmt);
89762- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
89763- va_end(args);
89764+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
89765 if (ret >= MODULE_NAME_LEN)
89766 return -ENAMETOOLONG;
89767
89768@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
89769 if (ret)
89770 return ret;
89771
89772+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89773+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89774+		/* hack to work around consolekit/udisks stupidity */
89775+ read_lock(&tasklist_lock);
89776+ if (!strcmp(current->comm, "mount") &&
89777+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
89778+ read_unlock(&tasklist_lock);
89779+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
89780+ return -EPERM;
89781+ }
89782+ read_unlock(&tasklist_lock);
89783+ }
89784+#endif
89785+
89786 /* If modprobe needs a service that is in a module, we get a recursive
89787 * loop. Limit the number of running kmod threads to max_threads/2 or
89788 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
89789@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
89790
89791 trace_module_request(module_name, wait, _RET_IP_);
89792
89793- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89794+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89795
89796 atomic_dec(&kmod_concurrent);
89797 return ret;
89798 }
89799+
89800+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
89801+{
89802+ va_list args;
89803+ int ret;
89804+
89805+ va_start(args, fmt);
89806+ ret = ____request_module(wait, module_param, fmt, args);
89807+ va_end(args);
89808+
89809+ return ret;
89810+}
89811+
89812+int __request_module(bool wait, const char *fmt, ...)
89813+{
89814+ va_list args;
89815+ int ret;
89816+
89817+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89818+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89819+ char module_param[MODULE_NAME_LEN];
89820+
89821+ memset(module_param, 0, sizeof(module_param));
89822+
89823+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
89824+
89825+ va_start(args, fmt);
89826+ ret = ____request_module(wait, module_param, fmt, args);
89827+ va_end(args);
89828+
89829+ return ret;
89830+ }
89831+#endif
89832+
89833+ va_start(args, fmt);
89834+ ret = ____request_module(wait, NULL, fmt, args);
89835+ va_end(args);
89836+
89837+ return ret;
89838+}
89839+
89840 EXPORT_SYMBOL(__request_module);
89841 #endif /* CONFIG_MODULES */
89842
89843 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
89844 {
89845+#ifdef CONFIG_GRKERNSEC
89846+ kfree(info->path);
89847+ info->path = info->origpath;
89848+#endif
89849 if (info->cleanup)
89850 (*info->cleanup)(info);
89851 kfree(info);
89852@@ -232,6 +289,20 @@ static int ____call_usermodehelper(void *data)
89853 */
89854 set_user_nice(current, 0);
89855
89856+#ifdef CONFIG_GRKERNSEC
89857+	/* this is race-free as far as userland is concerned, because we
89858+	   copied out the path to be used before this point and are now
89859+	   operating on that copy
89860+	*/
89861+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
89862+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
89863+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
89864+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
89865+ retval = -EPERM;
89866+ goto out;
89867+ }
89868+#endif
89869+
89870 retval = -ENOMEM;
89871 new = prepare_kernel_cred(current);
89872 if (!new)
89873@@ -254,8 +325,8 @@ static int ____call_usermodehelper(void *data)
89874 commit_creds(new);
89875
89876 retval = do_execve(getname_kernel(sub_info->path),
89877- (const char __user *const __user *)sub_info->argv,
89878- (const char __user *const __user *)sub_info->envp);
89879+ (const char __user *const __force_user *)sub_info->argv,
89880+ (const char __user *const __force_user *)sub_info->envp);
89881 out:
89882 sub_info->retval = retval;
89883 /* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
89884@@ -288,7 +359,7 @@ static int wait_for_helper(void *data)
89885 *
89886 * Thus the __user pointer cast is valid here.
89887 */
89888- sys_wait4(pid, (int __user *)&ret, 0, NULL);
89889+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
89890
89891 /*
89892 * If ret is 0, either ____call_usermodehelper failed and the
89893@@ -510,7 +581,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
89894 goto out;
89895
89896 INIT_WORK(&sub_info->work, __call_usermodehelper);
89897+#ifdef CONFIG_GRKERNSEC
89898+ sub_info->origpath = path;
89899+ sub_info->path = kstrdup(path, gfp_mask);
89900+#else
89901 sub_info->path = path;
89902+#endif
89903 sub_info->argv = argv;
89904 sub_info->envp = envp;
89905
89906@@ -612,7 +688,7 @@ EXPORT_SYMBOL(call_usermodehelper);
89907 static int proc_cap_handler(struct ctl_table *table, int write,
89908 void __user *buffer, size_t *lenp, loff_t *ppos)
89909 {
89910- struct ctl_table t;
89911+ ctl_table_no_const t;
89912 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
89913 kernel_cap_t new_cap;
89914 int err, i;
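
Annotation: the MODHARDEN hunks in kmod.c above tag every non-root auto-load request with a "grsec_modharden_normal<uid>_" marker passed down to modprobe; the matching consumer in kernel/module.c (further down in this patch) re-parses the marker out of mod->args to log and deny the load. Below is a minimal userspace sketch of that parse, assuming only the marker format visible in this patch — function and variable names are illustrative, not kernel API:

    /* Sketch only: models the strstr()-based tag parse seen later in
     * load_module()'s CONFIG_GRKERNSEC_MODHARDEN block. */
    #include <stdio.h>
    #include <string.h>

    static void check_modharden_tag(const char *args, const char *modname)
    {
        const char *marker = "grsec_modharden_normal";
        const char *p = strstr(args, marker);
        const char *end;
        char uid[16];

        if (!p)
            return;
        p += strlen(marker);
        end = strchr(p, '_');                 /* uid is '_'-terminated */
        if (end && (size_t)(end - p) < sizeof(uid)) {
            memcpy(uid, p, end - p);
            uid[end - p] = '\0';
            printf("denied module auto-load of %s by uid %s\n", modname, uid);
        }
    }

    int main(void)
    {
        check_modharden_tag("grsec_modharden_normal1000_", "fat");
        return 0;
    }
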
89915diff --git a/kernel/kprobes.c b/kernel/kprobes.c
89916index ee61992..62142b1 100644
89917--- a/kernel/kprobes.c
89918+++ b/kernel/kprobes.c
89919@@ -31,6 +31,9 @@
89920 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
89921 * <prasanna@in.ibm.com> added function-return probes.
89922 */
89923+#ifdef CONFIG_GRKERNSEC_HIDESYM
89924+#define __INCLUDED_BY_HIDESYM 1
89925+#endif
89926 #include <linux/kprobes.h>
89927 #include <linux/hash.h>
89928 #include <linux/init.h>
89929@@ -122,12 +125,12 @@ enum kprobe_slot_state {
89930
89931 static void *alloc_insn_page(void)
89932 {
89933- return module_alloc(PAGE_SIZE);
89934+ return module_alloc_exec(PAGE_SIZE);
89935 }
89936
89937 static void free_insn_page(void *page)
89938 {
89939- module_memfree(page);
89940+ module_memfree_exec(page);
89941 }
89942
89943 struct kprobe_insn_cache kprobe_insn_slots = {
89944@@ -2191,11 +2194,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
89945 kprobe_type = "k";
89946
89947 if (sym)
89948- seq_printf(pi, "%p %s %s+0x%x %s ",
89949+ seq_printf(pi, "%pK %s %s+0x%x %s ",
89950 p->addr, kprobe_type, sym, offset,
89951 (modname ? modname : " "));
89952 else
89953- seq_printf(pi, "%p %s %p ",
89954+ seq_printf(pi, "%pK %s %pK ",
89955 p->addr, kprobe_type, p->addr);
89956
89957 if (!pp)
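
Annotation: the %p to %pK conversions above (and the matching ones in lockdep_proc.c below) route kprobe and lock-class addresses through kptr_restrict; on this kernel generation, %pK prints the real address only when the reader has CAP_SYSLOG under kptr_restrict=1 and prints zeros otherwise. /proc/kallsyms uses the same format specifier, so the effect is observable from userspace — a small demo, assuming a system with kernel.kptr_restrict set to 1:

    #include <stdio.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/proc/kallsyms", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        /* unprivileged reader + kptr_restrict=1: addresses read back as zeros */
        if (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }
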
89958diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
89959index 6683cce..daf8999 100644
89960--- a/kernel/ksysfs.c
89961+++ b/kernel/ksysfs.c
89962@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
89963 {
89964 if (count+1 > UEVENT_HELPER_PATH_LEN)
89965 return -ENOENT;
89966+ if (!capable(CAP_SYS_ADMIN))
89967+ return -EPERM;
89968 memcpy(uevent_helper, buf, count);
89969 uevent_helper[count] = '\0';
89970 if (count && uevent_helper[count-1] == '\n')
89971@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
89972 return count;
89973 }
89974
89975-static struct bin_attribute notes_attr = {
89976+static bin_attribute_no_const notes_attr __read_only = {
89977 .attr = {
89978 .name = "notes",
89979 .mode = S_IRUGO,
89980diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
89981index 88d0d44..e9ce0ee 100644
89982--- a/kernel/locking/lockdep.c
89983+++ b/kernel/locking/lockdep.c
89984@@ -599,6 +599,10 @@ static int static_obj(void *obj)
89985 end = (unsigned long) &_end,
89986 addr = (unsigned long) obj;
89987
89988+#ifdef CONFIG_PAX_KERNEXEC
89989+ start = ktla_ktva(start);
89990+#endif
89991+
89992 /*
89993 * static variable?
89994 */
89995@@ -740,6 +744,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
89996 if (!static_obj(lock->key)) {
89997 debug_locks_off();
89998 printk("INFO: trying to register non-static key.\n");
89999+ printk("lock:%pS key:%pS.\n", lock, lock->key);
90000 printk("the code is fine but needs lockdep annotation.\n");
90001 printk("turning off the locking correctness validator.\n");
90002 dump_stack();
90003@@ -3081,7 +3086,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
90004 if (!class)
90005 return 0;
90006 }
90007- atomic_inc((atomic_t *)&class->ops);
90008+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
90009 if (very_verbose(class)) {
90010 printk("\nacquire class [%p] %s", class->key, class->name);
90011 if (class->name_version > 1)
90012diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
90013index ef43ac4..2720dfa 100644
90014--- a/kernel/locking/lockdep_proc.c
90015+++ b/kernel/locking/lockdep_proc.c
90016@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
90017 return 0;
90018 }
90019
90020- seq_printf(m, "%p", class->key);
90021+ seq_printf(m, "%pK", class->key);
90022 #ifdef CONFIG_DEBUG_LOCKDEP
90023 seq_printf(m, " OPS:%8ld", class->ops);
90024 #endif
90025@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
90026
90027 list_for_each_entry(entry, &class->locks_after, entry) {
90028 if (entry->distance == 1) {
90029- seq_printf(m, " -> [%p] ", entry->class->key);
90030+ seq_printf(m, " -> [%pK] ", entry->class->key);
90031 print_name(m, entry->class);
90032 seq_puts(m, "\n");
90033 }
90034@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
90035 if (!class->key)
90036 continue;
90037
90038- seq_printf(m, "[%p] ", class->key);
90039+ seq_printf(m, "[%pK] ", class->key);
90040 print_name(m, class);
90041 seq_puts(m, "\n");
90042 }
90043@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90044 if (!i)
90045 seq_line(m, '-', 40-namelen, namelen);
90046
90047- snprintf(ip, sizeof(ip), "[<%p>]",
90048+ snprintf(ip, sizeof(ip), "[<%pK>]",
90049 (void *)class->contention_point[i]);
90050 seq_printf(m, "%40s %14lu %29s %pS\n",
90051 name, stats->contention_point[i],
90052@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90053 if (!i)
90054 seq_line(m, '-', 40-namelen, namelen);
90055
90056- snprintf(ip, sizeof(ip), "[<%p>]",
90057+ snprintf(ip, sizeof(ip), "[<%pK>]",
90058 (void *)class->contending_point[i]);
90059 seq_printf(m, "%40s %14lu %29s %pS\n",
90060 name, stats->contending_point[i],
90061diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
90062index 9887a90..0cd2b1d 100644
90063--- a/kernel/locking/mcs_spinlock.c
90064+++ b/kernel/locking/mcs_spinlock.c
90065@@ -100,7 +100,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
90066
90067 prev = decode_cpu(old);
90068 node->prev = prev;
90069- ACCESS_ONCE(prev->next) = node;
90070+ ACCESS_ONCE_RW(prev->next) = node;
90071
90072 /*
90073 * Normally @prev is untouchable after the above store; because at that
90074@@ -172,8 +172,8 @@ unqueue:
90075 * it will wait in Step-A.
90076 */
90077
90078- ACCESS_ONCE(next->prev) = prev;
90079- ACCESS_ONCE(prev->next) = next;
90080+ ACCESS_ONCE_RW(next->prev) = prev;
90081+ ACCESS_ONCE_RW(prev->next) = next;
90082
90083 return false;
90084 }
90085@@ -195,13 +195,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
90086 node = this_cpu_ptr(&osq_node);
90087 next = xchg(&node->next, NULL);
90088 if (next) {
90089- ACCESS_ONCE(next->locked) = 1;
90090+ ACCESS_ONCE_RW(next->locked) = 1;
90091 return;
90092 }
90093
90094 next = osq_wait_next(lock, node, NULL);
90095 if (next)
90096- ACCESS_ONCE(next->locked) = 1;
90097+ ACCESS_ONCE_RW(next->locked) = 1;
90098 }
90099
90100 #endif
90101diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
90102index 4d60986..5d351c1 100644
90103--- a/kernel/locking/mcs_spinlock.h
90104+++ b/kernel/locking/mcs_spinlock.h
90105@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
90106 */
90107 return;
90108 }
90109- ACCESS_ONCE(prev->next) = node;
90110+ ACCESS_ONCE_RW(prev->next) = node;
90111
90112 /* Wait until the lock holder passes the lock down. */
90113 arch_mcs_spin_lock_contended(&node->locked);
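
Annotation: the ACCESS_ONCE to ACCESS_ONCE_RW conversions exist because PaX constifies the plain accessor — reads go through a const-qualified volatile lvalue, so any write through ACCESS_ONCE() fails to compile and intentional writes must use the _RW form. A compilable approximation of the split (definitions inferred from the pattern in this patch, not copied from PaX headers):

    #include <stdio.h>

    #define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    int main(void)
    {
        int v = 0;

        ACCESS_ONCE_RW(v) = 1;            /* writes must use the RW form */
        printf("%d\n", ACCESS_ONCE(v));   /* reads go through the const form */
        /* ACCESS_ONCE(v) = 2;  -- would not compile: assignment to const */
        return 0;
    }
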
90114diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
90115index 3ef3736..9c951fa 100644
90116--- a/kernel/locking/mutex-debug.c
90117+++ b/kernel/locking/mutex-debug.c
90118@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
90119 }
90120
90121 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90122- struct thread_info *ti)
90123+ struct task_struct *task)
90124 {
90125 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
90126
90127 /* Mark the current thread as blocked on the lock: */
90128- ti->task->blocked_on = waiter;
90129+ task->blocked_on = waiter;
90130 }
90131
90132 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90133- struct thread_info *ti)
90134+ struct task_struct *task)
90135 {
90136 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
90137- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
90138- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
90139- ti->task->blocked_on = NULL;
90140+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
90141+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
90142+ task->blocked_on = NULL;
90143
90144 list_del_init(&waiter->list);
90145 waiter->task = NULL;
90146diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
90147index 0799fd3..d06ae3b 100644
90148--- a/kernel/locking/mutex-debug.h
90149+++ b/kernel/locking/mutex-debug.h
90150@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
90151 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
90152 extern void debug_mutex_add_waiter(struct mutex *lock,
90153 struct mutex_waiter *waiter,
90154- struct thread_info *ti);
90155+ struct task_struct *task);
90156 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90157- struct thread_info *ti);
90158+ struct task_struct *task);
90159 extern void debug_mutex_unlock(struct mutex *lock);
90160 extern void debug_mutex_init(struct mutex *lock, const char *name,
90161 struct lock_class_key *key);
90162diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
90163index 4541951..39fe90a 100644
90164--- a/kernel/locking/mutex.c
90165+++ b/kernel/locking/mutex.c
90166@@ -524,7 +524,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90167 goto skip_wait;
90168
90169 debug_mutex_lock_common(lock, &waiter);
90170- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
90171+ debug_mutex_add_waiter(lock, &waiter, task);
90172
90173 /* add waiting tasks to the end of the waitqueue (FIFO): */
90174 list_add_tail(&waiter.list, &lock->wait_list);
90175@@ -569,7 +569,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90176 schedule_preempt_disabled();
90177 spin_lock_mutex(&lock->wait_lock, flags);
90178 }
90179- mutex_remove_waiter(lock, &waiter, current_thread_info());
90180+ mutex_remove_waiter(lock, &waiter, task);
90181 /* set it to 0 if there are no waiters left: */
90182 if (likely(list_empty(&lock->wait_list)))
90183 atomic_set(&lock->count, 0);
90184@@ -606,7 +606,7 @@ skip_wait:
90185 return 0;
90186
90187 err:
90188- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
90189+ mutex_remove_waiter(lock, &waiter, task);
90190 spin_unlock_mutex(&lock->wait_lock, flags);
90191 debug_mutex_free_waiter(&waiter);
90192 mutex_release(&lock->dep_map, 1, ip);
90193diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
90194index 1d96dd0..994ff19 100644
90195--- a/kernel/locking/rtmutex-tester.c
90196+++ b/kernel/locking/rtmutex-tester.c
90197@@ -22,7 +22,7 @@
90198 #define MAX_RT_TEST_MUTEXES 8
90199
90200 static spinlock_t rttest_lock;
90201-static atomic_t rttest_event;
90202+static atomic_unchecked_t rttest_event;
90203
90204 struct test_thread_data {
90205 int opcode;
90206@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90207
90208 case RTTEST_LOCKCONT:
90209 td->mutexes[td->opdata] = 1;
90210- td->event = atomic_add_return(1, &rttest_event);
90211+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90212 return 0;
90213
90214 case RTTEST_RESET:
90215@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90216 return 0;
90217
90218 case RTTEST_RESETEVENT:
90219- atomic_set(&rttest_event, 0);
90220+ atomic_set_unchecked(&rttest_event, 0);
90221 return 0;
90222
90223 default:
90224@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90225 return ret;
90226
90227 td->mutexes[id] = 1;
90228- td->event = atomic_add_return(1, &rttest_event);
90229+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90230 rt_mutex_lock(&mutexes[id]);
90231- td->event = atomic_add_return(1, &rttest_event);
90232+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90233 td->mutexes[id] = 4;
90234 return 0;
90235
90236@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90237 return ret;
90238
90239 td->mutexes[id] = 1;
90240- td->event = atomic_add_return(1, &rttest_event);
90241+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90242 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
90243- td->event = atomic_add_return(1, &rttest_event);
90244+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90245 td->mutexes[id] = ret ? 0 : 4;
90246 return ret ? -EINTR : 0;
90247
90248@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90249 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
90250 return ret;
90251
90252- td->event = atomic_add_return(1, &rttest_event);
90253+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90254 rt_mutex_unlock(&mutexes[id]);
90255- td->event = atomic_add_return(1, &rttest_event);
90256+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90257 td->mutexes[id] = 0;
90258 return 0;
90259
90260@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90261 break;
90262
90263 td->mutexes[dat] = 2;
90264- td->event = atomic_add_return(1, &rttest_event);
90265+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90266 break;
90267
90268 default:
90269@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90270 return;
90271
90272 td->mutexes[dat] = 3;
90273- td->event = atomic_add_return(1, &rttest_event);
90274+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90275 break;
90276
90277 case RTTEST_LOCKNOWAIT:
90278@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90279 return;
90280
90281 td->mutexes[dat] = 1;
90282- td->event = atomic_add_return(1, &rttest_event);
90283+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90284 return;
90285
90286 default:
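
Annotation: the atomic_t to atomic_unchecked_t conversions here (and in kernel/padata.c and kernel/profile.c below) are the PAX_REFCOUNT pattern — checked atomics trap on signed overflow to stop refcount-wraparound exploits, while counters like this test event sequence number, where wrapping is harmless, opt out via the _unchecked variants. A userspace model of the distinction; the real implementation uses architecture-specific overflow traps, so abort() stands in for the trap here:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int counter; } atomic_t;            /* checked */
    typedef struct { int counter; } atomic_unchecked_t;  /* may wrap */

    static int atomic_add_return(int i, atomic_t *v)
    {
        if (v->counter > INT_MAX - i)
            abort();                      /* stand-in for the overflow trap */
        return v->counter += i;
    }

    static int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
    {
        return v->counter += i;           /* wraparound tolerated by design */
    }

    int main(void)
    {
        atomic_unchecked_t ev = { 0 };
        atomic_t ref = { 1 };

        printf("%d %d\n", atomic_add_return_unchecked(1, &ev),
               atomic_add_return(1, &ref));
        return 0;
    }
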
90287diff --git a/kernel/module.c b/kernel/module.c
90288index d856e96..b82225c 100644
90289--- a/kernel/module.c
90290+++ b/kernel/module.c
90291@@ -59,6 +59,7 @@
90292 #include <linux/jump_label.h>
90293 #include <linux/pfn.h>
90294 #include <linux/bsearch.h>
90295+#include <linux/grsecurity.h>
90296 #include <uapi/linux/module.h>
90297 #include "module-internal.h"
90298
90299@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
90300
90301 /* Bounds of module allocation, for speeding __module_address.
90302 * Protected by module_mutex. */
90303-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
90304+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
90305+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
90306
90307 int register_module_notifier(struct notifier_block *nb)
90308 {
90309@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90310 return true;
90311
90312 list_for_each_entry_rcu(mod, &modules, list) {
90313- struct symsearch arr[] = {
90314+ struct symsearch modarr[] = {
90315 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
90316 NOT_GPL_ONLY, false },
90317 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
90318@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90319 if (mod->state == MODULE_STATE_UNFORMED)
90320 continue;
90321
90322- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
90323+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
90324 return true;
90325 }
90326 return false;
90327@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
90328 if (!pcpusec->sh_size)
90329 return 0;
90330
90331- if (align > PAGE_SIZE) {
90332+ if (align-1 >= PAGE_SIZE) {
90333 pr_warn("%s: per-cpu alignment %li > %li\n",
90334 mod->name, align, PAGE_SIZE);
90335 align = PAGE_SIZE;
90336@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
90337 static ssize_t show_coresize(struct module_attribute *mattr,
90338 struct module_kobject *mk, char *buffer)
90339 {
90340- return sprintf(buffer, "%u\n", mk->mod->core_size);
90341+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
90342 }
90343
90344 static struct module_attribute modinfo_coresize =
90345@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
90346 static ssize_t show_initsize(struct module_attribute *mattr,
90347 struct module_kobject *mk, char *buffer)
90348 {
90349- return sprintf(buffer, "%u\n", mk->mod->init_size);
90350+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
90351 }
90352
90353 static struct module_attribute modinfo_initsize =
90354@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
90355 goto bad_version;
90356 }
90357
90358+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90359+ /*
90360+	 * avoid potentially printing gibberish on attempted load
90361+ * of a module randomized with a different seed
90362+ */
90363+ pr_warn("no symbol version for %s\n", symname);
90364+#else
90365 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
90366+#endif
90367 return 0;
90368
90369 bad_version:
90370+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90371+ /*
90372+	 * avoid potentially printing gibberish on attempted load
90373+ * of a module randomized with a different seed
90374+ */
90375+ pr_warn("attempted module disagrees about version of symbol %s\n",
90376+ symname);
90377+#else
90378 pr_warn("%s: disagrees about version of symbol %s\n",
90379 mod->name, symname);
90380+#endif
90381 return 0;
90382 }
90383
90384@@ -1275,7 +1294,7 @@ resolve_symbol_wait(struct module *mod,
90385 */
90386 #ifdef CONFIG_SYSFS
90387
90388-#ifdef CONFIG_KALLSYMS
90389+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
90390 static inline bool sect_empty(const Elf_Shdr *sect)
90391 {
90392 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
90393@@ -1413,7 +1432,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
90394 {
90395 unsigned int notes, loaded, i;
90396 struct module_notes_attrs *notes_attrs;
90397- struct bin_attribute *nattr;
90398+ bin_attribute_no_const *nattr;
90399
90400 /* failed to create section attributes, so can't create notes */
90401 if (!mod->sect_attrs)
90402@@ -1525,7 +1544,7 @@ static void del_usage_links(struct module *mod)
90403 static int module_add_modinfo_attrs(struct module *mod)
90404 {
90405 struct module_attribute *attr;
90406- struct module_attribute *temp_attr;
90407+ module_attribute_no_const *temp_attr;
90408 int error = 0;
90409 int i;
90410
90411@@ -1735,21 +1754,21 @@ static void set_section_ro_nx(void *base,
90412
90413 static void unset_module_core_ro_nx(struct module *mod)
90414 {
90415- set_page_attributes(mod->module_core + mod->core_text_size,
90416- mod->module_core + mod->core_size,
90417+ set_page_attributes(mod->module_core_rw,
90418+ mod->module_core_rw + mod->core_size_rw,
90419 set_memory_x);
90420- set_page_attributes(mod->module_core,
90421- mod->module_core + mod->core_ro_size,
90422+ set_page_attributes(mod->module_core_rx,
90423+ mod->module_core_rx + mod->core_size_rx,
90424 set_memory_rw);
90425 }
90426
90427 static void unset_module_init_ro_nx(struct module *mod)
90428 {
90429- set_page_attributes(mod->module_init + mod->init_text_size,
90430- mod->module_init + mod->init_size,
90431+ set_page_attributes(mod->module_init_rw,
90432+ mod->module_init_rw + mod->init_size_rw,
90433 set_memory_x);
90434- set_page_attributes(mod->module_init,
90435- mod->module_init + mod->init_ro_size,
90436+ set_page_attributes(mod->module_init_rx,
90437+ mod->module_init_rx + mod->init_size_rx,
90438 set_memory_rw);
90439 }
90440
90441@@ -1762,14 +1781,14 @@ void set_all_modules_text_rw(void)
90442 list_for_each_entry_rcu(mod, &modules, list) {
90443 if (mod->state == MODULE_STATE_UNFORMED)
90444 continue;
90445- if ((mod->module_core) && (mod->core_text_size)) {
90446- set_page_attributes(mod->module_core,
90447- mod->module_core + mod->core_text_size,
90448+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90449+ set_page_attributes(mod->module_core_rx,
90450+ mod->module_core_rx + mod->core_size_rx,
90451 set_memory_rw);
90452 }
90453- if ((mod->module_init) && (mod->init_text_size)) {
90454- set_page_attributes(mod->module_init,
90455- mod->module_init + mod->init_text_size,
90456+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90457+ set_page_attributes(mod->module_init_rx,
90458+ mod->module_init_rx + mod->init_size_rx,
90459 set_memory_rw);
90460 }
90461 }
90462@@ -1785,14 +1804,14 @@ void set_all_modules_text_ro(void)
90463 list_for_each_entry_rcu(mod, &modules, list) {
90464 if (mod->state == MODULE_STATE_UNFORMED)
90465 continue;
90466- if ((mod->module_core) && (mod->core_text_size)) {
90467- set_page_attributes(mod->module_core,
90468- mod->module_core + mod->core_text_size,
90469+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90470+ set_page_attributes(mod->module_core_rx,
90471+ mod->module_core_rx + mod->core_size_rx,
90472 set_memory_ro);
90473 }
90474- if ((mod->module_init) && (mod->init_text_size)) {
90475- set_page_attributes(mod->module_init,
90476- mod->module_init + mod->init_text_size,
90477+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90478+ set_page_attributes(mod->module_init_rx,
90479+ mod->module_init_rx + mod->init_size_rx,
90480 set_memory_ro);
90481 }
90482 }
90483@@ -1801,7 +1820,15 @@ void set_all_modules_text_ro(void)
90484 #else
90485 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
90486 static void unset_module_core_ro_nx(struct module *mod) { }
90487-static void unset_module_init_ro_nx(struct module *mod) { }
90488+static void unset_module_init_ro_nx(struct module *mod)
90489+{
90490+
90491+#ifdef CONFIG_PAX_KERNEXEC
90492+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90493+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90494+#endif
90495+
90496+}
90497 #endif
90498
90499 void __weak module_memfree(void *module_region)
90500@@ -1855,16 +1882,19 @@ static void free_module(struct module *mod)
90501 /* This may be NULL, but that's OK */
90502 unset_module_init_ro_nx(mod);
90503 module_arch_freeing_init(mod);
90504- module_memfree(mod->module_init);
90505+ module_memfree(mod->module_init_rw);
90506+ module_memfree_exec(mod->module_init_rx);
90507 kfree(mod->args);
90508 percpu_modfree(mod);
90509
90510 /* Free lock-classes: */
90511- lockdep_free_key_range(mod->module_core, mod->core_size);
90512+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
90513+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
90514
90515 /* Finally, free the core (containing the module structure) */
90516 unset_module_core_ro_nx(mod);
90517- module_memfree(mod->module_core);
90518+ module_memfree_exec(mod->module_core_rx);
90519+ module_memfree(mod->module_core_rw);
90520
90521 #ifdef CONFIG_MPU
90522 update_protections(current->mm);
90523@@ -1933,9 +1963,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90524 int ret = 0;
90525 const struct kernel_symbol *ksym;
90526
90527+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90528+ int is_fs_load = 0;
90529+ int register_filesystem_found = 0;
90530+ char *p;
90531+
90532+ p = strstr(mod->args, "grsec_modharden_fs");
90533+ if (p) {
90534+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
90535+ /* copy \0 as well */
90536+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
90537+ is_fs_load = 1;
90538+ }
90539+#endif
90540+
90541 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
90542 const char *name = info->strtab + sym[i].st_name;
90543
90544+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90545+ /* it's a real shame this will never get ripped and copied
90546+ upstream! ;(
90547+ */
90548+ if (is_fs_load && !strcmp(name, "register_filesystem"))
90549+ register_filesystem_found = 1;
90550+#endif
90551+
90552 switch (sym[i].st_shndx) {
90553 case SHN_COMMON:
90554 /* Ignore common symbols */
90555@@ -1960,7 +2012,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90556 ksym = resolve_symbol_wait(mod, info, name);
90557 /* Ok if resolved. */
90558 if (ksym && !IS_ERR(ksym)) {
90559+ pax_open_kernel();
90560 sym[i].st_value = ksym->value;
90561+ pax_close_kernel();
90562 break;
90563 }
90564
90565@@ -1979,11 +2033,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90566 secbase = (unsigned long)mod_percpu(mod);
90567 else
90568 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
90569+ pax_open_kernel();
90570 sym[i].st_value += secbase;
90571+ pax_close_kernel();
90572 break;
90573 }
90574 }
90575
90576+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90577+ if (is_fs_load && !register_filesystem_found) {
90578+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
90579+ ret = -EPERM;
90580+ }
90581+#endif
90582+
90583 return ret;
90584 }
90585
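
Annotation: the memmove() in the hunk above strips the "grsec_modharden_fs" marker out of mod->args in place before symbol resolution runs; its length expression, strlen(mod->args) - (endptr - mod->args) + 1, is simply the tail length including the terminating NUL. A standalone sketch of that strip (marker and argument string are illustrative):

    #include <stdio.h>
    #include <string.h>

    static void strip_marker(char *args, const char *marker)
    {
        char *p = strstr(args, marker);

        if (p) {
            char *endptr = p + strlen(marker);
            /* shift the tail down, terminating '\0' included */
            memmove(p, endptr, strlen(endptr) + 1);
        }
    }

    int main(void)
    {
        char args[] = "grsec_modharden_fs rootfstype=ext4";

        strip_marker(args, "grsec_modharden_fs");
        printf("'%s'\n", args);   /* -> ' rootfstype=ext4' */
        return 0;
    }
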
90586@@ -2067,22 +2130,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
90587 || s->sh_entsize != ~0UL
90588 || strstarts(sname, ".init"))
90589 continue;
90590- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
90591+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90592+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
90593+ else
90594+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
90595 pr_debug("\t%s\n", sname);
90596 }
90597- switch (m) {
90598- case 0: /* executable */
90599- mod->core_size = debug_align(mod->core_size);
90600- mod->core_text_size = mod->core_size;
90601- break;
90602- case 1: /* RO: text and ro-data */
90603- mod->core_size = debug_align(mod->core_size);
90604- mod->core_ro_size = mod->core_size;
90605- break;
90606- case 3: /* whole core */
90607- mod->core_size = debug_align(mod->core_size);
90608- break;
90609- }
90610 }
90611
90612 pr_debug("Init section allocation order:\n");
90613@@ -2096,23 +2149,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
90614 || s->sh_entsize != ~0UL
90615 || !strstarts(sname, ".init"))
90616 continue;
90617- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
90618- | INIT_OFFSET_MASK);
90619+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90620+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
90621+ else
90622+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
90623+ s->sh_entsize |= INIT_OFFSET_MASK;
90624 pr_debug("\t%s\n", sname);
90625 }
90626- switch (m) {
90627- case 0: /* executable */
90628- mod->init_size = debug_align(mod->init_size);
90629- mod->init_text_size = mod->init_size;
90630- break;
90631- case 1: /* RO: text and ro-data */
90632- mod->init_size = debug_align(mod->init_size);
90633- mod->init_ro_size = mod->init_size;
90634- break;
90635- case 3: /* whole init */
90636- mod->init_size = debug_align(mod->init_size);
90637- break;
90638- }
90639 }
90640 }
90641
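
Annotation: both layout loops above replace the single size accumulator with two — a section lands in the RW image when it is writable or not allocated at all, and in the RX image otherwise. The routing predicate, extracted into runnable form (the ELF flag values are the standard ones; the section names are examples):

    #include <stdio.h>

    #define SHF_WRITE 0x1
    #define SHF_ALLOC 0x2

    static const char *dest(unsigned long sh_flags)
    {
        return ((sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC))
            ? "core_rw" : "core_rx";
    }

    int main(void)
    {
        printf(".text    -> %s\n", dest(SHF_ALLOC));             /* rx */
        printf(".data    -> %s\n", dest(SHF_ALLOC | SHF_WRITE)); /* rw */
        printf(".comment -> %s\n", dest(0));                     /* rw */
        return 0;
    }
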
90642@@ -2285,7 +2328,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90643
90644 /* Put symbol section at end of init part of module. */
90645 symsect->sh_flags |= SHF_ALLOC;
90646- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
90647+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
90648 info->index.sym) | INIT_OFFSET_MASK;
90649 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
90650
90651@@ -2302,13 +2345,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90652 }
90653
90654 /* Append room for core symbols at end of core part. */
90655- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
90656- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
90657- mod->core_size += strtab_size;
90658+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
90659+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
90660+ mod->core_size_rx += strtab_size;
90661
90662 /* Put string table section at end of init part of module. */
90663 strsect->sh_flags |= SHF_ALLOC;
90664- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
90665+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
90666 info->index.str) | INIT_OFFSET_MASK;
90667 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
90668 }
90669@@ -2326,12 +2369,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90670 /* Make sure we get permanent strtab: don't use info->strtab. */
90671 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
90672
90673+ pax_open_kernel();
90674+
90675 /* Set types up while we still have access to sections. */
90676 for (i = 0; i < mod->num_symtab; i++)
90677 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
90678
90679- mod->core_symtab = dst = mod->module_core + info->symoffs;
90680- mod->core_strtab = s = mod->module_core + info->stroffs;
90681+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
90682+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
90683 src = mod->symtab;
90684 for (ndst = i = 0; i < mod->num_symtab; i++) {
90685 if (i == 0 ||
90686@@ -2343,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90687 }
90688 }
90689 mod->core_num_syms = ndst;
90690+
90691+ pax_close_kernel();
90692 }
90693 #else
90694 static inline void layout_symtab(struct module *mod, struct load_info *info)
90695@@ -2376,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
90696 return vmalloc_exec(size);
90697 }
90698
90699-static void *module_alloc_update_bounds(unsigned long size)
90700+static void *module_alloc_update_bounds_rw(unsigned long size)
90701 {
90702 void *ret = module_alloc(size);
90703
90704 if (ret) {
90705 mutex_lock(&module_mutex);
90706 /* Update module bounds. */
90707- if ((unsigned long)ret < module_addr_min)
90708- module_addr_min = (unsigned long)ret;
90709- if ((unsigned long)ret + size > module_addr_max)
90710- module_addr_max = (unsigned long)ret + size;
90711+ if ((unsigned long)ret < module_addr_min_rw)
90712+ module_addr_min_rw = (unsigned long)ret;
90713+ if ((unsigned long)ret + size > module_addr_max_rw)
90714+ module_addr_max_rw = (unsigned long)ret + size;
90715+ mutex_unlock(&module_mutex);
90716+ }
90717+ return ret;
90718+}
90719+
90720+static void *module_alloc_update_bounds_rx(unsigned long size)
90721+{
90722+ void *ret = module_alloc_exec(size);
90723+
90724+ if (ret) {
90725+ mutex_lock(&module_mutex);
90726+ /* Update module bounds. */
90727+ if ((unsigned long)ret < module_addr_min_rx)
90728+ module_addr_min_rx = (unsigned long)ret;
90729+ if ((unsigned long)ret + size > module_addr_max_rx)
90730+ module_addr_max_rx = (unsigned long)ret + size;
90731 mutex_unlock(&module_mutex);
90732 }
90733 return ret;
90734@@ -2640,7 +2703,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90735 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
90736
90737 if (info->index.sym == 0) {
90738+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90739+ /*
90740+	 * avoid potentially printing gibberish on attempted load
90741+ * of a module randomized with a different seed
90742+ */
90743+ pr_warn("module has no symbols (stripped?)\n");
90744+#else
90745 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
90746+#endif
90747 return ERR_PTR(-ENOEXEC);
90748 }
90749
90750@@ -2656,8 +2727,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90751 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90752 {
90753 const char *modmagic = get_modinfo(info, "vermagic");
90754+ const char *license = get_modinfo(info, "license");
90755 int err;
90756
90757+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
90758+ if (!license || !license_is_gpl_compatible(license))
90759+ return -ENOEXEC;
90760+#endif
90761+
90762 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
90763 modmagic = NULL;
90764
90765@@ -2682,7 +2759,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90766 }
90767
90768 /* Set up license info based on the info section */
90769- set_license(mod, get_modinfo(info, "license"));
90770+ set_license(mod, license);
90771
90772 return 0;
90773 }
90774@@ -2776,7 +2853,7 @@ static int move_module(struct module *mod, struct load_info *info)
90775 void *ptr;
90776
90777 /* Do the allocs. */
90778- ptr = module_alloc_update_bounds(mod->core_size);
90779+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
90780 /*
90781 * The pointer to this block is stored in the module structure
90782 * which is inside the block. Just mark it as not being a
90783@@ -2786,11 +2863,11 @@ static int move_module(struct module *mod, struct load_info *info)
90784 if (!ptr)
90785 return -ENOMEM;
90786
90787- memset(ptr, 0, mod->core_size);
90788- mod->module_core = ptr;
90789+ memset(ptr, 0, mod->core_size_rw);
90790+ mod->module_core_rw = ptr;
90791
90792- if (mod->init_size) {
90793- ptr = module_alloc_update_bounds(mod->init_size);
90794+ if (mod->init_size_rw) {
90795+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
90796 /*
90797 * The pointer to this block is stored in the module structure
90798 * which is inside the block. This block doesn't need to be
90799@@ -2799,13 +2876,45 @@ static int move_module(struct module *mod, struct load_info *info)
90800 */
90801 kmemleak_ignore(ptr);
90802 if (!ptr) {
90803- module_memfree(mod->module_core);
90804+ module_memfree(mod->module_core_rw);
90805 return -ENOMEM;
90806 }
90807- memset(ptr, 0, mod->init_size);
90808- mod->module_init = ptr;
90809+ memset(ptr, 0, mod->init_size_rw);
90810+ mod->module_init_rw = ptr;
90811 } else
90812- mod->module_init = NULL;
90813+ mod->module_init_rw = NULL;
90814+
90815+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
90816+ kmemleak_not_leak(ptr);
90817+ if (!ptr) {
90818+ if (mod->module_init_rw)
90819+ module_memfree(mod->module_init_rw);
90820+ module_memfree(mod->module_core_rw);
90821+ return -ENOMEM;
90822+ }
90823+
90824+ pax_open_kernel();
90825+ memset(ptr, 0, mod->core_size_rx);
90826+ pax_close_kernel();
90827+ mod->module_core_rx = ptr;
90828+
90829+ if (mod->init_size_rx) {
90830+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
90831+ kmemleak_ignore(ptr);
90832+ if (!ptr && mod->init_size_rx) {
90833+ module_memfree_exec(mod->module_core_rx);
90834+ if (mod->module_init_rw)
90835+ module_memfree(mod->module_init_rw);
90836+ module_memfree(mod->module_core_rw);
90837+ return -ENOMEM;
90838+ }
90839+
90840+ pax_open_kernel();
90841+ memset(ptr, 0, mod->init_size_rx);
90842+ pax_close_kernel();
90843+ mod->module_init_rx = ptr;
90844+ } else
90845+ mod->module_init_rx = NULL;
90846
90847 /* Transfer each section which specifies SHF_ALLOC */
90848 pr_debug("final section addresses:\n");
90849@@ -2816,16 +2925,45 @@ static int move_module(struct module *mod, struct load_info *info)
90850 if (!(shdr->sh_flags & SHF_ALLOC))
90851 continue;
90852
90853- if (shdr->sh_entsize & INIT_OFFSET_MASK)
90854- dest = mod->module_init
90855- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90856- else
90857- dest = mod->module_core + shdr->sh_entsize;
90858+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
90859+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90860+ dest = mod->module_init_rw
90861+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90862+ else
90863+ dest = mod->module_init_rx
90864+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90865+ } else {
90866+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90867+ dest = mod->module_core_rw + shdr->sh_entsize;
90868+ else
90869+ dest = mod->module_core_rx + shdr->sh_entsize;
90870+ }
90871+
90872+ if (shdr->sh_type != SHT_NOBITS) {
90873+
90874+#ifdef CONFIG_PAX_KERNEXEC
90875+#ifdef CONFIG_X86_64
90876+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
90877+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
90878+#endif
90879+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
90880+ pax_open_kernel();
90881+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
90882+ pax_close_kernel();
90883+ } else
90884+#endif
90885
90886- if (shdr->sh_type != SHT_NOBITS)
90887 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
90888+ }
90889 /* Update sh_addr to point to copy in image. */
90890- shdr->sh_addr = (unsigned long)dest;
90891+
90892+#ifdef CONFIG_PAX_KERNEXEC
90893+ if (shdr->sh_flags & SHF_EXECINSTR)
90894+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
90895+ else
90896+#endif
90897+
90898+ shdr->sh_addr = (unsigned long)dest;
90899 pr_debug("\t0x%lx %s\n",
90900 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
90901 }
90902@@ -2882,12 +3020,12 @@ static void flush_module_icache(const struct module *mod)
90903 * Do it before processing of module parameters, so the module
90904 * can provide parameter accessor functions of its own.
90905 */
90906- if (mod->module_init)
90907- flush_icache_range((unsigned long)mod->module_init,
90908- (unsigned long)mod->module_init
90909- + mod->init_size);
90910- flush_icache_range((unsigned long)mod->module_core,
90911- (unsigned long)mod->module_core + mod->core_size);
90912+ if (mod->module_init_rx)
90913+ flush_icache_range((unsigned long)mod->module_init_rx,
90914+ (unsigned long)mod->module_init_rx
90915+ + mod->init_size_rx);
90916+ flush_icache_range((unsigned long)mod->module_core_rx,
90917+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
90918
90919 set_fs(old_fs);
90920 }
90921@@ -2945,8 +3083,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
90922 {
90923 percpu_modfree(mod);
90924 module_arch_freeing_init(mod);
90925- module_memfree(mod->module_init);
90926- module_memfree(mod->module_core);
90927+ module_memfree_exec(mod->module_init_rx);
90928+ module_memfree_exec(mod->module_core_rx);
90929+ module_memfree(mod->module_init_rw);
90930+ module_memfree(mod->module_core_rw);
90931 }
90932
90933 int __weak module_finalize(const Elf_Ehdr *hdr,
90934@@ -2959,7 +3099,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
90935 static int post_relocation(struct module *mod, const struct load_info *info)
90936 {
90937 /* Sort exception table now relocations are done. */
90938+ pax_open_kernel();
90939 sort_extable(mod->extable, mod->extable + mod->num_exentries);
90940+ pax_close_kernel();
90941
90942 /* Copy relocated percpu area over. */
90943 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
90944@@ -3001,13 +3143,15 @@ static void do_mod_ctors(struct module *mod)
90945 /* For freeing module_init on success, in case kallsyms traversing */
90946 struct mod_initfree {
90947 struct rcu_head rcu;
90948- void *module_init;
90949+ void *module_init_rw;
90950+ void *module_init_rx;
90951 };
90952
90953 static void do_free_init(struct rcu_head *head)
90954 {
90955 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
90956- module_memfree(m->module_init);
90957+ module_memfree(m->module_init_rw);
90958+ module_memfree_exec(m->module_init_rx);
90959 kfree(m);
90960 }
90961
90962@@ -3022,7 +3166,8 @@ static int do_init_module(struct module *mod)
90963 ret = -ENOMEM;
90964 goto fail;
90965 }
90966- freeinit->module_init = mod->module_init;
90967+ freeinit->module_init_rw = mod->module_init_rw;
90968+ freeinit->module_init_rx = mod->module_init_rx;
90969
90970 /*
90971 * We want to find out whether @mod uses async during init. Clear
90972@@ -3081,10 +3226,10 @@ static int do_init_module(struct module *mod)
90973 #endif
90974 unset_module_init_ro_nx(mod);
90975 module_arch_freeing_init(mod);
90976- mod->module_init = NULL;
90977- mod->init_size = 0;
90978- mod->init_ro_size = 0;
90979- mod->init_text_size = 0;
90980+ mod->module_init_rw = NULL;
90981+ mod->module_init_rx = NULL;
90982+ mod->init_size_rw = 0;
90983+ mod->init_size_rx = 0;
90984 /*
90985 * We want to free module_init, but be aware that kallsyms may be
90986 * walking this with preempt disabled. In all the failure paths,
90987@@ -3198,16 +3343,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
90988 module_bug_finalize(info->hdr, info->sechdrs, mod);
90989
90990 /* Set RO and NX regions for core */
90991- set_section_ro_nx(mod->module_core,
90992- mod->core_text_size,
90993- mod->core_ro_size,
90994- mod->core_size);
90995+ set_section_ro_nx(mod->module_core_rx,
90996+ mod->core_size_rx,
90997+ mod->core_size_rx,
90998+ mod->core_size_rx);
90999
91000 /* Set RO and NX regions for init */
91001- set_section_ro_nx(mod->module_init,
91002- mod->init_text_size,
91003- mod->init_ro_size,
91004- mod->init_size);
91005+ set_section_ro_nx(mod->module_init_rx,
91006+ mod->init_size_rx,
91007+ mod->init_size_rx,
91008+ mod->init_size_rx);
91009
91010 /* Mark state as coming so strong_try_module_get() ignores us,
91011 * but kallsyms etc. can see us. */
91012@@ -3291,9 +3436,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
91013 if (err)
91014 goto free_unload;
91015
91016+ /* Now copy in args */
91017+ mod->args = strndup_user(uargs, ~0UL >> 1);
91018+ if (IS_ERR(mod->args)) {
91019+ err = PTR_ERR(mod->args);
91020+ goto free_unload;
91021+ }
91022+
91023 /* Set up MODINFO_ATTR fields */
91024 setup_modinfo(mod, info);
91025
91026+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91027+ {
91028+ char *p, *p2;
91029+
91030+ if (strstr(mod->args, "grsec_modharden_netdev")) {
91031+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
91032+ err = -EPERM;
91033+ goto free_modinfo;
91034+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
91035+ p += sizeof("grsec_modharden_normal") - 1;
91036+ p2 = strstr(p, "_");
91037+ if (p2) {
91038+ *p2 = '\0';
91039+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
91040+ *p2 = '_';
91041+ }
91042+ err = -EPERM;
91043+ goto free_modinfo;
91044+ }
91045+ }
91046+#endif
91047+
91048 /* Fix up syms, so that st_value is a pointer to location. */
91049 err = simplify_symbols(mod, info);
91050 if (err < 0)
91051@@ -3309,13 +3483,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
91052
91053 flush_module_icache(mod);
91054
91055- /* Now copy in args */
91056- mod->args = strndup_user(uargs, ~0UL >> 1);
91057- if (IS_ERR(mod->args)) {
91058- err = PTR_ERR(mod->args);
91059- goto free_arch_cleanup;
91060- }
91061-
91062 dynamic_debug_setup(info->debug, info->num_debug);
91063
91064 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
91065@@ -3363,11 +3530,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
91066 ddebug_cleanup:
91067 dynamic_debug_remove(info->debug);
91068 synchronize_sched();
91069- kfree(mod->args);
91070- free_arch_cleanup:
91071 module_arch_cleanup(mod);
91072 free_modinfo:
91073 free_modinfo(mod);
91074+ kfree(mod->args);
91075 free_unload:
91076 module_unload_free(mod);
91077 unlink_mod:
91078@@ -3454,10 +3620,16 @@ static const char *get_ksymbol(struct module *mod,
91079 unsigned long nextval;
91080
91081 	/* At worst, next value is at end of module */
91082- if (within_module_init(addr, mod))
91083- nextval = (unsigned long)mod->module_init+mod->init_text_size;
91084+ if (within_module_init_rx(addr, mod))
91085+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
91086+ else if (within_module_init_rw(addr, mod))
91087+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
91088+ else if (within_module_core_rx(addr, mod))
91089+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
91090+ else if (within_module_core_rw(addr, mod))
91091+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
91092 else
91093- nextval = (unsigned long)mod->module_core+mod->core_text_size;
91094+ return NULL;
91095
91096 /* Scan for closest preceding symbol, and next symbol. (ELF
91097 starts real symbols at 1). */
91098@@ -3705,7 +3877,7 @@ static int m_show(struct seq_file *m, void *p)
91099 return 0;
91100
91101 seq_printf(m, "%s %u",
91102- mod->name, mod->init_size + mod->core_size);
91103+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
91104 print_unload_info(m, mod);
91105
91106 /* Informative for users. */
91107@@ -3714,7 +3886,7 @@ static int m_show(struct seq_file *m, void *p)
91108 mod->state == MODULE_STATE_COMING ? "Loading" :
91109 "Live");
91110 /* Used by oprofile and other similar tools. */
91111- seq_printf(m, " 0x%pK", mod->module_core);
91112+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
91113
91114 /* Taints info */
91115 if (mod->taints)
91116@@ -3750,7 +3922,17 @@ static const struct file_operations proc_modules_operations = {
91117
91118 static int __init proc_modules_init(void)
91119 {
91120+#ifndef CONFIG_GRKERNSEC_HIDESYM
91121+#ifdef CONFIG_GRKERNSEC_PROC_USER
91122+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91123+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
91124+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
91125+#else
91126 proc_create("modules", 0, NULL, &proc_modules_operations);
91127+#endif
91128+#else
91129+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91130+#endif
91131 return 0;
91132 }
91133 module_init(proc_modules_init);
91134@@ -3811,7 +3993,8 @@ struct module *__module_address(unsigned long addr)
91135 {
91136 struct module *mod;
91137
91138- if (addr < module_addr_min || addr > module_addr_max)
91139+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
91140+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
91141 return NULL;
91142
91143 list_for_each_entry_rcu(mod, &modules, list) {
91144@@ -3852,11 +4035,20 @@ bool is_module_text_address(unsigned long addr)
91145 */
91146 struct module *__module_text_address(unsigned long addr)
91147 {
91148- struct module *mod = __module_address(addr);
91149+ struct module *mod;
91150+
91151+#ifdef CONFIG_X86_32
91152+ addr = ktla_ktva(addr);
91153+#endif
91154+
91155+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
91156+ return NULL;
91157+
91158+ mod = __module_address(addr);
91159+
91160 if (mod) {
91161 /* Make sure it's within the text section. */
91162- if (!within(addr, mod->module_init, mod->init_text_size)
91163- && !within(addr, mod->module_core, mod->core_text_size))
91164+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
91165 mod = NULL;
91166 }
91167 return mod;
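
Annotation: the net effect of the module.c changes is a hard split of every module into an RX image (code plus read-only data, allocated via module_alloc_exec) and an RW image, each tracked by its own min/max bounds. __module_address() must therefore accept either window, while __module_text_address() can reject anything outside the RX window up front. The dual-window membership test, reduced to a sketch with made-up addresses:

    #include <stdbool.h>
    #include <stdio.h>

    struct bounds { unsigned long min, max; };

    static bool in_any(unsigned long addr,
                       const struct bounds *rx, const struct bounds *rw)
    {
        return (addr >= rx->min && addr <= rx->max) ||
               (addr >= rw->min && addr <= rw->max);
    }

    int main(void)
    {
        struct bounds rx = { 0x1000, 0x1fff }, rw = { 0x8000, 0x8fff };

        printf("%d %d\n", in_any(0x1800, &rx, &rw),   /* 1: inside rx */
                          in_any(0x5000, &rx, &rw));  /* 0: in neither */
        return 0;
    }
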
91168diff --git a/kernel/notifier.c b/kernel/notifier.c
91169index 4803da6..1c5eea6 100644
91170--- a/kernel/notifier.c
91171+++ b/kernel/notifier.c
91172@@ -5,6 +5,7 @@
91173 #include <linux/rcupdate.h>
91174 #include <linux/vmalloc.h>
91175 #include <linux/reboot.h>
91176+#include <linux/mm.h>
91177
91178 /*
91179 * Notifier list for kernel code which wants to be called
91180@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
91181 while ((*nl) != NULL) {
91182 if (n->priority > (*nl)->priority)
91183 break;
91184- nl = &((*nl)->next);
91185+ nl = (struct notifier_block **)&((*nl)->next);
91186 }
91187- n->next = *nl;
91188+ pax_open_kernel();
91189+ *(const void **)&n->next = *nl;
91190 rcu_assign_pointer(*nl, n);
91191+ pax_close_kernel();
91192 return 0;
91193 }
91194
91195@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
91196 return 0;
91197 if (n->priority > (*nl)->priority)
91198 break;
91199- nl = &((*nl)->next);
91200+ nl = (struct notifier_block **)&((*nl)->next);
91201 }
91202- n->next = *nl;
91203+ pax_open_kernel();
91204+ *(const void **)&n->next = *nl;
91205 rcu_assign_pointer(*nl, n);
91206+ pax_close_kernel();
91207 return 0;
91208 }
91209
91210@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
91211 {
91212 while ((*nl) != NULL) {
91213 if ((*nl) == n) {
91214+ pax_open_kernel();
91215 rcu_assign_pointer(*nl, n->next);
91216+ pax_close_kernel();
91217 return 0;
91218 }
91219- nl = &((*nl)->next);
91220+ nl = (struct notifier_block **)&((*nl)->next);
91221 }
91222 return -ENOENT;
91223 }
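
Annotation: pax_open_kernel()/pax_close_kernel() bracket every write to the notifier chain because PaX constifies the chain links; the pair briefly lifts kernel write protection (on x86, by clearing CR0.WP) so the one sanctioned write can land in otherwise read-only memory. A userspace analogue of the idea, using mprotect() on a deliberately read-only page — illustrative only, not how the kernel side is implemented:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long psz = sysconf(_SC_PAGESIZE);
        int *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return 1;
        *p = 1;
        mprotect(p, psz, PROT_READ);               /* "constified" */

        mprotect(p, psz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
        *p = 2;                                    /* the sanctioned write */
        mprotect(p, psz, PROT_READ);               /* pax_close_kernel() */

        printf("%d\n", *p);
        return 0;
    }
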
91224diff --git a/kernel/padata.c b/kernel/padata.c
91225index 161402f..598814c 100644
91226--- a/kernel/padata.c
91227+++ b/kernel/padata.c
91228@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
91229 * seq_nr mod. number of cpus in use.
91230 */
91231
91232- seq_nr = atomic_inc_return(&pd->seq_nr);
91233+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
91234 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
91235
91236 return padata_index_to_cpu(pd, cpu_index);
91237@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
91238 padata_init_pqueues(pd);
91239 padata_init_squeues(pd);
91240 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
91241- atomic_set(&pd->seq_nr, -1);
91242+ atomic_set_unchecked(&pd->seq_nr, -1);
91243 atomic_set(&pd->reorder_objects, 0);
91244 atomic_set(&pd->refcnt, 0);
91245 pd->pinst = pinst;
91246diff --git a/kernel/panic.c b/kernel/panic.c
91247index 4d8d6f9..97b9b9c 100644
91248--- a/kernel/panic.c
91249+++ b/kernel/panic.c
91250@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
91251 /*
91252 * Stop ourself in panic -- architecture code may override this
91253 */
91254-void __weak panic_smp_self_stop(void)
91255+void __weak __noreturn panic_smp_self_stop(void)
91256 {
91257 while (1)
91258 cpu_relax();
91259@@ -423,7 +423,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
91260 disable_trace_on_warning();
91261
91262 pr_warn("------------[ cut here ]------------\n");
91263- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
91264+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
91265 raw_smp_processor_id(), current->pid, file, line, caller);
91266
91267 if (args)
91268@@ -488,7 +488,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
91269 */
91270 __visible void __stack_chk_fail(void)
91271 {
91272- panic("stack-protector: Kernel stack is corrupted in: %p\n",
91273+ dump_stack();
91274+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
91275 __builtin_return_address(0));
91276 }
91277 EXPORT_SYMBOL(__stack_chk_fail);
91278diff --git a/kernel/pid.c b/kernel/pid.c
91279index cd36a5e..11f185d 100644
91280--- a/kernel/pid.c
91281+++ b/kernel/pid.c
91282@@ -33,6 +33,7 @@
91283 #include <linux/rculist.h>
91284 #include <linux/bootmem.h>
91285 #include <linux/hash.h>
91286+#include <linux/security.h>
91287 #include <linux/pid_namespace.h>
91288 #include <linux/init_task.h>
91289 #include <linux/syscalls.h>
91290@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
91291
91292 int pid_max = PID_MAX_DEFAULT;
91293
91294-#define RESERVED_PIDS 300
91295+#define RESERVED_PIDS 500
91296
91297 int pid_max_min = RESERVED_PIDS + 1;
91298 int pid_max_max = PID_MAX_LIMIT;
91299@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
91300 */
91301 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
91302 {
91303+ struct task_struct *task;
91304+
91305 rcu_lockdep_assert(rcu_read_lock_held(),
91306 "find_task_by_pid_ns() needs rcu_read_lock()"
91307 " protection");
91308- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91309+
91310+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91311+
91312+ if (gr_pid_is_chrooted(task))
91313+ return NULL;
91314+
91315+ return task;
91316 }
91317
91318 struct task_struct *find_task_by_vpid(pid_t vnr)
91319@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
91320 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
91321 }
91322
91323+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
91324+{
91325+ rcu_lockdep_assert(rcu_read_lock_held(),
91326+ "find_task_by_pid_ns() needs rcu_read_lock()"
91327+ " protection");
91328+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
91329+}
91330+
91331 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
91332 {
91333 struct pid *pid;
91334diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
91335index a65ba13..f600dbb 100644
91336--- a/kernel/pid_namespace.c
91337+++ b/kernel/pid_namespace.c
91338@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
91339 void __user *buffer, size_t *lenp, loff_t *ppos)
91340 {
91341 struct pid_namespace *pid_ns = task_active_pid_ns(current);
91342- struct ctl_table tmp = *table;
91343+ ctl_table_no_const tmp = *table;
91344
91345 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
91346 return -EPERM;
91347diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
91348index 48b28d3..c63ccaf 100644
91349--- a/kernel/power/Kconfig
91350+++ b/kernel/power/Kconfig
91351@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
91352 config HIBERNATION
91353 bool "Hibernation (aka 'suspend to disk')"
91354 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
91355+ depends on !GRKERNSEC_KMEM
91356+ depends on !PAX_MEMORY_SANITIZE
91357 select HIBERNATE_CALLBACKS
91358 select LZO_COMPRESS
91359 select LZO_DECOMPRESS
91360diff --git a/kernel/power/process.c b/kernel/power/process.c
91361index 5a6ec86..3a8c884 100644
91362--- a/kernel/power/process.c
91363+++ b/kernel/power/process.c
91364@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
91365 unsigned int elapsed_msecs;
91366 bool wakeup = false;
91367 int sleep_usecs = USEC_PER_MSEC;
91368+ bool timedout = false;
91369
91370 do_gettimeofday(&start);
91371
91372@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
91373
91374 while (true) {
91375 todo = 0;
91376+ if (time_after(jiffies, end_time))
91377+ timedout = true;
91378 read_lock(&tasklist_lock);
91379 for_each_process_thread(g, p) {
91380 if (p == current || !freeze_task(p))
91381 continue;
91382
91383- if (!freezer_should_skip(p))
91384+ if (!freezer_should_skip(p)) {
91385 todo++;
91386+ if (timedout) {
91387+ printk(KERN_ERR "Task refusing to freeze:\n");
91388+ sched_show_task(p);
91389+ }
91390+ }
91391 }
91392 read_unlock(&tasklist_lock);
91393
91394@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
91395 todo += wq_busy;
91396 }
91397
91398- if (!todo || time_after(jiffies, end_time))
91399+ if (!todo || timedout)
91400 break;
91401
91402 if (pm_wakeup_pending()) {
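
Annotation: the freezer change latches the timeout before the scan instead of only checking it at the bottom of the loop, so the final pass can name each task still refusing to freeze (via sched_show_task) rather than giving up silently. The control-flow shape, in miniature with stand-in values:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        bool timedout = false;
        int passes = 0;

        while (true) {
            int todo = 0;

            if (passes++ >= 2)          /* stand-in for time_after() */
                timedout = true;
            for (int t = 0; t < 3; t++) {
                todo++;
                if (timedout)           /* last pass: report offenders */
                    printf("task %d refusing to freeze\n", t);
            }
            if (!todo || timedout)
                break;
        }
        return 0;
    }
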
91403diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
91404index cbd69d8..2ca4a8b 100644
91405--- a/kernel/printk/console_cmdline.h
91406+++ b/kernel/printk/console_cmdline.h
91407@@ -3,7 +3,7 @@
91408
91409 struct console_cmdline
91410 {
91411- char name[8]; /* Name of the driver */
91412+ char name[16]; /* Name of the driver */
91413 int index; /* Minor dev. to use */
91414 char *options; /* Options for the driver */
91415 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
91416diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
91417index fae29e3..7df1786 100644
91418--- a/kernel/printk/printk.c
91419+++ b/kernel/printk/printk.c
91420@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
91421 if (from_file && type != SYSLOG_ACTION_OPEN)
91422 return 0;
91423
91424+#ifdef CONFIG_GRKERNSEC_DMESG
91425+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
91426+ return -EPERM;
91427+#endif
91428+
91429 if (syslog_action_restricted(type)) {
91430 if (capable(CAP_SYSLOG))
91431 return 0;
91432@@ -2464,6 +2469,7 @@ void register_console(struct console *newcon)
91433 for (i = 0, c = console_cmdline;
91434 i < MAX_CMDLINECONSOLES && c->name[0];
91435 i++, c++) {
91436+ BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
91437 if (strcmp(c->name, newcon->name) != 0)
91438 continue;
91439 if (newcon->index >= 0 &&
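
Annotation: widening console_cmdline.name from 8 to 16 bytes only helps if it can never again drift out of sync with struct console's name field, which the strcmp() above compares against, so the patch pins the two sizes together with a BUILD_BUG_ON. The idiom, shown with one classic definition of the macro (the in-kernel definition differs in detail):

    #include <stdio.h>

    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    struct console         { char name[16]; };
    struct console_cmdline { char name[16]; };

    int main(void)
    {
        struct console c = { "ttyS" };
        struct console_cmdline cc = { "ttyS" };

        /* compiles only while the two buffers stay the same size */
        BUILD_BUG_ON(sizeof(cc.name) != sizeof(c.name));
        printf("%zu %zu\n", sizeof(c.name), sizeof(cc.name));
        return 0;
    }
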
91440diff --git a/kernel/profile.c b/kernel/profile.c
91441index 54bf5ba..df6e0a2 100644
91442--- a/kernel/profile.c
91443+++ b/kernel/profile.c
91444@@ -37,7 +37,7 @@ struct profile_hit {
91445 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
91446 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
91447
91448-static atomic_t *prof_buffer;
91449+static atomic_unchecked_t *prof_buffer;
91450 static unsigned long prof_len, prof_shift;
91451
91452 int prof_on __read_mostly;
91453@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
91454 hits[i].pc = 0;
91455 continue;
91456 }
91457- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91458+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91459 hits[i].hits = hits[i].pc = 0;
91460 }
91461 }
91462@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91463 * Add the current hit(s) and flush the write-queue out
91464 * to the global buffer:
91465 */
91466- atomic_add(nr_hits, &prof_buffer[pc]);
91467+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
91468 for (i = 0; i < NR_PROFILE_HIT; ++i) {
91469- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91470+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91471 hits[i].pc = hits[i].hits = 0;
91472 }
91473 out:
91474@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91475 {
91476 unsigned long pc;
91477 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
91478- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91479+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91480 }
91481 #endif /* !CONFIG_SMP */
91482
91483@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
91484 return -EFAULT;
91485 buf++; p++; count--; read++;
91486 }
91487- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
91488+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
91489 if (copy_to_user(buf, (void *)pnt, count))
91490 return -EFAULT;
91491 read += count;
91492@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
91493 }
91494 #endif
91495 profile_discard_flip_buffers();
91496- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
91497+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
91498 return count;
91499 }
91500
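
[Annotation] atomic_unchecked_t is the PaX REFCOUNT opt-out type: plain atomic_t is instrumented to trap on overflow, and counters whose wraparound is harmless, such as this profiler hit buffer, are converted wholesale, which is why the sizeof() uses change in lock-step with the atomic_add()/atomic_read() calls. A plain, non-atomic userspace model of the checked/unchecked split, using GCC's __builtin_add_overflow rather than the real PaX instrumentation:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int add_checked(int *v, int inc)
{
        int out;

        if (__builtin_add_overflow(*v, inc, &out)) {
                fprintf(stderr, "refcount overflow detected\n");
                abort();        /* PaX instead kills the offending task */
        }
        return *v = out;
}

static int add_unchecked(int *v, int inc)
{
        /* Wraps silently; done in unsigned space so the demo itself
         * avoids signed-overflow undefined behaviour. */
        return *v = (int)((unsigned)*v + (unsigned)inc);
}

int main(void)
{
        int stat = INT_MAX, ref = INT_MAX;

        add_unchecked(&stat, 1);
        printf("statistic wrapped harmlessly to %d\n", stat);

        add_checked(&ref, 1);   /* aborts: a real refcount must not wrap */
        return 0;
}
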
91501diff --git a/kernel/ptrace.c b/kernel/ptrace.c
91502index 1eb9d90..d40d21e 100644
91503--- a/kernel/ptrace.c
91504+++ b/kernel/ptrace.c
91505@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
91506 if (seize)
91507 flags |= PT_SEIZED;
91508 rcu_read_lock();
91509- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91510+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91511 flags |= PT_PTRACE_CAP;
91512 rcu_read_unlock();
91513 task->ptrace = flags;
91514@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
91515 break;
91516 return -EIO;
91517 }
91518- if (copy_to_user(dst, buf, retval))
91519+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
91520 return -EFAULT;
91521 copied += retval;
91522 src += retval;
91523@@ -783,7 +783,7 @@ int ptrace_request(struct task_struct *child, long request,
91524 bool seized = child->ptrace & PT_SEIZED;
91525 int ret = -EIO;
91526 siginfo_t siginfo, *si;
91527- void __user *datavp = (void __user *) data;
91528+ void __user *datavp = (__force void __user *) data;
91529 unsigned long __user *datalp = datavp;
91530 unsigned long flags;
91531
91532@@ -1029,14 +1029,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
91533 goto out;
91534 }
91535
91536+ if (gr_handle_ptrace(child, request)) {
91537+ ret = -EPERM;
91538+ goto out_put_task_struct;
91539+ }
91540+
91541 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91542 ret = ptrace_attach(child, request, addr, data);
91543 /*
91544 * Some architectures need to do book-keeping after
91545 * a ptrace attach.
91546 */
91547- if (!ret)
91548+ if (!ret) {
91549 arch_ptrace_attach(child);
91550+ gr_audit_ptrace(child);
91551+ }
91552 goto out_put_task_struct;
91553 }
91554
91555@@ -1064,7 +1071,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
91556 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
91557 if (copied != sizeof(tmp))
91558 return -EIO;
91559- return put_user(tmp, (unsigned long __user *)data);
91560+ return put_user(tmp, (__force unsigned long __user *)data);
91561 }
91562
91563 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
91564@@ -1158,7 +1165,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
91565 }
91566
91567 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91568- compat_long_t, addr, compat_long_t, data)
91569+ compat_ulong_t, addr, compat_ulong_t, data)
91570 {
91571 struct task_struct *child;
91572 long ret;
91573@@ -1174,14 +1181,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91574 goto out;
91575 }
91576
91577+ if (gr_handle_ptrace(child, request)) {
91578+ ret = -EPERM;
91579+ goto out_put_task_struct;
91580+ }
91581+
91582 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91583 ret = ptrace_attach(child, request, addr, data);
91584 /*
91585 * Some architectures need to do book-keeping after
91586 * a ptrace attach.
91587 */
91588- if (!ret)
91589+ if (!ret) {
91590 arch_ptrace_attach(child);
91591+ gr_audit_ptrace(child);
91592+ }
91593 goto out_put_task_struct;
91594 }
91595
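
[Annotation] Besides the gr_handle_ptrace()/gr_audit_ptrace() policy hooks, the ptrace_readdata() hunk adds a belt-and-braces bound on retval before copy_to_user(), refusing any chunk length larger than the stack buffer even though the surrounding logic should already guarantee it. A userspace model of that clamp-before-copy pattern; copy_chunk() and the buffer size are invented:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int copy_chunk(char *dst, const char *src, size_t retval)
{
        char buf[128];

        if (retval > sizeof(buf))       /* the added belt-and-braces check */
                return -EFAULT;
        memcpy(buf, src, retval);       /* stage through the bounce buffer */
        memcpy(dst, buf, retval);
        return 0;
}

int main(void)
{
        char src[16] = "peeked words", dst[16];

        printf("normal chunk:    %d\n", copy_chunk(dst, src, sizeof(src)));
        printf("oversized chunk: %d\n", copy_chunk(dst, src, 4096));
        return 0;
}
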
91596diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
91597index 4d559ba..053da37 100644
91598--- a/kernel/rcu/rcutorture.c
91599+++ b/kernel/rcu/rcutorture.c
91600@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91601 rcu_torture_count) = { 0 };
91602 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91603 rcu_torture_batch) = { 0 };
91604-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91605-static atomic_t n_rcu_torture_alloc;
91606-static atomic_t n_rcu_torture_alloc_fail;
91607-static atomic_t n_rcu_torture_free;
91608-static atomic_t n_rcu_torture_mberror;
91609-static atomic_t n_rcu_torture_error;
91610+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91611+static atomic_unchecked_t n_rcu_torture_alloc;
91612+static atomic_unchecked_t n_rcu_torture_alloc_fail;
91613+static atomic_unchecked_t n_rcu_torture_free;
91614+static atomic_unchecked_t n_rcu_torture_mberror;
91615+static atomic_unchecked_t n_rcu_torture_error;
91616 static long n_rcu_torture_barrier_error;
91617 static long n_rcu_torture_boost_ktrerror;
91618 static long n_rcu_torture_boost_rterror;
91619@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
91620 static long n_rcu_torture_timers;
91621 static long n_barrier_attempts;
91622 static long n_barrier_successes;
91623-static atomic_long_t n_cbfloods;
91624+static atomic_long_unchecked_t n_cbfloods;
91625 static struct list_head rcu_torture_removed;
91626
91627 static int rcu_torture_writer_state;
91628@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
91629
91630 spin_lock_bh(&rcu_torture_lock);
91631 if (list_empty(&rcu_torture_freelist)) {
91632- atomic_inc(&n_rcu_torture_alloc_fail);
91633+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
91634 spin_unlock_bh(&rcu_torture_lock);
91635 return NULL;
91636 }
91637- atomic_inc(&n_rcu_torture_alloc);
91638+ atomic_inc_unchecked(&n_rcu_torture_alloc);
91639 p = rcu_torture_freelist.next;
91640 list_del_init(p);
91641 spin_unlock_bh(&rcu_torture_lock);
91642@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
91643 static void
91644 rcu_torture_free(struct rcu_torture *p)
91645 {
91646- atomic_inc(&n_rcu_torture_free);
91647+ atomic_inc_unchecked(&n_rcu_torture_free);
91648 spin_lock_bh(&rcu_torture_lock);
91649 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
91650 spin_unlock_bh(&rcu_torture_lock);
91651@@ -312,7 +312,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
91652 i = rp->rtort_pipe_count;
91653 if (i > RCU_TORTURE_PIPE_LEN)
91654 i = RCU_TORTURE_PIPE_LEN;
91655- atomic_inc(&rcu_torture_wcount[i]);
91656+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91657 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
91658 rp->rtort_mbtest = 0;
91659 return true;
91660@@ -799,7 +799,7 @@ rcu_torture_cbflood(void *arg)
91661 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
91662 do {
91663 schedule_timeout_interruptible(cbflood_inter_holdoff);
91664- atomic_long_inc(&n_cbfloods);
91665+ atomic_long_inc_unchecked(&n_cbfloods);
91666 WARN_ON(signal_pending(current));
91667 for (i = 0; i < cbflood_n_burst; i++) {
91668 for (j = 0; j < cbflood_n_per_burst; j++) {
91669@@ -918,7 +918,7 @@ rcu_torture_writer(void *arg)
91670 i = old_rp->rtort_pipe_count;
91671 if (i > RCU_TORTURE_PIPE_LEN)
91672 i = RCU_TORTURE_PIPE_LEN;
91673- atomic_inc(&rcu_torture_wcount[i]);
91674+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91675 old_rp->rtort_pipe_count++;
91676 switch (synctype[torture_random(&rand) % nsynctypes]) {
91677 case RTWS_DEF_FREE:
91678@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
91679 return;
91680 }
91681 if (p->rtort_mbtest == 0)
91682- atomic_inc(&n_rcu_torture_mberror);
91683+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91684 spin_lock(&rand_lock);
91685 cur_ops->read_delay(&rand);
91686 n_rcu_torture_timers++;
91687@@ -1106,7 +1106,7 @@ rcu_torture_reader(void *arg)
91688 continue;
91689 }
91690 if (p->rtort_mbtest == 0)
91691- atomic_inc(&n_rcu_torture_mberror);
91692+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91693 cur_ops->read_delay(&rand);
91694 preempt_disable();
91695 pipe_count = p->rtort_pipe_count;
91696@@ -1173,11 +1173,11 @@ rcu_torture_stats_print(void)
91697 rcu_torture_current,
91698 rcu_torture_current_version,
91699 list_empty(&rcu_torture_freelist),
91700- atomic_read(&n_rcu_torture_alloc),
91701- atomic_read(&n_rcu_torture_alloc_fail),
91702- atomic_read(&n_rcu_torture_free));
91703+ atomic_read_unchecked(&n_rcu_torture_alloc),
91704+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
91705+ atomic_read_unchecked(&n_rcu_torture_free));
91706 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
91707- atomic_read(&n_rcu_torture_mberror),
91708+ atomic_read_unchecked(&n_rcu_torture_mberror),
91709 n_rcu_torture_boost_ktrerror,
91710 n_rcu_torture_boost_rterror);
91711 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
91712@@ -1189,17 +1189,17 @@ rcu_torture_stats_print(void)
91713 n_barrier_successes,
91714 n_barrier_attempts,
91715 n_rcu_torture_barrier_error);
91716- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
91717+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
91718
91719 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91720- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
91721+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
91722 n_rcu_torture_barrier_error != 0 ||
91723 n_rcu_torture_boost_ktrerror != 0 ||
91724 n_rcu_torture_boost_rterror != 0 ||
91725 n_rcu_torture_boost_failure != 0 ||
91726 i > 1) {
91727 pr_cont("%s", "!!! ");
91728- atomic_inc(&n_rcu_torture_error);
91729+ atomic_inc_unchecked(&n_rcu_torture_error);
91730 WARN_ON_ONCE(1);
91731 }
91732 pr_cont("Reader Pipe: ");
91733@@ -1216,7 +1216,7 @@ rcu_torture_stats_print(void)
91734 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91735 pr_cont("Free-Block Circulation: ");
91736 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91737- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
91738+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
91739 }
91740 pr_cont("\n");
91741
91742@@ -1560,7 +1560,7 @@ rcu_torture_cleanup(void)
91743
91744 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
91745
91746- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91747+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91748 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
91749 else if (torture_onoff_failures())
91750 rcu_torture_print_module_parms(cur_ops,
91751@@ -1685,18 +1685,18 @@ rcu_torture_init(void)
91752
91753 rcu_torture_current = NULL;
91754 rcu_torture_current_version = 0;
91755- atomic_set(&n_rcu_torture_alloc, 0);
91756- atomic_set(&n_rcu_torture_alloc_fail, 0);
91757- atomic_set(&n_rcu_torture_free, 0);
91758- atomic_set(&n_rcu_torture_mberror, 0);
91759- atomic_set(&n_rcu_torture_error, 0);
91760+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
91761+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
91762+ atomic_set_unchecked(&n_rcu_torture_free, 0);
91763+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
91764+ atomic_set_unchecked(&n_rcu_torture_error, 0);
91765 n_rcu_torture_barrier_error = 0;
91766 n_rcu_torture_boost_ktrerror = 0;
91767 n_rcu_torture_boost_rterror = 0;
91768 n_rcu_torture_boost_failure = 0;
91769 n_rcu_torture_boosts = 0;
91770 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
91771- atomic_set(&rcu_torture_wcount[i], 0);
91772+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
91773 for_each_possible_cpu(cpu) {
91774 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91775 per_cpu(rcu_torture_count, cpu)[i] = 0;
91776diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
91777index 0db5649..e6ec167 100644
91778--- a/kernel/rcu/tiny.c
91779+++ b/kernel/rcu/tiny.c
91780@@ -42,7 +42,7 @@
91781 /* Forward declarations for tiny_plugin.h. */
91782 struct rcu_ctrlblk;
91783 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
91784-static void rcu_process_callbacks(struct softirq_action *unused);
91785+static void rcu_process_callbacks(void);
91786 static void __call_rcu(struct rcu_head *head,
91787 void (*func)(struct rcu_head *rcu),
91788 struct rcu_ctrlblk *rcp);
91789@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
91790 false));
91791 }
91792
91793-static void rcu_process_callbacks(struct softirq_action *unused)
91794+static __latent_entropy void rcu_process_callbacks(void)
91795 {
91796 __rcu_process_callbacks(&rcu_sched_ctrlblk);
91797 __rcu_process_callbacks(&rcu_bh_ctrlblk);
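
[Annotation] Two independent changes meet here: the unused struct softirq_action * parameter is dropped from the softirq handler (grsecurity gives all softirq handlers one exact void prototype), and __latent_entropy marks the function for the PaX latent_entropy GCC plugin, which mixes build-time random constants into a global entropy pool each time instrumented functions run. A hand-written sketch of roughly what the plugin emits; the constant and the mixing step are placeholders, the real ones are randomized per build:

#include <stdio.h>

static unsigned long latent_entropy_pool;

static void rcu_process_callbacks_demo(void)
{
        /* Plugin-style instrumentation: fold a build-time random constant
         * into the global pool every time the function runs. */
        latent_entropy_pool = latent_entropy_pool * 33 + 0x9e3779b9UL;

        /* ... the real callback processing would follow here ... */
}

int main(void)
{
        rcu_process_callbacks_demo();
        printf("pool=%#lx\n", latent_entropy_pool);
        return 0;
}
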
91798diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
91799index 858c565..7efd915 100644
91800--- a/kernel/rcu/tiny_plugin.h
91801+++ b/kernel/rcu/tiny_plugin.h
91802@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
91803 dump_stack();
91804 }
91805 if (*rcp->curtail && ULONG_CMP_GE(j, js))
91806- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
91807+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
91808 3 * rcu_jiffies_till_stall_check() + 3;
91809 else if (ULONG_CMP_GE(j, js))
91810- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91811+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91812 }
91813
91814 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
91815 {
91816 rcp->ticks_this_gp = 0;
91817 rcp->gp_start = jiffies;
91818- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91819+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91820 }
91821
91822 static void check_cpu_stalls(void)
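
[Annotation] ACCESS_ONCE_RW exists because the patch constifies ACCESS_ONCE(): the const-qualified volatile cast makes it read-only, so every store through ACCESS_ONCE() has to be rewritten, while bare reads are left alone; that is the pattern repeated through the RCU hunks below. Approximate definitions (reconstructed, so treat the exact spelling as an assumption) and a two-line demo:

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static unsigned long jiffies_stall;

int main(void)
{
        ACCESS_ONCE_RW(jiffies_stall) = 42;             /* write: needs _RW */
        printf("%lu\n", ACCESS_ONCE(jiffies_stall));    /* read: const is fine */
        /* ACCESS_ONCE(jiffies_stall) = 1; would not compile:
         * "assignment of read-only location" */
        return 0;
}
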
91823diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
91824index 7680fc2..b8e9161 100644
91825--- a/kernel/rcu/tree.c
91826+++ b/kernel/rcu/tree.c
91827@@ -261,7 +261,7 @@ static void rcu_momentary_dyntick_idle(void)
91828 */
91829 rdtp = this_cpu_ptr(&rcu_dynticks);
91830 smp_mb__before_atomic(); /* Earlier stuff before QS. */
91831- atomic_add(2, &rdtp->dynticks); /* QS. */
91832+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
91833 smp_mb__after_atomic(); /* Later stuff after QS. */
91834 break;
91835 }
91836@@ -521,9 +521,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
91837 rcu_prepare_for_idle();
91838 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91839 smp_mb__before_atomic(); /* See above. */
91840- atomic_inc(&rdtp->dynticks);
91841+ atomic_inc_unchecked(&rdtp->dynticks);
91842 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
91843- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91844+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91845 rcu_dynticks_task_enter();
91846
91847 /*
91848@@ -644,10 +644,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
91849
91850 rcu_dynticks_task_exit();
91851 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
91852- atomic_inc(&rdtp->dynticks);
91853+ atomic_inc_unchecked(&rdtp->dynticks);
91854 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91855 smp_mb__after_atomic(); /* See above. */
91856- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91857+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91858 rcu_cleanup_after_idle();
91859 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
91860 if (!user && !is_idle_task(current)) {
91861@@ -768,14 +768,14 @@ void rcu_nmi_enter(void)
91862 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
91863
91864 if (rdtp->dynticks_nmi_nesting == 0 &&
91865- (atomic_read(&rdtp->dynticks) & 0x1))
91866+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
91867 return;
91868 rdtp->dynticks_nmi_nesting++;
91869 smp_mb__before_atomic(); /* Force delay from prior write. */
91870- atomic_inc(&rdtp->dynticks);
91871+ atomic_inc_unchecked(&rdtp->dynticks);
91872 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91873 smp_mb__after_atomic(); /* See above. */
91874- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91875+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91876 }
91877
91878 /**
91879@@ -794,9 +794,9 @@ void rcu_nmi_exit(void)
91880 return;
91881 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91882 smp_mb__before_atomic(); /* See above. */
91883- atomic_inc(&rdtp->dynticks);
91884+ atomic_inc_unchecked(&rdtp->dynticks);
91885 smp_mb__after_atomic(); /* Force delay to next write. */
91886- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91887+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91888 }
91889
91890 /**
91891@@ -809,7 +809,7 @@ void rcu_nmi_exit(void)
91892 */
91893 bool notrace __rcu_is_watching(void)
91894 {
91895- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91896+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91897 }
91898
91899 /**
91900@@ -892,7 +892,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
91901 static int dyntick_save_progress_counter(struct rcu_data *rdp,
91902 bool *isidle, unsigned long *maxj)
91903 {
91904- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
91905+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91906 rcu_sysidle_check_cpu(rdp, isidle, maxj);
91907 if ((rdp->dynticks_snap & 0x1) == 0) {
91908 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
91909@@ -921,7 +921,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91910 int *rcrmp;
91911 unsigned int snap;
91912
91913- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
91914+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91915 snap = (unsigned int)rdp->dynticks_snap;
91916
91917 /*
91918@@ -984,10 +984,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91919 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
91920 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
91921 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
91922- ACCESS_ONCE(rdp->cond_resched_completed) =
91923+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
91924 ACCESS_ONCE(rdp->mynode->completed);
91925 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
91926- ACCESS_ONCE(*rcrmp) =
91927+ ACCESS_ONCE_RW(*rcrmp) =
91928 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
91929 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
91930 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
91931@@ -1009,7 +1009,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
91932 rsp->gp_start = j;
91933 smp_wmb(); /* Record start time before stall time. */
91934 j1 = rcu_jiffies_till_stall_check();
91935- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
91936+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
91937 rsp->jiffies_resched = j + j1 / 2;
91938 }
91939
91940@@ -1050,7 +1050,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
91941 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91942 return;
91943 }
91944- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
91945+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
91946 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91947
91948 /*
91949@@ -1127,7 +1127,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
91950
91951 raw_spin_lock_irqsave(&rnp->lock, flags);
91952 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
91953- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
91954+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
91955 3 * rcu_jiffies_till_stall_check() + 3;
91956 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91957
91958@@ -1211,7 +1211,7 @@ void rcu_cpu_stall_reset(void)
91959 struct rcu_state *rsp;
91960
91961 for_each_rcu_flavor(rsp)
91962- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
91963+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
91964 }
91965
91966 /*
91967@@ -1597,7 +1597,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
91968 raw_spin_unlock_irq(&rnp->lock);
91969 return 0;
91970 }
91971- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
91972+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
91973
91974 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
91975 /*
91976@@ -1638,9 +1638,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
91977 rdp = this_cpu_ptr(rsp->rda);
91978 rcu_preempt_check_blocked_tasks(rnp);
91979 rnp->qsmask = rnp->qsmaskinit;
91980- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
91981+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
91982 WARN_ON_ONCE(rnp->completed != rsp->completed);
91983- ACCESS_ONCE(rnp->completed) = rsp->completed;
91984+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
91985 if (rnp == rdp->mynode)
91986 (void)__note_gp_changes(rsp, rnp, rdp);
91987 rcu_preempt_boost_start_gp(rnp);
91988@@ -1685,7 +1685,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
91989 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
91990 raw_spin_lock_irq(&rnp->lock);
91991 smp_mb__after_unlock_lock();
91992- ACCESS_ONCE(rsp->gp_flags) =
91993+ ACCESS_ONCE_RW(rsp->gp_flags) =
91994 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
91995 raw_spin_unlock_irq(&rnp->lock);
91996 }
91997@@ -1731,7 +1731,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
91998 rcu_for_each_node_breadth_first(rsp, rnp) {
91999 raw_spin_lock_irq(&rnp->lock);
92000 smp_mb__after_unlock_lock();
92001- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
92002+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
92003 rdp = this_cpu_ptr(rsp->rda);
92004 if (rnp == rdp->mynode)
92005 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
92006@@ -1746,14 +1746,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92007 rcu_nocb_gp_set(rnp, nocb);
92008
92009 /* Declare grace period done. */
92010- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
92011+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
92012 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
92013 rsp->fqs_state = RCU_GP_IDLE;
92014 rdp = this_cpu_ptr(rsp->rda);
92015 /* Advance CBs to reduce false positives below. */
92016 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
92017 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
92018- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92019+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92020 trace_rcu_grace_period(rsp->name,
92021 ACCESS_ONCE(rsp->gpnum),
92022 TPS("newreq"));
92023@@ -1878,7 +1878,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
92024 */
92025 return false;
92026 }
92027- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92028+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92029 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
92030 TPS("newreq"));
92031
92032@@ -2099,7 +2099,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
92033 rsp->qlen += rdp->qlen;
92034 rdp->n_cbs_orphaned += rdp->qlen;
92035 rdp->qlen_lazy = 0;
92036- ACCESS_ONCE(rdp->qlen) = 0;
92037+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92038 }
92039
92040 /*
92041@@ -2344,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
92042 }
92043 smp_mb(); /* List handling before counting for rcu_barrier(). */
92044 rdp->qlen_lazy -= count_lazy;
92045- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
92046+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
92047 rdp->n_cbs_invoked += count;
92048
92049 /* Reinstate batch limit if we have worked down the excess. */
92050@@ -2507,7 +2507,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
92051 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92052 return; /* Someone beat us to it. */
92053 }
92054- ACCESS_ONCE(rsp->gp_flags) =
92055+ ACCESS_ONCE_RW(rsp->gp_flags) =
92056 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
92057 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92058 rcu_gp_kthread_wake(rsp);
92059@@ -2553,7 +2553,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
92060 /*
92061 * Do RCU core processing for the current CPU.
92062 */
92063-static void rcu_process_callbacks(struct softirq_action *unused)
92064+static void rcu_process_callbacks(void)
92065 {
92066 struct rcu_state *rsp;
92067
92068@@ -2665,7 +2665,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92069 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
92070 if (debug_rcu_head_queue(head)) {
92071 /* Probable double call_rcu(), so leak the callback. */
92072- ACCESS_ONCE(head->func) = rcu_leak_callback;
92073+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
92074 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
92075 return;
92076 }
92077@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92078 local_irq_restore(flags);
92079 return;
92080 }
92081- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
92082+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
92083 if (lazy)
92084 rdp->qlen_lazy++;
92085 else
92086@@ -2966,11 +2966,11 @@ void synchronize_sched_expedited(void)
92087 * counter wrap on a 32-bit system. Quite a few more CPUs would of
92088 * course be required on a 64-bit system.
92089 */
92090- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
92091+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
92092 (ulong)atomic_long_read(&rsp->expedited_done) +
92093 ULONG_MAX / 8)) {
92094 synchronize_sched();
92095- atomic_long_inc(&rsp->expedited_wrap);
92096+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
92097 return;
92098 }
92099
92100@@ -2978,12 +2978,12 @@ void synchronize_sched_expedited(void)
92101 * Take a ticket. Note that atomic_inc_return() implies a
92102 * full memory barrier.
92103 */
92104- snap = atomic_long_inc_return(&rsp->expedited_start);
92105+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
92106 firstsnap = snap;
92107 if (!try_get_online_cpus()) {
92108 /* CPU hotplug operation in flight, fall back to normal GP. */
92109 wait_rcu_gp(call_rcu_sched);
92110- atomic_long_inc(&rsp->expedited_normal);
92111+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92112 return;
92113 }
92114 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
92115@@ -2996,7 +2996,7 @@ void synchronize_sched_expedited(void)
92116 for_each_cpu(cpu, cm) {
92117 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
92118
92119- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
92120+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
92121 cpumask_clear_cpu(cpu, cm);
92122 }
92123 if (cpumask_weight(cm) == 0)
92124@@ -3011,14 +3011,14 @@ void synchronize_sched_expedited(void)
92125 synchronize_sched_expedited_cpu_stop,
92126 NULL) == -EAGAIN) {
92127 put_online_cpus();
92128- atomic_long_inc(&rsp->expedited_tryfail);
92129+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
92130
92131 /* Check to see if someone else did our work for us. */
92132 s = atomic_long_read(&rsp->expedited_done);
92133 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92134 /* ensure test happens before caller kfree */
92135 smp_mb__before_atomic(); /* ^^^ */
92136- atomic_long_inc(&rsp->expedited_workdone1);
92137+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
92138 free_cpumask_var(cm);
92139 return;
92140 }
92141@@ -3028,7 +3028,7 @@ void synchronize_sched_expedited(void)
92142 udelay(trycount * num_online_cpus());
92143 } else {
92144 wait_rcu_gp(call_rcu_sched);
92145- atomic_long_inc(&rsp->expedited_normal);
92146+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92147 free_cpumask_var(cm);
92148 return;
92149 }
92150@@ -3038,7 +3038,7 @@ void synchronize_sched_expedited(void)
92151 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92152 /* ensure test happens before caller kfree */
92153 smp_mb__before_atomic(); /* ^^^ */
92154- atomic_long_inc(&rsp->expedited_workdone2);
92155+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
92156 free_cpumask_var(cm);
92157 return;
92158 }
92159@@ -3053,14 +3053,14 @@ void synchronize_sched_expedited(void)
92160 if (!try_get_online_cpus()) {
92161 /* CPU hotplug operation in flight, use normal GP. */
92162 wait_rcu_gp(call_rcu_sched);
92163- atomic_long_inc(&rsp->expedited_normal);
92164+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92165 free_cpumask_var(cm);
92166 return;
92167 }
92168- snap = atomic_long_read(&rsp->expedited_start);
92169+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
92170 smp_mb(); /* ensure read is before try_stop_cpus(). */
92171 }
92172- atomic_long_inc(&rsp->expedited_stoppedcpus);
92173+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
92174
92175 all_cpus_idle:
92176 free_cpumask_var(cm);
92177@@ -3072,16 +3072,16 @@ all_cpus_idle:
92178 * than we did already did their update.
92179 */
92180 do {
92181- atomic_long_inc(&rsp->expedited_done_tries);
92182+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
92183 s = atomic_long_read(&rsp->expedited_done);
92184 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
92185 /* ensure test happens before caller kfree */
92186 smp_mb__before_atomic(); /* ^^^ */
92187- atomic_long_inc(&rsp->expedited_done_lost);
92188+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
92189 break;
92190 }
92191 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
92192- atomic_long_inc(&rsp->expedited_done_exit);
92193+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
92194
92195 put_online_cpus();
92196 }
92197@@ -3287,7 +3287,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92198 * ACCESS_ONCE() to prevent the compiler from speculating
92199 * the increment to precede the early-exit check.
92200 */
92201- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92202+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92203 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
92204 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
92205 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
92206@@ -3342,7 +3342,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92207
92208 /* Increment ->n_barrier_done to prevent duplicate work. */
92209 smp_mb(); /* Keep increment after above mechanism. */
92210- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92211+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92212 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
92213 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
92214 smp_mb(); /* Keep increment before caller's subsequent code. */
92215@@ -3387,10 +3387,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
92216 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
92217 init_callback_list(rdp);
92218 rdp->qlen_lazy = 0;
92219- ACCESS_ONCE(rdp->qlen) = 0;
92220+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92221 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
92222 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
92223- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
92224+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
92225 rdp->cpu = cpu;
92226 rdp->rsp = rsp;
92227 rcu_boot_init_nocb_percpu_data(rdp);
92228@@ -3423,8 +3423,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
92229 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
92230 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
92231 rcu_sysidle_init_percpu_data(rdp->dynticks);
92232- atomic_set(&rdp->dynticks->dynticks,
92233- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
92234+ atomic_set_unchecked(&rdp->dynticks->dynticks,
92235+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
92236 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
92237
92238 /* Add CPU to rcu_node bitmasks. */
92239diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
92240index 8e7b184..9c55768 100644
92241--- a/kernel/rcu/tree.h
92242+++ b/kernel/rcu/tree.h
92243@@ -87,11 +87,11 @@ struct rcu_dynticks {
92244 long long dynticks_nesting; /* Track irq/process nesting level. */
92245 /* Process level is worth LLONG_MAX/2. */
92246 int dynticks_nmi_nesting; /* Track NMI nesting level. */
92247- atomic_t dynticks; /* Even value for idle, else odd. */
92248+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
92249 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
92250 long long dynticks_idle_nesting;
92251 /* irq/process nesting level from idle. */
92252- atomic_t dynticks_idle; /* Even value for idle, else odd. */
92253+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
92254 /* "Idle" excludes userspace execution. */
92255 unsigned long dynticks_idle_jiffies;
92256 /* End of last non-NMI non-idle period. */
92257@@ -466,17 +466,17 @@ struct rcu_state {
92258 /* _rcu_barrier(). */
92259 /* End of fields guarded by barrier_mutex. */
92260
92261- atomic_long_t expedited_start; /* Starting ticket. */
92262- atomic_long_t expedited_done; /* Done ticket. */
92263- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
92264- atomic_long_t expedited_tryfail; /* # acquisition failures. */
92265- atomic_long_t expedited_workdone1; /* # done by others #1. */
92266- atomic_long_t expedited_workdone2; /* # done by others #2. */
92267- atomic_long_t expedited_normal; /* # fallbacks to normal. */
92268- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
92269- atomic_long_t expedited_done_tries; /* # tries to update _done. */
92270- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
92271- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
92272+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
92273+ atomic_long_t expedited_done; /* Done ticket. */
92274+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
92275+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
92276+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
92277+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
92278+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
92279+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
92280+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
92281+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
92282+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
92283
92284 unsigned long jiffies_force_qs; /* Time at which to invoke */
92285 /* force_quiescent_state(). */
92286diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
92287index 3ec85cb..3687925 100644
92288--- a/kernel/rcu/tree_plugin.h
92289+++ b/kernel/rcu/tree_plugin.h
92290@@ -709,7 +709,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
92291 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
92292 {
92293 return !rcu_preempted_readers_exp(rnp) &&
92294- ACCESS_ONCE(rnp->expmask) == 0;
92295+ ACCESS_ONCE_RW(rnp->expmask) == 0;
92296 }
92297
92298 /*
92299@@ -870,7 +870,7 @@ void synchronize_rcu_expedited(void)
92300
92301 /* Clean up and exit. */
92302 smp_mb(); /* ensure expedited GP seen before counter increment. */
92303- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
92304+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
92305 sync_rcu_preempt_exp_count + 1;
92306 unlock_mb_ret:
92307 mutex_unlock(&sync_rcu_preempt_exp_mutex);
92308@@ -1426,7 +1426,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
92309 free_cpumask_var(cm);
92310 }
92311
92312-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
92313+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
92314 .store = &rcu_cpu_kthread_task,
92315 .thread_should_run = rcu_cpu_kthread_should_run,
92316 .thread_fn = rcu_cpu_kthread,
92317@@ -1900,7 +1900,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
92318 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
92319 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
92320 cpu, ticks_value, ticks_title,
92321- atomic_read(&rdtp->dynticks) & 0xfff,
92322+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
92323 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
92324 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
92325 fast_no_hz);
92326@@ -2044,7 +2044,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
92327 return;
92328 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
92329 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
92330- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
92331+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
92332 wake_up(&rdp_leader->nocb_wq);
92333 }
92334 }
92335@@ -2096,7 +2096,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
92336
92337 /* Enqueue the callback on the nocb list and update counts. */
92338 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
92339- ACCESS_ONCE(*old_rhpp) = rhp;
92340+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
92341 atomic_long_add(rhcount, &rdp->nocb_q_count);
92342 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
92343 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
92344@@ -2286,7 +2286,7 @@ wait_again:
92345 continue; /* No CBs here, try next follower. */
92346
92347 /* Move callbacks to wait-for-GP list, which is empty. */
92348- ACCESS_ONCE(rdp->nocb_head) = NULL;
92349+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
92350 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
92351 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
92352 rdp->nocb_gp_count_lazy =
92353@@ -2413,7 +2413,7 @@ static int rcu_nocb_kthread(void *arg)
92354 list = ACCESS_ONCE(rdp->nocb_follower_head);
92355 BUG_ON(!list);
92356 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
92357- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
92358+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
92359 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
92360 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
92361 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
92362@@ -2443,8 +2443,8 @@ static int rcu_nocb_kthread(void *arg)
92363 list = next;
92364 }
92365 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
92366- ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92367- ACCESS_ONCE(rdp->nocb_p_count_lazy) =
92368+ ACCESS_ONCE_RW(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92369+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) =
92370 rdp->nocb_p_count_lazy - cl;
92371 rdp->n_nocbs_invoked += c;
92372 }
92373@@ -2465,7 +2465,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
92374 if (!rcu_nocb_need_deferred_wakeup(rdp))
92375 return;
92376 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
92377- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92378+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92379 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
92380 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
92381 }
92382@@ -2588,7 +2588,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
92383 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
92384 "rcuo%c/%d", rsp->abbr, cpu);
92385 BUG_ON(IS_ERR(t));
92386- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
92387+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
92388 }
92389
92390 /*
92391@@ -2793,11 +2793,11 @@ static void rcu_sysidle_enter(int irq)
92392
92393 /* Record start of fully idle period. */
92394 j = jiffies;
92395- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
92396+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
92397 smp_mb__before_atomic();
92398- atomic_inc(&rdtp->dynticks_idle);
92399+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92400 smp_mb__after_atomic();
92401- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
92402+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
92403 }
92404
92405 /*
92406@@ -2868,9 +2868,9 @@ static void rcu_sysidle_exit(int irq)
92407
92408 /* Record end of idle period. */
92409 smp_mb__before_atomic();
92410- atomic_inc(&rdtp->dynticks_idle);
92411+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92412 smp_mb__after_atomic();
92413- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
92414+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
92415
92416 /*
92417 * If we are the timekeeping CPU, we are permitted to be non-idle
92418@@ -2915,7 +2915,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
92419 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
92420
92421 /* Pick up current idle and NMI-nesting counter and check. */
92422- cur = atomic_read(&rdtp->dynticks_idle);
92423+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
92424 if (cur & 0x1) {
92425 *isidle = false; /* We are not idle! */
92426 return;
92427@@ -2964,7 +2964,7 @@ static void rcu_sysidle(unsigned long j)
92428 case RCU_SYSIDLE_NOT:
92429
92430 /* First time all are idle, so note a short idle period. */
92431- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92432+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92433 break;
92434
92435 case RCU_SYSIDLE_SHORT:
92436@@ -3002,7 +3002,7 @@ static void rcu_sysidle_cancel(void)
92437 {
92438 smp_mb();
92439 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
92440- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
92441+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
92442 }
92443
92444 /*
92445@@ -3054,7 +3054,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
92446 smp_mb(); /* grace period precedes setting inuse. */
92447
92448 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
92449- ACCESS_ONCE(rshp->inuse) = 0;
92450+ ACCESS_ONCE_RW(rshp->inuse) = 0;
92451 }
92452
92453 /*
92454@@ -3207,7 +3207,7 @@ static void rcu_bind_gp_kthread(void)
92455 static void rcu_dynticks_task_enter(void)
92456 {
92457 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92458- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
92459+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
92460 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92461 }
92462
92463@@ -3215,6 +3215,6 @@ static void rcu_dynticks_task_enter(void)
92464 static void rcu_dynticks_task_exit(void)
92465 {
92466 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92467- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
92468+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
92469 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92470 }
92471diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
92472index 5cdc62e..cc52e88 100644
92473--- a/kernel/rcu/tree_trace.c
92474+++ b/kernel/rcu/tree_trace.c
92475@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
92476 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
92477 rdp->passed_quiesce, rdp->qs_pending);
92478 seq_printf(m, " dt=%d/%llx/%d df=%lu",
92479- atomic_read(&rdp->dynticks->dynticks),
92480+ atomic_read_unchecked(&rdp->dynticks->dynticks),
92481 rdp->dynticks->dynticks_nesting,
92482 rdp->dynticks->dynticks_nmi_nesting,
92483 rdp->dynticks_fqs);
92484@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
92485 struct rcu_state *rsp = (struct rcu_state *)m->private;
92486
92487 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
92488- atomic_long_read(&rsp->expedited_start),
92489+ atomic_long_read_unchecked(&rsp->expedited_start),
92490 atomic_long_read(&rsp->expedited_done),
92491- atomic_long_read(&rsp->expedited_wrap),
92492- atomic_long_read(&rsp->expedited_tryfail),
92493- atomic_long_read(&rsp->expedited_workdone1),
92494- atomic_long_read(&rsp->expedited_workdone2),
92495- atomic_long_read(&rsp->expedited_normal),
92496- atomic_long_read(&rsp->expedited_stoppedcpus),
92497- atomic_long_read(&rsp->expedited_done_tries),
92498- atomic_long_read(&rsp->expedited_done_lost),
92499- atomic_long_read(&rsp->expedited_done_exit));
92500+ atomic_long_read_unchecked(&rsp->expedited_wrap),
92501+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
92502+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
92503+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
92504+ atomic_long_read_unchecked(&rsp->expedited_normal),
92505+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
92506+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
92507+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
92508+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
92509 return 0;
92510 }
92511
92512diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
92513index e0d31a3..f4dafe3 100644
92514--- a/kernel/rcu/update.c
92515+++ b/kernel/rcu/update.c
92516@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
92517 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
92518 */
92519 if (till_stall_check < 3) {
92520- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
92521+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
92522 till_stall_check = 3;
92523 } else if (till_stall_check > 300) {
92524- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
92525+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
92526 till_stall_check = 300;
92527 }
92528 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
92529@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
92530 !ACCESS_ONCE(t->on_rq) ||
92531 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
92532 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
92533- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
92534+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
92535 list_del_init(&t->rcu_tasks_holdout_list);
92536 put_task_struct(t);
92537 return;
92538@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
92539 !is_idle_task(t)) {
92540 get_task_struct(t);
92541 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
92542- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
92543+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
92544 list_add(&t->rcu_tasks_holdout_list,
92545 &rcu_tasks_holdouts);
92546 }
92547@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
92548 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
92549 BUG_ON(IS_ERR(t));
92550 smp_mb(); /* Ensure others see full kthread. */
92551- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
92552+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
92553 mutex_unlock(&rcu_tasks_kthread_mutex);
92554 }
92555
92556diff --git a/kernel/resource.c b/kernel/resource.c
92557index 0bcebff..e7cd5b2 100644
92558--- a/kernel/resource.c
92559+++ b/kernel/resource.c
92560@@ -161,8 +161,18 @@ static const struct file_operations proc_iomem_operations = {
92561
92562 static int __init ioresources_init(void)
92563 {
92564+#ifdef CONFIG_GRKERNSEC_PROC_ADD
92565+#ifdef CONFIG_GRKERNSEC_PROC_USER
92566+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
92567+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
92568+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92569+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
92570+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
92571+#endif
92572+#else
92573 proc_create("ioports", 0, NULL, &proc_ioports_operations);
92574 proc_create("iomem", 0, NULL, &proc_iomem_operations);
92575+#endif
92576 return 0;
92577 }
92578 __initcall(ioresources_init);
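
[Annotation] With GRKERNSEC_PROC_ADD, /proc/ioports and /proc/iomem become readable by root only (or root plus a configured group), since physical-address layout is useful input for defeating kernel address-space randomization. A small unprivileged probe that shows the visible difference; it only reports what open(2) returns:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/iomem", O_RDONLY);

        if (fd < 0) {
                /* Expected for non-root on a GRKERNSEC_PROC_ADD kernel. */
                printf("open failed: %s\n", strerror(errno));
                return 0;
        }

        char buf[64];
        ssize_t n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                printf("readable; first bytes:\n%s\n", buf);
        }
        close(fd);
        return 0;
}
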
92579diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
92580index 8a2e230..6020954 100644
92581--- a/kernel/sched/auto_group.c
92582+++ b/kernel/sched/auto_group.c
92583@@ -11,7 +11,7 @@
92584
92585 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
92586 static struct autogroup autogroup_default;
92587-static atomic_t autogroup_seq_nr;
92588+static atomic_unchecked_t autogroup_seq_nr;
92589
92590 void __init autogroup_init(struct task_struct *init_task)
92591 {
92592@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
92593
92594 kref_init(&ag->kref);
92595 init_rwsem(&ag->lock);
92596- ag->id = atomic_inc_return(&autogroup_seq_nr);
92597+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
92598 ag->tg = tg;
92599 #ifdef CONFIG_RT_GROUP_SCHED
92600 /*
92601diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
92602index 607f852..486bc87 100644
92603--- a/kernel/sched/completion.c
92604+++ b/kernel/sched/completion.c
92605@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
92606 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92607 * or number of jiffies left till timeout) if completed.
92608 */
92609-long __sched
92610+long __sched __intentional_overflow(-1)
92611 wait_for_completion_interruptible_timeout(struct completion *x,
92612 unsigned long timeout)
92613 {
92614@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
92615 *
92616 * Return: -ERESTARTSYS if interrupted, 0 if completed.
92617 */
92618-int __sched wait_for_completion_killable(struct completion *x)
92619+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
92620 {
92621 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
92622 if (t == -ERESTARTSYS)
92623@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
92624 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92625 * or number of jiffies left till timeout) if completed.
92626 */
92627-long __sched
92628+long __sched __intentional_overflow(-1)
92629 wait_for_completion_killable_timeout(struct completion *x,
92630 unsigned long timeout)
92631 {
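
[Annotation] __intentional_overflow(-1) is an annotation for the PaX size_overflow GCC plugin: it whitelists return values that legitimately carry negative error codes (here -ERESTARTSYS from the killable/interruptible waiters) so the plugin's arithmetic checks do not fire on them. A sketch of the usual wiring for such a macro; the SIZE_OVERFLOW_PLUGIN guard and attribute spelling are assumptions, not the patch's exact definition:

#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) \
        __attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)    /* no-op without the plugin */
#endif

/* Usage mirroring the hunk above: the -1 marks the return value as
 * allowed to carry negative error codes past the overflow checker. */
static long __intentional_overflow(-1) wait_demo(long timeout)
{
        return timeout ? timeout : -512;        /* -ERESTARTSYS-style result */
}

int main(void) { return wait_demo(1) == 1 ? 0 : 1; }
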
92632diff --git a/kernel/sched/core.c b/kernel/sched/core.c
92633index 5eab11d..537f3b6 100644
92634--- a/kernel/sched/core.c
92635+++ b/kernel/sched/core.c
92636@@ -1897,7 +1897,7 @@ void set_numabalancing_state(bool enabled)
92637 int sysctl_numa_balancing(struct ctl_table *table, int write,
92638 void __user *buffer, size_t *lenp, loff_t *ppos)
92639 {
92640- struct ctl_table t;
92641+ ctl_table_no_const t;
92642 int err;
92643 int state = numabalancing_enabled;
92644
92645@@ -2347,8 +2347,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
92646 next->active_mm = oldmm;
92647 atomic_inc(&oldmm->mm_count);
92648 enter_lazy_tlb(oldmm, next);
92649- } else
92650+ } else {
92651 switch_mm(oldmm, mm, next);
92652+ populate_stack();
92653+ }
92654
92655 if (!prev->mm) {
92656 prev->active_mm = NULL;
92657@@ -3147,6 +3149,8 @@ int can_nice(const struct task_struct *p, const int nice)
92658 /* convert nice value [19,-20] to rlimit style value [1,40] */
92659 int nice_rlim = nice_to_rlimit(nice);
92660
92661+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
92662+
92663 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
92664 capable(CAP_SYS_NICE));
92665 }
92666@@ -3173,7 +3177,8 @@ SYSCALL_DEFINE1(nice, int, increment)
92667 nice = task_nice(current) + increment;
92668
92669 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
92670- if (increment < 0 && !can_nice(current, nice))
92671+ if (increment < 0 && (!can_nice(current, nice) ||
92672+ gr_handle_chroot_nice()))
92673 return -EPERM;
92674
92675 retval = security_task_setnice(current, nice);
92676@@ -3468,6 +3473,7 @@ recheck:
92677 if (policy != p->policy && !rlim_rtprio)
92678 return -EPERM;
92679
92680+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
92681 /* can't increase priority */
92682 if (attr->sched_priority > p->rt_priority &&
92683 attr->sched_priority > rlim_rtprio)
92684@@ -4968,6 +4974,7 @@ void idle_task_exit(void)
92685
92686 if (mm != &init_mm) {
92687 switch_mm(mm, &init_mm, current);
92688+ populate_stack();
92689 finish_arch_post_lock_switch();
92690 }
92691 mmdrop(mm);
92692@@ -5063,7 +5070,7 @@ static void migrate_tasks(unsigned int dead_cpu)
92693
92694 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
92695
92696-static struct ctl_table sd_ctl_dir[] = {
92697+static ctl_table_no_const sd_ctl_dir[] __read_only = {
92698 {
92699 .procname = "sched_domain",
92700 .mode = 0555,
92701@@ -5080,17 +5087,17 @@ static struct ctl_table sd_ctl_root[] = {
92702 {}
92703 };
92704
92705-static struct ctl_table *sd_alloc_ctl_entry(int n)
92706+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
92707 {
92708- struct ctl_table *entry =
92709+ ctl_table_no_const *entry =
92710 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
92711
92712 return entry;
92713 }
92714
92715-static void sd_free_ctl_entry(struct ctl_table **tablep)
92716+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
92717 {
92718- struct ctl_table *entry;
92719+ ctl_table_no_const *entry;
92720
92721 /*
92722 * In the intermediate directories, both the child directory and
92723@@ -5098,22 +5105,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
92724 * will always be set. In the lowest directory the names are
92725 * static strings and all have proc handlers.
92726 */
92727- for (entry = *tablep; entry->mode; entry++) {
92728- if (entry->child)
92729- sd_free_ctl_entry(&entry->child);
92730+ for (entry = tablep; entry->mode; entry++) {
92731+ if (entry->child) {
92732+ sd_free_ctl_entry(entry->child);
92733+ pax_open_kernel();
92734+ entry->child = NULL;
92735+ pax_close_kernel();
92736+ }
92737 if (entry->proc_handler == NULL)
92738 kfree(entry->procname);
92739 }
92740
92741- kfree(*tablep);
92742- *tablep = NULL;
92743+ kfree(tablep);
92744 }
92745
92746 static int min_load_idx = 0;
92747 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
92748
92749 static void
92750-set_table_entry(struct ctl_table *entry,
92751+set_table_entry(ctl_table_no_const *entry,
92752 const char *procname, void *data, int maxlen,
92753 umode_t mode, proc_handler *proc_handler,
92754 bool load_idx)
92755@@ -5133,7 +5143,7 @@ set_table_entry(struct ctl_table *entry,
92756 static struct ctl_table *
92757 sd_alloc_ctl_domain_table(struct sched_domain *sd)
92758 {
92759- struct ctl_table *table = sd_alloc_ctl_entry(14);
92760+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
92761
92762 if (table == NULL)
92763 return NULL;
92764@@ -5171,9 +5181,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
92765 return table;
92766 }
92767
92768-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
92769+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
92770 {
92771- struct ctl_table *entry, *table;
92772+ ctl_table_no_const *entry, *table;
92773 struct sched_domain *sd;
92774 int domain_num = 0, i;
92775 char buf[32];
92776@@ -5200,11 +5210,13 @@ static struct ctl_table_header *sd_sysctl_header;
92777 static void register_sched_domain_sysctl(void)
92778 {
92779 int i, cpu_num = num_possible_cpus();
92780- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
92781+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
92782 char buf[32];
92783
92784 WARN_ON(sd_ctl_dir[0].child);
92785+ pax_open_kernel();
92786 sd_ctl_dir[0].child = entry;
92787+ pax_close_kernel();
92788
92789 if (entry == NULL)
92790 return;
92791@@ -5227,8 +5239,12 @@ static void unregister_sched_domain_sysctl(void)
92792 if (sd_sysctl_header)
92793 unregister_sysctl_table(sd_sysctl_header);
92794 sd_sysctl_header = NULL;
92795- if (sd_ctl_dir[0].child)
92796- sd_free_ctl_entry(&sd_ctl_dir[0].child);
92797+ if (sd_ctl_dir[0].child) {
92798+ sd_free_ctl_entry(sd_ctl_dir[0].child);
92799+ pax_open_kernel();
92800+ sd_ctl_dir[0].child = NULL;
92801+ pax_close_kernel();
92802+ }
92803 }
92804 #else
92805 static void register_sched_domain_sysctl(void)
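
[Annotation] The sysctl-table churn above has one theme: the constify plugin makes struct ctl_table read-only, ctl_table_no_const is the escape hatch for tables genuinely built at runtime, and the few writes into data that stays read-only (sd_ctl_dir[0].child) are bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (on x86 by toggling CR0.WP). A userspace analogue of that bracket, with mprotect() standing in for the CR0 toggle:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        long *child = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (child == MAP_FAILED)
                return 1;
        *child = 0;
        mprotect(child, 4096, PROT_READ);               /* "constified" */

        mprotect(child, 4096, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
        *child = 42;                                    /* sanctioned write */
        mprotect(child, 4096, PROT_READ);               /* pax_close_kernel() */

        printf("%ld\n", *child);
        return 0;
}
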
92806diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
92807index fe331fc..29d620e 100644
92808--- a/kernel/sched/fair.c
92809+++ b/kernel/sched/fair.c
92810@@ -2089,7 +2089,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
92811
92812 static void reset_ptenuma_scan(struct task_struct *p)
92813 {
92814- ACCESS_ONCE(p->mm->numa_scan_seq)++;
92815+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
92816 p->mm->numa_scan_offset = 0;
92817 }
92818
92819@@ -7651,7 +7651,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
92820 * run_rebalance_domains is triggered when needed from the scheduler tick.
92821 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
92822 */
92823-static void run_rebalance_domains(struct softirq_action *h)
92824+static __latent_entropy void run_rebalance_domains(void)
92825 {
92826 struct rq *this_rq = this_rq();
92827 enum cpu_idle_type idle = this_rq->idle_balance ?
92828diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
92829index 9a2a45c..bb91ace 100644
92830--- a/kernel/sched/sched.h
92831+++ b/kernel/sched/sched.h
92832@@ -1182,7 +1182,7 @@ struct sched_class {
92833 #ifdef CONFIG_FAIR_GROUP_SCHED
92834 void (*task_move_group) (struct task_struct *p, int on_rq);
92835 #endif
92836-};
92837+} __do_const;
92838
92839 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
92840 {
92841diff --git a/kernel/seccomp.c b/kernel/seccomp.c
92842index 4ef9687..4f44028 100644
92843--- a/kernel/seccomp.c
92844+++ b/kernel/seccomp.c
92845@@ -629,7 +629,9 @@ static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd)
92846
92847 switch (action) {
92848 case SECCOMP_RET_ERRNO:
92849- /* Set the low-order 16-bits as a errno. */
92850+ /* Set low-order bits as an errno, capped at MAX_ERRNO. */
92851+ if (data > MAX_ERRNO)
92852+ data = MAX_ERRNO;
92853 syscall_set_return_value(current, task_pt_regs(current),
92854 -data, 0);
92855 goto skip;
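For context on the clamp above: a SECCOMP_RET_ERRNO action carries its errno in the low 16 bits of the filter's return value, and MAX_ERRNO is 4095 on the kernel side, so an unclamped value would let a filter place a non-errno value in the syscall return slot. A small userspace sketch of a filter exercising this path, assuming reasonably recent kernel headers (the choice of EPERM is illustrative):

	#include <linux/filter.h>
	#include <linux/seccomp.h>
	#include <sys/prctl.h>
	#include <errno.h>

	/* One-instruction filter: fail every syscall with EPERM. The kernel
	 * masks the value with SECCOMP_RET_DATA (0xffff) and, with the hunk
	 * above, additionally caps it at MAX_ERRNO before negating it. */
	static int install_eperm_filter(void)
	{
		struct sock_filter insn = BPF_STMT(BPF_RET | BPF_K,
			SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA));
		struct sock_fprog prog = { .len = 1, .filter = &insn };

		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
			return -1;
		return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	}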
92856diff --git a/kernel/signal.c b/kernel/signal.c
92857index 16a30529..25ad033 100644
92858--- a/kernel/signal.c
92859+++ b/kernel/signal.c
92860@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
92861
92862 int print_fatal_signals __read_mostly;
92863
92864-static void __user *sig_handler(struct task_struct *t, int sig)
92865+static __sighandler_t sig_handler(struct task_struct *t, int sig)
92866 {
92867 return t->sighand->action[sig - 1].sa.sa_handler;
92868 }
92869
92870-static int sig_handler_ignored(void __user *handler, int sig)
92871+static int sig_handler_ignored(__sighandler_t handler, int sig)
92872 {
92873 /* Is it explicitly or implicitly ignored? */
92874 return handler == SIG_IGN ||
92875@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
92876
92877 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
92878 {
92879- void __user *handler;
92880+ __sighandler_t handler;
92881
92882 handler = sig_handler(t, sig);
92883
92884@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
92885 atomic_inc(&user->sigpending);
92886 rcu_read_unlock();
92887
92888+ if (!override_rlimit)
92889+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
92890+
92891 if (override_rlimit ||
92892 atomic_read(&user->sigpending) <=
92893 task_rlimit(t, RLIMIT_SIGPENDING)) {
92894@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
92895
92896 int unhandled_signal(struct task_struct *tsk, int sig)
92897 {
92898- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
92899+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
92900 if (is_global_init(tsk))
92901 return 1;
92902 if (handler != SIG_IGN && handler != SIG_DFL)
92903@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
92904 }
92905 }
92906
92907+ /* allow glibc communication via tgkill to other threads in our
92908+ thread group */
92909+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
92910+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
92911+ && gr_handle_signal(t, sig))
92912+ return -EPERM;
92913+
92914 return security_task_kill(t, info, sig, 0);
92915 }
92916
92917@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92918 return send_signal(sig, info, p, 1);
92919 }
92920
92921-static int
92922+int
92923 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92924 {
92925 return send_signal(sig, info, t, 0);
92926@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92927 unsigned long int flags;
92928 int ret, blocked, ignored;
92929 struct k_sigaction *action;
92930+ int is_unhandled = 0;
92931
92932 spin_lock_irqsave(&t->sighand->siglock, flags);
92933 action = &t->sighand->action[sig-1];
92934@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92935 }
92936 if (action->sa.sa_handler == SIG_DFL)
92937 t->signal->flags &= ~SIGNAL_UNKILLABLE;
92938+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
92939+ is_unhandled = 1;
92940 ret = specific_send_sig_info(sig, info, t);
92941 spin_unlock_irqrestore(&t->sighand->siglock, flags);
92942
92943+	/* only deal with unhandled signals; Java etc. trigger SIGSEGV during
92944+	   normal operation */
92945+ if (is_unhandled) {
92946+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
92947+ gr_handle_crash(t, sig);
92948+ }
92949+
92950 return ret;
92951 }
92952
92953@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92954 ret = check_kill_permission(sig, info, p);
92955 rcu_read_unlock();
92956
92957- if (!ret && sig)
92958+ if (!ret && sig) {
92959 ret = do_send_sig_info(sig, info, p, true);
92960+ if (!ret)
92961+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
92962+ }
92963
92964 return ret;
92965 }
92966@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
92967 int error = -ESRCH;
92968
92969 rcu_read_lock();
92970- p = find_task_by_vpid(pid);
92971+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
92972+ /* allow glibc communication via tgkill to other threads in our
92973+ thread group */
92974+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
92975+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
92976+ p = find_task_by_vpid_unrestricted(pid);
92977+ else
92978+#endif
92979+ p = find_task_by_vpid(pid);
92980 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
92981 error = check_kill_permission(sig, info, p);
92982 /*
92983@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
92984 }
92985 seg = get_fs();
92986 set_fs(KERNEL_DS);
92987- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
92988- (stack_t __force __user *) &uoss,
92989+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
92990+ (stack_t __force_user *) &uoss,
92991 compat_user_stack_pointer());
92992 set_fs(seg);
92993 if (ret >= 0 && uoss_ptr) {
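The SIGRTMIN+1 carve-outs in this file exist because glibc's NPTL implements setuid()/setgid() for multithreaded processes by broadcasting an internal real-time signal to every thread via tgkill(); from the kernel's perspective that setxid broadcast is signal 33, i.e. SIGRTMIN+1. Blocking it with gr_handle_signal() or the chroot findtask restriction would break multithreaded credential changes. A sketch of the exempted condition, mirroring the hunks above (the helper name is hypothetical):

	/* Sketch: recognise glibc's internal setxid broadcast — a tgkill
	 * (SI_TKILL) of SIGRTMIN+1 aimed at the sender's own thread group. */
	static bool is_glibc_setxid_signal(struct siginfo *info, int sig,
					   struct task_struct *target)
	{
		return info != SEND_SIG_NOINFO &&
		       info->si_code == SI_TKILL &&
		       sig == SIGRTMIN + 1 &&
		       task_tgid_vnr(target) == info->si_pid;
	}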
92994diff --git a/kernel/smpboot.c b/kernel/smpboot.c
92995index 40190f2..8861d40 100644
92996--- a/kernel/smpboot.c
92997+++ b/kernel/smpboot.c
92998@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
92999 }
93000 smpboot_unpark_thread(plug_thread, cpu);
93001 }
93002- list_add(&plug_thread->list, &hotplug_threads);
93003+ pax_list_add(&plug_thread->list, &hotplug_threads);
93004 out:
93005 mutex_unlock(&smpboot_threads_lock);
93006 put_online_cpus();
93007@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
93008 {
93009 get_online_cpus();
93010 mutex_lock(&smpboot_threads_lock);
93011- list_del(&plug_thread->list);
93012+ pax_list_del(&plug_thread->list);
93013 smpboot_destroy_threads(plug_thread);
93014 mutex_unlock(&smpboot_threads_lock);
93015 put_online_cpus();
93016diff --git a/kernel/softirq.c b/kernel/softirq.c
93017index c497fcd..e8f90a9 100644
93018--- a/kernel/softirq.c
93019+++ b/kernel/softirq.c
93020@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
93021 EXPORT_SYMBOL(irq_stat);
93022 #endif
93023
93024-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
93025+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
93026
93027 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
93028
93029@@ -266,7 +266,7 @@ restart:
93030 kstat_incr_softirqs_this_cpu(vec_nr);
93031
93032 trace_softirq_entry(vec_nr);
93033- h->action(h);
93034+ h->action();
93035 trace_softirq_exit(vec_nr);
93036 if (unlikely(prev_count != preempt_count())) {
93037 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
93038@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
93039 or_softirq_pending(1UL << nr);
93040 }
93041
93042-void open_softirq(int nr, void (*action)(struct softirq_action *))
93043+void __init open_softirq(int nr, void (*action)(void))
93044 {
93045 softirq_vec[nr].action = action;
93046 }
93047@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
93048 }
93049 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
93050
93051-static void tasklet_action(struct softirq_action *a)
93052+static void tasklet_action(void)
93053 {
93054 struct tasklet_struct *list;
93055
93056@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
93057 }
93058 }
93059
93060-static void tasklet_hi_action(struct softirq_action *a)
93061+static __latent_entropy void tasklet_hi_action(void)
93062 {
93063 struct tasklet_struct *list;
93064
93065@@ -745,7 +745,7 @@ static struct notifier_block cpu_nfb = {
93066 .notifier_call = cpu_callback
93067 };
93068
93069-static struct smp_hotplug_thread softirq_threads = {
93070+static struct smp_hotplug_thread softirq_threads __read_only = {
93071 .store = &ksoftirqd,
93072 .thread_should_run = ksoftirqd_should_run,
93073 .thread_fn = run_ksoftirqd,
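Two related changes run through this file: softirq_vec becomes __read_only (so open_softirq() is only legal during boot, hence its new __init marking — PaX seals __read_only data once init finishes), and softirq handlers drop their struct softirq_action * parameter, which no handler actually used. A sketch of a handler written to the new convention (handler and slot are illustrative only; the NR_SOFTIRQS slots are a fixed set):

	/* New-style handler: no unused vector argument; __latent_entropy is
	 * a PaX gcc-plugin attribute that mixes entropy on each run. */
	static __latent_entropy void example_softirq_action(void)
	{
		/* drain per-CPU work here */
	}

	static int __init example_softirq_init(void)
	{
		/* must run at init time, before softirq_vec is sealed */
		open_softirq(TASKLET_SOFTIRQ /* illustrative slot */,
			     example_softirq_action);
		return 0;
	}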
93074diff --git a/kernel/sys.c b/kernel/sys.c
93075index ea9c881..2194af5 100644
93076--- a/kernel/sys.c
93077+++ b/kernel/sys.c
93078@@ -154,6 +154,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
93079 error = -EACCES;
93080 goto out;
93081 }
93082+
93083+ if (gr_handle_chroot_setpriority(p, niceval)) {
93084+ error = -EACCES;
93085+ goto out;
93086+ }
93087+
93088 no_nice = security_task_setnice(p, niceval);
93089 if (no_nice) {
93090 error = no_nice;
93091@@ -359,6 +365,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
93092 goto error;
93093 }
93094
93095+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
93096+ goto error;
93097+
93098+ if (!gid_eq(new->gid, old->gid)) {
93099+	/* make sure we generate a learn log for what will
93100+	   end up being a role transition after a full-learning
93101+	   policy is generated.
93102+	   CAP_SETGID is required to perform a transition, but
93103+	   we may not log a CAP_SETGID check above, e.g.
93104+	   in the case where new rgid = old egid.
93105+	*/
93106+ gr_learn_cap(current, new, CAP_SETGID);
93107+ }
93108+
93109 if (rgid != (gid_t) -1 ||
93110 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
93111 new->sgid = new->egid;
93112@@ -394,6 +414,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
93113 old = current_cred();
93114
93115 retval = -EPERM;
93116+
93117+ if (gr_check_group_change(kgid, kgid, kgid))
93118+ goto error;
93119+
93120 if (ns_capable(old->user_ns, CAP_SETGID))
93121 new->gid = new->egid = new->sgid = new->fsgid = kgid;
93122 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
93123@@ -411,7 +435,7 @@ error:
93124 /*
93125 * change the user struct in a credentials set to match the new UID
93126 */
93127-static int set_user(struct cred *new)
93128+int set_user(struct cred *new)
93129 {
93130 struct user_struct *new_user;
93131
93132@@ -491,7 +515,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
93133 goto error;
93134 }
93135
93136+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
93137+ goto error;
93138+
93139 if (!uid_eq(new->uid, old->uid)) {
93140+	/* make sure we generate a learn log for what will
93141+	   end up being a role transition after a full-learning
93142+	   policy is generated.
93143+	   CAP_SETUID is required to perform a transition, but
93144+	   we may not log a CAP_SETUID check above, e.g.
93145+	   in the case where new ruid = old euid.
93146+	*/
93147+ gr_learn_cap(current, new, CAP_SETUID);
93148 retval = set_user(new);
93149 if (retval < 0)
93150 goto error;
93151@@ -541,6 +576,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
93152 old = current_cred();
93153
93154 retval = -EPERM;
93155+
93156+ if (gr_check_crash_uid(kuid))
93157+ goto error;
93158+ if (gr_check_user_change(kuid, kuid, kuid))
93159+ goto error;
93160+
93161 if (ns_capable(old->user_ns, CAP_SETUID)) {
93162 new->suid = new->uid = kuid;
93163 if (!uid_eq(kuid, old->uid)) {
93164@@ -610,6 +651,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
93165 goto error;
93166 }
93167
93168+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
93169+ goto error;
93170+
93171 if (ruid != (uid_t) -1) {
93172 new->uid = kruid;
93173 if (!uid_eq(kruid, old->uid)) {
93174@@ -694,6 +738,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
93175 goto error;
93176 }
93177
93178+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
93179+ goto error;
93180+
93181 if (rgid != (gid_t) -1)
93182 new->gid = krgid;
93183 if (egid != (gid_t) -1)
93184@@ -758,12 +805,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
93185 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
93186 ns_capable(old->user_ns, CAP_SETUID)) {
93187 if (!uid_eq(kuid, old->fsuid)) {
93188+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
93189+ goto error;
93190+
93191 new->fsuid = kuid;
93192 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
93193 goto change_okay;
93194 }
93195 }
93196
93197+error:
93198 abort_creds(new);
93199 return old_fsuid;
93200
93201@@ -796,12 +847,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
93202 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
93203 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
93204 ns_capable(old->user_ns, CAP_SETGID)) {
93205+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
93206+ goto error;
93207+
93208 if (!gid_eq(kgid, old->fsgid)) {
93209 new->fsgid = kgid;
93210 goto change_okay;
93211 }
93212 }
93213
93214+error:
93215 abort_creds(new);
93216 return old_fsgid;
93217
93218@@ -1178,19 +1233,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
93219 return -EFAULT;
93220
93221 down_read(&uts_sem);
93222- error = __copy_to_user(&name->sysname, &utsname()->sysname,
93223+ error = __copy_to_user(name->sysname, &utsname()->sysname,
93224 __OLD_UTS_LEN);
93225 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
93226- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
93227+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
93228 __OLD_UTS_LEN);
93229 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
93230- error |= __copy_to_user(&name->release, &utsname()->release,
93231+ error |= __copy_to_user(name->release, &utsname()->release,
93232 __OLD_UTS_LEN);
93233 error |= __put_user(0, name->release + __OLD_UTS_LEN);
93234- error |= __copy_to_user(&name->version, &utsname()->version,
93235+ error |= __copy_to_user(name->version, &utsname()->version,
93236 __OLD_UTS_LEN);
93237 error |= __put_user(0, name->version + __OLD_UTS_LEN);
93238- error |= __copy_to_user(&name->machine, &utsname()->machine,
93239+ error |= __copy_to_user(name->machine, &utsname()->machine,
93240 __OLD_UTS_LEN);
93241 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
93242 up_read(&uts_sem);
93243@@ -1391,6 +1446,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
93244 */
93245 new_rlim->rlim_cur = 1;
93246 }
93247+	/* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
93248+	   is changed to a lower value. Since tasks can be created by the same
93249+	   user in between this limit change and an execve by this task, force
93250+	   a recheck only for this task by setting PF_NPROC_EXCEEDED.
93251+	*/
93252+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
93253+ tsk->flags |= PF_NPROC_EXCEEDED;
93254 }
93255 if (!retval) {
93256 if (old_rlim)
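The PF_NPROC_EXCEEDED flag set above is consumed at execve() time: mainline (since commit 259e5e6c75a9, unchanged by this patch) refuses the exec if the user is still over the lowered limit. The fs/exec.c check looks roughly like this (a paraphrase from mainline, not part of this patch):

	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
		retval = -EAGAIN;
		goto out_ret;
	}
	/* below the limit (still or again), so later execve() calls
	 * should not be made to fail */
	current->flags &= ~PF_NPROC_EXCEEDED;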
93257diff --git a/kernel/sysctl.c b/kernel/sysctl.c
93258index 137c7f6..eab3b1a 100644
93259--- a/kernel/sysctl.c
93260+++ b/kernel/sysctl.c
93261@@ -94,7 +94,6 @@
93262
93263
93264 #if defined(CONFIG_SYSCTL)
93265-
93266 /* External variables not in a header file. */
93267 extern int max_threads;
93268 extern int suid_dumpable;
93269@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
93270
93271 /* Constants used for minimum and maximum */
93272 #ifdef CONFIG_LOCKUP_DETECTOR
93273-static int sixty = 60;
93274+static int sixty __read_only = 60;
93275 #endif
93276
93277-static int __maybe_unused neg_one = -1;
93278+static int __maybe_unused neg_one __read_only = -1;
93279
93280-static int zero;
93281-static int __maybe_unused one = 1;
93282-static int __maybe_unused two = 2;
93283-static int __maybe_unused four = 4;
93284-static unsigned long one_ul = 1;
93285-static int one_hundred = 100;
93286+static int zero __read_only = 0;
93287+static int __maybe_unused one __read_only = 1;
93288+static int __maybe_unused two __read_only = 2;
93289+static int __maybe_unused three __read_only = 3;
93290+static int __maybe_unused four __read_only = 4;
93291+static unsigned long one_ul __read_only = 1;
93292+static int one_hundred __read_only = 100;
93293 #ifdef CONFIG_PRINTK
93294-static int ten_thousand = 10000;
93295+static int ten_thousand __read_only = 10000;
93296 #endif
93297
93298 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
93299@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
93300 void __user *buffer, size_t *lenp, loff_t *ppos);
93301 #endif
93302
93303-#ifdef CONFIG_PRINTK
93304 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93305 void __user *buffer, size_t *lenp, loff_t *ppos);
93306-#endif
93307
93308 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
93309 void __user *buffer, size_t *lenp, loff_t *ppos);
93310@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
93311
93312 #endif
93313
93314+extern struct ctl_table grsecurity_table[];
93315+
93316 static struct ctl_table kern_table[];
93317 static struct ctl_table vm_table[];
93318 static struct ctl_table fs_table[];
93319@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
93320 int sysctl_legacy_va_layout;
93321 #endif
93322
93323+#ifdef CONFIG_PAX_SOFTMODE
93324+static struct ctl_table pax_table[] = {
93325+ {
93326+ .procname = "softmode",
93327+ .data = &pax_softmode,
93328+ .maxlen = sizeof(unsigned int),
93329+ .mode = 0600,
93330+ .proc_handler = &proc_dointvec,
93331+ },
93332+
93333+ { }
93334+};
93335+#endif
93336+
93337 /* The default sysctl tables: */
93338
93339 static struct ctl_table sysctl_base_table[] = {
93340@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
93341 #endif
93342
93343 static struct ctl_table kern_table[] = {
93344+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
93345+ {
93346+ .procname = "grsecurity",
93347+ .mode = 0500,
93348+ .child = grsecurity_table,
93349+ },
93350+#endif
93351+
93352+#ifdef CONFIG_PAX_SOFTMODE
93353+ {
93354+ .procname = "pax",
93355+ .mode = 0500,
93356+ .child = pax_table,
93357+ },
93358+#endif
93359+
93360 {
93361 .procname = "sched_child_runs_first",
93362 .data = &sysctl_sched_child_runs_first,
93363@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
93364 .data = &modprobe_path,
93365 .maxlen = KMOD_PATH_LEN,
93366 .mode = 0644,
93367- .proc_handler = proc_dostring,
93368+ .proc_handler = proc_dostring_modpriv,
93369 },
93370 {
93371 .procname = "modules_disabled",
93372@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
93373 .extra1 = &zero,
93374 .extra2 = &one,
93375 },
93376+#endif
93377 {
93378 .procname = "kptr_restrict",
93379 .data = &kptr_restrict,
93380 .maxlen = sizeof(int),
93381 .mode = 0644,
93382 .proc_handler = proc_dointvec_minmax_sysadmin,
93383+#ifdef CONFIG_GRKERNSEC_HIDESYM
93384+ .extra1 = &two,
93385+#else
93386 .extra1 = &zero,
93387+#endif
93388 .extra2 = &two,
93389 },
93390-#endif
93391 {
93392 .procname = "ngroups_max",
93393 .data = &ngroups_max,
93394@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
93395 */
93396 {
93397 .procname = "perf_event_paranoid",
93398- .data = &sysctl_perf_event_paranoid,
93399- .maxlen = sizeof(sysctl_perf_event_paranoid),
93400+ .data = &sysctl_perf_event_legitimately_concerned,
93401+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
93402 .mode = 0644,
93403- .proc_handler = proc_dointvec,
93404+ /* go ahead, be a hero */
93405+ .proc_handler = proc_dointvec_minmax_sysadmin,
93406+ .extra1 = &neg_one,
93407+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
93408+ .extra2 = &three,
93409+#else
93410+ .extra2 = &two,
93411+#endif
93412 },
93413 {
93414 .procname = "perf_event_mlock_kb",
93415@@ -1343,6 +1384,13 @@ static struct ctl_table vm_table[] = {
93416 .proc_handler = proc_dointvec_minmax,
93417 .extra1 = &zero,
93418 },
93419+ {
93420+ .procname = "heap_stack_gap",
93421+ .data = &sysctl_heap_stack_gap,
93422+ .maxlen = sizeof(sysctl_heap_stack_gap),
93423+ .mode = 0644,
93424+ .proc_handler = proc_doulongvec_minmax,
93425+ },
93426 #else
93427 {
93428 .procname = "nr_trim_pages",
93429@@ -1825,6 +1873,16 @@ int proc_dostring(struct ctl_table *table, int write,
93430 (char __user *)buffer, lenp, ppos);
93431 }
93432
93433+int proc_dostring_modpriv(struct ctl_table *table, int write,
93434+ void __user *buffer, size_t *lenp, loff_t *ppos)
93435+{
93436+ if (write && !capable(CAP_SYS_MODULE))
93437+ return -EPERM;
93438+
93439+ return _proc_do_string(table->data, table->maxlen, write,
93440+ buffer, lenp, ppos);
93441+}
93442+
93443 static size_t proc_skip_spaces(char **buf)
93444 {
93445 size_t ret;
93446@@ -1930,6 +1988,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
93447 len = strlen(tmp);
93448 if (len > *size)
93449 len = *size;
93450+ if (len > sizeof(tmp))
93451+ len = sizeof(tmp);
93452 if (copy_to_user(*buf, tmp, len))
93453 return -EFAULT;
93454 *size -= len;
93455@@ -2107,7 +2167,7 @@ int proc_dointvec(struct ctl_table *table, int write,
93456 static int proc_taint(struct ctl_table *table, int write,
93457 void __user *buffer, size_t *lenp, loff_t *ppos)
93458 {
93459- struct ctl_table t;
93460+ ctl_table_no_const t;
93461 unsigned long tmptaint = get_taint();
93462 int err;
93463
93464@@ -2135,7 +2195,6 @@ static int proc_taint(struct ctl_table *table, int write,
93465 return err;
93466 }
93467
93468-#ifdef CONFIG_PRINTK
93469 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93470 void __user *buffer, size_t *lenp, loff_t *ppos)
93471 {
93472@@ -2144,7 +2203,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93473
93474 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
93475 }
93476-#endif
93477
93478 struct do_proc_dointvec_minmax_conv_param {
93479 int *min;
93480@@ -2704,6 +2762,12 @@ int proc_dostring(struct ctl_table *table, int write,
93481 return -ENOSYS;
93482 }
93483
93484+int proc_dostring_modpriv(struct ctl_table *table, int write,
93485+ void __user *buffer, size_t *lenp, loff_t *ppos)
93486+{
93487+ return -ENOSYS;
93488+}
93489+
93490 int proc_dointvec(struct ctl_table *table, int write,
93491 void __user *buffer, size_t *lenp, loff_t *ppos)
93492 {
93493@@ -2760,5 +2824,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
93494 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
93495 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
93496 EXPORT_SYMBOL(proc_dostring);
93497+EXPORT_SYMBOL(proc_dostring_modpriv);
93498 EXPORT_SYMBOL(proc_doulongvec_minmax);
93499 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
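The unifying idea in this sysctl.c hunk is that policy limits themselves become immutable: every min/max bound gains __read_only, privileged knobs route through proc_dointvec_minmax_sysadmin or the new CAP_SYS_MODULE-gated proc_dostring_modpriv, and the grsecurity/pax trees are attached with 0500 directory modes. A sketch of declaring one knob under these conventions, as it might look inside kernel/sysctl.c (the knob name is hypothetical):

	static int example_knob;			/* tunable stays writable */
	static int example_min __read_only = 0;		/* bounds are sealed */
	static int example_max __read_only = 3;

	static struct ctl_table example_table[] = {
		{
			.procname	= "example_knob",
			.data		= &example_knob,
			.maxlen		= sizeof(int),
			.mode		= 0600,
			.proc_handler	= proc_dointvec_minmax_sysadmin,
			.extra1		= &example_min,
			.extra2		= &example_max,
		},
		{ }
	};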
93500diff --git a/kernel/taskstats.c b/kernel/taskstats.c
93501index 670fff8..a247812 100644
93502--- a/kernel/taskstats.c
93503+++ b/kernel/taskstats.c
93504@@ -28,9 +28,12 @@
93505 #include <linux/fs.h>
93506 #include <linux/file.h>
93507 #include <linux/pid_namespace.h>
93508+#include <linux/grsecurity.h>
93509 #include <net/genetlink.h>
93510 #include <linux/atomic.h>
93511
93512+extern int gr_is_taskstats_denied(int pid);
93513+
93514 /*
93515 * Maximum length of a cpumask that can be specified in
93516 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
93517@@ -576,6 +579,9 @@ err:
93518
93519 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
93520 {
93521+ if (gr_is_taskstats_denied(current->pid))
93522+ return -EACCES;
93523+
93524 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
93525 return cmd_attr_register_cpumask(info);
93526 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
93527diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
93528index a7077d3..dd48a49 100644
93529--- a/kernel/time/alarmtimer.c
93530+++ b/kernel/time/alarmtimer.c
93531@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
93532 struct platform_device *pdev;
93533 int error = 0;
93534 int i;
93535- struct k_clock alarm_clock = {
93536+ static struct k_clock alarm_clock = {
93537 .clock_getres = alarm_clock_getres,
93538 .clock_get = alarm_clock_get,
93539 .timer_create = alarm_timer_create,
93540diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
93541index d8c724c..6b331a4 100644
93542--- a/kernel/time/hrtimer.c
93543+++ b/kernel/time/hrtimer.c
93544@@ -1399,7 +1399,7 @@ void hrtimer_peek_ahead_timers(void)
93545 local_irq_restore(flags);
93546 }
93547
93548-static void run_hrtimer_softirq(struct softirq_action *h)
93549+static __latent_entropy void run_hrtimer_softirq(void)
93550 {
93551 hrtimer_peek_ahead_timers();
93552 }
93553diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
93554index a16b678..8c5bd9d 100644
93555--- a/kernel/time/posix-cpu-timers.c
93556+++ b/kernel/time/posix-cpu-timers.c
93557@@ -1450,14 +1450,14 @@ struct k_clock clock_posix_cpu = {
93558
93559 static __init int init_posix_cpu_timers(void)
93560 {
93561- struct k_clock process = {
93562+ static struct k_clock process = {
93563 .clock_getres = process_cpu_clock_getres,
93564 .clock_get = process_cpu_clock_get,
93565 .timer_create = process_cpu_timer_create,
93566 .nsleep = process_cpu_nsleep,
93567 .nsleep_restart = process_cpu_nsleep_restart,
93568 };
93569- struct k_clock thread = {
93570+ static struct k_clock thread = {
93571 .clock_getres = thread_cpu_clock_getres,
93572 .clock_get = thread_cpu_clock_get,
93573 .timer_create = thread_cpu_timer_create,
93574diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
93575index 31ea01f..7fc61ef 100644
93576--- a/kernel/time/posix-timers.c
93577+++ b/kernel/time/posix-timers.c
93578@@ -43,6 +43,7 @@
93579 #include <linux/hash.h>
93580 #include <linux/posix-clock.h>
93581 #include <linux/posix-timers.h>
93582+#include <linux/grsecurity.h>
93583 #include <linux/syscalls.h>
93584 #include <linux/wait.h>
93585 #include <linux/workqueue.h>
93586@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
93587 * which we beg off on and pass to do_sys_settimeofday().
93588 */
93589
93590-static struct k_clock posix_clocks[MAX_CLOCKS];
93591+static struct k_clock *posix_clocks[MAX_CLOCKS];
93592
93593 /*
93594 * These ones are defined below.
93595@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
93596 */
93597 static __init int init_posix_timers(void)
93598 {
93599- struct k_clock clock_realtime = {
93600+ static struct k_clock clock_realtime = {
93601 .clock_getres = hrtimer_get_res,
93602 .clock_get = posix_clock_realtime_get,
93603 .clock_set = posix_clock_realtime_set,
93604@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
93605 .timer_get = common_timer_get,
93606 .timer_del = common_timer_del,
93607 };
93608- struct k_clock clock_monotonic = {
93609+ static struct k_clock clock_monotonic = {
93610 .clock_getres = hrtimer_get_res,
93611 .clock_get = posix_ktime_get_ts,
93612 .nsleep = common_nsleep,
93613@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
93614 .timer_get = common_timer_get,
93615 .timer_del = common_timer_del,
93616 };
93617- struct k_clock clock_monotonic_raw = {
93618+ static struct k_clock clock_monotonic_raw = {
93619 .clock_getres = hrtimer_get_res,
93620 .clock_get = posix_get_monotonic_raw,
93621 };
93622- struct k_clock clock_realtime_coarse = {
93623+ static struct k_clock clock_realtime_coarse = {
93624 .clock_getres = posix_get_coarse_res,
93625 .clock_get = posix_get_realtime_coarse,
93626 };
93627- struct k_clock clock_monotonic_coarse = {
93628+ static struct k_clock clock_monotonic_coarse = {
93629 .clock_getres = posix_get_coarse_res,
93630 .clock_get = posix_get_monotonic_coarse,
93631 };
93632- struct k_clock clock_tai = {
93633+ static struct k_clock clock_tai = {
93634 .clock_getres = hrtimer_get_res,
93635 .clock_get = posix_get_tai,
93636 .nsleep = common_nsleep,
93637@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
93638 .timer_get = common_timer_get,
93639 .timer_del = common_timer_del,
93640 };
93641- struct k_clock clock_boottime = {
93642+ static struct k_clock clock_boottime = {
93643 .clock_getres = hrtimer_get_res,
93644 .clock_get = posix_get_boottime,
93645 .nsleep = common_nsleep,
93646@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
93647 return;
93648 }
93649
93650- posix_clocks[clock_id] = *new_clock;
93651+ posix_clocks[clock_id] = new_clock;
93652 }
93653 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
93654
93655@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
93656 return (id & CLOCKFD_MASK) == CLOCKFD ?
93657 &clock_posix_dynamic : &clock_posix_cpu;
93658
93659- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
93660+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
93661 return NULL;
93662- return &posix_clocks[id];
93663+ return posix_clocks[id];
93664 }
93665
93666 static int common_timer_create(struct k_itimer *new_timer)
93667@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
93668 struct k_clock *kc = clockid_to_kclock(which_clock);
93669 struct k_itimer *new_timer;
93670 int error, new_timer_id;
93671- sigevent_t event;
93672+ sigevent_t event = { };
93673 int it_id_set = IT_ID_NOT_SET;
93674
93675 if (!kc)
93676@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93677 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93678 return -EFAULT;
93679
93680+	/* only the CLOCK_REALTIME clock can be set; all other clocks
93681+	   have their clock_set fptr set to a nosettime dummy function.
93682+	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
93683+	   call common_clock_set, which calls do_sys_settimeofday, which
93684+	   we hook.
93685+	*/
93686+
93687 return kc->clock_set(which_clock, &new_tp);
93688 }
93689
93690diff --git a/kernel/time/time.c b/kernel/time/time.c
93691index 2c85b77..6530536 100644
93692--- a/kernel/time/time.c
93693+++ b/kernel/time/time.c
93694@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
93695 return error;
93696
93697 if (tz) {
93698+	/* we log in do_settimeofday(), called below, so don't log twice
93699+	*/
93700+ if (!tv)
93701+ gr_log_timechange();
93702+
93703 sys_tz = *tz;
93704 update_vsyscall_tz();
93705 if (firsttime) {
93706diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
93707index 6a93185..288c331 100644
93708--- a/kernel/time/timekeeping.c
93709+++ b/kernel/time/timekeeping.c
93710@@ -15,6 +15,7 @@
93711 #include <linux/init.h>
93712 #include <linux/mm.h>
93713 #include <linux/sched.h>
93714+#include <linux/grsecurity.h>
93715 #include <linux/syscore_ops.h>
93716 #include <linux/clocksource.h>
93717 #include <linux/jiffies.h>
93718@@ -775,6 +776,8 @@ int do_settimeofday64(const struct timespec64 *ts)
93719 if (!timespec64_valid_strict(ts))
93720 return -EINVAL;
93721
93722+ gr_log_timechange();
93723+
93724 raw_spin_lock_irqsave(&timekeeper_lock, flags);
93725 write_seqcount_begin(&tk_core.seq);
93726
93727diff --git a/kernel/time/timer.c b/kernel/time/timer.c
93728index 2d3f5c5..7ed7dc5 100644
93729--- a/kernel/time/timer.c
93730+++ b/kernel/time/timer.c
93731@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
93732 /*
93733 * This function runs timers and the timer-tq in bottom half context.
93734 */
93735-static void run_timer_softirq(struct softirq_action *h)
93736+static __latent_entropy void run_timer_softirq(void)
93737 {
93738 struct tvec_base *base = __this_cpu_read(tvec_bases);
93739
93740@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
93741 *
93742 * In all cases the return value is guaranteed to be non-negative.
93743 */
93744-signed long __sched schedule_timeout(signed long timeout)
93745+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
93746 {
93747 struct timer_list timer;
93748 unsigned long expire;
93749diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
93750index 61ed862..3b52c65 100644
93751--- a/kernel/time/timer_list.c
93752+++ b/kernel/time/timer_list.c
93753@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
93754
93755 static void print_name_offset(struct seq_file *m, void *sym)
93756 {
93757+#ifdef CONFIG_GRKERNSEC_HIDESYM
93758+ SEQ_printf(m, "<%p>", NULL);
93759+#else
93760 char symname[KSYM_NAME_LEN];
93761
93762 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
93763 SEQ_printf(m, "<%pK>", sym);
93764 else
93765 SEQ_printf(m, "%s", symname);
93766+#endif
93767 }
93768
93769 static void
93770@@ -119,7 +123,11 @@ next_one:
93771 static void
93772 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
93773 {
93774+#ifdef CONFIG_GRKERNSEC_HIDESYM
93775+ SEQ_printf(m, " .base: %p\n", NULL);
93776+#else
93777 SEQ_printf(m, " .base: %pK\n", base);
93778+#endif
93779 SEQ_printf(m, " .index: %d\n",
93780 base->index);
93781 SEQ_printf(m, " .resolution: %Lu nsecs\n",
93782@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
93783 {
93784 struct proc_dir_entry *pe;
93785
93786+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93787+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
93788+#else
93789 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
93790+#endif
93791 if (!pe)
93792 return -ENOMEM;
93793 return 0;
93794diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
93795index 1fb08f2..ca4bb1e 100644
93796--- a/kernel/time/timer_stats.c
93797+++ b/kernel/time/timer_stats.c
93798@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
93799 static unsigned long nr_entries;
93800 static struct entry entries[MAX_ENTRIES];
93801
93802-static atomic_t overflow_count;
93803+static atomic_unchecked_t overflow_count;
93804
93805 /*
93806 * The entries are in a hash-table, for fast lookup:
93807@@ -140,7 +140,7 @@ static void reset_entries(void)
93808 nr_entries = 0;
93809 memset(entries, 0, sizeof(entries));
93810 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
93811- atomic_set(&overflow_count, 0);
93812+ atomic_set_unchecked(&overflow_count, 0);
93813 }
93814
93815 static struct entry *alloc_entry(void)
93816@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93817 if (likely(entry))
93818 entry->count++;
93819 else
93820- atomic_inc(&overflow_count);
93821+ atomic_inc_unchecked(&overflow_count);
93822
93823 out_unlock:
93824 raw_spin_unlock_irqrestore(lock, flags);
93825@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93826
93827 static void print_name_offset(struct seq_file *m, unsigned long addr)
93828 {
93829+#ifdef CONFIG_GRKERNSEC_HIDESYM
93830+ seq_printf(m, "<%p>", NULL);
93831+#else
93832 char symname[KSYM_NAME_LEN];
93833
93834 if (lookup_symbol_name(addr, symname) < 0)
93835- seq_printf(m, "<%p>", (void *)addr);
93836+ seq_printf(m, "<%pK>", (void *)addr);
93837 else
93838 seq_printf(m, "%s", symname);
93839+#endif
93840 }
93841
93842 static int tstats_show(struct seq_file *m, void *v)
93843@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
93844
93845 seq_puts(m, "Timer Stats Version: v0.3\n");
93846 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
93847- if (atomic_read(&overflow_count))
93848- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
93849+ if (atomic_read_unchecked(&overflow_count))
93850+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
93851 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
93852
93853 for (i = 0; i < nr_entries; i++) {
93854@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
93855 {
93856 struct proc_dir_entry *pe;
93857
93858+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93859+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
93860+#else
93861 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
93862+#endif
93863 if (!pe)
93864 return -ENOMEM;
93865 return 0;
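Both timer_list.c and timer_stats.c follow the same GRKERNSEC_HIDESYM output policy: with the option enabled, symbol resolution is skipped and pointers print as a NULL placeholder; without it, bare %p is upgraded to %pK so kptr_restrict still governs what unprivileged readers see, and the proc files themselves get tighter modes. The convention, condensed into one sketch:

	static void print_kernel_addr(struct seq_file *m, void *addr)
	{
	#ifdef CONFIG_GRKERNSEC_HIDESYM
		seq_printf(m, "<%p>", NULL);	/* never reveal the address */
	#else
		seq_printf(m, "<%pK>", addr);	/* honour kptr_restrict */
	#endif
	}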
93866diff --git a/kernel/torture.c b/kernel/torture.c
93867index dd70993..0bf694b 100644
93868--- a/kernel/torture.c
93869+++ b/kernel/torture.c
93870@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
93871 mutex_lock(&fullstop_mutex);
93872 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
93873 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
93874- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
93875+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
93876 } else {
93877 pr_warn("Concurrent rmmod and shutdown illegal!\n");
93878 }
93879@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
93880 if (!torture_must_stop()) {
93881 if (stutter > 1) {
93882 schedule_timeout_interruptible(stutter - 1);
93883- ACCESS_ONCE(stutter_pause_test) = 2;
93884+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
93885 }
93886 schedule_timeout_interruptible(1);
93887- ACCESS_ONCE(stutter_pause_test) = 1;
93888+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
93889 }
93890 if (!torture_must_stop())
93891 schedule_timeout_interruptible(stutter);
93892- ACCESS_ONCE(stutter_pause_test) = 0;
93893+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
93894 torture_shutdown_absorb("torture_stutter");
93895 } while (!torture_must_stop());
93896 torture_kthread_stopping("torture_stutter");
93897@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
93898 schedule_timeout_uninterruptible(10);
93899 return true;
93900 }
93901- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
93902+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
93903 mutex_unlock(&fullstop_mutex);
93904 torture_shutdown_cleanup();
93905 torture_shuffle_cleanup();
93906diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
93907index 483cecf..ac46091 100644
93908--- a/kernel/trace/blktrace.c
93909+++ b/kernel/trace/blktrace.c
93910@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
93911 struct blk_trace *bt = filp->private_data;
93912 char buf[16];
93913
93914- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
93915+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
93916
93917 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
93918 }
93919@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
93920 return 1;
93921
93922 bt = buf->chan->private_data;
93923- atomic_inc(&bt->dropped);
93924+ atomic_inc_unchecked(&bt->dropped);
93925 return 0;
93926 }
93927
93928@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
93929
93930 bt->dir = dir;
93931 bt->dev = dev;
93932- atomic_set(&bt->dropped, 0);
93933+ atomic_set_unchecked(&bt->dropped, 0);
93934 INIT_LIST_HEAD(&bt->running_list);
93935
93936 ret = -EIO;
93937diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
93938index 224e768..8303c84 100644
93939--- a/kernel/trace/ftrace.c
93940+++ b/kernel/trace/ftrace.c
93941@@ -2372,12 +2372,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
93942 if (unlikely(ftrace_disabled))
93943 return 0;
93944
93945+ ret = ftrace_arch_code_modify_prepare();
93946+ FTRACE_WARN_ON(ret);
93947+ if (ret)
93948+ return 0;
93949+
93950 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
93951+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
93952 if (ret) {
93953 ftrace_bug(ret, rec);
93954- return 0;
93955 }
93956- return 1;
93957+ return ret ? 0 : 1;
93958 }
93959
93960 /*
93961@@ -4754,8 +4759,10 @@ static int ftrace_process_locs(struct module *mod,
93962 if (!count)
93963 return 0;
93964
93965+ pax_open_kernel();
93966 sort(start, count, sizeof(*start),
93967 ftrace_cmp_ips, ftrace_swap_ips);
93968+ pax_close_kernel();
93969
93970 start_pg = ftrace_allocate_pages(count);
93971 if (!start_pg)
93972@@ -5633,7 +5640,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
93973
93974 if (t->ret_stack == NULL) {
93975 atomic_set(&t->tracing_graph_pause, 0);
93976- atomic_set(&t->trace_overrun, 0);
93977+ atomic_set_unchecked(&t->trace_overrun, 0);
93978 t->curr_ret_stack = -1;
93979 /* Make sure the tasks see the -1 first: */
93980 smp_wmb();
93981@@ -5856,7 +5863,7 @@ static void
93982 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
93983 {
93984 atomic_set(&t->tracing_graph_pause, 0);
93985- atomic_set(&t->trace_overrun, 0);
93986+ atomic_set_unchecked(&t->trace_overrun, 0);
93987 t->ftrace_timestamp = 0;
93988 /* make curr_ret_stack visible before we add the ret_stack */
93989 smp_wmb();
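The ftrace_code_disable() change above exists because, under KERNEXEC/constified kernels, patching mcount call sites to NOPs writes into read-only text, so the arch's prepare/post_process hooks (which set up a writable alias on x86) must bracket the modification here, just as ftrace_run_update_code() already brackets batch updates. The resulting shape, as a sketch:

	/* Sketch of the bracketing: text patching done outside the normal
	 * batch path must open and close the arch write window itself. */
	static int patch_one_site(struct module *mod, struct dyn_ftrace *rec)
	{
		int ret = ftrace_arch_code_modify_prepare();
		if (ret)
			return ret;
		ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
		ftrace_arch_code_modify_post_process();
		return ret;
	}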
93990diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
93991index d2e151c..b68c835 100644
93992--- a/kernel/trace/ring_buffer.c
93993+++ b/kernel/trace/ring_buffer.c
93994@@ -350,9 +350,9 @@ struct buffer_data_page {
93995 */
93996 struct buffer_page {
93997 struct list_head list; /* list of buffer pages */
93998- local_t write; /* index for next write */
93999+ local_unchecked_t write; /* index for next write */
94000 unsigned read; /* index for next read */
94001- local_t entries; /* entries on this page */
94002+ local_unchecked_t entries; /* entries on this page */
94003 unsigned long real_end; /* real end of data */
94004 struct buffer_data_page *page; /* Actual data page */
94005 };
94006@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
94007 unsigned long last_overrun;
94008 local_t entries_bytes;
94009 local_t entries;
94010- local_t overrun;
94011- local_t commit_overrun;
94012+ local_unchecked_t overrun;
94013+ local_unchecked_t commit_overrun;
94014 local_t dropped_events;
94015 local_t committing;
94016 local_t commits;
94017@@ -1047,8 +1047,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94018 *
94019 * We add a counter to the write field to denote this.
94020 */
94021- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
94022- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
94023+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
94024+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
94025
94026 /*
94027 * Just make sure we have seen our old_write and synchronize
94028@@ -1076,8 +1076,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94029 * cmpxchg to only update if an interrupt did not already
94030 * do it for us. If the cmpxchg fails, we don't care.
94031 */
94032- (void)local_cmpxchg(&next_page->write, old_write, val);
94033- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
94034+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
94035+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
94036
94037 /*
94038 * No need to worry about races with clearing out the commit.
94039@@ -1445,12 +1445,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
94040
94041 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
94042 {
94043- return local_read(&bpage->entries) & RB_WRITE_MASK;
94044+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
94045 }
94046
94047 static inline unsigned long rb_page_write(struct buffer_page *bpage)
94048 {
94049- return local_read(&bpage->write) & RB_WRITE_MASK;
94050+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
94051 }
94052
94053 static int
94054@@ -1545,7 +1545,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
94055 * bytes consumed in ring buffer from here.
94056 * Increment overrun to account for the lost events.
94057 */
94058- local_add(page_entries, &cpu_buffer->overrun);
94059+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
94060 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94061 }
94062
94063@@ -2107,7 +2107,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
94064 * it is our responsibility to update
94065 * the counters.
94066 */
94067- local_add(entries, &cpu_buffer->overrun);
94068+ local_add_unchecked(entries, &cpu_buffer->overrun);
94069 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94070
94071 /*
94072@@ -2257,7 +2257,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94073 if (tail == BUF_PAGE_SIZE)
94074 tail_page->real_end = 0;
94075
94076- local_sub(length, &tail_page->write);
94077+ local_sub_unchecked(length, &tail_page->write);
94078 return;
94079 }
94080
94081@@ -2292,7 +2292,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94082 rb_event_set_padding(event);
94083
94084 /* Set the write back to the previous setting */
94085- local_sub(length, &tail_page->write);
94086+ local_sub_unchecked(length, &tail_page->write);
94087 return;
94088 }
94089
94090@@ -2304,7 +2304,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94091
94092 /* Set write to end of buffer */
94093 length = (tail + length) - BUF_PAGE_SIZE;
94094- local_sub(length, &tail_page->write);
94095+ local_sub_unchecked(length, &tail_page->write);
94096 }
94097
94098 /*
94099@@ -2330,7 +2330,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94100 * about it.
94101 */
94102 if (unlikely(next_page == commit_page)) {
94103- local_inc(&cpu_buffer->commit_overrun);
94104+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94105 goto out_reset;
94106 }
94107
94108@@ -2386,7 +2386,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94109 cpu_buffer->tail_page) &&
94110 (cpu_buffer->commit_page ==
94111 cpu_buffer->reader_page))) {
94112- local_inc(&cpu_buffer->commit_overrun);
94113+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94114 goto out_reset;
94115 }
94116 }
94117@@ -2434,7 +2434,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94118 length += RB_LEN_TIME_EXTEND;
94119
94120 tail_page = cpu_buffer->tail_page;
94121- write = local_add_return(length, &tail_page->write);
94122+ write = local_add_return_unchecked(length, &tail_page->write);
94123
94124 /* set write to only the index of the write */
94125 write &= RB_WRITE_MASK;
94126@@ -2458,7 +2458,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94127 kmemcheck_annotate_bitfield(event, bitfield);
94128 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
94129
94130- local_inc(&tail_page->entries);
94131+ local_inc_unchecked(&tail_page->entries);
94132
94133 /*
94134 * If this is the first commit on the page, then update
94135@@ -2491,7 +2491,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94136
94137 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
94138 unsigned long write_mask =
94139- local_read(&bpage->write) & ~RB_WRITE_MASK;
94140+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
94141 unsigned long event_length = rb_event_length(event);
94142 /*
94143 * This is on the tail page. It is possible that
94144@@ -2501,7 +2501,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94145 */
94146 old_index += write_mask;
94147 new_index += write_mask;
94148- index = local_cmpxchg(&bpage->write, old_index, new_index);
94149+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
94150 if (index == old_index) {
94151 /* update counters */
94152 local_sub(event_length, &cpu_buffer->entries_bytes);
94153@@ -2904,7 +2904,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94154
94155 /* Do the likely case first */
94156 if (likely(bpage->page == (void *)addr)) {
94157- local_dec(&bpage->entries);
94158+ local_dec_unchecked(&bpage->entries);
94159 return;
94160 }
94161
94162@@ -2916,7 +2916,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94163 start = bpage;
94164 do {
94165 if (bpage->page == (void *)addr) {
94166- local_dec(&bpage->entries);
94167+ local_dec_unchecked(&bpage->entries);
94168 return;
94169 }
94170 rb_inc_page(cpu_buffer, &bpage);
94171@@ -3200,7 +3200,7 @@ static inline unsigned long
94172 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
94173 {
94174 return local_read(&cpu_buffer->entries) -
94175- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
94176+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
94177 }
94178
94179 /**
94180@@ -3289,7 +3289,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
94181 return 0;
94182
94183 cpu_buffer = buffer->buffers[cpu];
94184- ret = local_read(&cpu_buffer->overrun);
94185+ ret = local_read_unchecked(&cpu_buffer->overrun);
94186
94187 return ret;
94188 }
94189@@ -3312,7 +3312,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
94190 return 0;
94191
94192 cpu_buffer = buffer->buffers[cpu];
94193- ret = local_read(&cpu_buffer->commit_overrun);
94194+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
94195
94196 return ret;
94197 }
94198@@ -3397,7 +3397,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
94199 /* if you care about this being correct, lock the buffer */
94200 for_each_buffer_cpu(buffer, cpu) {
94201 cpu_buffer = buffer->buffers[cpu];
94202- overruns += local_read(&cpu_buffer->overrun);
94203+ overruns += local_read_unchecked(&cpu_buffer->overrun);
94204 }
94205
94206 return overruns;
94207@@ -3568,8 +3568,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94208 /*
94209 * Reset the reader page to size zero.
94210 */
94211- local_set(&cpu_buffer->reader_page->write, 0);
94212- local_set(&cpu_buffer->reader_page->entries, 0);
94213+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94214+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94215 local_set(&cpu_buffer->reader_page->page->commit, 0);
94216 cpu_buffer->reader_page->real_end = 0;
94217
94218@@ -3603,7 +3603,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94219 * want to compare with the last_overrun.
94220 */
94221 smp_mb();
94222- overwrite = local_read(&(cpu_buffer->overrun));
94223+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
94224
94225 /*
94226 * Here's the tricky part.
94227@@ -4175,8 +4175,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94228
94229 cpu_buffer->head_page
94230 = list_entry(cpu_buffer->pages, struct buffer_page, list);
94231- local_set(&cpu_buffer->head_page->write, 0);
94232- local_set(&cpu_buffer->head_page->entries, 0);
94233+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
94234+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
94235 local_set(&cpu_buffer->head_page->page->commit, 0);
94236
94237 cpu_buffer->head_page->read = 0;
94238@@ -4186,14 +4186,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94239
94240 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
94241 INIT_LIST_HEAD(&cpu_buffer->new_pages);
94242- local_set(&cpu_buffer->reader_page->write, 0);
94243- local_set(&cpu_buffer->reader_page->entries, 0);
94244+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94245+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94246 local_set(&cpu_buffer->reader_page->page->commit, 0);
94247 cpu_buffer->reader_page->read = 0;
94248
94249 local_set(&cpu_buffer->entries_bytes, 0);
94250- local_set(&cpu_buffer->overrun, 0);
94251- local_set(&cpu_buffer->commit_overrun, 0);
94252+ local_set_unchecked(&cpu_buffer->overrun, 0);
94253+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
94254 local_set(&cpu_buffer->dropped_events, 0);
94255 local_set(&cpu_buffer->entries, 0);
94256 local_set(&cpu_buffer->committing, 0);
94257@@ -4598,8 +4598,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
94258 rb_init_page(bpage);
94259 bpage = reader->page;
94260 reader->page = *data_page;
94261- local_set(&reader->write, 0);
94262- local_set(&reader->entries, 0);
94263+ local_set_unchecked(&reader->write, 0);
94264+ local_set_unchecked(&reader->entries, 0);
94265 reader->read = 0;
94266 *data_page = bpage;
94267
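Nearly everything in the ring_buffer.c hunk is one mechanical conversion: counters that are mere statistics or write cursors, and may legitimately wrap, move from local_t/atomic_t to the *_unchecked variants, which PaX's REFCOUNT feature exempts from its overflow trap; real reference counts keep the checked types. The pattern in isolation:

	/* Sketch: a statistics counter that may wrap is declared _unchecked
	 * so REFCOUNT does not treat wraparound as a refcount exploit. */
	static atomic_unchecked_t demo_dropped;

	static void demo_drop_event(void)
	{
		atomic_inc_unchecked(&demo_dropped);	/* wrap is harmless */
	}

	static unsigned int demo_read_dropped(void)
	{
		return atomic_read_unchecked(&demo_dropped);
	}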
94268diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
94269index 361a827..6a319a3 100644
94270--- a/kernel/trace/trace.c
94271+++ b/kernel/trace/trace.c
94272@@ -3499,7 +3499,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
94273 return 0;
94274 }
94275
94276-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
94277+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
94278 {
94279 /* do nothing if flag is already set */
94280 if (!!(trace_flags & mask) == !!enabled)
94281diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
94282index 8de48ba..3e5b4fa 100644
94283--- a/kernel/trace/trace.h
94284+++ b/kernel/trace/trace.h
94285@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
94286 void trace_printk_init_buffers(void);
94287 void trace_printk_start_comm(void);
94288 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
94289-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
94290+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
94291
94292 /*
94293 * Normal trace_printk() and friends allocates special buffers
94294diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
94295index 57b67b1..66082a9 100644
94296--- a/kernel/trace/trace_clock.c
94297+++ b/kernel/trace/trace_clock.c
94298@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
94299 return now;
94300 }
94301
94302-static atomic64_t trace_counter;
94303+static atomic64_unchecked_t trace_counter;
94304
94305 /*
94306 * trace_clock_counter(): simply an atomic counter.
94307@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
94308 */
94309 u64 notrace trace_clock_counter(void)
94310 {
94311- return atomic64_add_return(1, &trace_counter);
94312+ return atomic64_inc_return_unchecked(&trace_counter);
94313 }
94314diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
94315index b03a0ea..2df3168 100644
94316--- a/kernel/trace/trace_events.c
94317+++ b/kernel/trace/trace_events.c
94318@@ -1755,7 +1755,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
94319 return 0;
94320 }
94321
94322-struct ftrace_module_file_ops;
94323 static void __add_event_to_tracers(struct ftrace_event_call *call);
94324
94325 /* Add an additional event_call dynamically */
94326diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
94327index ba47600..d0e47fa 100644
94328--- a/kernel/trace/trace_functions_graph.c
94329+++ b/kernel/trace/trace_functions_graph.c
94330@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
94331
94332 /* The return trace stack is full */
94333 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
94334- atomic_inc(&current->trace_overrun);
94335+ atomic_inc_unchecked(&current->trace_overrun);
94336 return -EBUSY;
94337 }
94338
94339@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
94340 *ret = current->ret_stack[index].ret;
94341 trace->func = current->ret_stack[index].func;
94342 trace->calltime = current->ret_stack[index].calltime;
94343- trace->overrun = atomic_read(&current->trace_overrun);
94344+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
94345 trace->depth = index;
94346 }
94347
94348diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
94349index 7a9ba62..2e0e4a1 100644
94350--- a/kernel/trace/trace_mmiotrace.c
94351+++ b/kernel/trace/trace_mmiotrace.c
94352@@ -24,7 +24,7 @@ struct header_iter {
94353 static struct trace_array *mmio_trace_array;
94354 static bool overrun_detected;
94355 static unsigned long prev_overruns;
94356-static atomic_t dropped_count;
94357+static atomic_unchecked_t dropped_count;
94358
94359 static void mmio_reset_data(struct trace_array *tr)
94360 {
94361@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
94362
94363 static unsigned long count_overruns(struct trace_iterator *iter)
94364 {
94365- unsigned long cnt = atomic_xchg(&dropped_count, 0);
94366+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
94367 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
94368
94369 if (over > prev_overruns)
94370@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
94371 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
94372 sizeof(*entry), 0, pc);
94373 if (!event) {
94374- atomic_inc(&dropped_count);
94375+ atomic_inc_unchecked(&dropped_count);
94376 return;
94377 }
94378 entry = ring_buffer_event_data(event);
94379@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
94380 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
94381 sizeof(*entry), 0, pc);
94382 if (!event) {
94383- atomic_inc(&dropped_count);
94384+ atomic_inc_unchecked(&dropped_count);
94385 return;
94386 }
94387 entry = ring_buffer_event_data(event);
94388diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
94389index b77b9a6..82f19bd 100644
94390--- a/kernel/trace/trace_output.c
94391+++ b/kernel/trace/trace_output.c
94392@@ -707,14 +707,16 @@ int register_ftrace_event(struct trace_event *event)
94393 goto out;
94394 }
94395
94396+ pax_open_kernel();
94397 if (event->funcs->trace == NULL)
94398- event->funcs->trace = trace_nop_print;
94399+ *(void **)&event->funcs->trace = trace_nop_print;
94400 if (event->funcs->raw == NULL)
94401- event->funcs->raw = trace_nop_print;
94402+ *(void **)&event->funcs->raw = trace_nop_print;
94403 if (event->funcs->hex == NULL)
94404- event->funcs->hex = trace_nop_print;
94405+ *(void **)&event->funcs->hex = trace_nop_print;
94406 if (event->funcs->binary == NULL)
94407- event->funcs->binary = trace_nop_print;
94408+ *(void **)&event->funcs->binary = trace_nop_print;
94409+ pax_close_kernel();
94410
94411 key = event->type & (EVENT_HASHSIZE - 1);
94412
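
register_ftrace_event() now fills in the default callbacks through `*(void **)&` casts inside a pax_open_kernel()/pax_close_kernel() window: the constify plugin has moved `event->funcs` into read-only memory, so the cast defeats the compile-time const and the open/close pair briefly lifts the runtime write protection. A hedged userspace analogue of that window using mprotect() (the kernel side is assumed to toggle CR0.WP or the arch equivalent):

    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Write one pointer-sized slot inside an otherwise read-only page. */
    static int write_protected_slot(void **slot, void *value)
    {
        uintptr_t pagesz = (uintptr_t)sysconf(_SC_PAGESIZE);
        void *page = (void *)((uintptr_t)slot & ~(pagesz - 1));

        if (mprotect(page, pagesz, PROT_READ | PROT_WRITE))  /* "open" */
            return -1;
        *slot = value;
        return mprotect(page, pagesz, PROT_READ);            /* "close" */
    }
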
94413diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
94414index f8b45d8..70ff6c8 100644
94415--- a/kernel/trace/trace_seq.c
94416+++ b/kernel/trace/trace_seq.c
94417@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
94418 return 0;
94419 }
94420
94421- seq_buf_path(&s->seq, path, "\n");
94422+ seq_buf_path(&s->seq, path, "\n\\");
94423
94424 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
94425 s->seq.len = save_len;
94426diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
94427index 16eddb3..758b308 100644
94428--- a/kernel/trace/trace_stack.c
94429+++ b/kernel/trace/trace_stack.c
94430@@ -90,7 +90,7 @@ check_stack(unsigned long ip, unsigned long *stack)
94431 return;
94432
94433 /* we do not handle interrupt stacks yet */
94434- if (!object_is_on_stack(stack))
94435+ if (!object_starts_on_stack(stack))
94436 return;
94437
94438 local_irq_save(flags);
94439diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
94440index c6ee36f..78513f3 100644
94441--- a/kernel/trace/trace_syscalls.c
94442+++ b/kernel/trace/trace_syscalls.c
94443@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
94444 int num;
94445
94446 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94447+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94448+ return -EINVAL;
94449
94450 mutex_lock(&syscall_trace_lock);
94451 if (!sys_perf_refcount_enter)
94452@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
94453 int num;
94454
94455 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94456+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94457+ return;
94458
94459 mutex_lock(&syscall_trace_lock);
94460 sys_perf_refcount_enter--;
94461@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
94462 int num;
94463
94464 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94465+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94466+ return -EINVAL;
94467
94468 mutex_lock(&syscall_trace_lock);
94469 if (!sys_perf_refcount_exit)
94470@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
94471 int num;
94472
94473 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94474+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94475+ return;
94476
94477 mutex_lock(&syscall_trace_lock);
94478 sys_perf_refcount_exit--;
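
All four guards added above validate the metadata-supplied syscall number before it indexes the per-syscall enable bitmaps; a negative or oversized value would otherwise read and write out of bounds. The pattern, reduced to a self-contained sketch (NR_SYSCALLS is an assumed stand-in for the arch's NR_syscalls):

    #define NR_SYSCALLS 440                  /* assumption: arch-specific */
    static unsigned char enabled[NR_SYSCALLS];

    static int mark_enabled(int num)
    {
        if (num < 0 || num >= NR_SYSCALLS)   /* same check as the hunks */
            return -1;                       /* -EINVAL in the kernel */
        enabled[num] = 1;
        return 0;
    }
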
94479diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
94480index 4109f83..fe1f830 100644
94481--- a/kernel/user_namespace.c
94482+++ b/kernel/user_namespace.c
94483@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
94484 !kgid_has_mapping(parent_ns, group))
94485 return -EPERM;
94486
94487+#ifdef CONFIG_GRKERNSEC
94488+ /*
94489+ * This doesn't really inspire confidence:
94490+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
94491+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
94492+ * Increases kernel attack surface in areas developers
94493+ * previously cared little about ("low importance due
94494+ * to requiring "root" capability")
94495+ * To be removed when this code receives *proper* review
94496+ */
94497+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
94498+ !capable(CAP_SETGID))
94499+ return -EPERM;
94500+#endif
94501+
94502 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
94503 if (!ns)
94504 return -ENOMEM;
94505@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
94506 if (atomic_read(&current->mm->mm_users) > 1)
94507 return -EINVAL;
94508
94509- if (current->fs->users != 1)
94510+ if (atomic_read(&current->fs->users) != 1)
94511 return -EINVAL;
94512
94513 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
94514diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
94515index c8eac43..4b5f08f 100644
94516--- a/kernel/utsname_sysctl.c
94517+++ b/kernel/utsname_sysctl.c
94518@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
94519 static int proc_do_uts_string(struct ctl_table *table, int write,
94520 void __user *buffer, size_t *lenp, loff_t *ppos)
94521 {
94522- struct ctl_table uts_table;
94523+ ctl_table_no_const uts_table;
94524 int r;
94525 memcpy(&uts_table, table, sizeof(uts_table));
94526 uts_table.data = get_uts(table, write);
94527diff --git a/kernel/watchdog.c b/kernel/watchdog.c
94528index 70bf118..4be3c37 100644
94529--- a/kernel/watchdog.c
94530+++ b/kernel/watchdog.c
94531@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
94532 static void watchdog_nmi_disable(unsigned int cpu) { return; }
94533 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
94534
94535-static struct smp_hotplug_thread watchdog_threads = {
94536+static struct smp_hotplug_thread watchdog_threads __read_only = {
94537 .store = &softlockup_watchdog,
94538 .thread_should_run = watchdog_should_run,
94539 .thread_fn = watchdog,
94540diff --git a/kernel/workqueue.c b/kernel/workqueue.c
94541index beeeac9..65cbfb3 100644
94542--- a/kernel/workqueue.c
94543+++ b/kernel/workqueue.c
94544@@ -4517,7 +4517,7 @@ static void rebind_workers(struct worker_pool *pool)
94545 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
94546 worker_flags |= WORKER_REBOUND;
94547 worker_flags &= ~WORKER_UNBOUND;
94548- ACCESS_ONCE(worker->flags) = worker_flags;
94549+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
94550 }
94551
94552 spin_unlock_irq(&pool->lock);
94553diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
94554index 5f2ce61..85a0b1b 100644
94555--- a/lib/Kconfig.debug
94556+++ b/lib/Kconfig.debug
94557@@ -910,7 +910,7 @@ config DEBUG_MUTEXES
94558
94559 config DEBUG_WW_MUTEX_SLOWPATH
94560 bool "Wait/wound mutex debugging: Slowpath testing"
94561- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94562+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94563 select DEBUG_LOCK_ALLOC
94564 select DEBUG_SPINLOCK
94565 select DEBUG_MUTEXES
94566@@ -927,7 +927,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
94567
94568 config DEBUG_LOCK_ALLOC
94569 bool "Lock debugging: detect incorrect freeing of live locks"
94570- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94571+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94572 select DEBUG_SPINLOCK
94573 select DEBUG_MUTEXES
94574 select LOCKDEP
94575@@ -941,7 +941,7 @@ config DEBUG_LOCK_ALLOC
94576
94577 config PROVE_LOCKING
94578 bool "Lock debugging: prove locking correctness"
94579- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94580+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94581 select LOCKDEP
94582 select DEBUG_SPINLOCK
94583 select DEBUG_MUTEXES
94584@@ -992,7 +992,7 @@ config LOCKDEP
94585
94586 config LOCK_STAT
94587 bool "Lock usage statistics"
94588- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94589+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94590 select LOCKDEP
94591 select DEBUG_SPINLOCK
94592 select DEBUG_MUTEXES
94593@@ -1453,6 +1453,7 @@ config LATENCYTOP
94594 depends on DEBUG_KERNEL
94595 depends on STACKTRACE_SUPPORT
94596 depends on PROC_FS
94597+ depends on !GRKERNSEC_HIDESYM
94598 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
94599 select KALLSYMS
94600 select KALLSYMS_ALL
94601@@ -1469,7 +1470,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94602 config DEBUG_STRICT_USER_COPY_CHECKS
94603 bool "Strict user copy size checks"
94604 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94605- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
94606+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
94607 help
94608 Enabling this option turns a certain set of sanity checks for user
94609 copy operations into compile time failures.
94610@@ -1597,7 +1598,7 @@ endmenu # runtime tests
94611
94612 config PROVIDE_OHCI1394_DMA_INIT
94613 bool "Remote debugging over FireWire early on boot"
94614- depends on PCI && X86
94615+ depends on PCI && X86 && !GRKERNSEC
94616 help
94617 If you want to debug problems which hang or crash the kernel early
94618 on boot and the crashing machine has a FireWire port, you can use
94619diff --git a/lib/Makefile b/lib/Makefile
94620index 3c3b30b..ca29102 100644
94621--- a/lib/Makefile
94622+++ b/lib/Makefile
94623@@ -55,7 +55,7 @@ obj-$(CONFIG_BTREE) += btree.o
94624 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
94625 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
94626 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
94627-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
94628+obj-y += list_debug.o
94629 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
94630
94631 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
94632diff --git a/lib/average.c b/lib/average.c
94633index 114d1be..ab0350c 100644
94634--- a/lib/average.c
94635+++ b/lib/average.c
94636@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
94637 {
94638 unsigned long internal = ACCESS_ONCE(avg->internal);
94639
94640- ACCESS_ONCE(avg->internal) = internal ?
94641+ ACCESS_ONCE_RW(avg->internal) = internal ?
94642 (((internal << avg->weight) - internal) +
94643 (val << avg->factor)) >> avg->weight :
94644 (val << avg->factor);
94645diff --git a/lib/bitmap.c b/lib/bitmap.c
94646index 324ea9e..46b1ae2 100644
94647--- a/lib/bitmap.c
94648+++ b/lib/bitmap.c
94649@@ -271,7 +271,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
94650 }
94651 EXPORT_SYMBOL(__bitmap_subset);
94652
94653-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94654+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94655 {
94656 unsigned int k, lim = bits/BITS_PER_LONG;
94657 int w = 0;
94658@@ -437,7 +437,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
94659 {
94660 int c, old_c, totaldigits, ndigits, nchunks, nbits;
94661 u32 chunk;
94662- const char __user __force *ubuf = (const char __user __force *)buf;
94663+ const char __user *ubuf = (const char __force_user *)buf;
94664
94665 bitmap_zero(maskp, nmaskbits);
94666
94667@@ -522,7 +522,7 @@ int bitmap_parse_user(const char __user *ubuf,
94668 {
94669 if (!access_ok(VERIFY_READ, ubuf, ulen))
94670 return -EFAULT;
94671- return __bitmap_parse((const char __force *)ubuf,
94672+ return __bitmap_parse((const char __force_kernel *)ubuf,
94673 ulen, 1, maskp, nmaskbits);
94674
94675 }
94676@@ -640,7 +640,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
94677 {
94678 unsigned a, b;
94679 int c, old_c, totaldigits;
94680- const char __user __force *ubuf = (const char __user __force *)buf;
94681+ const char __user *ubuf = (const char __force_user *)buf;
94682 int exp_digit, in_range;
94683
94684 totaldigits = c = 0;
94685@@ -735,7 +735,7 @@ int bitmap_parselist_user(const char __user *ubuf,
94686 {
94687 if (!access_ok(VERIFY_READ, ubuf, ulen))
94688 return -EFAULT;
94689- return __bitmap_parselist((const char __force *)ubuf,
94690+ return __bitmap_parselist((const char __force_kernel *)ubuf,
94691 ulen, 1, maskp, nmaskbits);
94692 }
94693 EXPORT_SYMBOL(bitmap_parselist_user);
94694diff --git a/lib/bug.c b/lib/bug.c
94695index 0c3bd95..5a615a1 100644
94696--- a/lib/bug.c
94697+++ b/lib/bug.c
94698@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
94699 return BUG_TRAP_TYPE_NONE;
94700
94701 bug = find_bug(bugaddr);
94702+ if (!bug)
94703+ return BUG_TRAP_TYPE_NONE;
94704
94705 file = NULL;
94706 line = 0;
94707diff --git a/lib/debugobjects.c b/lib/debugobjects.c
94708index 547f7f9..a6d4ba0 100644
94709--- a/lib/debugobjects.c
94710+++ b/lib/debugobjects.c
94711@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
94712 if (limit > 4)
94713 return;
94714
94715- is_on_stack = object_is_on_stack(addr);
94716+ is_on_stack = object_starts_on_stack(addr);
94717 if (is_on_stack == onstack)
94718 return;
94719
94720diff --git a/lib/div64.c b/lib/div64.c
94721index 4382ad7..08aa558 100644
94722--- a/lib/div64.c
94723+++ b/lib/div64.c
94724@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
94725 EXPORT_SYMBOL(__div64_32);
94726
94727 #ifndef div_s64_rem
94728-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94729+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94730 {
94731 u64 quotient;
94732
94733@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
94734 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
94735 */
94736 #ifndef div64_u64
94737-u64 div64_u64(u64 dividend, u64 divisor)
94738+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
94739 {
94740 u32 high = divisor >> 32;
94741 u64 quot;
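
The division helpers gain an `__intentional_overflow(-1)` marker, which is assumed to be consumed by the size_overflow gcc plugin rather than the compiler proper: it exempts the annotated function from overflow instrumentation, since 64-bit division legitimately exercises the full value range. With the plugin disabled the marker is expected to compile away, roughly:

    /* Hedged sketch of the two assumed definitions. */
    #ifdef SIZE_OVERFLOW_PLUGIN
    # define __intentional_overflow(...) \
             __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    # define __intentional_overflow(...)
    #endif
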
94742diff --git a/lib/dma-debug.c b/lib/dma-debug.c
94743index 9722bd2..0d826f4 100644
94744--- a/lib/dma-debug.c
94745+++ b/lib/dma-debug.c
94746@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
94747
94748 void dma_debug_add_bus(struct bus_type *bus)
94749 {
94750- struct notifier_block *nb;
94751+ notifier_block_no_const *nb;
94752
94753 if (dma_debug_disabled())
94754 return;
94755@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
94756
94757 static void check_for_stack(struct device *dev, void *addr)
94758 {
94759- if (object_is_on_stack(addr))
94760+ if (object_starts_on_stack(addr))
94761 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
94762 "stack [addr=%p]\n", addr);
94763 }
94764diff --git a/lib/inflate.c b/lib/inflate.c
94765index 013a761..c28f3fc 100644
94766--- a/lib/inflate.c
94767+++ b/lib/inflate.c
94768@@ -269,7 +269,7 @@ static void free(void *where)
94769 malloc_ptr = free_mem_ptr;
94770 }
94771 #else
94772-#define malloc(a) kmalloc(a, GFP_KERNEL)
94773+#define malloc(a) kmalloc((a), GFP_KERNEL)
94774 #define free(a) kfree(a)
94775 #endif
94776
94777diff --git a/lib/ioremap.c b/lib/ioremap.c
94778index 0c9216c..863bd89 100644
94779--- a/lib/ioremap.c
94780+++ b/lib/ioremap.c
94781@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
94782 unsigned long next;
94783
94784 phys_addr -= addr;
94785- pmd = pmd_alloc(&init_mm, pud, addr);
94786+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
94787 if (!pmd)
94788 return -ENOMEM;
94789 do {
94790@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
94791 unsigned long next;
94792
94793 phys_addr -= addr;
94794- pud = pud_alloc(&init_mm, pgd, addr);
94795+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
94796 if (!pud)
94797 return -ENOMEM;
94798 do {
94799diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
94800index bd2bea9..6b3c95e 100644
94801--- a/lib/is_single_threaded.c
94802+++ b/lib/is_single_threaded.c
94803@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
94804 struct task_struct *p, *t;
94805 bool ret;
94806
94807+ if (!mm)
94808+ return true;
94809+
94810 if (atomic_read(&task->signal->live) != 1)
94811 return false;
94812
94813diff --git a/lib/kobject.c b/lib/kobject.c
94814index 03d4ab3..46f6374 100644
94815--- a/lib/kobject.c
94816+++ b/lib/kobject.c
94817@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
94818
94819
94820 static DEFINE_SPINLOCK(kobj_ns_type_lock);
94821-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
94822+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
94823
94824-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94825+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94826 {
94827 enum kobj_ns_type type = ops->type;
94828 int error;
94829diff --git a/lib/list_debug.c b/lib/list_debug.c
94830index c24c2f7..f0296f4 100644
94831--- a/lib/list_debug.c
94832+++ b/lib/list_debug.c
94833@@ -11,7 +11,9 @@
94834 #include <linux/bug.h>
94835 #include <linux/kernel.h>
94836 #include <linux/rculist.h>
94837+#include <linux/mm.h>
94838
94839+#ifdef CONFIG_DEBUG_LIST
94840 /*
94841 * Insert a new entry between two known consecutive entries.
94842 *
94843@@ -19,21 +21,40 @@
94844 * the prev/next entries already!
94845 */
94846
94847+static bool __list_add_debug(struct list_head *new,
94848+ struct list_head *prev,
94849+ struct list_head *next)
94850+{
94851+ if (unlikely(next->prev != prev)) {
94852+ printk(KERN_ERR "list_add corruption. next->prev should be "
94853+ "prev (%p), but was %p. (next=%p).\n",
94854+ prev, next->prev, next);
94855+ BUG();
94856+ return false;
94857+ }
94858+ if (unlikely(prev->next != next)) {
94859+ printk(KERN_ERR "list_add corruption. prev->next should be "
94860+ "next (%p), but was %p. (prev=%p).\n",
94861+ next, prev->next, prev);
94862+ BUG();
94863+ return false;
94864+ }
94865+ if (unlikely(new == prev || new == next)) {
94866+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
94867+ new, prev, next);
94868+ BUG();
94869+ return false;
94870+ }
94871+ return true;
94872+}
94873+
94874 void __list_add(struct list_head *new,
94875- struct list_head *prev,
94876- struct list_head *next)
94877+ struct list_head *prev,
94878+ struct list_head *next)
94879 {
94880- WARN(next->prev != prev,
94881- "list_add corruption. next->prev should be "
94882- "prev (%p), but was %p. (next=%p).\n",
94883- prev, next->prev, next);
94884- WARN(prev->next != next,
94885- "list_add corruption. prev->next should be "
94886- "next (%p), but was %p. (prev=%p).\n",
94887- next, prev->next, prev);
94888- WARN(new == prev || new == next,
94889- "list_add double add: new=%p, prev=%p, next=%p.\n",
94890- new, prev, next);
94891+ if (!__list_add_debug(new, prev, next))
94892+ return;
94893+
94894 next->prev = new;
94895 new->next = next;
94896 new->prev = prev;
94897@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
94898 }
94899 EXPORT_SYMBOL(__list_add);
94900
94901-void __list_del_entry(struct list_head *entry)
94902+static bool __list_del_entry_debug(struct list_head *entry)
94903 {
94904 struct list_head *prev, *next;
94905
94906 prev = entry->prev;
94907 next = entry->next;
94908
94909- if (WARN(next == LIST_POISON1,
94910- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94911- entry, LIST_POISON1) ||
94912- WARN(prev == LIST_POISON2,
94913- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94914- entry, LIST_POISON2) ||
94915- WARN(prev->next != entry,
94916- "list_del corruption. prev->next should be %p, "
94917- "but was %p\n", entry, prev->next) ||
94918- WARN(next->prev != entry,
94919- "list_del corruption. next->prev should be %p, "
94920- "but was %p\n", entry, next->prev))
94921+ if (unlikely(next == LIST_POISON1)) {
94922+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94923+ entry, LIST_POISON1);
94924+ BUG();
94925+ return false;
94926+ }
94927+ if (unlikely(prev == LIST_POISON2)) {
94928+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94929+ entry, LIST_POISON2);
94930+ BUG();
94931+ return false;
94932+ }
94933+ if (unlikely(entry->prev->next != entry)) {
94934+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
94935+ "but was %p\n", entry, prev->next);
94936+ BUG();
94937+ return false;
94938+ }
94939+ if (unlikely(entry->next->prev != entry)) {
94940+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
94941+ "but was %p\n", entry, next->prev);
94942+ BUG();
94943+ return false;
94944+ }
94945+ return true;
94946+}
94947+
94948+void __list_del_entry(struct list_head *entry)
94949+{
94950+ if (!__list_del_entry_debug(entry))
94951 return;
94952
94953- __list_del(prev, next);
94954+ __list_del(entry->prev, entry->next);
94955 }
94956 EXPORT_SYMBOL(__list_del_entry);
94957
94958@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
94959 void __list_add_rcu(struct list_head *new,
94960 struct list_head *prev, struct list_head *next)
94961 {
94962- WARN(next->prev != prev,
94963- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
94964- prev, next->prev, next);
94965- WARN(prev->next != next,
94966- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
94967- next, prev->next, prev);
94968+ if (!__list_add_debug(new, prev, next))
94969+ return;
94970+
94971 new->next = next;
94972 new->prev = prev;
94973 rcu_assign_pointer(list_next_rcu(prev), new);
94974 next->prev = new;
94975 }
94976 EXPORT_SYMBOL(__list_add_rcu);
94977+#endif
94978+
94979+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
94980+{
94981+#ifdef CONFIG_DEBUG_LIST
94982+ if (!__list_add_debug(new, prev, next))
94983+ return;
94984+#endif
94985+
94986+ pax_open_kernel();
94987+ next->prev = new;
94988+ new->next = next;
94989+ new->prev = prev;
94990+ prev->next = new;
94991+ pax_close_kernel();
94992+}
94993+EXPORT_SYMBOL(__pax_list_add);
94994+
94995+void pax_list_del(struct list_head *entry)
94996+{
94997+#ifdef CONFIG_DEBUG_LIST
94998+ if (!__list_del_entry_debug(entry))
94999+ return;
95000+#endif
95001+
95002+ pax_open_kernel();
95003+ __list_del(entry->prev, entry->next);
95004+ entry->next = LIST_POISON1;
95005+ entry->prev = LIST_POISON2;
95006+ pax_close_kernel();
95007+}
95008+EXPORT_SYMBOL(pax_list_del);
95009+
95010+void pax_list_del_init(struct list_head *entry)
95011+{
95012+ pax_open_kernel();
95013+ __list_del(entry->prev, entry->next);
95014+ INIT_LIST_HEAD(entry);
95015+ pax_close_kernel();
95016+}
95017+EXPORT_SYMBOL(pax_list_del_init);
95018+
95019+void __pax_list_add_rcu(struct list_head *new,
95020+ struct list_head *prev, struct list_head *next)
95021+{
95022+#ifdef CONFIG_DEBUG_LIST
95023+ if (!__list_add_debug(new, prev, next))
95024+ return;
95025+#endif
95026+
95027+ pax_open_kernel();
95028+ new->next = next;
95029+ new->prev = prev;
95030+ rcu_assign_pointer(list_next_rcu(prev), new);
95031+ next->prev = new;
95032+ pax_close_kernel();
95033+}
95034+EXPORT_SYMBOL(__pax_list_add_rcu);
95035+
95036+void pax_list_del_rcu(struct list_head *entry)
95037+{
95038+#ifdef CONFIG_DEBUG_LIST
95039+ if (!__list_del_entry_debug(entry))
95040+ return;
95041+#endif
95042+
95043+ pax_open_kernel();
95044+ __list_del(entry->prev, entry->next);
95045+ entry->next = LIST_POISON1;
95046+ entry->prev = LIST_POISON2;
95047+ pax_close_kernel();
95048+}
95049+EXPORT_SYMBOL(pax_list_del_rcu);
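
Two changes land in this file: the DEBUG_LIST checks are hardened from WARN()-and-continue to BUG()-and-stop (a corrupted list is already an exploitation primitive, so patching around it is unsafe), and a parallel pax_list_* family is added for nodes that the constify plugin has placed in read-only memory; those variants run the same corruption checks and then do the pointer surgery inside a pax_open_kernel() window. A hedged usage sketch for a constified node (the call sites are illustrative, not taken from the patch):

    /* Assumed declarations matching the exports above. */
    struct list_head { struct list_head *next, *prev; };
    void __pax_list_add(struct list_head *new, struct list_head *prev,
                        struct list_head *next);
    void pax_list_del(struct list_head *entry);

    /* Linking/unlinking a node embedded in read-only data: */
    static void link_constified(struct list_head *node, struct list_head *head)
    {
        __pax_list_add(node, head, head->next);   /* list_add() shape */
    }

    static void unlink_constified(struct list_head *node)
    {
        pax_list_del(node);   /* also re-poisons next/prev */
    }
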
95050diff --git a/lib/lockref.c b/lib/lockref.c
95051index d2233de..fa1a2f6 100644
95052--- a/lib/lockref.c
95053+++ b/lib/lockref.c
95054@@ -48,13 +48,13 @@
95055 void lockref_get(struct lockref *lockref)
95056 {
95057 CMPXCHG_LOOP(
95058- new.count++;
95059+ __lockref_inc(&new);
95060 ,
95061 return;
95062 );
95063
95064 spin_lock(&lockref->lock);
95065- lockref->count++;
95066+ __lockref_inc(lockref);
95067 spin_unlock(&lockref->lock);
95068 }
95069 EXPORT_SYMBOL(lockref_get);
95070@@ -69,7 +69,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95071 int retval;
95072
95073 CMPXCHG_LOOP(
95074- new.count++;
95075+ __lockref_inc(&new);
95076 if (!old.count)
95077 return 0;
95078 ,
95079@@ -79,7 +79,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95080 spin_lock(&lockref->lock);
95081 retval = 0;
95082 if (lockref->count) {
95083- lockref->count++;
95084+ __lockref_inc(lockref);
95085 retval = 1;
95086 }
95087 spin_unlock(&lockref->lock);
95088@@ -96,7 +96,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
95089 int lockref_get_or_lock(struct lockref *lockref)
95090 {
95091 CMPXCHG_LOOP(
95092- new.count++;
95093+ __lockref_inc(&new);
95094 if (!old.count)
95095 break;
95096 ,
95097@@ -106,7 +106,7 @@ int lockref_get_or_lock(struct lockref *lockref)
95098 spin_lock(&lockref->lock);
95099 if (!lockref->count)
95100 return 0;
95101- lockref->count++;
95102+ __lockref_inc(lockref);
95103 spin_unlock(&lockref->lock);
95104 return 1;
95105 }
95106@@ -120,7 +120,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
95107 int lockref_put_or_lock(struct lockref *lockref)
95108 {
95109 CMPXCHG_LOOP(
95110- new.count--;
95111+ __lockref_dec(&new);
95112 if (old.count <= 1)
95113 break;
95114 ,
95115@@ -130,7 +130,7 @@ int lockref_put_or_lock(struct lockref *lockref)
95116 spin_lock(&lockref->lock);
95117 if (lockref->count <= 1)
95118 return 0;
95119- lockref->count--;
95120+ __lockref_dec(lockref);
95121 spin_unlock(&lockref->lock);
95122 return 1;
95123 }
95124@@ -157,7 +157,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95125 int retval;
95126
95127 CMPXCHG_LOOP(
95128- new.count++;
95129+ __lockref_inc(&new);
95130 if ((int)old.count < 0)
95131 return 0;
95132 ,
95133@@ -167,7 +167,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95134 spin_lock(&lockref->lock);
95135 retval = 0;
95136 if ((int) lockref->count >= 0) {
95137- lockref->count++;
95138+ __lockref_inc(lockref);
95139 retval = 1;
95140 }
95141 spin_unlock(&lockref->lock);
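
Every open-coded `count++`/`count--` in lockref is routed through __lockref_inc()/__lockref_dec(), which are assumed to be overflow-protected increments in the REFCOUNT style: on overflow they trap instead of wrapping, closing the use-after-free window where a wrapped reference count frees a live object. A sketch of the intended semantics, using a GCC overflow builtin:

    #include <stdlib.h>

    /* Hedged sketch: a protected increment that refuses to wrap. */
    static inline void lockref_inc_protected(int *count)
    {
        int old = *count;
        if (__builtin_add_overflow(old, 1, count)) {
            *count = old;   /* saturate */
            abort();        /* the kernel is assumed to report and trap */
        }
    }
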
95142diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
95143index 6111bcb..02e816b 100644
95144--- a/lib/percpu-refcount.c
95145+++ b/lib/percpu-refcount.c
95146@@ -31,7 +31,7 @@
95147 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
95148 */
95149
95150-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
95151+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
95152
95153 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
95154
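
The bias drops from the top bit to the second-highest bit because, once atomic_long_t is overflow-checked, setting bit BITS_PER_LONG-1 of a signed long is itself a sign overflow; one bit of headroom keeps the biased count representable while still guaranteeing the summed per-cpu counts cannot reach zero early. In numbers (assuming 64-bit longs):

    #define BITS_PER_LONG 64                         /* assumption */
    #define BIAS_OLD (1UL << (BITS_PER_LONG - 1))    /* 0x8000...0: sign bit */
    #define BIAS_NEW (1UL << (BITS_PER_LONG - 2))    /* 0x4000...0: headroom */
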
95155diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95156index 3291a8e..346a91e 100644
95157--- a/lib/radix-tree.c
95158+++ b/lib/radix-tree.c
95159@@ -67,7 +67,7 @@ struct radix_tree_preload {
95160 int nr;
95161 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
95162 };
95163-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95164+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95165
95166 static inline void *ptr_to_indirect(void *ptr)
95167 {
95168diff --git a/lib/random32.c b/lib/random32.c
95169index 0bee183..526f12f 100644
95170--- a/lib/random32.c
95171+++ b/lib/random32.c
95172@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
95173 }
95174 #endif
95175
95176-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
95177+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
95178
95179 /**
95180 * prandom_u32_state - seeded pseudo-random number generator.
95181diff --git a/lib/rbtree.c b/lib/rbtree.c
95182index c16c81a..4dcbda1 100644
95183--- a/lib/rbtree.c
95184+++ b/lib/rbtree.c
95185@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
95186 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
95187
95188 static const struct rb_augment_callbacks dummy_callbacks = {
95189- dummy_propagate, dummy_copy, dummy_rotate
95190+ .propagate = dummy_propagate,
95191+ .copy = dummy_copy,
95192+ .rotate = dummy_rotate
95193 };
95194
95195 void rb_insert_color(struct rb_node *node, struct rb_root *root)
95196diff --git a/lib/show_mem.c b/lib/show_mem.c
95197index 7de89f4..00d70b7 100644
95198--- a/lib/show_mem.c
95199+++ b/lib/show_mem.c
95200@@ -50,6 +50,6 @@ void show_mem(unsigned int filter)
95201 quicklist_total_size());
95202 #endif
95203 #ifdef CONFIG_MEMORY_FAILURE
95204- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
95205+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
95206 #endif
95207 }
95208diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
95209index bb2b201..46abaf9 100644
95210--- a/lib/strncpy_from_user.c
95211+++ b/lib/strncpy_from_user.c
95212@@ -21,7 +21,7 @@
95213 */
95214 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
95215 {
95216- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95217+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95218 long res = 0;
95219
95220 /*
95221diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
95222index a28df52..3d55877 100644
95223--- a/lib/strnlen_user.c
95224+++ b/lib/strnlen_user.c
95225@@ -26,7 +26,7 @@
95226 */
95227 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
95228 {
95229- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95230+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95231 long align, res = 0;
95232 unsigned long c;
95233
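
Making `constants` `static const` in both helpers moves the WORD_AT_A_TIME_CONSTANTS table out of each call's stack frame and into rodata, so the hot path no longer re-materializes it on entry and it cannot be clobbered through the stack. For context, the zero-byte test these routines build on is the classic word-at-a-time trick; a self-contained sketch for 64-bit words:

    #include <stdint.h>

    /* Nonzero iff some byte of x is zero: (x - 0x01..01) borrows into a
       byte only where it was zero (or a borrow propagated), and the
       & ~x & 0x80..80 mask filters out the false positives. */
    static inline int word_has_zero_byte(uint64_t x)
    {
        const uint64_t ones = 0x0101010101010101ULL;
        const uint64_t high = 0x8080808080808080ULL;
        return ((x - ones) & ~x & high) != 0;
    }
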
95234diff --git a/lib/swiotlb.c b/lib/swiotlb.c
95235index 4abda07..b9d3765 100644
95236--- a/lib/swiotlb.c
95237+++ b/lib/swiotlb.c
95238@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
95239
95240 void
95241 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
95242- dma_addr_t dev_addr)
95243+ dma_addr_t dev_addr, struct dma_attrs *attrs)
95244 {
95245 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
95246
95247diff --git a/lib/usercopy.c b/lib/usercopy.c
95248index 4f5b1dd..7cab418 100644
95249--- a/lib/usercopy.c
95250+++ b/lib/usercopy.c
95251@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
95252 WARN(1, "Buffer overflow detected!\n");
95253 }
95254 EXPORT_SYMBOL(copy_from_user_overflow);
95255+
95256+void copy_to_user_overflow(void)
95257+{
95258+ WARN(1, "Buffer overflow detected!\n");
95259+}
95260+EXPORT_SYMBOL(copy_to_user_overflow);
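
copy_to_user_overflow() mirrors the existing copy_from_user_overflow() stub: usercopy wrappers whose size check can prove (via __builtin_object_size) that the kernel-side object is smaller than the requested length divert to it and WARN. A hedged sketch of how such a wrapper is assumed to be wired (raw_copy and the extern are illustrative stand-ins, not kernel APIs):

    /* Assumed externs standing in for the real primitives. */
    void copy_to_user_overflow(void);
    unsigned long raw_copy(void *dst, const void *src, unsigned long n);

    static inline unsigned long
    copy_to_user_checked(void *dst_user, const void *src, unsigned long n)
    {
        unsigned long sz = __builtin_object_size(src, 0);

        if (sz != (unsigned long)-1 && n > sz) {
            copy_to_user_overflow();   /* WARNs, as added above */
            return n;                  /* convention: bytes NOT copied */
        }
        return raw_copy(dst_user, src, n);
    }
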
95261diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95262index ec337f6..8484eb2 100644
95263--- a/lib/vsprintf.c
95264+++ b/lib/vsprintf.c
95265@@ -16,6 +16,9 @@
95266 * - scnprintf and vscnprintf
95267 */
95268
95269+#ifdef CONFIG_GRKERNSEC_HIDESYM
95270+#define __INCLUDED_BY_HIDESYM 1
95271+#endif
95272 #include <stdarg.h>
95273 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
95274 #include <linux/types.h>
95275@@ -625,7 +628,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
95276 #ifdef CONFIG_KALLSYMS
95277 if (*fmt == 'B')
95278 sprint_backtrace(sym, value);
95279- else if (*fmt != 'f' && *fmt != 's')
95280+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
95281 sprint_symbol(sym, value);
95282 else
95283 sprint_symbol_no_offset(sym, value);
95284@@ -1240,7 +1243,11 @@ char *address_val(char *buf, char *end, const void *addr,
95285 return number(buf, end, num, spec);
95286 }
95287
95288+#ifdef CONFIG_GRKERNSEC_HIDESYM
95289+int kptr_restrict __read_mostly = 2;
95290+#else
95291 int kptr_restrict __read_mostly;
95292+#endif
95293
95294 /*
95295 * Show a '%p' thing. A kernel extension is that the '%p' is followed
95296@@ -1251,8 +1258,10 @@ int kptr_restrict __read_mostly;
95297 *
95298 * - 'F' For symbolic function descriptor pointers with offset
95299 * - 'f' For simple symbolic function names without offset
95300+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
95301 * - 'S' For symbolic direct pointers with offset
95302 * - 's' For symbolic direct pointers without offset
95303+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
95304 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
95305 * - 'B' For backtraced symbolic direct pointers with offset
95306 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
95307@@ -1331,12 +1340,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95308
95309 if (!ptr && *fmt != 'K') {
95310 /*
95311- * Print (null) with the same width as a pointer so it makes
95312+ * Print (nil) with the same width as a pointer so it makes
95313 * tabular output look nice.
95314 */
95315 if (spec.field_width == -1)
95316 spec.field_width = default_width;
95317- return string(buf, end, "(null)", spec);
95318+ return string(buf, end, "(nil)", spec);
95319 }
95320
95321 switch (*fmt) {
95322@@ -1346,6 +1355,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95323 /* Fallthrough */
95324 case 'S':
95325 case 's':
95326+#ifdef CONFIG_GRKERNSEC_HIDESYM
95327+ break;
95328+#else
95329+ return symbol_string(buf, end, ptr, spec, fmt);
95330+#endif
95331+ case 'X':
95332+ ptr = dereference_function_descriptor(ptr);
95333+ case 'A':
95334 case 'B':
95335 return symbol_string(buf, end, ptr, spec, fmt);
95336 case 'R':
95337@@ -1403,6 +1420,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95338 va_end(va);
95339 return buf;
95340 }
95341+ case 'P':
95342+ break;
95343 case 'K':
95344 /*
95345 * %pK cannot be used in IRQ context because its test
95346@@ -1460,6 +1479,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95347 ((const struct file *)ptr)->f_path.dentry,
95348 spec, fmt);
95349 }
95350+
95351+#ifdef CONFIG_GRKERNSEC_HIDESYM
95352+ /* 'P' = approved pointers to copy to userland,
95353+ as in the /proc/kallsyms case, as we make it display nothing
95354+ for non-root users, and the real contents for root users
95355+ 'X' = approved simple symbols
95356+ Also ignore 'K' pointers, since we force their NULLing for non-root users
95357+ above
95358+ */
95359+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
95360+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
95361+ dump_stack();
95362+ ptr = NULL;
95363+ }
95364+#endif
95365+
95366 spec.flags |= SMALL;
95367 if (spec.field_width == -1) {
95368 spec.field_width = default_width;
95369@@ -2160,11 +2195,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95370 typeof(type) value; \
95371 if (sizeof(type) == 8) { \
95372 args = PTR_ALIGN(args, sizeof(u32)); \
95373- *(u32 *)&value = *(u32 *)args; \
95374- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
95375+ *(u32 *)&value = *(const u32 *)args; \
95376+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
95377 } else { \
95378 args = PTR_ALIGN(args, sizeof(type)); \
95379- value = *(typeof(type) *)args; \
95380+ value = *(const typeof(type) *)args; \
95381 } \
95382 args += sizeof(type); \
95383 value; \
95384@@ -2227,7 +2262,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95385 case FORMAT_TYPE_STR: {
95386 const char *str_arg = args;
95387 args += strlen(str_arg) + 1;
95388- str = string(str, end, (char *)str_arg, spec);
95389+ str = string(str, end, str_arg, spec);
95390 break;
95391 }
95392
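
Under GRKERNSEC_HIDESYM the %p machinery gains vetted specifiers ('X' for symbols, 'A' for symbol+offset, 'P' for approved raw pointers), kptr_restrict defaults to 2, and a final check NULLs any kernel address about to be rendered with a non-approved specifier into a buffer that can reach userland. That zap condition, reduced to a sketch (TASK_SIZE_ASSUMED and the usercopy flag are stand-ins for TASK_SIZE and is_usercopy_object()):

    #define TASK_SIZE_ASSUMED 0x7ffffffff000UL   /* assumption: x86-64 */

    /* Returns the pointer to format, or NULL if printing it would leak. */
    static const void *hidesym_filter(const void *ptr, char fmt,
                                      int buf_is_usercopy)
    {
        int approved = (fmt == 'P' || fmt == 'X' || fmt == 'K');

        if ((unsigned long)ptr > TASK_SIZE_ASSUMED &&
            !approved && buf_is_usercopy)
            return 0;   /* logged as an infoleak in the hunk above */
        return ptr;
    }
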
95393diff --git a/localversion-grsec b/localversion-grsec
95394new file mode 100644
95395index 0000000..7cd6065
95396--- /dev/null
95397+++ b/localversion-grsec
95398@@ -0,0 +1 @@
95399+-grsec
95400diff --git a/mm/Kconfig b/mm/Kconfig
95401index 1d1ae6b..0f05885 100644
95402--- a/mm/Kconfig
95403+++ b/mm/Kconfig
95404@@ -341,10 +341,11 @@ config KSM
95405 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
95406
95407 config DEFAULT_MMAP_MIN_ADDR
95408- int "Low address space to protect from user allocation"
95409+ int "Low address space to protect from user allocation"
95410 depends on MMU
95411- default 4096
95412- help
95413+ default 32768 if ALPHA || ARM || PARISC || SPARC32
95414+ default 65536
95415+ help
95416 This is the portion of low virtual memory which should be protected
95417 from userspace allocation. Keeping a user from writing to low pages
95418 can help reduce the impact of kernel NULL pointer bugs.
95419@@ -375,7 +376,7 @@ config MEMORY_FAILURE
95420
95421 config HWPOISON_INJECT
95422 tristate "HWPoison pages injector"
95423- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
95424+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
95425 select PROC_PAGE_MONITOR
95426
95427 config NOMMU_INITIAL_TRIM_EXCESS
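
Raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 (32768 on the listed architectures) widens the band of low addresses userspace may not map, so a kernel NULL-plus-small-offset dereference lands on an unmapped page instead of attacker-staged data. The enforcement itself is assumed to be a one-line comparison in the security hook; as a sketch:

    /* Sketch of the check applied to fixed low mappings. */
    static int low_mapping_allowed(unsigned long addr,
                                   unsigned long mmap_min_addr)
    {
        return addr >= mmap_min_addr;   /* 0 => rejected (-EPERM) */
    }
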
95428diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95429index 0ae0df5..82ac56b 100644
95430--- a/mm/backing-dev.c
95431+++ b/mm/backing-dev.c
95432@@ -12,7 +12,7 @@
95433 #include <linux/device.h>
95434 #include <trace/events/writeback.h>
95435
95436-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
95437+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
95438
95439 struct backing_dev_info default_backing_dev_info = {
95440 .name = "default",
95441@@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
95442 return err;
95443
95444 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
95445- atomic_long_inc_return(&bdi_seq));
95446+ atomic_long_inc_return_unchecked(&bdi_seq));
95447 if (err) {
95448 bdi_destroy(bdi);
95449 return err;
95450diff --git a/mm/filemap.c b/mm/filemap.c
95451index 673e458..7192013 100644
95452--- a/mm/filemap.c
95453+++ b/mm/filemap.c
95454@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95455 struct address_space *mapping = file->f_mapping;
95456
95457 if (!mapping->a_ops->readpage)
95458- return -ENOEXEC;
95459+ return -ENODEV;
95460 file_accessed(file);
95461 vma->vm_ops = &generic_file_vm_ops;
95462 return 0;
95463@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95464 *pos = i_size_read(inode);
95465
95466 if (limit != RLIM_INFINITY) {
95467+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
95468 if (*pos >= limit) {
95469 send_sig(SIGXFSZ, current, 0);
95470 return -EFBIG;
95471diff --git a/mm/fremap.c b/mm/fremap.c
95472index 2805d71..8b56e7d 100644
95473--- a/mm/fremap.c
95474+++ b/mm/fremap.c
95475@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95476 retry:
95477 vma = find_vma(mm, start);
95478
95479+#ifdef CONFIG_PAX_SEGMEXEC
95480+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95481+ goto out;
95482+#endif
95483+
95484 /*
95485 * Make sure the vma is shared, that it supports prefaulting,
95486 * and that the remapped range is valid and fully within
95487diff --git a/mm/gup.c b/mm/gup.c
95488index 9b2afbf..647297c 100644
95489--- a/mm/gup.c
95490+++ b/mm/gup.c
95491@@ -274,11 +274,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
95492 unsigned int fault_flags = 0;
95493 int ret;
95494
95495- /* For mlock, just skip the stack guard page. */
95496- if ((*flags & FOLL_MLOCK) &&
95497- (stack_guard_page_start(vma, address) ||
95498- stack_guard_page_end(vma, address + PAGE_SIZE)))
95499- return -ENOENT;
95500 if (*flags & FOLL_WRITE)
95501 fault_flags |= FAULT_FLAG_WRITE;
95502 if (nonblocking)
95503@@ -444,14 +439,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95504 if (!(gup_flags & FOLL_FORCE))
95505 gup_flags |= FOLL_NUMA;
95506
95507- do {
95508+ while (nr_pages) {
95509 struct page *page;
95510 unsigned int foll_flags = gup_flags;
95511 unsigned int page_increm;
95512
95513 /* first iteration or cross vma bound */
95514 if (!vma || start >= vma->vm_end) {
95515- vma = find_extend_vma(mm, start);
95516+ vma = find_vma(mm, start);
95517 if (!vma && in_gate_area(mm, start)) {
95518 int ret;
95519 ret = get_gate_page(mm, start & PAGE_MASK,
95520@@ -463,7 +458,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95521 goto next_page;
95522 }
95523
95524- if (!vma || check_vma_flags(vma, gup_flags))
95525+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
95526 return i ? : -EFAULT;
95527 if (is_vm_hugetlb_page(vma)) {
95528 i = follow_hugetlb_page(mm, vma, pages, vmas,
95529@@ -518,7 +513,7 @@ next_page:
95530 i += page_increm;
95531 start += page_increm * PAGE_SIZE;
95532 nr_pages -= page_increm;
95533- } while (nr_pages);
95534+ }
95535 return i;
95536 }
95537 EXPORT_SYMBOL(__get_user_pages);
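
__get_user_pages() is tightened in two ways: the do/while becomes a plain while so nr_pages == 0 performs no iteration, and find_extend_vma() becomes find_vma() plus an explicit `start < vma->vm_start` test, so a fault-in no longer grows the stack vma as a side effect; an address in a hole now simply returns -EFAULT. Sketch of the tightened lookup (find_vma semantics: first vma with vm_end > addr):

    #include <stddef.h>

    struct vma { unsigned long vm_start, vm_end; };
    struct vma *find_vma(unsigned long addr);   /* assumed extern */

    static struct vma *lookup_no_extend(unsigned long addr)
    {
        struct vma *vma = find_vma(addr);
        if (!vma || addr < vma->vm_start)   /* in a hole: do not extend */
            return NULL;                    /* the -EFAULT path above */
        return vma;
    }
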
95538diff --git a/mm/highmem.c b/mm/highmem.c
95539index 123bcd3..0de52ba 100644
95540--- a/mm/highmem.c
95541+++ b/mm/highmem.c
95542@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
95543 * So no dangers, even with speculative execution.
95544 */
95545 page = pte_page(pkmap_page_table[i]);
95546+ pax_open_kernel();
95547 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
95548-
95549+ pax_close_kernel();
95550 set_page_address(page, NULL);
95551 need_flush = 1;
95552 }
95553@@ -259,9 +260,11 @@ start:
95554 }
95555 }
95556 vaddr = PKMAP_ADDR(last_pkmap_nr);
95557+
95558+ pax_open_kernel();
95559 set_pte_at(&init_mm, vaddr,
95560 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95561-
95562+ pax_close_kernel();
95563 pkmap_count[last_pkmap_nr] = 1;
95564 set_page_address(page, (void *)vaddr);
95565
95566diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95567index c49586f..41e5fd9 100644
95568--- a/mm/hugetlb.c
95569+++ b/mm/hugetlb.c
95570@@ -2258,6 +2258,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95571 struct ctl_table *table, int write,
95572 void __user *buffer, size_t *length, loff_t *ppos)
95573 {
95574+ ctl_table_no_const t;
95575 struct hstate *h = &default_hstate;
95576 unsigned long tmp = h->max_huge_pages;
95577 int ret;
95578@@ -2265,9 +2266,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95579 if (!hugepages_supported())
95580 return -ENOTSUPP;
95581
95582- table->data = &tmp;
95583- table->maxlen = sizeof(unsigned long);
95584- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95585+ t = *table;
95586+ t.data = &tmp;
95587+ t.maxlen = sizeof(unsigned long);
95588+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
95589 if (ret)
95590 goto out;
95591
95592@@ -2302,6 +2304,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95593 struct hstate *h = &default_hstate;
95594 unsigned long tmp;
95595 int ret;
95596+ ctl_table_no_const hugetlb_table;
95597
95598 if (!hugepages_supported())
95599 return -ENOTSUPP;
95600@@ -2311,9 +2314,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95601 if (write && hstate_is_gigantic(h))
95602 return -EINVAL;
95603
95604- table->data = &tmp;
95605- table->maxlen = sizeof(unsigned long);
95606- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95607+ hugetlb_table = *table;
95608+ hugetlb_table.data = &tmp;
95609+ hugetlb_table.maxlen = sizeof(unsigned long);
95610+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
95611 if (ret)
95612 goto out;
95613
95614@@ -2797,6 +2801,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95615 i_mmap_unlock_write(mapping);
95616 }
95617
95618+#ifdef CONFIG_PAX_SEGMEXEC
95619+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95620+{
95621+ struct mm_struct *mm = vma->vm_mm;
95622+ struct vm_area_struct *vma_m;
95623+ unsigned long address_m;
95624+ pte_t *ptep_m;
95625+
95626+ vma_m = pax_find_mirror_vma(vma);
95627+ if (!vma_m)
95628+ return;
95629+
95630+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95631+ address_m = address + SEGMEXEC_TASK_SIZE;
95632+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95633+ get_page(page_m);
95634+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
95635+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95636+}
95637+#endif
95638+
95639 /*
95640 * Hugetlb_cow() should be called with page lock of the original hugepage held.
95641 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
95642@@ -2909,6 +2934,11 @@ retry_avoidcopy:
95643 make_huge_pte(vma, new_page, 1));
95644 page_remove_rmap(old_page);
95645 hugepage_add_new_anon_rmap(new_page, vma, address);
95646+
95647+#ifdef CONFIG_PAX_SEGMEXEC
95648+ pax_mirror_huge_pte(vma, address, new_page);
95649+#endif
95650+
95651 /* Make the old page be freed below */
95652 new_page = old_page;
95653 }
95654@@ -3069,6 +3099,10 @@ retry:
95655 && (vma->vm_flags & VM_SHARED)));
95656 set_huge_pte_at(mm, address, ptep, new_pte);
95657
95658+#ifdef CONFIG_PAX_SEGMEXEC
95659+ pax_mirror_huge_pte(vma, address, page);
95660+#endif
95661+
95662 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95663 /* Optimization, do the COW without a second fault */
95664 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
95665@@ -3135,6 +3169,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95666 struct hstate *h = hstate_vma(vma);
95667 struct address_space *mapping;
95668
95669+#ifdef CONFIG_PAX_SEGMEXEC
95670+ struct vm_area_struct *vma_m;
95671+#endif
95672+
95673 address &= huge_page_mask(h);
95674
95675 ptep = huge_pte_offset(mm, address);
95676@@ -3148,6 +3186,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95677 VM_FAULT_SET_HINDEX(hstate_index(h));
95678 }
95679
95680+#ifdef CONFIG_PAX_SEGMEXEC
95681+ vma_m = pax_find_mirror_vma(vma);
95682+ if (vma_m) {
95683+ unsigned long address_m;
95684+
95685+ if (vma->vm_start > vma_m->vm_start) {
95686+ address_m = address;
95687+ address -= SEGMEXEC_TASK_SIZE;
95688+ vma = vma_m;
95689+ h = hstate_vma(vma);
95690+ } else
95691+ address_m = address + SEGMEXEC_TASK_SIZE;
95692+
95693+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
95694+ return VM_FAULT_OOM;
95695+ address_m &= HPAGE_MASK;
95696+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
95697+ }
95698+#endif
95699+
95700 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
95701 if (!ptep)
95702 return VM_FAULT_OOM;
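
The SEGMEXEC additions mirror each huge-page PTE into the upper half of the split address space: pax_mirror_huge_pte() installs the same page at address + SEGMEXEC_TASK_SIZE, and hugetlb_fault() normalizes a fault in either half so both mappings stay coherent. The address arithmetic, as a sketch (the split point is an assumption; SEGMEXEC is assumed to halve the i386 task size):

    #define TASK_SIZE_ASSUMED  0xC0000000UL          /* assumption: i386 */
    #define SEGMEXEC_TASK_SIZE (TASK_SIZE_ASSUMED / 2)

    /* Data lives below the split; its executable mirror sits above it. */
    static unsigned long mirror_address(unsigned long addr)
    {
        return addr + SEGMEXEC_TASK_SIZE;   /* address_m in the hunks */
    }
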
95703diff --git a/mm/internal.h b/mm/internal.h
95704index efad241..57ae4ca 100644
95705--- a/mm/internal.h
95706+++ b/mm/internal.h
95707@@ -134,6 +134,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
95708
95709 extern int __isolate_free_page(struct page *page, unsigned int order);
95710 extern void __free_pages_bootmem(struct page *page, unsigned int order);
95711+extern void free_compound_page(struct page *page);
95712 extern void prep_compound_page(struct page *page, unsigned long order);
95713 #ifdef CONFIG_MEMORY_FAILURE
95714 extern bool is_free_buddy_page(struct page *page);
95715@@ -387,7 +388,7 @@ extern u32 hwpoison_filter_enable;
95716
95717 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
95718 unsigned long, unsigned long,
95719- unsigned long, unsigned long);
95720+ unsigned long, unsigned long) __intentional_overflow(-1);
95721
95722 extern void set_pageblock_order(void);
95723 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
95724diff --git a/mm/kmemleak.c b/mm/kmemleak.c
95725index 3cda50c..032ba634 100644
95726--- a/mm/kmemleak.c
95727+++ b/mm/kmemleak.c
95728@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
95729
95730 for (i = 0; i < object->trace_len; i++) {
95731 void *ptr = (void *)object->trace[i];
95732- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
95733+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
95734 }
95735 }
95736
95737@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
95738 return -ENOMEM;
95739 }
95740
95741- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
95742+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
95743 &kmemleak_fops);
95744 if (!dentry)
95745 pr_warning("Failed to create the debugfs kmemleak file\n");
95746diff --git a/mm/maccess.c b/mm/maccess.c
95747index d53adf9..03a24bf 100644
95748--- a/mm/maccess.c
95749+++ b/mm/maccess.c
95750@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
95751 set_fs(KERNEL_DS);
95752 pagefault_disable();
95753 ret = __copy_from_user_inatomic(dst,
95754- (__force const void __user *)src, size);
95755+ (const void __force_user *)src, size);
95756 pagefault_enable();
95757 set_fs(old_fs);
95758
95759@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
95760
95761 set_fs(KERNEL_DS);
95762 pagefault_disable();
95763- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
95764+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
95765 pagefault_enable();
95766 set_fs(old_fs);
95767
95768diff --git a/mm/madvise.c b/mm/madvise.c
95769index a271adc..831d82f 100644
95770--- a/mm/madvise.c
95771+++ b/mm/madvise.c
95772@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
95773 pgoff_t pgoff;
95774 unsigned long new_flags = vma->vm_flags;
95775
95776+#ifdef CONFIG_PAX_SEGMEXEC
95777+ struct vm_area_struct *vma_m;
95778+#endif
95779+
95780 switch (behavior) {
95781 case MADV_NORMAL:
95782 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
95783@@ -126,6 +130,13 @@ success:
95784 /*
95785 * vm_flags is protected by the mmap_sem held in write mode.
95786 */
95787+
95788+#ifdef CONFIG_PAX_SEGMEXEC
95789+ vma_m = pax_find_mirror_vma(vma);
95790+ if (vma_m)
95791+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
95792+#endif
95793+
95794 vma->vm_flags = new_flags;
95795
95796 out:
95797@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95798 struct vm_area_struct **prev,
95799 unsigned long start, unsigned long end)
95800 {
95801+
95802+#ifdef CONFIG_PAX_SEGMEXEC
95803+ struct vm_area_struct *vma_m;
95804+#endif
95805+
95806 *prev = vma;
95807 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
95808 return -EINVAL;
95809@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95810 zap_page_range(vma, start, end - start, &details);
95811 } else
95812 zap_page_range(vma, start, end - start, NULL);
95813+
95814+#ifdef CONFIG_PAX_SEGMEXEC
95815+ vma_m = pax_find_mirror_vma(vma);
95816+ if (vma_m) {
95817+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
95818+ struct zap_details details = {
95819+ .nonlinear_vma = vma_m,
95820+ .last_index = ULONG_MAX,
95821+ };
95822+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
95823+ } else
95824+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
95825+ }
95826+#endif
95827+
95828 return 0;
95829 }
95830
95831@@ -488,6 +519,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
95832 if (end < start)
95833 return error;
95834
95835+#ifdef CONFIG_PAX_SEGMEXEC
95836+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
95837+ if (end > SEGMEXEC_TASK_SIZE)
95838+ return error;
95839+ } else
95840+#endif
95841+
95842+ if (end > TASK_SIZE)
95843+ return error;
95844+
95845 error = 0;
95846 if (end == start)
95847 return error;
95848diff --git a/mm/memory-failure.c b/mm/memory-failure.c
95849index feb803b..d382029 100644
95850--- a/mm/memory-failure.c
95851+++ b/mm/memory-failure.c
95852@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
95853
95854 int sysctl_memory_failure_recovery __read_mostly = 1;
95855
95856-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95857+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95858
95859 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
95860
95861@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
95862 pfn, t->comm, t->pid);
95863 si.si_signo = SIGBUS;
95864 si.si_errno = 0;
95865- si.si_addr = (void *)addr;
95866+ si.si_addr = (void __user *)addr;
95867 #ifdef __ARCH_SI_TRAPNO
95868 si.si_trapno = trapno;
95869 #endif
95870@@ -786,7 +786,7 @@ static struct page_state {
95871 unsigned long res;
95872 char *msg;
95873 int (*action)(struct page *p, unsigned long pfn);
95874-} error_states[] = {
95875+} __do_const error_states[] = {
95876 { reserved, reserved, "reserved kernel", me_kernel },
95877 /*
95878 * free pages are specially detected outside this table:
95879@@ -1094,7 +1094,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95880 nr_pages = 1 << compound_order(hpage);
95881 else /* normal page or thp */
95882 nr_pages = 1;
95883- atomic_long_add(nr_pages, &num_poisoned_pages);
95884+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
95885
95886 /*
95887 * We need/can do nothing about count=0 pages.
95888@@ -1123,7 +1123,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95889 if (PageHWPoison(hpage)) {
95890 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
95891 || (p != hpage && TestSetPageHWPoison(hpage))) {
95892- atomic_long_sub(nr_pages, &num_poisoned_pages);
95893+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95894 unlock_page(hpage);
95895 return 0;
95896 }
95897@@ -1191,14 +1191,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95898 */
95899 if (!PageHWPoison(p)) {
95900 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
95901- atomic_long_sub(nr_pages, &num_poisoned_pages);
95902+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95903 put_page(hpage);
95904 res = 0;
95905 goto out;
95906 }
95907 if (hwpoison_filter(p)) {
95908 if (TestClearPageHWPoison(p))
95909- atomic_long_sub(nr_pages, &num_poisoned_pages);
95910+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95911 unlock_page(hpage);
95912 put_page(hpage);
95913 return 0;
95914@@ -1428,7 +1428,7 @@ int unpoison_memory(unsigned long pfn)
95915 return 0;
95916 }
95917 if (TestClearPageHWPoison(p))
95918- atomic_long_dec(&num_poisoned_pages);
95919+ atomic_long_dec_unchecked(&num_poisoned_pages);
95920 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
95921 return 0;
95922 }
95923@@ -1442,7 +1442,7 @@ int unpoison_memory(unsigned long pfn)
95924 */
95925 if (TestClearPageHWPoison(page)) {
95926 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
95927- atomic_long_sub(nr_pages, &num_poisoned_pages);
95928+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95929 freeit = 1;
95930 if (PageHuge(page))
95931 clear_page_hwpoison_huge_page(page);
95932@@ -1567,11 +1567,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
95933 if (PageHuge(page)) {
95934 set_page_hwpoison_huge_page(hpage);
95935 dequeue_hwpoisoned_huge_page(hpage);
95936- atomic_long_add(1 << compound_order(hpage),
95937+ atomic_long_add_unchecked(1 << compound_order(hpage),
95938 &num_poisoned_pages);
95939 } else {
95940 SetPageHWPoison(page);
95941- atomic_long_inc(&num_poisoned_pages);
95942+ atomic_long_inc_unchecked(&num_poisoned_pages);
95943 }
95944 }
95945 return ret;
95946@@ -1610,7 +1610,7 @@ static int __soft_offline_page(struct page *page, int flags)
95947 put_page(page);
95948 pr_info("soft_offline: %#lx: invalidated\n", pfn);
95949 SetPageHWPoison(page);
95950- atomic_long_inc(&num_poisoned_pages);
95951+ atomic_long_inc_unchecked(&num_poisoned_pages);
95952 return 0;
95953 }
95954
95955@@ -1661,7 +1661,7 @@ static int __soft_offline_page(struct page *page, int flags)
95956 if (!is_free_buddy_page(page))
95957 pr_info("soft offline: %#lx: page leaked\n",
95958 pfn);
95959- atomic_long_inc(&num_poisoned_pages);
95960+ atomic_long_inc_unchecked(&num_poisoned_pages);
95961 }
95962 } else {
95963 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
95964@@ -1731,11 +1731,11 @@ int soft_offline_page(struct page *page, int flags)
95965 if (PageHuge(page)) {
95966 set_page_hwpoison_huge_page(hpage);
95967 dequeue_hwpoisoned_huge_page(hpage);
95968- atomic_long_add(1 << compound_order(hpage),
95969+ atomic_long_add_unchecked(1 << compound_order(hpage),
95970 &num_poisoned_pages);
95971 } else {
95972 SetPageHWPoison(page);
95973- atomic_long_inc(&num_poisoned_pages);
95974+ atomic_long_inc_unchecked(&num_poisoned_pages);
95975 }
95976 }
95977 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
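
The mm/memory-failure.c hunks above convert num_poisoned_pages from atomic_long_t to PaX's atomic_long_unchecked_t and switch every update to the *_unchecked operations. Under the PaX REFCOUNT hardening, ordinary atomic overflow is trapped as a potential refcount exploit; a pure statistic like the poisoned-page count is deliberately opted out, since wrapping it cannot cause a use-after-free. Below is a minimal user-space model of that split; the function bodies are illustrative stand-ins, not the real implementations from the patched asm/atomic.h:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

typedef struct { long counter; } atomic_long_t;
typedef struct { long counter; } atomic_long_unchecked_t;

/* checked add: overflowing a refcount is treated as an attack/bug */
static void atomic_long_add(long i, atomic_long_t *v)
{
	assert(!(i > 0 && v->counter > LONG_MAX - i));	/* PaX would trap here */
	v->counter += i;
}

/* unchecked add: plain wrapping arithmetic, fine for pure statistics */
static void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *v)
{
	v->counter += i;
}

int main(void)
{
	atomic_long_t refs = { 1 };
	atomic_long_unchecked_t num_poisoned_pages = { 0 };

	atomic_long_add(1, &refs);			   /* overflow-guarded */
	atomic_long_add_unchecked(4, &num_poisoned_pages); /* e.g. one 4-page hugepage */
	atomic_long_add_unchecked(-4, &num_poisoned_pages);/* unpoisoned again */
	printf("refs=%ld poisoned=%ld\n", refs.counter, num_poisoned_pages.counter);
	return 0;
}
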
95978diff --git a/mm/memory.c b/mm/memory.c
95979index 2c3536c..e800104 100644
95980--- a/mm/memory.c
95981+++ b/mm/memory.c
95982@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
95983 free_pte_range(tlb, pmd, addr);
95984 } while (pmd++, addr = next, addr != end);
95985
95986+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
95987 start &= PUD_MASK;
95988 if (start < floor)
95989 return;
95990@@ -428,6 +429,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
95991 pmd = pmd_offset(pud, start);
95992 pud_clear(pud);
95993 pmd_free_tlb(tlb, pmd, start);
95994+#endif
95995+
95996 }
95997
95998 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95999@@ -447,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96000 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
96001 } while (pud++, addr = next, addr != end);
96002
96003+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
96004 start &= PGDIR_MASK;
96005 if (start < floor)
96006 return;
96007@@ -461,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96008 pud = pud_offset(pgd, start);
96009 pgd_clear(pgd);
96010 pud_free_tlb(tlb, pud, start);
96011+#endif
96012+
96013 }
96014
96015 /*
96016@@ -690,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
96017 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
96018 */
96019 if (vma->vm_ops)
96020- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
96021+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
96022 vma->vm_ops->fault);
96023 if (vma->vm_file)
96024- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
96025+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
96026 vma->vm_file->f_op->mmap);
96027 dump_stack();
96028 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
96029@@ -1488,6 +1494,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
96030 page_add_file_rmap(page);
96031 set_pte_at(mm, addr, pte, mk_pte(page, prot));
96032
96033+#ifdef CONFIG_PAX_SEGMEXEC
96034+ pax_mirror_file_pte(vma, addr, page, ptl);
96035+#endif
96036+
96037 retval = 0;
96038 pte_unmap_unlock(pte, ptl);
96039 return retval;
96040@@ -1532,9 +1542,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
96041 if (!page_count(page))
96042 return -EINVAL;
96043 if (!(vma->vm_flags & VM_MIXEDMAP)) {
96044+
96045+#ifdef CONFIG_PAX_SEGMEXEC
96046+ struct vm_area_struct *vma_m;
96047+#endif
96048+
96049 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
96050 BUG_ON(vma->vm_flags & VM_PFNMAP);
96051 vma->vm_flags |= VM_MIXEDMAP;
96052+
96053+#ifdef CONFIG_PAX_SEGMEXEC
96054+ vma_m = pax_find_mirror_vma(vma);
96055+ if (vma_m)
96056+ vma_m->vm_flags |= VM_MIXEDMAP;
96057+#endif
96058+
96059 }
96060 return insert_page(vma, addr, page, vma->vm_page_prot);
96061 }
96062@@ -1617,6 +1639,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
96063 unsigned long pfn)
96064 {
96065 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
96066+ BUG_ON(vma->vm_mirror);
96067
96068 if (addr < vma->vm_start || addr >= vma->vm_end)
96069 return -EFAULT;
96070@@ -1864,7 +1887,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
96071
96072 BUG_ON(pud_huge(*pud));
96073
96074- pmd = pmd_alloc(mm, pud, addr);
96075+ pmd = (mm == &init_mm) ?
96076+ pmd_alloc_kernel(mm, pud, addr) :
96077+ pmd_alloc(mm, pud, addr);
96078 if (!pmd)
96079 return -ENOMEM;
96080 do {
96081@@ -1884,7 +1909,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
96082 unsigned long next;
96083 int err;
96084
96085- pud = pud_alloc(mm, pgd, addr);
96086+ pud = (mm == &init_mm) ?
96087+ pud_alloc_kernel(mm, pgd, addr) :
96088+ pud_alloc(mm, pgd, addr);
96089 if (!pud)
96090 return -ENOMEM;
96091 do {
96092@@ -2006,6 +2033,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
96093 return ret;
96094 }
96095
96096+#ifdef CONFIG_PAX_SEGMEXEC
96097+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96098+{
96099+ struct mm_struct *mm = vma->vm_mm;
96100+ spinlock_t *ptl;
96101+ pte_t *pte, entry;
96102+
96103+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96104+ entry = *pte;
96105+ if (!pte_present(entry)) {
96106+ if (!pte_none(entry)) {
96107+ BUG_ON(pte_file(entry));
96108+ free_swap_and_cache(pte_to_swp_entry(entry));
96109+ pte_clear_not_present_full(mm, address, pte, 0);
96110+ }
96111+ } else {
96112+ struct page *page;
96113+
96114+ flush_cache_page(vma, address, pte_pfn(entry));
96115+ entry = ptep_clear_flush(vma, address, pte);
96116+ BUG_ON(pte_dirty(entry));
96117+ page = vm_normal_page(vma, address, entry);
96118+ if (page) {
96119+ update_hiwater_rss(mm);
96120+ if (PageAnon(page))
96121+ dec_mm_counter_fast(mm, MM_ANONPAGES);
96122+ else
96123+ dec_mm_counter_fast(mm, MM_FILEPAGES);
96124+ page_remove_rmap(page);
96125+ page_cache_release(page);
96126+ }
96127+ }
96128+ pte_unmap_unlock(pte, ptl);
96129+}
96130+
96131+/* PaX: if vma is mirrored, synchronize the mirror's PTE
96132+ *
96133+ * the ptl of the lower mapped page is held on entry and is not released on exit
96134+ * or inside this function, to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc.)
96135+ */
96136+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96137+{
96138+ struct mm_struct *mm = vma->vm_mm;
96139+ unsigned long address_m;
96140+ spinlock_t *ptl_m;
96141+ struct vm_area_struct *vma_m;
96142+ pmd_t *pmd_m;
96143+ pte_t *pte_m, entry_m;
96144+
96145+ BUG_ON(!page_m || !PageAnon(page_m));
96146+
96147+ vma_m = pax_find_mirror_vma(vma);
96148+ if (!vma_m)
96149+ return;
96150+
96151+ BUG_ON(!PageLocked(page_m));
96152+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96153+ address_m = address + SEGMEXEC_TASK_SIZE;
96154+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96155+ pte_m = pte_offset_map(pmd_m, address_m);
96156+ ptl_m = pte_lockptr(mm, pmd_m);
96157+ if (ptl != ptl_m) {
96158+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96159+ if (!pte_none(*pte_m))
96160+ goto out;
96161+ }
96162+
96163+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96164+ page_cache_get(page_m);
96165+ page_add_anon_rmap(page_m, vma_m, address_m);
96166+ inc_mm_counter_fast(mm, MM_ANONPAGES);
96167+ set_pte_at(mm, address_m, pte_m, entry_m);
96168+ update_mmu_cache(vma_m, address_m, pte_m);
96169+out:
96170+ if (ptl != ptl_m)
96171+ spin_unlock(ptl_m);
96172+ pte_unmap(pte_m);
96173+ unlock_page(page_m);
96174+}
96175+
96176+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96177+{
96178+ struct mm_struct *mm = vma->vm_mm;
96179+ unsigned long address_m;
96180+ spinlock_t *ptl_m;
96181+ struct vm_area_struct *vma_m;
96182+ pmd_t *pmd_m;
96183+ pte_t *pte_m, entry_m;
96184+
96185+ BUG_ON(!page_m || PageAnon(page_m));
96186+
96187+ vma_m = pax_find_mirror_vma(vma);
96188+ if (!vma_m)
96189+ return;
96190+
96191+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96192+ address_m = address + SEGMEXEC_TASK_SIZE;
96193+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96194+ pte_m = pte_offset_map(pmd_m, address_m);
96195+ ptl_m = pte_lockptr(mm, pmd_m);
96196+ if (ptl != ptl_m) {
96197+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96198+ if (!pte_none(*pte_m))
96199+ goto out;
96200+ }
96201+
96202+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96203+ page_cache_get(page_m);
96204+ page_add_file_rmap(page_m);
96205+ inc_mm_counter_fast(mm, MM_FILEPAGES);
96206+ set_pte_at(mm, address_m, pte_m, entry_m);
96207+ update_mmu_cache(vma_m, address_m, pte_m);
96208+out:
96209+ if (ptl != ptl_m)
96210+ spin_unlock(ptl_m);
96211+ pte_unmap(pte_m);
96212+}
96213+
96214+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96215+{
96216+ struct mm_struct *mm = vma->vm_mm;
96217+ unsigned long address_m;
96218+ spinlock_t *ptl_m;
96219+ struct vm_area_struct *vma_m;
96220+ pmd_t *pmd_m;
96221+ pte_t *pte_m, entry_m;
96222+
96223+ vma_m = pax_find_mirror_vma(vma);
96224+ if (!vma_m)
96225+ return;
96226+
96227+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96228+ address_m = address + SEGMEXEC_TASK_SIZE;
96229+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96230+ pte_m = pte_offset_map(pmd_m, address_m);
96231+ ptl_m = pte_lockptr(mm, pmd_m);
96232+ if (ptl != ptl_m) {
96233+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96234+ if (!pte_none(*pte_m))
96235+ goto out;
96236+ }
96237+
96238+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96239+ set_pte_at(mm, address_m, pte_m, entry_m);
96240+out:
96241+ if (ptl != ptl_m)
96242+ spin_unlock(ptl_m);
96243+ pte_unmap(pte_m);
96244+}
96245+
96246+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96247+{
96248+ struct page *page_m;
96249+ pte_t entry;
96250+
96251+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96252+ goto out;
96253+
96254+ entry = *pte;
96255+ page_m = vm_normal_page(vma, address, entry);
96256+ if (!page_m)
96257+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96258+ else if (PageAnon(page_m)) {
96259+ if (pax_find_mirror_vma(vma)) {
96260+ pte_unmap_unlock(pte, ptl);
96261+ lock_page(page_m);
96262+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96263+ if (pte_same(entry, *pte))
96264+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96265+ else
96266+ unlock_page(page_m);
96267+ }
96268+ } else
96269+ pax_mirror_file_pte(vma, address, page_m, ptl);
96270+
96271+out:
96272+ pte_unmap_unlock(pte, ptl);
96273+}
96274+#endif
96275+
96276 /*
96277 * This routine handles present pages, when users try to write
96278 * to a shared page. It is done by copying the page to a new address
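
All of the pax_mirror_*_pte() helpers added above derive the shadow location the same way: address_m = address + SEGMEXEC_TASK_SIZE. Under SEGMEXEC the i386 3GB user space is split in half, the lower half holding the data view and the upper half an executable mirror of every VM_EXEC mapping, which is how PaX gets per-page non-executability without NX hardware. A standalone sketch of the address math, assuming the usual i386 constants:

#include <stdint.h>
#include <stdio.h>

#define TASK_SIZE		0xC0000000UL	/* i386 3GB/1GB split */
#define SEGMEXEC_TASK_SIZE	(TASK_SIZE / 2)

static unsigned long mirror_address(unsigned long address)
{
	/* mirrors only exist for the lower half, hence the BUG_ON()s above */
	if (address >= SEGMEXEC_TASK_SIZE)
		return 0;
	return address + SEGMEXEC_TASK_SIZE;
}

int main(void)
{
	unsigned long addr = 0x08048000UL;	/* classic i386 ELF text base */
	printf("data view:   %#lx\n", addr);
	printf("exec mirror: %#lx\n", mirror_address(addr));
	return 0;
}
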
96279@@ -2212,6 +2419,12 @@ gotten:
96280 */
96281 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96282 if (likely(pte_same(*page_table, orig_pte))) {
96283+
96284+#ifdef CONFIG_PAX_SEGMEXEC
96285+ if (pax_find_mirror_vma(vma))
96286+ BUG_ON(!trylock_page(new_page));
96287+#endif
96288+
96289 if (old_page) {
96290 if (!PageAnon(old_page)) {
96291 dec_mm_counter_fast(mm, MM_FILEPAGES);
96292@@ -2265,6 +2478,10 @@ gotten:
96293 page_remove_rmap(old_page);
96294 }
96295
96296+#ifdef CONFIG_PAX_SEGMEXEC
96297+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96298+#endif
96299+
96300 /* Free the old page.. */
96301 new_page = old_page;
96302 ret |= VM_FAULT_WRITE;
96303@@ -2539,6 +2756,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96304 swap_free(entry);
96305 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
96306 try_to_free_swap(page);
96307+
96308+#ifdef CONFIG_PAX_SEGMEXEC
96309+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
96310+#endif
96311+
96312 unlock_page(page);
96313 if (page != swapcache) {
96314 /*
96315@@ -2562,6 +2784,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96316
96317 /* No need to invalidate - it was non-present before */
96318 update_mmu_cache(vma, address, page_table);
96319+
96320+#ifdef CONFIG_PAX_SEGMEXEC
96321+ pax_mirror_anon_pte(vma, address, page, ptl);
96322+#endif
96323+
96324 unlock:
96325 pte_unmap_unlock(page_table, ptl);
96326 out:
96327@@ -2581,40 +2808,6 @@ out_release:
96328 }
96329
96330 /*
96331- * This is like a special single-page "expand_{down|up}wards()",
96332- * except we must first make sure that 'address{-|+}PAGE_SIZE'
96333- * doesn't hit another vma.
96334- */
96335-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
96336-{
96337- address &= PAGE_MASK;
96338- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
96339- struct vm_area_struct *prev = vma->vm_prev;
96340-
96341- /*
96342- * Is there a mapping abutting this one below?
96343- *
96344- * That's only ok if it's the same stack mapping
96345- * that has gotten split..
96346- */
96347- if (prev && prev->vm_end == address)
96348- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
96349-
96350- return expand_downwards(vma, address - PAGE_SIZE);
96351- }
96352- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
96353- struct vm_area_struct *next = vma->vm_next;
96354-
96355- /* As VM_GROWSDOWN but s/below/above/ */
96356- if (next && next->vm_start == address + PAGE_SIZE)
96357- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
96358-
96359- return expand_upwards(vma, address + PAGE_SIZE);
96360- }
96361- return 0;
96362-}
96363-
96364-/*
96365 * We enter with non-exclusive mmap_sem (to exclude vma changes,
96366 * but allow concurrent faults), and pte mapped but not yet locked.
96367 * We return with mmap_sem still held, but pte unmapped and unlocked.
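
The deleted check_stack_guard_page() was upstream's way of keeping a single guard page between a growing stack and its neighbour, checked from the fault path one page at a time. The patch removes it because the same property is enforced more generally in the mm/mmap.c hunks further down, via sysctl_heap_stack_gap (64KB by default) in check_heap_stack_gap() and the expand_*wards() routines. A small demo of the difference in slack, with made-up addresses:

#include <stdio.h>

#define PAGE_SIZE 4096UL
static unsigned long sysctl_heap_stack_gap = 64 * 1024;

int main(void)
{
	unsigned long stack_start = 0xbffe0000UL;	/* hypothetical stack bottom */
	unsigned long neigh_end   = 0xbffdf000UL;	/* mapping just below it */

	/* one page of slack: enough for the old guard page, not for the gap */
	printf("upstream guard page ok:  %d\n",
	       stack_start - neigh_end >= PAGE_SIZE);
	printf("grsec heap-stack gap ok: %d\n",
	       stack_start - neigh_end >= sysctl_heap_stack_gap);
	return 0;
}
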
96368@@ -2624,27 +2817,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96369 unsigned int flags)
96370 {
96371 struct mem_cgroup *memcg;
96372- struct page *page;
96373+ struct page *page = NULL;
96374 spinlock_t *ptl;
96375 pte_t entry;
96376
96377- pte_unmap(page_table);
96378-
96379- /* Check if we need to add a guard page to the stack */
96380- if (check_stack_guard_page(vma, address) < 0)
96381- return VM_FAULT_SIGSEGV;
96382-
96383- /* Use the zero-page for reads */
96384 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
96385 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
96386 vma->vm_page_prot));
96387- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96388+ ptl = pte_lockptr(mm, pmd);
96389+ spin_lock(ptl);
96390 if (!pte_none(*page_table))
96391 goto unlock;
96392 goto setpte;
96393 }
96394
96395 /* Allocate our own private page. */
96396+ pte_unmap(page_table);
96397+
96398 if (unlikely(anon_vma_prepare(vma)))
96399 goto oom;
96400 page = alloc_zeroed_user_highpage_movable(vma, address);
96401@@ -2668,6 +2857,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96402 if (!pte_none(*page_table))
96403 goto release;
96404
96405+#ifdef CONFIG_PAX_SEGMEXEC
96406+ if (pax_find_mirror_vma(vma))
96407+ BUG_ON(!trylock_page(page));
96408+#endif
96409+
96410 inc_mm_counter_fast(mm, MM_ANONPAGES);
96411 page_add_new_anon_rmap(page, vma, address);
96412 mem_cgroup_commit_charge(page, memcg, false);
96413@@ -2677,6 +2871,12 @@ setpte:
96414
96415 /* No need to invalidate - it was non-present before */
96416 update_mmu_cache(vma, address, page_table);
96417+
96418+#ifdef CONFIG_PAX_SEGMEXEC
96419+ if (page)
96420+ pax_mirror_anon_pte(vma, address, page, ptl);
96421+#endif
96422+
96423 unlock:
96424 pte_unmap_unlock(page_table, ptl);
96425 return 0;
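
The reworked do_anonymous_page() initializes page to NULL and, on the zero-page read path, no longer calls pte_offset_map_lock(): page_table is still mapped because the early pte_unmap() was moved down to the allocation path, so only the PTE spinlock is taken via pte_lockptr(). The NULL sentinel then lets the SEGMEXEC epilogue mirror only real anonymous pages. A stubbed user-space model of that two-path flow, not kernel code:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void pax_mirror_anon_pte(void *page) { printf("mirrored %p\n", page); }

static int fault_anonymous(bool write)
{
	void *page = NULL;

	if (!write) {
		puts("read fault: map the shared zero page, no allocation");
	} else {
		page = malloc(4096);	/* stands in for a zeroed user page */
		if (!page)
			return -1;	/* VM_FAULT_OOM in the kernel */
		puts("write fault: private zeroed page installed");
	}

	if (page)	/* mirror only real pages, as the `if (page)` above does */
		pax_mirror_anon_pte(page);
	free(page);
	return 0;
}

int main(void)
{
	fault_anonymous(false);
	fault_anonymous(true);
	return 0;
}
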
96426@@ -2907,6 +3107,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96427 return ret;
96428 }
96429 do_set_pte(vma, address, fault_page, pte, false, false);
96430+
96431+#ifdef CONFIG_PAX_SEGMEXEC
96432+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96433+#endif
96434+
96435 unlock_page(fault_page);
96436 unlock_out:
96437 pte_unmap_unlock(pte, ptl);
96438@@ -2949,7 +3154,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96439 page_cache_release(fault_page);
96440 goto uncharge_out;
96441 }
96442+
96443+#ifdef CONFIG_PAX_SEGMEXEC
96444+ if (pax_find_mirror_vma(vma))
96445+ BUG_ON(!trylock_page(new_page));
96446+#endif
96447+
96448 do_set_pte(vma, address, new_page, pte, true, true);
96449+
96450+#ifdef CONFIG_PAX_SEGMEXEC
96451+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96452+#endif
96453+
96454 mem_cgroup_commit_charge(new_page, memcg, false);
96455 lru_cache_add_active_or_unevictable(new_page, vma);
96456 pte_unmap_unlock(pte, ptl);
96457@@ -2999,6 +3215,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96458 return ret;
96459 }
96460 do_set_pte(vma, address, fault_page, pte, true, false);
96461+
96462+#ifdef CONFIG_PAX_SEGMEXEC
96463+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96464+#endif
96465+
96466 pte_unmap_unlock(pte, ptl);
96467
96468 if (set_page_dirty(fault_page))
96469@@ -3255,6 +3476,12 @@ static int handle_pte_fault(struct mm_struct *mm,
96470 if (flags & FAULT_FLAG_WRITE)
96471 flush_tlb_fix_spurious_fault(vma, address);
96472 }
96473+
96474+#ifdef CONFIG_PAX_SEGMEXEC
96475+ pax_mirror_pte(vma, address, pte, pmd, ptl);
96476+ return 0;
96477+#endif
96478+
96479 unlock:
96480 pte_unmap_unlock(pte, ptl);
96481 return 0;
96482@@ -3274,9 +3501,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96483 pmd_t *pmd;
96484 pte_t *pte;
96485
96486+#ifdef CONFIG_PAX_SEGMEXEC
96487+ struct vm_area_struct *vma_m;
96488+#endif
96489+
96490 if (unlikely(is_vm_hugetlb_page(vma)))
96491 return hugetlb_fault(mm, vma, address, flags);
96492
96493+#ifdef CONFIG_PAX_SEGMEXEC
96494+ vma_m = pax_find_mirror_vma(vma);
96495+ if (vma_m) {
96496+ unsigned long address_m;
96497+ pgd_t *pgd_m;
96498+ pud_t *pud_m;
96499+ pmd_t *pmd_m;
96500+
96501+ if (vma->vm_start > vma_m->vm_start) {
96502+ address_m = address;
96503+ address -= SEGMEXEC_TASK_SIZE;
96504+ vma = vma_m;
96505+ } else
96506+ address_m = address + SEGMEXEC_TASK_SIZE;
96507+
96508+ pgd_m = pgd_offset(mm, address_m);
96509+ pud_m = pud_alloc(mm, pgd_m, address_m);
96510+ if (!pud_m)
96511+ return VM_FAULT_OOM;
96512+ pmd_m = pmd_alloc(mm, pud_m, address_m);
96513+ if (!pmd_m)
96514+ return VM_FAULT_OOM;
96515+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
96516+ return VM_FAULT_OOM;
96517+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
96518+ }
96519+#endif
96520+
96521 pgd = pgd_offset(mm, address);
96522 pud = pud_alloc(mm, pgd, address);
96523 if (!pud)
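
Before the regular page-table walk, the SEGMEXEC block in __handle_mm_fault() canonicalizes the fault: if it hit the mirror half, the handler retargets vma/address to the lower data half, pre-allocates the mirror's pud/pmd/pte, and clears any stale mirror PTE with pax_unmap_mirror_pte() so pax_mirror_pte() can repopulate it once the real fault is serviced. The kernel decides which half faulted by comparing the two vmas' start addresses; the sketch below keys off the address itself for brevity, assuming the usual i386 split:

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL

static void canonicalize(unsigned long address,
			 unsigned long *data, unsigned long *mirror)
{
	if (address >= SEGMEXEC_TASK_SIZE) {	/* fault hit the mirror half */
		*mirror = address;
		*data = address - SEGMEXEC_TASK_SIZE;
	} else {				/* fault hit the data half */
		*data = address;
		*mirror = address + SEGMEXEC_TASK_SIZE;
	}
}

int main(void)
{
	unsigned long d, m;
	canonicalize(0x08048123UL, &d, &m);
	printf("data %#lx mirror %#lx\n", d, m);
	canonicalize(0x68048123UL, &d, &m);
	printf("data %#lx mirror %#lx\n", d, m);
	return 0;
}
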
96524@@ -3411,6 +3670,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96525 spin_unlock(&mm->page_table_lock);
96526 return 0;
96527 }
96528+
96529+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96530+{
96531+ pud_t *new = pud_alloc_one(mm, address);
96532+ if (!new)
96533+ return -ENOMEM;
96534+
96535+ smp_wmb(); /* See comment in __pte_alloc */
96536+
96537+ spin_lock(&mm->page_table_lock);
96538+ if (pgd_present(*pgd)) /* Another has populated it */
96539+ pud_free(mm, new);
96540+ else
96541+ pgd_populate_kernel(mm, pgd, new);
96542+ spin_unlock(&mm->page_table_lock);
96543+ return 0;
96544+}
96545 #endif /* __PAGETABLE_PUD_FOLDED */
96546
96547 #ifndef __PAGETABLE_PMD_FOLDED
96548@@ -3441,6 +3717,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
96549 spin_unlock(&mm->page_table_lock);
96550 return 0;
96551 }
96552+
96553+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
96554+{
96555+ pmd_t *new = pmd_alloc_one(mm, address);
96556+ if (!new)
96557+ return -ENOMEM;
96558+
96559+ smp_wmb(); /* See comment in __pte_alloc */
96560+
96561+ spin_lock(&mm->page_table_lock);
96562+#ifndef __ARCH_HAS_4LEVEL_HACK
96563+ if (pud_present(*pud)) /* Another has populated it */
96564+ pmd_free(mm, new);
96565+ else
96566+ pud_populate_kernel(mm, pud, new);
96567+#else
96568+ if (pgd_present(*pud)) /* Another has populated it */
96569+ pmd_free(mm, new);
96570+ else
96571+ pgd_populate_kernel(mm, pud, new);
96572+#endif /* __ARCH_HAS_4LEVEL_HACK */
96573+ spin_unlock(&mm->page_table_lock);
96574+ return 0;
96575+}
96576 #endif /* __PAGETABLE_PMD_FOLDED */
96577
96578 static int __follow_pte(struct mm_struct *mm, unsigned long address,
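
__pud_alloc_kernel() and __pmd_alloc_kernel() mirror the stock allocators but populate the parent entry with pgd_populate_kernel()/pud_populate_kernel(), which under KERNEXEC/PER_CPU_PGD write through the kernel-writable alias rather than the hardened mapping. The apply_to_pmd_range()/apply_to_pud_range() hunks earlier select them whenever mm == &init_mm. A trivial stubbed model of that selection:

#include <stdio.h>

struct mm_struct { const char *name; };
static struct mm_struct init_mm = { "init_mm" };

static int pmd_alloc(struct mm_struct *mm)        { printf("pmd_alloc for %s\n", mm->name); return 0; }
static int pmd_alloc_kernel(struct mm_struct *mm) { printf("pmd_alloc_kernel for %s\n", mm->name); return 0; }

static int alloc_pmd_for(struct mm_struct *mm)
{
	/* same shape as the ternary in apply_to_pmd_range() above */
	return (mm == &init_mm) ? pmd_alloc_kernel(mm) : pmd_alloc(mm);
}

int main(void)
{
	struct mm_struct user = { "user mm" };
	alloc_pmd_for(&init_mm);
	alloc_pmd_for(&user);
	return 0;
}
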
96579@@ -3550,8 +3850,8 @@ out:
96580 return ret;
96581 }
96582
96583-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96584- void *buf, int len, int write)
96585+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96586+ void *buf, size_t len, int write)
96587 {
96588 resource_size_t phys_addr;
96589 unsigned long prot = 0;
96590@@ -3577,8 +3877,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
96591 * Access another process' address space as given in mm. If non-NULL, use the
96592 * given task for page fault accounting.
96593 */
96594-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96595- unsigned long addr, void *buf, int len, int write)
96596+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96597+ unsigned long addr, void *buf, size_t len, int write)
96598 {
96599 struct vm_area_struct *vma;
96600 void *old_buf = buf;
96601@@ -3586,7 +3886,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96602 down_read(&mm->mmap_sem);
96603 /* ignore errors, just check how much was successfully transferred */
96604 while (len) {
96605- int bytes, ret, offset;
96606+ ssize_t bytes, ret, offset;
96607 void *maddr;
96608 struct page *page = NULL;
96609
96610@@ -3647,8 +3947,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96611 *
96612 * The caller must hold a reference on @mm.
96613 */
96614-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96615- void *buf, int len, int write)
96616+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
96617+ void *buf, size_t len, int write)
96618 {
96619 return __access_remote_vm(NULL, mm, addr, buf, len, write);
96620 }
96621@@ -3658,11 +3958,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96622 * Source/target buffer must be kernel space,
96623 * Do not walk the page table directly, use get_user_pages
96624 */
96625-int access_process_vm(struct task_struct *tsk, unsigned long addr,
96626- void *buf, int len, int write)
96627+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
96628+ void *buf, size_t len, int write)
96629 {
96630 struct mm_struct *mm;
96631- int ret;
96632+ ssize_t ret;
96633
96634 mm = get_task_mm(tsk);
96635 if (!mm)
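
The generic_access_phys()/__access_remote_vm()/access_remote_vm()/access_process_vm() prototypes are widened from int to ssize_t/size_t so a length above 2GB can neither truncate nor go negative on 64-bit. A standalone demonstration of what the old int parameter would do; the exact truncated value is implementation-defined, but this is the typical two's-complement result:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	size_t len = 3UL * 1024 * 1024 * 1024;	/* a 3GB request */
	int as_int = (int)len;			/* what the old prototypes saw */
	ssize_t as_ssize = (ssize_t)len;

	printf("size_t len = %zu\n", len);
	printf("as int     = %d\n", as_int);	/* typically -1073741824 */
	printf("as ssize_t = %zd\n", as_ssize);
	return 0;
}
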
96636diff --git a/mm/mempolicy.c b/mm/mempolicy.c
96637index 0e0961b..c9143b9 100644
96638--- a/mm/mempolicy.c
96639+++ b/mm/mempolicy.c
96640@@ -744,6 +744,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96641 unsigned long vmstart;
96642 unsigned long vmend;
96643
96644+#ifdef CONFIG_PAX_SEGMEXEC
96645+ struct vm_area_struct *vma_m;
96646+#endif
96647+
96648 vma = find_vma(mm, start);
96649 if (!vma || vma->vm_start > start)
96650 return -EFAULT;
96651@@ -787,6 +791,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96652 err = vma_replace_policy(vma, new_pol);
96653 if (err)
96654 goto out;
96655+
96656+#ifdef CONFIG_PAX_SEGMEXEC
96657+ vma_m = pax_find_mirror_vma(vma);
96658+ if (vma_m) {
96659+ err = vma_replace_policy(vma_m, new_pol);
96660+ if (err)
96661+ goto out;
96662+ }
96663+#endif
96664+
96665 }
96666
96667 out:
96668@@ -1201,6 +1215,17 @@ static long do_mbind(unsigned long start, unsigned long len,
96669
96670 if (end < start)
96671 return -EINVAL;
96672+
96673+#ifdef CONFIG_PAX_SEGMEXEC
96674+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
96675+ if (end > SEGMEXEC_TASK_SIZE)
96676+ return -EINVAL;
96677+ } else
96678+#endif
96679+
96680+ if (end > TASK_SIZE)
96681+ return -EINVAL;
96682+
96683 if (end == start)
96684 return 0;
96685
96686@@ -1426,8 +1451,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96687 */
96688 tcred = __task_cred(task);
96689 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96690- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96691- !capable(CAP_SYS_NICE)) {
96692+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96693 rcu_read_unlock();
96694 err = -EPERM;
96695 goto out_put;
96696@@ -1458,6 +1482,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96697 goto out;
96698 }
96699
96700+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96701+ if (mm != current->mm &&
96702+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96703+ mmput(mm);
96704+ err = -EPERM;
96705+ goto out;
96706+ }
96707+#endif
96708+
96709 err = do_migrate_pages(mm, old, new,
96710 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
96711
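
Both migrate_pages() and move_pages() (next hunk) drop the uid_eq(cred->uid, tcred->uid) clause from the permission check, so sharing only the target's real uid no longer authorizes cross-process page migration; the caller needs a euid match, a saved-uid match, or CAP_SYS_NICE. A small model of the tightened predicate with made-up uids:

#include <stdbool.h>
#include <stdio.h>

struct cred { unsigned uid, euid, suid; };

/* the clauses that remain after the grsecurity change */
static bool may_migrate(const struct cred *c, const struct cred *t, bool cap_sys_nice)
{
	return c->euid == t->suid || c->euid == t->uid ||
	       c->uid == t->suid || cap_sys_nice;
}

int main(void)
{
	/* caller shares only the *real* uid with the target: upstream's extra
	 * "uid == tcred->uid" clause would have allowed this, grsec denies it */
	struct cred caller = { .uid = 1000, .euid = 2000, .suid = 2000 };
	struct cred target = { .uid = 1000, .euid = 3000, .suid = 3000 };

	printf("allowed: %d\n", may_migrate(&caller, &target, false));
	printf("allowed with CAP_SYS_NICE: %d\n", may_migrate(&caller, &target, true));
	return 0;
}
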
96712diff --git a/mm/migrate.c b/mm/migrate.c
96713index 344cdf6..07399500 100644
96714--- a/mm/migrate.c
96715+++ b/mm/migrate.c
96716@@ -1503,8 +1503,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96717 */
96718 tcred = __task_cred(task);
96719 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96720- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96721- !capable(CAP_SYS_NICE)) {
96722+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96723 rcu_read_unlock();
96724 err = -EPERM;
96725 goto out;
96726diff --git a/mm/mlock.c b/mm/mlock.c
96727index 73cf098..ab547c7 100644
96728--- a/mm/mlock.c
96729+++ b/mm/mlock.c
96730@@ -14,6 +14,7 @@
96731 #include <linux/pagevec.h>
96732 #include <linux/mempolicy.h>
96733 #include <linux/syscalls.h>
96734+#include <linux/security.h>
96735 #include <linux/sched.h>
96736 #include <linux/export.h>
96737 #include <linux/rmap.h>
96738@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
96739 {
96740 unsigned long nstart, end, tmp;
96741 struct vm_area_struct * vma, * prev;
96742- int error;
96743+ int error = 0;
96744
96745 VM_BUG_ON(start & ~PAGE_MASK);
96746 VM_BUG_ON(len != PAGE_ALIGN(len));
96747@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
96748 return -EINVAL;
96749 if (end == start)
96750 return 0;
96751+ if (end > TASK_SIZE)
96752+ return -EINVAL;
96753+
96754 vma = find_vma(current->mm, start);
96755 if (!vma || vma->vm_start > start)
96756 return -ENOMEM;
96757@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
96758 for (nstart = start ; ; ) {
96759 vm_flags_t newflags;
96760
96761+#ifdef CONFIG_PAX_SEGMEXEC
96762+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96763+ break;
96764+#endif
96765+
96766 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
96767
96768 newflags = vma->vm_flags & ~VM_LOCKED;
96769@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
96770 locked += current->mm->locked_vm;
96771
96772 /* check against resource limits */
96773+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
96774 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
96775 error = do_mlock(start, len, 1);
96776
96777@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
96778 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
96779 vm_flags_t newflags;
96780
96781+#ifdef CONFIG_PAX_SEGMEXEC
96782+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96783+ break;
96784+#endif
96785+
96786 newflags = vma->vm_flags & ~VM_LOCKED;
96787 if (flags & MCL_CURRENT)
96788 newflags |= VM_LOCKED;
96789@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
96790 lock_limit >>= PAGE_SHIFT;
96791
96792 ret = -ENOMEM;
96793+
96794+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
96795+
96796 down_write(&current->mm->mmap_sem);
96797-
96798 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
96799 capable(CAP_IPC_LOCK))
96800 ret = do_mlockall(flags);
96801diff --git a/mm/mmap.c b/mm/mmap.c
96802index 7f684d5..bb9333f 100644
96803--- a/mm/mmap.c
96804+++ b/mm/mmap.c
96805@@ -41,6 +41,7 @@
96806 #include <linux/notifier.h>
96807 #include <linux/memory.h>
96808 #include <linux/printk.h>
96809+#include <linux/random.h>
96810
96811 #include <asm/uaccess.h>
96812 #include <asm/cacheflush.h>
96813@@ -57,6 +58,16 @@
96814 #define arch_rebalance_pgtables(addr, len) (addr)
96815 #endif
96816
96817+static inline void verify_mm_writelocked(struct mm_struct *mm)
96818+{
96819+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
96820+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
96821+ up_read(&mm->mmap_sem);
96822+ BUG();
96823+ }
96824+#endif
96825+}
96826+
96827 static void unmap_region(struct mm_struct *mm,
96828 struct vm_area_struct *vma, struct vm_area_struct *prev,
96829 unsigned long start, unsigned long end);
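
verify_mm_writelocked() asserts that the caller holds mmap_sem for writing by observing that a read-trylock must fail in that case; mmap_region() uses it further down to document its locking contract. A user-space analogue with a pthread rwlock (build with -lpthread; POSIX leaves same-thread recursion loosely specified, though glibc returns EBUSY here just as the kernel's down_read_trylock() fails for a write-held semaphore):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

static void verify_writelocked(pthread_rwlock_t *sem)
{
	if (pthread_rwlock_tryrdlock(sem) == 0) {	/* should have failed */
		pthread_rwlock_unlock(sem);
		assert(!"caller does not hold the write lock");
	}
}

int main(void)
{
	pthread_rwlock_wrlock(&mmap_sem);
	verify_writelocked(&mmap_sem);	/* passes: we hold it for writing */
	pthread_rwlock_unlock(&mmap_sem);
	puts("assertion held while write-locked");
	return 0;
}
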
96830@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
96831 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
96832 *
96833 */
96834-pgprot_t protection_map[16] = {
96835+pgprot_t protection_map[16] __read_only = {
96836 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
96837 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
96838 };
96839
96840-pgprot_t vm_get_page_prot(unsigned long vm_flags)
96841+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
96842 {
96843- return __pgprot(pgprot_val(protection_map[vm_flags &
96844+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
96845 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
96846 pgprot_val(arch_vm_get_page_prot(vm_flags)));
96847+
96848+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96849+ if (!(__supported_pte_mask & _PAGE_NX) &&
96850+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
96851+ (vm_flags & (VM_READ | VM_WRITE)))
96852+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
96853+#endif
96854+
96855+ return prot;
96856 }
96857 EXPORT_SYMBOL(vm_get_page_prot);
96858
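
vm_get_page_prot() indexes the 16-entry protection_map with the low read/write/exec/shared bits of vm_flags; the patch makes the table __read_only and, for PAGEEXEC on non-NX i386 hardware, strips the executable bit from VM_PAGEEXEC mappings so executability can be emulated. A toy model of the table lookup; the kernel's VM_READ/VM_WRITE/VM_EXEC/VM_SHARED really are 0x1/0x2/0x4/0x8, but the table entries here are just strings, not real pgprot values:

#include <stdio.h>

#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

static const char *protection_map[16] = {
	"---p", "r--p", "-w-p", "rw-p", "--xp", "r-xp", "-wxp", "rwxp",
	"---s", "r--s", "-w-s", "rw-s", "--xs", "r-xs", "-wxs", "rwxs",
};

int main(void)
{
	unsigned long vm_flags = VM_READ | VM_EXEC;
	printf("prot = %s\n",
	       protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]);
	return 0;
}
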
96859@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
96860 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
96861 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
96862 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
96863+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
96864 /*
96865 * Make sure vm_committed_as in one cacheline and not cacheline shared with
96866 * other variables. It can be updated by several CPUs frequently.
96867@@ -152,7 +173,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
96868 */
96869 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
96870 {
96871- unsigned long free, allowed, reserve;
96872+ long free, allowed, reserve;
96873
96874 VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
96875 -(s64)vm_committed_as_batch * num_online_cpus(),
96876@@ -220,7 +241,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
96877 */
96878 if (mm) {
96879 reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
96880- allowed -= min(mm->total_vm / 32, reserve);
96881+ allowed -= min_t(long, mm->total_vm / 32, reserve);
96882 }
96883
96884 if (percpu_counter_read_positive(&vm_committed_as) < allowed)
96885@@ -274,6 +295,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
96886 struct vm_area_struct *next = vma->vm_next;
96887
96888 might_sleep();
96889+ BUG_ON(vma->vm_mirror);
96890 if (vma->vm_ops && vma->vm_ops->close)
96891 vma->vm_ops->close(vma);
96892 if (vma->vm_file)
96893@@ -287,6 +309,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
96894
96895 SYSCALL_DEFINE1(brk, unsigned long, brk)
96896 {
96897+ unsigned long rlim;
96898 unsigned long retval;
96899 unsigned long newbrk, oldbrk;
96900 struct mm_struct *mm = current->mm;
96901@@ -317,7 +340,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
96902 * segment grow beyond its set limit the in case where the limit is
96903 * not page aligned -Ram Gupta
96904 */
96905- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
96906+ rlim = rlimit(RLIMIT_DATA);
96907+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96908+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
96909+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
96910+ rlim = 4096 * PAGE_SIZE;
96911+#endif
96912+ if (check_data_rlimit(rlim, brk, mm->start_brk,
96913 mm->end_data, mm->start_data))
96914 goto out;
96915
96916@@ -978,6 +1007,12 @@ static int
96917 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
96918 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96919 {
96920+
96921+#ifdef CONFIG_PAX_SEGMEXEC
96922+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
96923+ return 0;
96924+#endif
96925+
96926 if (is_mergeable_vma(vma, file, vm_flags) &&
96927 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96928 if (vma->vm_pgoff == vm_pgoff)
96929@@ -997,6 +1032,12 @@ static int
96930 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96931 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96932 {
96933+
96934+#ifdef CONFIG_PAX_SEGMEXEC
96935+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
96936+ return 0;
96937+#endif
96938+
96939 if (is_mergeable_vma(vma, file, vm_flags) &&
96940 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96941 pgoff_t vm_pglen;
96942@@ -1046,6 +1087,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96943 struct vm_area_struct *area, *next;
96944 int err;
96945
96946+#ifdef CONFIG_PAX_SEGMEXEC
96947+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
96948+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
96949+
96950+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
96951+#endif
96952+
96953 /*
96954 * We later require that vma->vm_flags == vm_flags,
96955 * so this tests vma->vm_flags & VM_SPECIAL, too.
96956@@ -1061,6 +1109,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96957 if (next && next->vm_end == end) /* cases 6, 7, 8 */
96958 next = next->vm_next;
96959
96960+#ifdef CONFIG_PAX_SEGMEXEC
96961+ if (prev)
96962+ prev_m = pax_find_mirror_vma(prev);
96963+ if (area)
96964+ area_m = pax_find_mirror_vma(area);
96965+ if (next)
96966+ next_m = pax_find_mirror_vma(next);
96967+#endif
96968+
96969 /*
96970 * Can it merge with the predecessor?
96971 */
96972@@ -1080,9 +1137,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96973 /* cases 1, 6 */
96974 err = vma_adjust(prev, prev->vm_start,
96975 next->vm_end, prev->vm_pgoff, NULL);
96976- } else /* cases 2, 5, 7 */
96977+
96978+#ifdef CONFIG_PAX_SEGMEXEC
96979+ if (!err && prev_m)
96980+ err = vma_adjust(prev_m, prev_m->vm_start,
96981+ next_m->vm_end, prev_m->vm_pgoff, NULL);
96982+#endif
96983+
96984+ } else { /* cases 2, 5, 7 */
96985 err = vma_adjust(prev, prev->vm_start,
96986 end, prev->vm_pgoff, NULL);
96987+
96988+#ifdef CONFIG_PAX_SEGMEXEC
96989+ if (!err && prev_m)
96990+ err = vma_adjust(prev_m, prev_m->vm_start,
96991+ end_m, prev_m->vm_pgoff, NULL);
96992+#endif
96993+
96994+ }
96995 if (err)
96996 return NULL;
96997 khugepaged_enter_vma_merge(prev, vm_flags);
96998@@ -1096,12 +1168,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96999 mpol_equal(policy, vma_policy(next)) &&
97000 can_vma_merge_before(next, vm_flags,
97001 anon_vma, file, pgoff+pglen)) {
97002- if (prev && addr < prev->vm_end) /* case 4 */
97003+ if (prev && addr < prev->vm_end) { /* case 4 */
97004 err = vma_adjust(prev, prev->vm_start,
97005 addr, prev->vm_pgoff, NULL);
97006- else /* cases 3, 8 */
97007+
97008+#ifdef CONFIG_PAX_SEGMEXEC
97009+ if (!err && prev_m)
97010+ err = vma_adjust(prev_m, prev_m->vm_start,
97011+ addr_m, prev_m->vm_pgoff, NULL);
97012+#endif
97013+
97014+ } else { /* cases 3, 8 */
97015 err = vma_adjust(area, addr, next->vm_end,
97016 next->vm_pgoff - pglen, NULL);
97017+
97018+#ifdef CONFIG_PAX_SEGMEXEC
97019+ if (!err && area_m)
97020+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
97021+ next_m->vm_pgoff - pglen, NULL);
97022+#endif
97023+
97024+ }
97025 if (err)
97026 return NULL;
97027 khugepaged_enter_vma_merge(area, vm_flags);
97028@@ -1210,8 +1297,10 @@ none:
97029 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97030 struct file *file, long pages)
97031 {
97032- const unsigned long stack_flags
97033- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
97034+
97035+#ifdef CONFIG_PAX_RANDMMAP
97036+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97037+#endif
97038
97039 mm->total_vm += pages;
97040
97041@@ -1219,7 +1308,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97042 mm->shared_vm += pages;
97043 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
97044 mm->exec_vm += pages;
97045- } else if (flags & stack_flags)
97046+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
97047 mm->stack_vm += pages;
97048 }
97049 #endif /* CONFIG_PROC_FS */
97050@@ -1249,6 +1338,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
97051 locked += mm->locked_vm;
97052 lock_limit = rlimit(RLIMIT_MEMLOCK);
97053 lock_limit >>= PAGE_SHIFT;
97054+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97055 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
97056 return -EAGAIN;
97057 }
97058@@ -1275,7 +1365,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97059 * (the exception is when the underlying filesystem is noexec
97060 * mounted, in which case we dont add PROT_EXEC.)
97061 */
97062- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
97063+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
97064 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
97065 prot |= PROT_EXEC;
97066
97067@@ -1301,7 +1391,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97068 /* Obtain the address to map to. we verify (or select) it and ensure
97069 * that it represents a valid section of the address space.
97070 */
97071- addr = get_unmapped_area(file, addr, len, pgoff, flags);
97072+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
97073 if (addr & ~PAGE_MASK)
97074 return addr;
97075
97076@@ -1312,6 +1402,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97077 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
97078 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
97079
97080+#ifdef CONFIG_PAX_MPROTECT
97081+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97082+
97083+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
97084+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
97085+ mm->binfmt->handle_mmap)
97086+ mm->binfmt->handle_mmap(file);
97087+#endif
97088+
97089+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97090+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
97091+ gr_log_rwxmmap(file);
97092+
97093+#ifdef CONFIG_PAX_EMUPLT
97094+ vm_flags &= ~VM_EXEC;
97095+#else
97096+ return -EPERM;
97097+#endif
97098+
97099+ }
97100+
97101+ if (!(vm_flags & VM_EXEC))
97102+ vm_flags &= ~VM_MAYEXEC;
97103+#else
97104+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97105+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97106+#endif
97107+ else
97108+ vm_flags &= ~VM_MAYWRITE;
97109+ }
97110+#endif
97111+
97112+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97113+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97114+ vm_flags &= ~VM_PAGEEXEC;
97115+#endif
97116+
97117 if (flags & MAP_LOCKED)
97118 if (!can_do_mlock())
97119 return -EPERM;
97120@@ -1399,6 +1526,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97121 vm_flags |= VM_NORESERVE;
97122 }
97123
97124+ if (!gr_acl_handle_mmap(file, prot))
97125+ return -EACCES;
97126+
97127 addr = mmap_region(file, addr, len, vm_flags, pgoff);
97128 if (!IS_ERR_VALUE(addr) &&
97129 ((vm_flags & VM_LOCKED) ||
97130@@ -1492,7 +1622,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
97131 vm_flags_t vm_flags = vma->vm_flags;
97132
97133 /* If it was private or non-writable, the write bit is already clear */
97134- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
97135+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
97136 return 0;
97137
97138 /* The backer wishes to know when pages are first written to? */
97139@@ -1543,7 +1673,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97140 struct rb_node **rb_link, *rb_parent;
97141 unsigned long charged = 0;
97142
97143+#ifdef CONFIG_PAX_SEGMEXEC
97144+ struct vm_area_struct *vma_m = NULL;
97145+#endif
97146+
97147+ /*
97148+ * mm->mmap_sem is required to protect against another thread
97149+ * changing the mappings in case we sleep.
97150+ */
97151+ verify_mm_writelocked(mm);
97152+
97153 /* Check against address space limit. */
97154+
97155+#ifdef CONFIG_PAX_RANDMMAP
97156+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97157+#endif
97158+
97159 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
97160 unsigned long nr_pages;
97161
97162@@ -1562,11 +1707,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97163
97164 /* Clear old maps */
97165 error = -ENOMEM;
97166-munmap_back:
97167 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97168 if (do_munmap(mm, addr, len))
97169 return -ENOMEM;
97170- goto munmap_back;
97171+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97172 }
97173
97174 /*
97175@@ -1597,6 +1741,16 @@ munmap_back:
97176 goto unacct_error;
97177 }
97178
97179+#ifdef CONFIG_PAX_SEGMEXEC
97180+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97181+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97182+ if (!vma_m) {
97183+ error = -ENOMEM;
97184+ goto free_vma;
97185+ }
97186+ }
97187+#endif
97188+
97189 vma->vm_mm = mm;
97190 vma->vm_start = addr;
97191 vma->vm_end = addr + len;
97192@@ -1627,6 +1781,13 @@ munmap_back:
97193 if (error)
97194 goto unmap_and_free_vma;
97195
97196+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97197+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97198+ vma->vm_flags |= VM_PAGEEXEC;
97199+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97200+ }
97201+#endif
97202+
97203 /* Can addr have changed??
97204 *
97205 * Answer: Yes, several device drivers can do it in their
97206@@ -1645,6 +1806,12 @@ munmap_back:
97207 }
97208
97209 vma_link(mm, vma, prev, rb_link, rb_parent);
97210+
97211+#ifdef CONFIG_PAX_SEGMEXEC
97212+ if (vma_m)
97213+ BUG_ON(pax_mirror_vma(vma_m, vma));
97214+#endif
97215+
97216 /* Once vma denies write, undo our temporary denial count */
97217 if (file) {
97218 if (vm_flags & VM_SHARED)
97219@@ -1657,6 +1824,7 @@ out:
97220 perf_event_mmap(vma);
97221
97222 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97223+ track_exec_limit(mm, addr, addr + len, vm_flags);
97224 if (vm_flags & VM_LOCKED) {
97225 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
97226 vma == get_gate_vma(current->mm)))
97227@@ -1694,6 +1862,12 @@ allow_write_and_free_vma:
97228 if (vm_flags & VM_DENYWRITE)
97229 allow_write_access(file);
97230 free_vma:
97231+
97232+#ifdef CONFIG_PAX_SEGMEXEC
97233+ if (vma_m)
97234+ kmem_cache_free(vm_area_cachep, vma_m);
97235+#endif
97236+
97237 kmem_cache_free(vm_area_cachep, vma);
97238 unacct_error:
97239 if (charged)
97240@@ -1701,7 +1875,63 @@ unacct_error:
97241 return error;
97242 }
97243
97244-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97245+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
97246+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
97247+{
97248+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
97249+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
97250+
97251+ return 0;
97252+}
97253+#endif
97254+
97255+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
97256+{
97257+ if (!vma) {
97258+#ifdef CONFIG_STACK_GROWSUP
97259+ if (addr > sysctl_heap_stack_gap)
97260+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97261+ else
97262+ vma = find_vma(current->mm, 0);
97263+ if (vma && (vma->vm_flags & VM_GROWSUP))
97264+ return false;
97265+#endif
97266+ return true;
97267+ }
97268+
97269+ if (addr + len > vma->vm_start)
97270+ return false;
97271+
97272+ if (vma->vm_flags & VM_GROWSDOWN)
97273+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97274+#ifdef CONFIG_STACK_GROWSUP
97275+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97276+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
97277+#endif
97278+ else if (offset)
97279+ return offset <= vma->vm_start - addr - len;
97280+
97281+ return true;
97282+}
97283+
97284+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
97285+{
97286+ if (vma->vm_start < len)
97287+ return -ENOMEM;
97288+
97289+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
97290+ if (offset <= vma->vm_start - len)
97291+ return vma->vm_start - len - offset;
97292+ else
97293+ return -ENOMEM;
97294+ }
97295+
97296+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
97297+ return vma->vm_start - len - sysctl_heap_stack_gap;
97298+ return -ENOMEM;
97299+}
97300+
97301+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
97302 {
97303 /*
97304 * We implement the search by looking for an rbtree node that
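
check_heap_stack_gap() replaces the bare "!vma || addr + len <= vma->vm_start" test in the get_unmapped_area() paths below: besides non-overlap, it demands sysctl_heap_stack_gap bytes of slack under a VM_GROWSDOWN neighbour and the caller-supplied offset of slack otherwise, while skip_heap_stack_gap() computes the highest usable address below a vma. A standalone restatement of the common GROWSDOWN cases, with mock types; the GROWSUP and NULL-vma probes are omitted:

#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN 0x100UL
static unsigned long sysctl_heap_stack_gap = 64 * 1024;

struct vm_area_struct { unsigned long vm_start, vm_flags; };

static bool check_heap_stack_gap(const struct vm_area_struct *vma,
				 unsigned long addr, unsigned long len,
				 unsigned long offset)
{
	if (!vma)
		return true;			/* nothing above the request */
	if (addr + len > vma->vm_start)
		return false;			/* overlaps the next vma */
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep the full gap below a stack */
		return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
	if (offset)				/* keep the random thread-stack slack */
		return offset <= vma->vm_start - addr - len;
	return true;
}

int main(void)
{
	struct vm_area_struct stack = { .vm_start = 0xbff00000UL,
					.vm_flags = VM_GROWSDOWN };
	/* ends exactly at the stack: rejected, no gap left */
	printf("%d\n", check_heap_stack_gap(&stack, 0xbfe00000UL, 0x100000, 0));
	/* leaves 128KB of slack: accepted */
	printf("%d\n", check_heap_stack_gap(&stack, 0xbfde0000UL, 0x100000, 0));
	return 0;
}
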
97305@@ -1749,11 +1979,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97306 }
97307 }
97308
97309- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
97310+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
97311 check_current:
97312 /* Check if current node has a suitable gap */
97313 if (gap_start > high_limit)
97314 return -ENOMEM;
97315+
97316+ if (gap_end - gap_start > info->threadstack_offset)
97317+ gap_start += info->threadstack_offset;
97318+ else
97319+ gap_start = gap_end;
97320+
97321+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97322+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97323+ gap_start += sysctl_heap_stack_gap;
97324+ else
97325+ gap_start = gap_end;
97326+ }
97327+ if (vma->vm_flags & VM_GROWSDOWN) {
97328+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97329+ gap_end -= sysctl_heap_stack_gap;
97330+ else
97331+ gap_end = gap_start;
97332+ }
97333 if (gap_end >= low_limit && gap_end - gap_start >= length)
97334 goto found;
97335
97336@@ -1803,7 +2051,7 @@ found:
97337 return gap_start;
97338 }
97339
97340-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
97341+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
97342 {
97343 struct mm_struct *mm = current->mm;
97344 struct vm_area_struct *vma;
97345@@ -1857,6 +2105,24 @@ check_current:
97346 gap_end = vma->vm_start;
97347 if (gap_end < low_limit)
97348 return -ENOMEM;
97349+
97350+ if (gap_end - gap_start > info->threadstack_offset)
97351+ gap_end -= info->threadstack_offset;
97352+ else
97353+ gap_end = gap_start;
97354+
97355+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97356+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97357+ gap_start += sysctl_heap_stack_gap;
97358+ else
97359+ gap_start = gap_end;
97360+ }
97361+ if (vma->vm_flags & VM_GROWSDOWN) {
97362+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97363+ gap_end -= sysctl_heap_stack_gap;
97364+ else
97365+ gap_end = gap_start;
97366+ }
97367 if (gap_start <= high_limit && gap_end - gap_start >= length)
97368 goto found;
97369
97370@@ -1920,6 +2186,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97371 struct mm_struct *mm = current->mm;
97372 struct vm_area_struct *vma;
97373 struct vm_unmapped_area_info info;
97374+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97375
97376 if (len > TASK_SIZE - mmap_min_addr)
97377 return -ENOMEM;
97378@@ -1927,11 +2194,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97379 if (flags & MAP_FIXED)
97380 return addr;
97381
97382+#ifdef CONFIG_PAX_RANDMMAP
97383+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97384+#endif
97385+
97386 if (addr) {
97387 addr = PAGE_ALIGN(addr);
97388 vma = find_vma(mm, addr);
97389 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97390- (!vma || addr + len <= vma->vm_start))
97391+ check_heap_stack_gap(vma, addr, len, offset))
97392 return addr;
97393 }
97394
97395@@ -1940,6 +2211,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97396 info.low_limit = mm->mmap_base;
97397 info.high_limit = TASK_SIZE;
97398 info.align_mask = 0;
97399+ info.threadstack_offset = offset;
97400 return vm_unmapped_area(&info);
97401 }
97402 #endif
97403@@ -1958,6 +2230,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97404 struct mm_struct *mm = current->mm;
97405 unsigned long addr = addr0;
97406 struct vm_unmapped_area_info info;
97407+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97408
97409 /* requested length too big for entire address space */
97410 if (len > TASK_SIZE - mmap_min_addr)
97411@@ -1966,12 +2239,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97412 if (flags & MAP_FIXED)
97413 return addr;
97414
97415+#ifdef CONFIG_PAX_RANDMMAP
97416+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97417+#endif
97418+
97419 /* requesting a specific address */
97420 if (addr) {
97421 addr = PAGE_ALIGN(addr);
97422 vma = find_vma(mm, addr);
97423 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97424- (!vma || addr + len <= vma->vm_start))
97425+ check_heap_stack_gap(vma, addr, len, offset))
97426 return addr;
97427 }
97428
97429@@ -1980,6 +2257,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97430 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
97431 info.high_limit = mm->mmap_base;
97432 info.align_mask = 0;
97433+ info.threadstack_offset = offset;
97434 addr = vm_unmapped_area(&info);
97435
97436 /*
97437@@ -1992,6 +2270,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97438 VM_BUG_ON(addr != -ENOMEM);
97439 info.flags = 0;
97440 info.low_limit = TASK_UNMAPPED_BASE;
97441+
97442+#ifdef CONFIG_PAX_RANDMMAP
97443+ if (mm->pax_flags & MF_PAX_RANDMMAP)
97444+ info.low_limit += mm->delta_mmap;
97445+#endif
97446+
97447 info.high_limit = TASK_SIZE;
97448 addr = vm_unmapped_area(&info);
97449 }
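
gr_rand_threadstack_offset(), defined in the large mm/mmap.c hunk above, gives each anonymous MAP_STACK request under RANDMMAP a random slack of 1 to 256 pages, threaded through the new vm_unmapped_area_info.threadstack_offset field and consumed by the gap checks in unmapped_area()/unmapped_area_topdown(). A model of the offset distribution, with prandom_u32() stubbed by rand():

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT 12

static unsigned long gr_rand_threadstack_offset(void)
{
	/* same shape as the kernel expression: ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT */
	return (((unsigned)rand() & 0xFF) + 1UL) << PAGE_SHIFT;
}

int main(void)
{
	srand((unsigned)time(NULL));
	for (int i = 0; i < 4; i++)
		printf("thread stack gap: %lu KB\n",
		       gr_rand_threadstack_offset() >> 10);
	/* range: 4KB .. 1024KB in 4KB steps for PAGE_SHIFT == 12 */
	return 0;
}
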
97450@@ -2092,6 +2376,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
97451 return vma;
97452 }
97453
97454+#ifdef CONFIG_PAX_SEGMEXEC
97455+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
97456+{
97457+ struct vm_area_struct *vma_m;
97458+
97459+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
97460+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
97461+ BUG_ON(vma->vm_mirror);
97462+ return NULL;
97463+ }
97464+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
97465+ vma_m = vma->vm_mirror;
97466+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
97467+ BUG_ON(vma->vm_file != vma_m->vm_file);
97468+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
97469+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
97470+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
97471+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
97472+ return vma_m;
97473+}
97474+#endif
97475+
97476 /*
97477 * Verify that the stack growth is acceptable and
97478 * update accounting. This is shared with both the
97479@@ -2109,8 +2415,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97480
97481 /* Stack limit test */
97482 actual_size = size;
97483- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
97484- actual_size -= PAGE_SIZE;
97485+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
97486 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
97487 return -ENOMEM;
97488
97489@@ -2121,6 +2426,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97490 locked = mm->locked_vm + grow;
97491 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
97492 limit >>= PAGE_SHIFT;
97493+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97494 if (locked > limit && !capable(CAP_IPC_LOCK))
97495 return -ENOMEM;
97496 }
97497@@ -2150,37 +2456,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97498 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
97499 * vma is the last one with address > vma->vm_end. Have to extend vma.
97500 */
97501+#ifndef CONFIG_IA64
97502+static
97503+#endif
97504 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97505 {
97506 int error;
97507+ bool locknext;
97508
97509 if (!(vma->vm_flags & VM_GROWSUP))
97510 return -EFAULT;
97511
97512+ /* Also guard against wrapping around to address 0. */
97513+ if (address < PAGE_ALIGN(address+1))
97514+ address = PAGE_ALIGN(address+1);
97515+ else
97516+ return -ENOMEM;
97517+
97518 /*
97519 * We must make sure the anon_vma is allocated
97520 * so that the anon_vma locking is not a noop.
97521 */
97522 if (unlikely(anon_vma_prepare(vma)))
97523 return -ENOMEM;
97524+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
97525+ if (locknext && anon_vma_prepare(vma->vm_next))
97526+ return -ENOMEM;
97527 vma_lock_anon_vma(vma);
97528+ if (locknext)
97529+ vma_lock_anon_vma(vma->vm_next);
97530
97531 /*
97532 * vma->vm_start/vm_end cannot change under us because the caller
97533 * is required to hold the mmap_sem in read mode. We need the
97534- * anon_vma lock to serialize against concurrent expand_stacks.
97535- * Also guard against wrapping around to address 0.
97536+ * anon_vma locks to serialize against concurrent expand_stacks
97537+ * and expand_upwards.
97538 */
97539- if (address < PAGE_ALIGN(address+4))
97540- address = PAGE_ALIGN(address+4);
97541- else {
97542- vma_unlock_anon_vma(vma);
97543- return -ENOMEM;
97544- }
97545 error = 0;
97546
97547 /* Somebody else might have raced and expanded it already */
97548- if (address > vma->vm_end) {
97549+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
97550+ error = -ENOMEM;
97551+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
97552 unsigned long size, grow;
97553
97554 size = address - vma->vm_start;
97555@@ -2215,6 +2532,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97556 }
97557 }
97558 }
97559+ if (locknext)
97560+ vma_unlock_anon_vma(vma->vm_next);
97561 vma_unlock_anon_vma(vma);
97562 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97563 validate_mm(vma->vm_mm);
97564@@ -2229,6 +2548,8 @@ int expand_downwards(struct vm_area_struct *vma,
97565 unsigned long address)
97566 {
97567 int error;
97568+ bool lockprev = false;
97569+ struct vm_area_struct *prev;
97570
97571 /*
97572 * We must make sure the anon_vma is allocated
97573@@ -2242,6 +2563,15 @@ int expand_downwards(struct vm_area_struct *vma,
97574 if (error)
97575 return error;
97576
97577+ prev = vma->vm_prev;
97578+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97579+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97580+#endif
97581+ if (lockprev && anon_vma_prepare(prev))
97582+ return -ENOMEM;
97583+ if (lockprev)
97584+ vma_lock_anon_vma(prev);
97585+
97586 vma_lock_anon_vma(vma);
97587
97588 /*
97589@@ -2251,9 +2581,17 @@ int expand_downwards(struct vm_area_struct *vma,
97590 */
97591
97592 /* Somebody else might have raced and expanded it already */
97593- if (address < vma->vm_start) {
97594+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97595+ error = -ENOMEM;
97596+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97597 unsigned long size, grow;
97598
97599+#ifdef CONFIG_PAX_SEGMEXEC
97600+ struct vm_area_struct *vma_m;
97601+
97602+ vma_m = pax_find_mirror_vma(vma);
97603+#endif
97604+
97605 size = vma->vm_end - address;
97606 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97607
97608@@ -2278,13 +2616,27 @@ int expand_downwards(struct vm_area_struct *vma,
97609 vma->vm_pgoff -= grow;
97610 anon_vma_interval_tree_post_update_vma(vma);
97611 vma_gap_update(vma);
97612+
97613+#ifdef CONFIG_PAX_SEGMEXEC
97614+ if (vma_m) {
97615+ anon_vma_interval_tree_pre_update_vma(vma_m);
97616+ vma_m->vm_start -= grow << PAGE_SHIFT;
97617+ vma_m->vm_pgoff -= grow;
97618+ anon_vma_interval_tree_post_update_vma(vma_m);
97619+ vma_gap_update(vma_m);
97620+ }
97621+#endif
97622+
97623 spin_unlock(&vma->vm_mm->page_table_lock);
97624
97625+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97626 perf_event_mmap(vma);
97627 }
97628 }
97629 }
97630 vma_unlock_anon_vma(vma);
97631+ if (lockprev)
97632+ vma_unlock_anon_vma(prev);
97633 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97634 validate_mm(vma->vm_mm);
97635 return error;
97636@@ -2384,6 +2736,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97637 do {
97638 long nrpages = vma_pages(vma);
97639
97640+#ifdef CONFIG_PAX_SEGMEXEC
97641+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97642+ vma = remove_vma(vma);
97643+ continue;
97644+ }
97645+#endif
97646+
97647 if (vma->vm_flags & VM_ACCOUNT)
97648 nr_accounted += nrpages;
97649 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97650@@ -2428,6 +2787,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97651 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97652 vma->vm_prev = NULL;
97653 do {
97654+
97655+#ifdef CONFIG_PAX_SEGMEXEC
97656+ if (vma->vm_mirror) {
97657+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97658+ vma->vm_mirror->vm_mirror = NULL;
97659+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
97660+ vma->vm_mirror = NULL;
97661+ }
97662+#endif
97663+
97664 vma_rb_erase(vma, &mm->mm_rb);
97665 mm->map_count--;
97666 tail_vma = vma;
97667@@ -2455,14 +2824,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97668 struct vm_area_struct *new;
97669 int err = -ENOMEM;
97670
97671+#ifdef CONFIG_PAX_SEGMEXEC
97672+ struct vm_area_struct *vma_m, *new_m = NULL;
97673+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97674+#endif
97675+
97676 if (is_vm_hugetlb_page(vma) && (addr &
97677 ~(huge_page_mask(hstate_vma(vma)))))
97678 return -EINVAL;
97679
97680+#ifdef CONFIG_PAX_SEGMEXEC
97681+ vma_m = pax_find_mirror_vma(vma);
97682+#endif
97683+
97684 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97685 if (!new)
97686 goto out_err;
97687
97688+#ifdef CONFIG_PAX_SEGMEXEC
97689+ if (vma_m) {
97690+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97691+ if (!new_m) {
97692+ kmem_cache_free(vm_area_cachep, new);
97693+ goto out_err;
97694+ }
97695+ }
97696+#endif
97697+
97698 /* most fields are the same, copy all, and then fixup */
97699 *new = *vma;
97700
97701@@ -2475,6 +2863,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97702 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97703 }
97704
97705+#ifdef CONFIG_PAX_SEGMEXEC
97706+ if (vma_m) {
97707+ *new_m = *vma_m;
97708+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
97709+ new_m->vm_mirror = new;
97710+ new->vm_mirror = new_m;
97711+
97712+ if (new_below)
97713+ new_m->vm_end = addr_m;
97714+ else {
97715+ new_m->vm_start = addr_m;
97716+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
97717+ }
97718+ }
97719+#endif
97720+
97721 err = vma_dup_policy(vma, new);
97722 if (err)
97723 goto out_free_vma;
97724@@ -2495,6 +2899,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97725 else
97726 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
97727
97728+#ifdef CONFIG_PAX_SEGMEXEC
97729+ if (!err && vma_m) {
97730+ struct mempolicy *pol = vma_policy(new);
97731+
97732+ if (anon_vma_clone(new_m, vma_m))
97733+ goto out_free_mpol;
97734+
97735+ mpol_get(pol);
97736+ set_vma_policy(new_m, pol);
97737+
97738+ if (new_m->vm_file)
97739+ get_file(new_m->vm_file);
97740+
97741+ if (new_m->vm_ops && new_m->vm_ops->open)
97742+ new_m->vm_ops->open(new_m);
97743+
97744+ if (new_below)
97745+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
97746+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
97747+ else
97748+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
97749+
97750+ if (err) {
97751+ if (new_m->vm_ops && new_m->vm_ops->close)
97752+ new_m->vm_ops->close(new_m);
97753+ if (new_m->vm_file)
97754+ fput(new_m->vm_file);
97755+ mpol_put(pol);
97756+ }
97757+ }
97758+#endif
97759+
97760 /* Success. */
97761 if (!err)
97762 return 0;
97763@@ -2504,10 +2940,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97764 new->vm_ops->close(new);
97765 if (new->vm_file)
97766 fput(new->vm_file);
97767- unlink_anon_vmas(new);
97768 out_free_mpol:
97769 mpol_put(vma_policy(new));
97770 out_free_vma:
97771+
97772+#ifdef CONFIG_PAX_SEGMEXEC
97773+ if (new_m) {
97774+ unlink_anon_vmas(new_m);
97775+ kmem_cache_free(vm_area_cachep, new_m);
97776+ }
97777+#endif
97778+
97779+ unlink_anon_vmas(new);
97780 kmem_cache_free(vm_area_cachep, new);
97781 out_err:
97782 return err;
97783@@ -2520,6 +2964,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97784 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97785 unsigned long addr, int new_below)
97786 {
97787+
97788+#ifdef CONFIG_PAX_SEGMEXEC
97789+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97790+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
97791+ if (mm->map_count >= sysctl_max_map_count-1)
97792+ return -ENOMEM;
97793+ } else
97794+#endif
97795+
97796 if (mm->map_count >= sysctl_max_map_count)
97797 return -ENOMEM;
97798
97799@@ -2531,11 +2984,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97800 * work. This now handles partial unmappings.
97801 * Jeremy Fitzhardinge <jeremy@goop.org>
97802 */
97803+#ifdef CONFIG_PAX_SEGMEXEC
97804 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97805 {
97806+ int ret = __do_munmap(mm, start, len);
97807+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
97808+ return ret;
97809+
97810+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
97811+}
97812+
97813+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97814+#else
97815+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97816+#endif
97817+{
97818 unsigned long end;
97819 struct vm_area_struct *vma, *prev, *last;
97820
97821+ /*
97822+ * mm->mmap_sem is required to protect against another thread
97823+ * changing the mappings in case we sleep.
97824+ */
97825+ verify_mm_writelocked(mm);
97826+
97827 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
97828 return -EINVAL;
97829
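
Under SEGMEXEC every executable mapping has a shadow at +SEGMEXEC_TASK_SIZE, so the do_munmap() wrapper above unmaps the requested range in both halves. A hedged user-space sketch of the mirrored-range arithmetic (SEGMEXEC_TASK_SIZE is assumed here to be half of a 3 GiB i386 TASK_SIZE):

#include <assert.h>
#include <stdio.h>

#define TASK_SIZE          0xC0000000UL	/* 3 GiB user space (i386) */
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)	/* 1.5 GiB per half */

/* Sketch: an unmap of [start, start+len) must also drop the mirror in
 * the upper half, which is why __do_munmap() runs twice above. */
static void munmap_both_halves(unsigned long start, unsigned long len)
{
	printf("unmap [%#lx, %#lx)\n", start, start + len);
	printf("unmap [%#lx, %#lx) /* mirror */\n",
	       start + SEGMEXEC_TASK_SIZE, start + SEGMEXEC_TASK_SIZE + len);
}

int main(void)
{
	/* vm_munmap() already rejects ranges that cross the split */
	assert(0x08048000UL + 0x1000UL <= SEGMEXEC_TASK_SIZE);
	munmap_both_halves(0x08048000UL, 0x1000UL);
	return 0;
}
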
97830@@ -2613,6 +3085,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97831 /* Fix up all other VM information */
97832 remove_vma_list(mm, vma);
97833
97834+ track_exec_limit(mm, start, end, 0UL);
97835+
97836 return 0;
97837 }
97838
97839@@ -2621,6 +3095,13 @@ int vm_munmap(unsigned long start, size_t len)
97840 int ret;
97841 struct mm_struct *mm = current->mm;
97842
97843+
97844+#ifdef CONFIG_PAX_SEGMEXEC
97845+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
97846+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
97847+ return -EINVAL;
97848+#endif
97849+
97850 down_write(&mm->mmap_sem);
97851 ret = do_munmap(mm, start, len);
97852 up_write(&mm->mmap_sem);
97853@@ -2634,16 +3115,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
97854 return vm_munmap(addr, len);
97855 }
97856
97857-static inline void verify_mm_writelocked(struct mm_struct *mm)
97858-{
97859-#ifdef CONFIG_DEBUG_VM
97860- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97861- WARN_ON(1);
97862- up_read(&mm->mmap_sem);
97863- }
97864-#endif
97865-}
97866-
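
verify_mm_writelocked() is only hoisted here, not dropped: its definition moves above __do_munmap() so the unmap path can assert its locking contract. The debug check works by attempting a read-trylock, which can only succeed if nobody holds the semaphore for writing. A user-space sketch of the same idea with a pthread rwlock:

#include <assert.h>
#include <pthread.h>

/* Sketch of verify_mm_writelocked(): if a read-trylock succeeds, the
 * caller did NOT hold the lock for writing, which is a bug. */
static void verify_writelocked(pthread_rwlock_t *sem)
{
	if (pthread_rwlock_tryrdlock(sem) == 0) {
		pthread_rwlock_unlock(sem);
		assert(!"caller must hold the lock for writing");
	}
}

int main(void)
{
	pthread_rwlock_t sem = PTHREAD_RWLOCK_INITIALIZER;

	pthread_rwlock_wrlock(&sem);
	verify_writelocked(&sem);	/* passes: we are the writer */
	pthread_rwlock_unlock(&sem);
	return 0;
}
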
97867 /*
97868 * this is really a simplified "do_mmap". it only handles
97869 * anonymous maps. eventually we may be able to do some
97870@@ -2657,6 +3128,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97871 struct rb_node **rb_link, *rb_parent;
97872 pgoff_t pgoff = addr >> PAGE_SHIFT;
97873 int error;
97874+ unsigned long charged;
97875
97876 len = PAGE_ALIGN(len);
97877 if (!len)
97878@@ -2664,10 +3136,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97879
97880 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
97881
97882+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
97883+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
97884+ flags &= ~VM_EXEC;
97885+
97886+#ifdef CONFIG_PAX_MPROTECT
97887+ if (mm->pax_flags & MF_PAX_MPROTECT)
97888+ flags &= ~VM_MAYEXEC;
97889+#endif
97890+
97891+ }
97892+#endif
97893+
97894 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
97895 if (error & ~PAGE_MASK)
97896 return error;
97897
97898+ charged = len >> PAGE_SHIFT;
97899+
97900 error = mlock_future_check(mm, mm->def_flags, len);
97901 if (error)
97902 return error;
97903@@ -2681,21 +3167,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97904 /*
97905 * Clear old maps. this also does some error checking for us
97906 */
97907- munmap_back:
97908 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97909 if (do_munmap(mm, addr, len))
97910 return -ENOMEM;
97911- goto munmap_back;
97912+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97913 }
97914
97915 /* Check against address space limits *after* clearing old maps... */
97916- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
97917+ if (!may_expand_vm(mm, charged))
97918 return -ENOMEM;
97919
97920 if (mm->map_count > sysctl_max_map_count)
97921 return -ENOMEM;
97922
97923- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
97924+ if (security_vm_enough_memory_mm(mm, charged))
97925 return -ENOMEM;
97926
97927 /* Can we just expand an old private anonymous mapping? */
97928@@ -2709,7 +3194,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97929 */
97930 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97931 if (!vma) {
97932- vm_unacct_memory(len >> PAGE_SHIFT);
97933+ vm_unacct_memory(charged);
97934 return -ENOMEM;
97935 }
97936
97937@@ -2723,10 +3208,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97938 vma_link(mm, vma, prev, rb_link, rb_parent);
97939 out:
97940 perf_event_mmap(vma);
97941- mm->total_vm += len >> PAGE_SHIFT;
97942+ mm->total_vm += charged;
97943 if (flags & VM_LOCKED)
97944- mm->locked_vm += (len >> PAGE_SHIFT);
97945+ mm->locked_vm += charged;
97946 vma->vm_flags |= VM_SOFTDIRTY;
97947+ track_exec_limit(mm, addr, addr + len, flags);
97948 return addr;
97949 }
97950
97951@@ -2788,6 +3274,7 @@ void exit_mmap(struct mm_struct *mm)
97952 while (vma) {
97953 if (vma->vm_flags & VM_ACCOUNT)
97954 nr_accounted += vma_pages(vma);
97955+ vma->vm_mirror = NULL;
97956 vma = remove_vma(vma);
97957 }
97958 vm_unacct_memory(nr_accounted);
97959@@ -2805,6 +3292,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
97960 struct vm_area_struct *prev;
97961 struct rb_node **rb_link, *rb_parent;
97962
97963+#ifdef CONFIG_PAX_SEGMEXEC
97964+ struct vm_area_struct *vma_m = NULL;
97965+#endif
97966+
97967+ if (security_mmap_addr(vma->vm_start))
97968+ return -EPERM;
97969+
97970 /*
97971 * The vm_pgoff of a purely anonymous vma should be irrelevant
97972 * until its first write fault, when page's anon_vma and index
97973@@ -2828,7 +3322,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
97974 security_vm_enough_memory_mm(mm, vma_pages(vma)))
97975 return -ENOMEM;
97976
97977+#ifdef CONFIG_PAX_SEGMEXEC
97978+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
97979+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97980+ if (!vma_m)
97981+ return -ENOMEM;
97982+ }
97983+#endif
97984+
97985 vma_link(mm, vma, prev, rb_link, rb_parent);
97986+
97987+#ifdef CONFIG_PAX_SEGMEXEC
97988+ if (vma_m)
97989+ BUG_ON(pax_mirror_vma(vma_m, vma));
97990+#endif
97991+
97992 return 0;
97993 }
97994
97995@@ -2847,6 +3355,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
97996 struct rb_node **rb_link, *rb_parent;
97997 bool faulted_in_anon_vma = true;
97998
97999+ BUG_ON(vma->vm_mirror);
98000+
98001 /*
98002 * If anonymous vma has not yet been faulted, update new pgoff
98003 * to match new location, to increase its chance of merging.
98004@@ -2911,6 +3421,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98005 return NULL;
98006 }
98007
98008+#ifdef CONFIG_PAX_SEGMEXEC
98009+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
98010+{
98011+ struct vm_area_struct *prev_m;
98012+ struct rb_node **rb_link_m, *rb_parent_m;
98013+ struct mempolicy *pol_m;
98014+
98015+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
98016+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
98017+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
98018+ *vma_m = *vma;
98019+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
98020+ if (anon_vma_clone(vma_m, vma))
98021+ return -ENOMEM;
98022+ pol_m = vma_policy(vma_m);
98023+ mpol_get(pol_m);
98024+ set_vma_policy(vma_m, pol_m);
98025+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
98026+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
98027+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
98028+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
98029+ if (vma_m->vm_file)
98030+ get_file(vma_m->vm_file);
98031+ if (vma_m->vm_ops && vma_m->vm_ops->open)
98032+ vma_m->vm_ops->open(vma_m);
98033+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
98034+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
98035+ vma_m->vm_mirror = vma;
98036+ vma->vm_mirror = vma_m;
98037+ return 0;
98038+}
98039+#endif
98040+
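
pax_mirror_vma() clones a vma into the upper half: same backing object, shifted by SEGMEXEC_TASK_SIZE, with every write permission stripped so the mirror is execute/read-only. A small sketch of the address and flag transform (the VM_* values and split size are illustrative assumptions):

#include <assert.h>

#define VM_WRITE    0x2UL
#define VM_EXEC     0x4UL
#define VM_MAYWRITE 0x20UL
#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* 1.5 GiB half, as on i386 */

struct vma { unsigned long start, end, flags; };

/* Sketch: derive the non-writable mirror that pax_mirror_vma() links
 * into the upper address-space half. */
static struct vma mirror_of(struct vma v)
{
	v.start += SEGMEXEC_TASK_SIZE;
	v.end   += SEGMEXEC_TASK_SIZE;
	v.flags &= ~(VM_WRITE | VM_MAYWRITE);	/* never writable */
	return v;
}

int main(void)
{
	struct vma text = { 0x08048000UL, 0x08049000UL,
			    VM_EXEC | VM_WRITE | VM_MAYWRITE };
	struct vma m = mirror_of(text);

	assert(m.start == 0x68048000UL && m.end == 0x68049000UL);
	assert(!(m.flags & (VM_WRITE | VM_MAYWRITE)));
	assert(m.flags & VM_EXEC);
	return 0;
}
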
98041 /*
98042 * Return true if the calling process may expand its vm space by the passed
98043 * number of pages
98044@@ -2922,6 +3465,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
98045
98046 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
98047
98048+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
98049 if (cur + npages > lim)
98050 return 0;
98051 return 1;
98052@@ -3004,6 +3548,22 @@ static struct vm_area_struct *__install_special_mapping(
98053 vma->vm_start = addr;
98054 vma->vm_end = addr + len;
98055
98056+#ifdef CONFIG_PAX_MPROTECT
98057+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98058+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98059+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
98060+ return ERR_PTR(-EPERM);
98061+ if (!(vm_flags & VM_EXEC))
98062+ vm_flags &= ~VM_MAYEXEC;
98063+#else
98064+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98065+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98066+#endif
98067+ else
98068+ vm_flags &= ~VM_MAYWRITE;
98069+ }
98070+#endif
98071+
98072 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
98073 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98074
98075diff --git a/mm/mprotect.c b/mm/mprotect.c
98076index ace9345..63320dc 100644
98077--- a/mm/mprotect.c
98078+++ b/mm/mprotect.c
98079@@ -24,10 +24,18 @@
98080 #include <linux/migrate.h>
98081 #include <linux/perf_event.h>
98082 #include <linux/ksm.h>
98083+#include <linux/sched/sysctl.h>
98084+
98085+#ifdef CONFIG_PAX_MPROTECT
98086+#include <linux/elf.h>
98087+#include <linux/binfmts.h>
98088+#endif
98089+
98090 #include <asm/uaccess.h>
98091 #include <asm/pgtable.h>
98092 #include <asm/cacheflush.h>
98093 #include <asm/tlbflush.h>
98094+#include <asm/mmu_context.h>
98095
98096 /*
98097 * For a prot_numa update we only hold mmap_sem for read so there is a
98098@@ -251,6 +259,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
98099 return pages;
98100 }
98101
98102+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98103+/* called while holding the mmap semaphore for writing, except for stack expansion */
98104+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
98105+{
98106+ unsigned long oldlimit, newlimit = 0UL;
98107+
98108+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
98109+ return;
98110+
98111+ spin_lock(&mm->page_table_lock);
98112+ oldlimit = mm->context.user_cs_limit;
98113+ if ((prot & VM_EXEC) && oldlimit < end)
98114+ /* USER_CS limit moved up */
98115+ newlimit = end;
98116+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98117+ /* USER_CS limit moved down */
98118+ newlimit = start;
98119+
98120+ if (newlimit) {
98121+ mm->context.user_cs_limit = newlimit;
98122+
98123+#ifdef CONFIG_SMP
98124+ wmb();
98125+ cpus_clear(mm->context.cpu_user_cs_mask);
98126+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98127+#endif
98128+
98129+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98130+ }
98131+ spin_unlock(&mm->page_table_lock);
98132+ if (newlimit == end) {
98133+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
98134+
98135+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
98136+ if (is_vm_hugetlb_page(vma))
98137+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98138+ else
98139+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
98140+ }
98141+}
98142+#endif
98143+
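
track_exec_limit() keeps mm->context.user_cs_limit as a high-water mark: it rises to 'end' when executable protection is granted above the old limit and drops to 'start' when exec is revoked across it. A user-space sketch of just that decision, with the values as plain parameters:

#include <assert.h>

#define VM_EXEC 0x4UL

/* Sketch of the newlimit computation in track_exec_limit(): returns
 * the updated USER_CS limit, or 0 when no segment reload is needed. */
static unsigned long new_cs_limit(unsigned long oldlimit,
				  unsigned long start, unsigned long end,
				  unsigned long prot)
{
	if ((prot & VM_EXEC) && oldlimit < end)
		return end;	/* limit moves up */
	if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
		return start;	/* limit moves down */
	return 0;
}

int main(void)
{
	/* mprotect(PROT_EXEC) past the current limit raises it */
	assert(new_cs_limit(0x08050000UL, 0x08050000UL, 0x08060000UL, VM_EXEC)
	       == 0x08060000UL);
	/* dropping exec on the range holding the limit lowers it */
	assert(new_cs_limit(0x08060000UL, 0x08050000UL, 0x08060000UL, 0UL)
	       == 0x08050000UL);
	/* untouched ranges leave the limit alone */
	assert(new_cs_limit(0x08060000UL, 0x09000000UL, 0x09010000UL, 0UL) == 0);
	return 0;
}
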
98144 int
98145 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98146 unsigned long start, unsigned long end, unsigned long newflags)
98147@@ -263,11 +313,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98148 int error;
98149 int dirty_accountable = 0;
98150
98151+#ifdef CONFIG_PAX_SEGMEXEC
98152+ struct vm_area_struct *vma_m = NULL;
98153+ unsigned long start_m, end_m;
98154+
98155+ start_m = start + SEGMEXEC_TASK_SIZE;
98156+ end_m = end + SEGMEXEC_TASK_SIZE;
98157+#endif
98158+
98159 if (newflags == oldflags) {
98160 *pprev = vma;
98161 return 0;
98162 }
98163
98164+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98165+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98166+
98167+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98168+ return -ENOMEM;
98169+
98170+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98171+ return -ENOMEM;
98172+ }
98173+
98174 /*
98175 * If we make a private mapping writable we increase our commit;
98176 * but (without finer accounting) cannot reduce our commit if we
98177@@ -284,6 +352,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98178 }
98179 }
98180
98181+#ifdef CONFIG_PAX_SEGMEXEC
98182+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98183+ if (start != vma->vm_start) {
98184+ error = split_vma(mm, vma, start, 1);
98185+ if (error)
98186+ goto fail;
98187+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98188+ *pprev = (*pprev)->vm_next;
98189+ }
98190+
98191+ if (end != vma->vm_end) {
98192+ error = split_vma(mm, vma, end, 0);
98193+ if (error)
98194+ goto fail;
98195+ }
98196+
98197+ if (pax_find_mirror_vma(vma)) {
98198+ error = __do_munmap(mm, start_m, end_m - start_m);
98199+ if (error)
98200+ goto fail;
98201+ } else {
98202+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98203+ if (!vma_m) {
98204+ error = -ENOMEM;
98205+ goto fail;
98206+ }
98207+ vma->vm_flags = newflags;
98208+ error = pax_mirror_vma(vma_m, vma);
98209+ if (error) {
98210+ vma->vm_flags = oldflags;
98211+ goto fail;
98212+ }
98213+ }
98214+ }
98215+#endif
98216+
98217 /*
98218 * First try to merge with previous and/or next vma.
98219 */
98220@@ -314,7 +418,19 @@ success:
98221 * vm_flags and vm_page_prot are protected by the mmap_sem
98222 * held in write mode.
98223 */
98224+
98225+#ifdef CONFIG_PAX_SEGMEXEC
98226+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98227+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98228+#endif
98229+
98230 vma->vm_flags = newflags;
98231+
98232+#ifdef CONFIG_PAX_MPROTECT
98233+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98234+ mm->binfmt->handle_mprotect(vma, newflags);
98235+#endif
98236+
98237 dirty_accountable = vma_wants_writenotify(vma);
98238 vma_set_page_prot(vma);
98239
98240@@ -350,6 +466,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98241 end = start + len;
98242 if (end <= start)
98243 return -ENOMEM;
98244+
98245+#ifdef CONFIG_PAX_SEGMEXEC
98246+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98247+ if (end > SEGMEXEC_TASK_SIZE)
98248+ return -EINVAL;
98249+ } else
98250+#endif
98251+
98252+ if (end > TASK_SIZE)
98253+ return -EINVAL;
98254+
98255 if (!arch_validate_prot(prot))
98256 return -EINVAL;
98257
98258@@ -357,7 +484,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98259 /*
98260 * Does the application expect PROT_READ to imply PROT_EXEC:
98261 */
98262- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98263+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98264 prot |= PROT_EXEC;
98265
98266 vm_flags = calc_vm_prot_bits(prot);
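
The widened personality check above makes PROT_WRITE, not just PROT_READ, imply PROT_EXEC under READ_IMPLIES_EXEC, so legacy binaries that write code into writable mappings keep working when exec emulation is active. A sketch of the adjusted promotion, using the PROT_* constants from <sys/mman.h>:

#include <assert.h>
#include <sys/mman.h>

/* Sketch: promote prot the way the patched sys_mprotect() does for a
 * READ_IMPLIES_EXEC personality. */
static int promote_prot(int prot, int read_implies_exec)
{
	if (read_implies_exec && (prot & (PROT_READ | PROT_WRITE)))
		prot |= PROT_EXEC;
	return prot;
}

int main(void)
{
	assert(promote_prot(PROT_READ, 1) & PROT_EXEC);
	assert(promote_prot(PROT_WRITE, 1) & PROT_EXEC);	/* the new case */
	assert(!(promote_prot(PROT_WRITE, 0) & PROT_EXEC));
	return 0;
}
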
98267@@ -389,6 +516,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98268 if (start > vma->vm_start)
98269 prev = vma;
98270
98271+#ifdef CONFIG_PAX_MPROTECT
98272+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98273+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
98274+#endif
98275+
98276 for (nstart = start ; ; ) {
98277 unsigned long newflags;
98278
98279@@ -399,6 +531,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98280
98281 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98282 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98283+ if (prot & (PROT_WRITE | PROT_EXEC))
98284+ gr_log_rwxmprotect(vma);
98285+
98286+ error = -EACCES;
98287+ goto out;
98288+ }
98289+
98290+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
98291 error = -EACCES;
98292 goto out;
98293 }
98294@@ -413,6 +553,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98295 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
98296 if (error)
98297 goto out;
98298+
98299+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
98300+
98301 nstart = tmp;
98302
98303 if (nstart < prev->vm_end)
98304diff --git a/mm/mremap.c b/mm/mremap.c
98305index 17fa018..6f7892b 100644
98306--- a/mm/mremap.c
98307+++ b/mm/mremap.c
98308@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
98309 continue;
98310 pte = ptep_get_and_clear(mm, old_addr, old_pte);
98311 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
98312+
98313+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98314+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
98315+ pte = pte_exprotect(pte);
98316+#endif
98317+
98318 pte = move_soft_dirty_pte(pte);
98319 set_pte_at(mm, new_addr, new_pte, pte);
98320 }
98321@@ -346,6 +352,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
98322 if (is_vm_hugetlb_page(vma))
98323 goto Einval;
98324
98325+#ifdef CONFIG_PAX_SEGMEXEC
98326+ if (pax_find_mirror_vma(vma))
98327+ goto Einval;
98328+#endif
98329+
98330 /* We can't remap across vm area boundaries */
98331 if (old_len > vma->vm_end - addr)
98332 goto Efault;
98333@@ -401,20 +412,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
98334 unsigned long ret = -EINVAL;
98335 unsigned long charged = 0;
98336 unsigned long map_flags;
98337+ unsigned long pax_task_size = TASK_SIZE;
98338
98339 if (new_addr & ~PAGE_MASK)
98340 goto out;
98341
98342- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
98343+#ifdef CONFIG_PAX_SEGMEXEC
98344+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98345+ pax_task_size = SEGMEXEC_TASK_SIZE;
98346+#endif
98347+
98348+ pax_task_size -= PAGE_SIZE;
98349+
98350+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
98351 goto out;
98352
98353 /* Check if the location we're moving into overlaps the
98354 * old location at all, and fail if it does.
98355 */
98356- if ((new_addr <= addr) && (new_addr+new_len) > addr)
98357- goto out;
98358-
98359- if ((addr <= new_addr) && (addr+old_len) > new_addr)
98360+ if (addr + old_len > new_addr && new_addr + new_len > addr)
98361 goto out;
98362
98363 ret = do_munmap(mm, new_addr, new_len);
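
The two asymmetric comparisons removed above fold into the standard half-open interval test: [addr, addr+old_len) and [new_addr, new_addr+new_len) intersect exactly when each range starts below the other's end. A quick sketch:

#include <assert.h>
#include <stdbool.h>

/* Sketch of the simplified mremap_to() check: do the half-open ranges
 * [a, a+alen) and [b, b+blen) overlap? */
static bool ranges_overlap(unsigned long a, unsigned long alen,
			   unsigned long b, unsigned long blen)
{
	return a + alen > b && b + blen > a;
}

int main(void)
{
	assert(ranges_overlap(0x1000, 0x2000, 0x2000, 0x1000));  /* tail hits */
	assert(!ranges_overlap(0x1000, 0x1000, 0x2000, 0x1000)); /* adjacent */
	assert(ranges_overlap(0x2000, 0x1000, 0x1000, 0x4000));  /* contained */
	return 0;
}
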
98364@@ -483,6 +499,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98365 unsigned long ret = -EINVAL;
98366 unsigned long charged = 0;
98367 bool locked = false;
98368+ unsigned long pax_task_size = TASK_SIZE;
98369
98370 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
98371 return ret;
98372@@ -504,6 +521,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98373 if (!new_len)
98374 return ret;
98375
98376+#ifdef CONFIG_PAX_SEGMEXEC
98377+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98378+ pax_task_size = SEGMEXEC_TASK_SIZE;
98379+#endif
98380+
98381+ pax_task_size -= PAGE_SIZE;
98382+
98383+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
98384+ old_len > pax_task_size || addr > pax_task_size-old_len)
98385+ return ret;
98386+
98387 down_write(&current->mm->mmap_sem);
98388
98389 if (flags & MREMAP_FIXED) {
98390@@ -554,6 +582,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98391 new_addr = addr;
98392 }
98393 ret = addr;
98394+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
98395 goto out;
98396 }
98397 }
98398@@ -577,7 +606,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98399 goto out;
98400 }
98401
98402+ map_flags = vma->vm_flags;
98403 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
98404+ if (!(ret & ~PAGE_MASK)) {
98405+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
98406+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
98407+ }
98408 }
98409 out:
98410 if (ret & ~PAGE_MASK)
98411diff --git a/mm/nommu.c b/mm/nommu.c
98412index 28bd8c4..98a6fe3 100644
98413--- a/mm/nommu.c
98414+++ b/mm/nommu.c
98415@@ -71,7 +71,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
98416 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
98417 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
98418 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
98419-int heap_stack_gap = 0;
98420
98421 atomic_long_t mmap_pages_allocated;
98422
98423@@ -858,15 +857,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
98424 EXPORT_SYMBOL(find_vma);
98425
98426 /*
98427- * find a VMA
98428- * - we don't extend stack VMAs under NOMMU conditions
98429- */
98430-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
98431-{
98432- return find_vma(mm, addr);
98433-}
98434-
98435-/*
98436 * expand a stack to a given address
98437 * - not supported under NOMMU conditions
98438 */
98439@@ -1562,6 +1552,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98440
98441 /* most fields are the same, copy all, and then fixup */
98442 *new = *vma;
98443+ INIT_LIST_HEAD(&new->anon_vma_chain);
98444 *region = *vma->vm_region;
98445 new->vm_region = region;
98446
98447@@ -1895,7 +1886,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
98448 */
98449 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
98450 {
98451- unsigned long free, allowed, reserve;
98452+ long free, allowed, reserve;
98453
98454 vm_acct_memory(pages);
98455
98456@@ -1959,7 +1950,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
98457 */
98458 if (mm) {
98459 reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
98460- allowed -= min(mm->total_vm / 32, reserve);
98461+ allowed -= min_t(long, mm->total_vm / 32, reserve);
98462 }
98463
98464 if (percpu_counter_read_positive(&vm_committed_as) < allowed)
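
Switching free, allowed and reserve from unsigned long to long matters because the accounting arithmetic above can legitimately go negative; an unsigned intermediate would wrap to a huge value and the overcommit check could never trip. A minimal demonstration:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	long committed = 100;	/* pages already committed */

	unsigned long u_allowed = 100;
	long s_allowed = 100;

	/* subtract a reserve larger than the current allowance */
	u_allowed -= 150;
	s_allowed -= 150;

	/* unsigned: wraps to a huge value, so 'committed < allowed'
	 * always passes and the limit is silently defeated */
	assert((unsigned long)committed < u_allowed);

	/* signed: goes negative and the same comparison correctly fails */
	assert(!(committed < s_allowed));

	printf("u_allowed=%lu s_allowed=%ld\n", u_allowed, s_allowed);
	return 0;
}
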
98465@@ -1992,8 +1983,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
98466 }
98467 EXPORT_SYMBOL(generic_file_remap_pages);
98468
98469-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98470- unsigned long addr, void *buf, int len, int write)
98471+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98472+ unsigned long addr, void *buf, size_t len, int write)
98473 {
98474 struct vm_area_struct *vma;
98475
98476@@ -2034,8 +2025,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98477 *
98478 * The caller must hold a reference on @mm.
98479 */
98480-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98481- void *buf, int len, int write)
98482+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
98483+ void *buf, size_t len, int write)
98484 {
98485 return __access_remote_vm(NULL, mm, addr, buf, len, write);
98486 }
98487@@ -2044,7 +2035,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98488 * Access another process' address space.
98489 * - source/target buffer must be kernel space
98490 */
98491-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
98492+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
98493 {
98494 struct mm_struct *mm;
98495
98496diff --git a/mm/page-writeback.c b/mm/page-writeback.c
98497index 6f43352..e44bf41 100644
98498--- a/mm/page-writeback.c
98499+++ b/mm/page-writeback.c
98500@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
98501 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
98502 * - the bdi dirty thresh drops quickly due to change of JBOD workload
98503 */
98504-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
98505+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
98506 unsigned long thresh,
98507 unsigned long bg_thresh,
98508 unsigned long dirty,
98509diff --git a/mm/page_alloc.c b/mm/page_alloc.c
98510index 8e20f9c..e235009 100644
98511--- a/mm/page_alloc.c
98512+++ b/mm/page_alloc.c
98513@@ -60,6 +60,7 @@
98514 #include <linux/hugetlb.h>
98515 #include <linux/sched/rt.h>
98516 #include <linux/page_owner.h>
98517+#include <linux/random.h>
98518
98519 #include <asm/sections.h>
98520 #include <asm/tlbflush.h>
98521@@ -358,7 +359,7 @@ out:
98522 * This usage means that zero-order pages may not be compound.
98523 */
98524
98525-static void free_compound_page(struct page *page)
98526+void free_compound_page(struct page *page)
98527 {
98528 __free_pages_ok(page, compound_order(page));
98529 }
98530@@ -511,7 +512,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
98531 __mod_zone_freepage_state(zone, (1 << order), migratetype);
98532 }
98533 #else
98534-struct page_ext_operations debug_guardpage_ops = { NULL, };
98535+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
98536 static inline void set_page_guard(struct zone *zone, struct page *page,
98537 unsigned int order, int migratetype) {}
98538 static inline void clear_page_guard(struct zone *zone, struct page *page,
98539@@ -802,6 +803,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98540 int i;
98541 int bad = 0;
98542
98543+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98544+ unsigned long index = 1UL << order;
98545+#endif
98546+
98547 VM_BUG_ON_PAGE(PageTail(page), page);
98548 VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);
98549
98550@@ -823,6 +828,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98551 debug_check_no_obj_freed(page_address(page),
98552 PAGE_SIZE << order);
98553 }
98554+
98555+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98556+ for (; index; --index)
98557+ sanitize_highpage(page + index - 1);
98558+#endif
98559+
98560 arch_free_page(page, order);
98561 kernel_map_pages(page, 1 << order, 0);
98562
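
The PAX_MEMORY_SANITIZE loop above scrubs every page of a freed block before it returns to the buddy allocator, so stale contents cannot leak into the next allocation. A user-space sketch of the same scrub-on-free pattern (PAGE_SIZE is an assumed 4 KiB here):

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Sketch: scrub an order-N block page by page before freeing it,
 * mirroring the sanitize_highpage() loop in free_pages_prepare(). */
static void sanitized_free(unsigned char *block, unsigned int order)
{
	unsigned long index = 1UL << order;

	for (; index; --index)
		memset(block + (index - 1) * PAGE_SIZE, 0, PAGE_SIZE);
	free(block);
}

int main(void)
{
	unsigned int order = 2;	/* four pages */
	unsigned char *block = malloc(PAGE_SIZE << order);

	if (!block)
		return 1;
	memset(block, 0xAA, PAGE_SIZE << order);	/* "secret" data */
	sanitized_free(block, order);
	return 0;
}
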
98563@@ -846,6 +857,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98564 local_irq_restore(flags);
98565 }
98566
98567+#ifdef CONFIG_PAX_LATENT_ENTROPY
98568+bool __meminitdata extra_latent_entropy;
98569+
98570+static int __init setup_pax_extra_latent_entropy(char *str)
98571+{
98572+ extra_latent_entropy = true;
98573+ return 0;
98574+}
98575+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
98576+
98577+volatile u64 latent_entropy __latent_entropy;
98578+EXPORT_SYMBOL(latent_entropy);
98579+#endif
98580+
98581 void __init __free_pages_bootmem(struct page *page, unsigned int order)
98582 {
98583 unsigned int nr_pages = 1 << order;
98584@@ -861,6 +886,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
98585 __ClearPageReserved(p);
98586 set_page_count(p, 0);
98587
98588+#ifdef CONFIG_PAX_LATENT_ENTROPY
98589+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
98590+ u64 hash = 0;
98591+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
98592+ const u64 *data = lowmem_page_address(page);
98593+
98594+ for (index = 0; index < end; index++)
98595+ hash ^= hash + data[index];
98596+ latent_entropy ^= hash;
98597+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
98598+ }
98599+#endif
98600+
98601 page_zone(page)->managed_pages += nr_pages;
98602 set_page_refcounted(page);
98603 __free_pages(page, order);
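
With pax_extra_latent_entropy, the raw contents of early low-memory pages are folded into a running entropy word and credited to the RNG via add_device_randomness(). A self-contained sketch of the fold, reusing the hash ^= hash + data[i] mixing step from the hunk (the buffer stands in for a page):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the latent-entropy fold used above: cheap, order-dependent
 * mixing of word-sized reads into one accumulator. */
static uint64_t fold_page(const uint64_t *data, size_t words, uint64_t hash)
{
	for (size_t i = 0; i < words; i++)
		hash ^= hash + data[i];
	return hash;
}

int main(void)
{
	uint64_t page[512] = { 0x1234, 0x5678 };	/* stand-in contents */
	uint64_t latent_entropy = 0;

	latent_entropy ^= fold_page(page, 512, 0);
	printf("latent_entropy=%#llx\n", (unsigned long long)latent_entropy);
	return 0;
}
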
98604@@ -986,8 +1024,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
98605 arch_alloc_page(page, order);
98606 kernel_map_pages(page, 1 << order, 1);
98607
98608+#ifndef CONFIG_PAX_MEMORY_SANITIZE
98609 if (gfp_flags & __GFP_ZERO)
98610 prep_zero_page(page, order, gfp_flags);
98611+#endif
98612
98613 if (order && (gfp_flags & __GFP_COMP))
98614 prep_compound_page(page, order);
98615@@ -1702,7 +1742,7 @@ again:
98616 }
98617
98618 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
98619- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98620+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98621 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
98622 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98623
98624@@ -2023,7 +2063,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
98625 do {
98626 mod_zone_page_state(zone, NR_ALLOC_BATCH,
98627 high_wmark_pages(zone) - low_wmark_pages(zone) -
98628- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98629+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98630 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98631 } while (zone++ != preferred_zone);
98632 }
98633@@ -2382,8 +2422,15 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
98634 if (high_zoneidx < ZONE_NORMAL)
98635 goto out;
98636 /* The OOM killer does not compensate for light reclaim */
98637- if (!(gfp_mask & __GFP_FS))
98638+ if (!(gfp_mask & __GFP_FS)) {
98639+ /*
98640+ * XXX: Page reclaim didn't yield anything,
98641+ * and the OOM killer can't be invoked, but
98642+ * keep looping as per should_alloc_retry().
98643+ */
98644+ *did_some_progress = 1;
98645 goto out;
98646+ }
98647 /*
98648 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
98649 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
98650@@ -5776,7 +5823,7 @@ static void __setup_per_zone_wmarks(void)
98651
98652 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
98653 high_wmark_pages(zone) - low_wmark_pages(zone) -
98654- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98655+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98656
98657 setup_zone_migrate_reserve(zone);
98658 spin_unlock_irqrestore(&zone->lock, flags);
98659diff --git a/mm/percpu.c b/mm/percpu.c
98660index d39e2f4..de5f4b4 100644
98661--- a/mm/percpu.c
98662+++ b/mm/percpu.c
98663@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
98664 static unsigned int pcpu_high_unit_cpu __read_mostly;
98665
98666 /* the address of the first chunk which starts with the kernel static area */
98667-void *pcpu_base_addr __read_mostly;
98668+void *pcpu_base_addr __read_only;
98669 EXPORT_SYMBOL_GPL(pcpu_base_addr);
98670
98671 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
98672diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
98673index 5077afc..846c9ef 100644
98674--- a/mm/process_vm_access.c
98675+++ b/mm/process_vm_access.c
98676@@ -13,6 +13,7 @@
98677 #include <linux/uio.h>
98678 #include <linux/sched.h>
98679 #include <linux/highmem.h>
98680+#include <linux/security.h>
98681 #include <linux/ptrace.h>
98682 #include <linux/slab.h>
98683 #include <linux/syscalls.h>
98684@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98685 ssize_t iov_len;
98686 size_t total_len = iov_iter_count(iter);
98687
98688+ return -ENOSYS; /* PaX: until properly audited */
98689+
98690 /*
98691 * Work out how many pages of struct pages we're going to need
98692 * when eventually calling get_user_pages
98693 */
98694 for (i = 0; i < riovcnt; i++) {
98695 iov_len = rvec[i].iov_len;
98696- if (iov_len > 0) {
98697- nr_pages_iov = ((unsigned long)rvec[i].iov_base
98698- + iov_len)
98699- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
98700- / PAGE_SIZE + 1;
98701- nr_pages = max(nr_pages, nr_pages_iov);
98702- }
98703+ if (iov_len <= 0)
98704+ continue;
98705+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
98706+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
98707+ nr_pages = max(nr_pages, nr_pages_iov);
98708 }
98709
98710 if (nr_pages == 0)
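
The cleaned-up loop computes nr_pages_iov as last page index minus first page index plus one, an upper bound on the page frames a user buffer can touch. A quick sketch of the arithmetic:

#include <assert.h>

#define PAGE_SIZE 4096UL

/* Sketch: pages spanned by [base, base + len), computed the way
 * process_vm_rw_core() does above. */
static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
	assert(pages_spanned(0x1000, 1) == 1);	/* within one page */
	assert(pages_spanned(0x1FFF, 2) == 2);	/* crosses a boundary */
	/* upper bound: an end exactly on a boundary counts one extra page */
	assert(pages_spanned(0x1000, 2 * PAGE_SIZE) == 3);
	return 0;
}
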
98711@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98712 goto free_proc_pages;
98713 }
98714
98715+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
98716+ rc = -EPERM;
98717+ goto put_task_struct;
98718+ }
98719+
98720 mm = mm_access(task, PTRACE_MODE_ATTACH);
98721 if (!mm || IS_ERR(mm)) {
98722 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
98723diff --git a/mm/rmap.c b/mm/rmap.c
98724index 71cd5bd..e259089 100644
98725--- a/mm/rmap.c
98726+++ b/mm/rmap.c
98727@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98728 struct anon_vma *anon_vma = vma->anon_vma;
98729 struct anon_vma_chain *avc;
98730
98731+#ifdef CONFIG_PAX_SEGMEXEC
98732+ struct anon_vma_chain *avc_m = NULL;
98733+#endif
98734+
98735 might_sleep();
98736 if (unlikely(!anon_vma)) {
98737 struct mm_struct *mm = vma->vm_mm;
98738@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98739 if (!avc)
98740 goto out_enomem;
98741
98742+#ifdef CONFIG_PAX_SEGMEXEC
98743+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
98744+ if (!avc_m)
98745+ goto out_enomem_free_avc;
98746+#endif
98747+
98748 anon_vma = find_mergeable_anon_vma(vma);
98749 allocated = NULL;
98750 if (!anon_vma) {
98751@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98752 /* page_table_lock to protect against threads */
98753 spin_lock(&mm->page_table_lock);
98754 if (likely(!vma->anon_vma)) {
98755+
98756+#ifdef CONFIG_PAX_SEGMEXEC
98757+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
98758+
98759+ if (vma_m) {
98760+ BUG_ON(vma_m->anon_vma);
98761+ vma_m->anon_vma = anon_vma;
98762+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
98763+ anon_vma->degree++;
98764+ avc_m = NULL;
98765+ }
98766+#endif
98767+
98768 vma->anon_vma = anon_vma;
98769 anon_vma_chain_link(vma, avc, anon_vma);
98770 /* vma reference or self-parent link for new root */
98771@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98772
98773 if (unlikely(allocated))
98774 put_anon_vma(allocated);
98775+
98776+#ifdef CONFIG_PAX_SEGMEXEC
98777+ if (unlikely(avc_m))
98778+ anon_vma_chain_free(avc_m);
98779+#endif
98780+
98781 if (unlikely(avc))
98782 anon_vma_chain_free(avc);
98783 }
98784 return 0;
98785
98786 out_enomem_free_avc:
98787+
98788+#ifdef CONFIG_PAX_SEGMEXEC
98789+ if (avc_m)
98790+ anon_vma_chain_free(avc_m);
98791+#endif
98792+
98793 anon_vma_chain_free(avc);
98794 out_enomem:
98795 return -ENOMEM;
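
The mirror's anon_vma_chain is allocated up front with GFP_KERNEL and simply freed again if it turns out to be unneeded, because no sleeping allocation is allowed once page_table_lock is held. A user-space sketch of that allocate-outside-the-lock pattern:

#include <pthread.h>
#include <stdlib.h>

/* Sketch: optimistic pre-allocation before a non-sleeping critical
 * section, as anon_vma_prepare() does for the mirror's avc. */
struct avc { int dummy; };

static struct avc *installed;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int prepare(void)
{
	struct avc *avc = malloc(sizeof(*avc));	/* may sleep: do it here */

	if (!avc)
		return -1;
	pthread_mutex_lock(&lock);	/* no allocation past this point */
	if (!installed) {
		installed = avc;
		avc = NULL;		/* consumed */
	}
	pthread_mutex_unlock(&lock);
	free(avc);			/* lost the race: give it back */
	return 0;
}

int main(void)
{
	return prepare();
}
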
98796@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
98797 * good chance of avoiding scanning the whole hierarchy when it searches where
98798 * page is mapped.
98799 */
98800-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98801+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
98802 {
98803 struct anon_vma_chain *avc, *pavc;
98804 struct anon_vma *root = NULL;
98805@@ -296,7 +331,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98806 * the corresponding VMA in the parent process is attached to.
98807 * Returns 0 on success, non-zero on failure.
98808 */
98809-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
98810+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
98811 {
98812 struct anon_vma_chain *avc;
98813 struct anon_vma *anon_vma;
98814@@ -416,8 +451,10 @@ static void anon_vma_ctor(void *data)
98815 void __init anon_vma_init(void)
98816 {
98817 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
98818- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
98819- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
98820+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
98821+ anon_vma_ctor);
98822+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
98823+ SLAB_PANIC|SLAB_NO_SANITIZE);
98824 }
98825
98826 /*
98827diff --git a/mm/shmem.c b/mm/shmem.c
98828index 993e6ba..a962ba3 100644
98829--- a/mm/shmem.c
98830+++ b/mm/shmem.c
98831@@ -33,7 +33,7 @@
98832 #include <linux/swap.h>
98833 #include <linux/aio.h>
98834
98835-static struct vfsmount *shm_mnt;
98836+struct vfsmount *shm_mnt;
98837
98838 #ifdef CONFIG_SHMEM
98839 /*
98840@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
98841 #define BOGO_DIRENT_SIZE 20
98842
98843 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
98844-#define SHORT_SYMLINK_LEN 128
98845+#define SHORT_SYMLINK_LEN 64
98846
98847 /*
98848 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
98849@@ -2558,6 +2558,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
98850 static int shmem_xattr_validate(const char *name)
98851 {
98852 struct { const char *prefix; size_t len; } arr[] = {
98853+
98854+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98855+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
98856+#endif
98857+
98858 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
98859 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
98860 };
98861@@ -2613,6 +2618,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
98862 if (err)
98863 return err;
98864
98865+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98866+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
98867+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
98868+ return -EOPNOTSUPP;
98869+ if (size > 8)
98870+ return -EINVAL;
98871+ }
98872+#endif
98873+
98874 return simple_xattr_set(&info->xattrs, name, value, size, flags);
98875 }
98876
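
With CONFIG_PAX_XATTR_PAX_FLAGS, tmpfs accepts exactly one user.* attribute, the PaX flags string, capped at 8 bytes, so per-binary flags survive on filesystems without security labels. A hedged usage sketch; the attribute name follows the usual PaX convention and may differ in other trees:

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	/* "m" would disable MPROTECT for this binary; at most 8 flag
	 * bytes are accepted by the patched shmem_setxattr() above */
	if (setxattr("/tmp/some-binary", "user.pax.flags", "m", 1, 0) != 0)
		perror("setxattr");
	return 0;
}
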
98877@@ -2996,8 +3010,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
98878 int err = -ENOMEM;
98879
98880 /* Round up to L1_CACHE_BYTES to resist false sharing */
98881- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
98882- L1_CACHE_BYTES), GFP_KERNEL);
98883+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
98884 if (!sbinfo)
98885 return -ENOMEM;
98886
98887diff --git a/mm/slab.c b/mm/slab.c
98888index 65b5dcb..d53d866 100644
98889--- a/mm/slab.c
98890+++ b/mm/slab.c
98891@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98892 if ((x)->max_freeable < i) \
98893 (x)->max_freeable = i; \
98894 } while (0)
98895-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
98896-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
98897-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
98898-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
98899+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
98900+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
98901+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
98902+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
98903+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
98904+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
98905 #else
98906 #define STATS_INC_ACTIVE(x) do { } while (0)
98907 #define STATS_DEC_ACTIVE(x) do { } while (0)
98908@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98909 #define STATS_INC_ALLOCMISS(x) do { } while (0)
98910 #define STATS_INC_FREEHIT(x) do { } while (0)
98911 #define STATS_INC_FREEMISS(x) do { } while (0)
98912+#define STATS_INC_SANITIZED(x) do { } while (0)
98913+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
98914 #endif
98915
98916 #if DEBUG
98917@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
98918 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
98919 */
98920 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
98921- const struct page *page, void *obj)
98922+ const struct page *page, const void *obj)
98923 {
98924 u32 offset = (obj - page->s_mem);
98925 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
98926@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
98927 * structures first. Without this, further allocations will bug.
98928 */
98929 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
98930- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
98931+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
98932 slab_state = PARTIAL_NODE;
98933
98934 slab_early_init = 0;
98935@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
98936
98937 cachep = find_mergeable(size, align, flags, name, ctor);
98938 if (cachep) {
98939- cachep->refcount++;
98940+ atomic_inc(&cachep->refcount);
98941
98942 /*
98943 * Adjust the object sizes so that we clear
98944@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
98945 struct array_cache *ac = cpu_cache_get(cachep);
98946
98947 check_irq_off();
98948+
98949+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98950+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
98951+ STATS_INC_NOT_SANITIZED(cachep);
98952+ else {
98953+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
98954+
98955+ if (cachep->ctor)
98956+ cachep->ctor(objp);
98957+
98958+ STATS_INC_SANITIZED(cachep);
98959+ }
98960+#endif
98961+
98962 kmemleak_free_recursive(objp, cachep->flags);
98963 objp = cache_free_debugcheck(cachep, objp, caller);
98964
98965@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
98966 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
98967 }
98968
98969-void *__kmalloc_node(size_t size, gfp_t flags, int node)
98970+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
98971 {
98972 return __do_kmalloc_node(size, flags, node, _RET_IP_);
98973 }
98974@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
98975 * @flags: the type of memory to allocate (see kmalloc).
98976 * @caller: function caller for debug tracking of the caller
98977 */
98978-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
98979+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
98980 unsigned long caller)
98981 {
98982 struct kmem_cache *cachep;
98983@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
98984
98985 if (unlikely(ZERO_OR_NULL_PTR(objp)))
98986 return;
98987+ VM_BUG_ON(!virt_addr_valid(objp));
98988 local_irq_save(flags);
98989 kfree_debugcheck(objp);
98990 c = virt_to_cache(objp);
98991@@ -3984,14 +4003,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
98992 }
98993 /* cpu stats */
98994 {
98995- unsigned long allochit = atomic_read(&cachep->allochit);
98996- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
98997- unsigned long freehit = atomic_read(&cachep->freehit);
98998- unsigned long freemiss = atomic_read(&cachep->freemiss);
98999+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
99000+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
99001+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
99002+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
99003
99004 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
99005 allochit, allocmiss, freehit, freemiss);
99006 }
99007+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99008+ {
99009+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
99010+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
99011+
99012+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
99013+ }
99014+#endif
99015 #endif
99016 }
99017
99018@@ -4199,13 +4226,69 @@ static const struct file_operations proc_slabstats_operations = {
99019 static int __init slab_proc_init(void)
99020 {
99021 #ifdef CONFIG_DEBUG_SLAB_LEAK
99022- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
99023+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
99024 #endif
99025 return 0;
99026 }
99027 module_init(slab_proc_init);
99028 #endif
99029
99030+bool is_usercopy_object(const void *ptr)
99031+{
99032+ struct page *page;
99033+ struct kmem_cache *cachep;
99034+
99035+ if (ZERO_OR_NULL_PTR(ptr))
99036+ return false;
99037+
99038+ if (!slab_is_available())
99039+ return false;
99040+
99041+ if (!virt_addr_valid(ptr))
99042+ return false;
99043+
99044+ page = virt_to_head_page(ptr);
99045+
99046+ if (!PageSlab(page))
99047+ return false;
99048+
99049+ cachep = page->slab_cache;
99050+ return cachep->flags & SLAB_USERCOPY;
99051+}
99052+
99053+#ifdef CONFIG_PAX_USERCOPY
99054+const char *check_heap_object(const void *ptr, unsigned long n)
99055+{
99056+ struct page *page;
99057+ struct kmem_cache *cachep;
99058+ unsigned int objnr;
99059+ unsigned long offset;
99060+
99061+ if (ZERO_OR_NULL_PTR(ptr))
99062+ return "<null>";
99063+
99064+ if (!virt_addr_valid(ptr))
99065+ return NULL;
99066+
99067+ page = virt_to_head_page(ptr);
99068+
99069+ if (!PageSlab(page))
99070+ return NULL;
99071+
99072+ cachep = page->slab_cache;
99073+ if (!(cachep->flags & SLAB_USERCOPY))
99074+ return cachep->name;
99075+
99076+ objnr = obj_to_index(cachep, page, ptr);
99077+ BUG_ON(objnr >= cachep->num);
99078+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
99079+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
99080+ return NULL;
99081+
99082+ return cachep->name;
99083+}
99084+#endif
99085+
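
The final test in check_heap_object() is the overflow-safe way to ask whether an n-byte copy starting at offset stays inside one slab object: comparing n against object_size - offset avoids the wrap that a naive offset + n sum could hit. A small sketch:

#include <assert.h>
#include <stdbool.h>

/* Sketch of the USERCOPY bounds test: does a copy of n bytes starting
 * at 'offset' fit in an object of 'size' bytes? Written so that no
 * intermediate sum can overflow. */
static bool copy_in_bounds(unsigned long offset, unsigned long n,
			   unsigned long size)
{
	return offset <= size && n <= size - offset;
}

int main(void)
{
	assert(copy_in_bounds(0, 64, 64));	/* whole object */
	assert(!copy_in_bounds(32, 64, 64));	/* runs past the end */
	/* a naive 'offset + n <= size' would wrap here and wrongly pass */
	assert(!copy_in_bounds(8, (unsigned long)-4, 64));
	return 0;
}
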
99086 /**
99087 * ksize - get the actual amount of memory allocated for a given object
99088 * @objp: Pointer to the object
99089diff --git a/mm/slab.h b/mm/slab.h
99090index 1cf40054..10ad563 100644
99091--- a/mm/slab.h
99092+++ b/mm/slab.h
99093@@ -22,7 +22,7 @@ struct kmem_cache {
99094 unsigned int align; /* Alignment as calculated */
99095 unsigned long flags; /* Active flags on the slab */
99096 const char *name; /* Slab name for sysfs */
99097- int refcount; /* Use counter */
99098+ atomic_t refcount; /* Use counter */
99099 void (*ctor)(void *); /* Called on object slot creation */
99100 struct list_head list; /* List of all slab caches on the system */
99101 };
99102@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
99103 /* The slab cache that manages slab cache information */
99104 extern struct kmem_cache *kmem_cache;
99105
99106+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99107+#ifdef CONFIG_X86_64
99108+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
99109+#else
99110+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
99111+#endif
99112+enum pax_sanitize_mode {
99113+ PAX_SANITIZE_SLAB_OFF = 0,
99114+ PAX_SANITIZE_SLAB_FAST,
99115+ PAX_SANITIZE_SLAB_FULL,
99116+};
99117+extern enum pax_sanitize_mode pax_sanitize_slab;
99118+#endif
99119+
99120 unsigned long calculate_alignment(unsigned long flags,
99121 unsigned long align, unsigned long size);
99122
99123@@ -116,7 +130,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
99124
99125 /* Legal flag mask for kmem_cache_create(), for various configurations */
99126 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
99127- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
99128+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
99129+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
99130
99131 #if defined(CONFIG_DEBUG_SLAB)
99132 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
99133@@ -300,6 +315,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
99134 return s;
99135
99136 page = virt_to_head_page(x);
99137+
99138+ BUG_ON(!PageSlab(page));
99139+
99140 cachep = page->slab_cache;
99141 if (slab_equal_or_root(cachep, s))
99142 return cachep;
99143diff --git a/mm/slab_common.c b/mm/slab_common.c
99144index e03dd6f..c475838 100644
99145--- a/mm/slab_common.c
99146+++ b/mm/slab_common.c
99147@@ -25,11 +25,35 @@
99148
99149 #include "slab.h"
99150
99151-enum slab_state slab_state;
99152+enum slab_state slab_state __read_only;
99153 LIST_HEAD(slab_caches);
99154 DEFINE_MUTEX(slab_mutex);
99155 struct kmem_cache *kmem_cache;
99156
99157+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99158+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
99159+static int __init pax_sanitize_slab_setup(char *str)
99160+{
99161+ if (!str)
99162+ return 0;
99163+
99164+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
99165+ pr_info("PaX slab sanitization: %s\n", "disabled");
99166+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
99167+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
99168+ pr_info("PaX slab sanitization: %s\n", "fast");
99169+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
99170+ } else if (!strcmp(str, "full")) {
99171+ pr_info("PaX slab sanitization: %s\n", "full");
99172+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
99173+ } else
99174+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
99175+
99176+ return 0;
99177+}
99178+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
99179+#endif
99180+
99181 /*
99182 * Set of flags that will prevent slab merging
99183 */
99184@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
99185 * Merge control. If this is set then no merging of slab caches will occur.
99186 * (Could be removed. This was introduced to pacify the merge skeptics.)
99187 */
99188-static int slab_nomerge;
99189+static int slab_nomerge = 1;
99190
99191 static int __init setup_slab_nomerge(char *str)
99192 {
99193@@ -218,7 +242,7 @@ int slab_unmergeable(struct kmem_cache *s)
99194 /*
99195 * We may have set a slab to be unmergeable during bootstrap.
99196 */
99197- if (s->refcount < 0)
99198+ if (atomic_read(&s->refcount) < 0)
99199 return 1;
99200
99201 return 0;
99202@@ -322,7 +346,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
99203 if (err)
99204 goto out_free_cache;
99205
99206- s->refcount = 1;
99207+ atomic_set(&s->refcount, 1);
99208 list_add(&s->list, &slab_caches);
99209 out:
99210 if (err)
99211@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
99212 */
99213 flags &= CACHE_CREATE_MASK;
99214
99215+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99216+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
99217+ flags |= SLAB_NO_SANITIZE;
99218+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
99219+ flags &= ~SLAB_NO_SANITIZE;
99220+#endif
99221+
99222 s = __kmem_cache_alias(name, size, align, flags, ctor);
99223 if (s)
99224 goto out_unlock;
99225@@ -505,8 +536,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99226
99227 mutex_lock(&slab_mutex);
99228
99229- s->refcount--;
99230- if (s->refcount)
99231+ if (!atomic_dec_and_test(&s->refcount))
99232 goto out_unlock;
99233
99234 if (memcg_cleanup_cache_params(s) != 0)
99235@@ -526,7 +556,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99236 rcu_barrier();
99237
99238 memcg_free_cache_params(s);
99239-#ifdef SLAB_SUPPORTS_SYSFS
99240+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99241 sysfs_slab_remove(s);
99242 #else
99243 slab_kmem_cache_release(s);
99244@@ -582,7 +612,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
99245 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
99246 name, size, err);
99247
99248- s->refcount = -1; /* Exempt from merging for now */
99249+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
99250 }
99251
99252 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99253@@ -595,7 +625,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99254
99255 create_boot_cache(s, name, size, flags);
99256 list_add(&s->list, &slab_caches);
99257- s->refcount = 1;
99258+ atomic_set(&s->refcount, 1);
99259 return s;
99260 }
99261
99262@@ -607,6 +637,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
99263 EXPORT_SYMBOL(kmalloc_dma_caches);
99264 #endif
99265
99266+#ifdef CONFIG_PAX_USERCOPY_SLABS
99267+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
99268+EXPORT_SYMBOL(kmalloc_usercopy_caches);
99269+#endif
99270+
99271 /*
99272 * Conversion table for small slabs sizes / 8 to the index in the
99273 * kmalloc array. This is necessary for slabs < 192 since we have non power
99274@@ -671,6 +706,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
99275 return kmalloc_dma_caches[index];
99276
99277 #endif
99278+
99279+#ifdef CONFIG_PAX_USERCOPY_SLABS
99280+ if (unlikely((flags & GFP_USERCOPY)))
99281+ return kmalloc_usercopy_caches[index];
99282+
99283+#endif
99284+
99285 return kmalloc_caches[index];
99286 }
99287
99288@@ -727,7 +769,7 @@ void __init create_kmalloc_caches(unsigned long flags)
99289 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
99290 if (!kmalloc_caches[i]) {
99291 kmalloc_caches[i] = create_kmalloc_cache(NULL,
99292- 1 << i, flags);
99293+ 1 << i, SLAB_USERCOPY | flags);
99294 }
99295
99296 /*
99297@@ -736,10 +778,10 @@ void __init create_kmalloc_caches(unsigned long flags)
99298 * earlier power of two caches
99299 */
99300 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
99301- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
99302+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
99303
99304 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
99305- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
99306+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
99307 }
99308
99309 /* Kmalloc array is now usable */
99310@@ -772,6 +814,23 @@ void __init create_kmalloc_caches(unsigned long flags)
99311 }
99312 }
99313 #endif
99314+
99315+#ifdef CONFIG_PAX_USERCOPY_SLABS
99316+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
99317+ struct kmem_cache *s = kmalloc_caches[i];
99318+
99319+ if (s) {
99320+ int size = kmalloc_size(i);
99321+ char *n = kasprintf(GFP_NOWAIT,
99322+ "usercopy-kmalloc-%d", size);
99323+
99324+ BUG_ON(!n);
99325+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
99326+ size, SLAB_USERCOPY | flags);
99327+ }
99328+ }
99329+#endif
99330+
99331 }
99332 #endif /* !CONFIG_SLOB */
99333
99334@@ -830,6 +889,9 @@ static void print_slabinfo_header(struct seq_file *m)
99335 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
99336 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
99337 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
99338+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99339+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
99340+#endif
99341 #endif
99342 seq_putc(m, '\n');
99343 }
99344@@ -964,7 +1026,7 @@ static int __init slab_proc_init(void)
99345 module_init(slab_proc_init);
99346 #endif /* CONFIG_SLABINFO */
99347
99348-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
99349+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
99350 gfp_t flags)
99351 {
99352 void *ret;
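
The slab_common.c changes introduce a three-state pax_sanitize_slab boot parameter (off, fast, full) and resolve it into a per-cache SLAB_NO_SANITIZE bit inside kmem_cache_create(); caches freed via RCU always opt out, since their objects must stay readable until the grace period ends. They also convert kmem_cache::refcount to an atomic_t so the alias, merge and destroy paths update it atomically. A stand-alone sketch of the mode resolution, with illustrative flag values but the same logic as the hunk:

#include <stdio.h>

#define SLAB_DESTROY_BY_RCU 0x1UL
#define SLAB_NO_SANITIZE    0x2UL

enum pax_sanitize_mode {
	PAX_SANITIZE_SLAB_OFF,
	PAX_SANITIZE_SLAB_FAST,
	PAX_SANITIZE_SLAB_FULL,
};

static unsigned long resolve_flags(enum pax_sanitize_mode mode, unsigned long flags)
{
	/* RCU-freed caches are never sanitized: lockless readers may still
	 * inspect an object until the grace period ends, so poisoning it at
	 * free time would break them. */
	if (mode == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
		flags |= SLAB_NO_SANITIZE;
	else if (mode == PAX_SANITIZE_SLAB_FULL)
		flags &= ~SLAB_NO_SANITIZE;
	/* FAST keeps whatever per-cache default the caller passed in. */
	return flags;
}

int main(void)
{
	/* FULL strips a cache's opt-out; OFF forces it on. */
	printf("full: %#lx\n", resolve_flags(PAX_SANITIZE_SLAB_FULL, SLAB_NO_SANITIZE));
	printf("off:  %#lx\n", resolve_flags(PAX_SANITIZE_SLAB_OFF, 0));
	return 0;
}
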
99353diff --git a/mm/slob.c b/mm/slob.c
99354index 96a8620..46b3f12 100644
99355--- a/mm/slob.c
99356+++ b/mm/slob.c
99357@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
99358 /*
99359 * Return the size of a slob block.
99360 */
99361-static slobidx_t slob_units(slob_t *s)
99362+static slobidx_t slob_units(const slob_t *s)
99363 {
99364 if (s->units > 0)
99365 return s->units;
99366@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
99367 /*
99368 * Return the next free slob block pointer after this one.
99369 */
99370-static slob_t *slob_next(slob_t *s)
99371+static slob_t *slob_next(const slob_t *s)
99372 {
99373 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
99374 slobidx_t next;
99375@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
99376 /*
99377 * Returns true if s is the last free block in its page.
99378 */
99379-static int slob_last(slob_t *s)
99380+static int slob_last(const slob_t *s)
99381 {
99382 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
99383 }
99384
99385-static void *slob_new_pages(gfp_t gfp, int order, int node)
99386+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
99387 {
99388- void *page;
99389+ struct page *page;
99390
99391 #ifdef CONFIG_NUMA
99392 if (node != NUMA_NO_NODE)
99393@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
99394 if (!page)
99395 return NULL;
99396
99397- return page_address(page);
99398+ __SetPageSlab(page);
99399+ return page;
99400 }
99401
99402-static void slob_free_pages(void *b, int order)
99403+static void slob_free_pages(struct page *sp, int order)
99404 {
99405 if (current->reclaim_state)
99406 current->reclaim_state->reclaimed_slab += 1 << order;
99407- free_pages((unsigned long)b, order);
99408+ __ClearPageSlab(sp);
99409+ page_mapcount_reset(sp);
99410+ sp->private = 0;
99411+ __free_pages(sp, order);
99412 }
99413
99414 /*
99415@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99416
99417 /* Not enough space: must allocate a new page */
99418 if (!b) {
99419- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99420- if (!b)
99421+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99422+ if (!sp)
99423 return NULL;
99424- sp = virt_to_page(b);
99425- __SetPageSlab(sp);
99426+ b = page_address(sp);
99427
99428 spin_lock_irqsave(&slob_lock, flags);
99429 sp->units = SLOB_UNITS(PAGE_SIZE);
99430 sp->freelist = b;
99431+ sp->private = 0;
99432 INIT_LIST_HEAD(&sp->lru);
99433 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
99434 set_slob_page_free(sp, slob_list);
99435@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99436 /*
99437 * slob_free: entry point into the slob allocator.
99438 */
99439-static void slob_free(void *block, int size)
99440+static void slob_free(struct kmem_cache *c, void *block, int size)
99441 {
99442 struct page *sp;
99443 slob_t *prev, *next, *b = (slob_t *)block;
99444@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
99445 if (slob_page_free(sp))
99446 clear_slob_page_free(sp);
99447 spin_unlock_irqrestore(&slob_lock, flags);
99448- __ClearPageSlab(sp);
99449- page_mapcount_reset(sp);
99450- slob_free_pages(b, 0);
99451+ slob_free_pages(sp, 0);
99452 return;
99453 }
99454
99455+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99456+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
99457+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
99458+#endif
99459+
99460 if (!slob_page_free(sp)) {
99461 /* This slob page is about to become partially free. Easy! */
99462 sp->units = units;
99463@@ -424,11 +431,10 @@ out:
99464 */
99465
99466 static __always_inline void *
99467-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99468+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
99469 {
99470- unsigned int *m;
99471- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99472- void *ret;
99473+ slob_t *m;
99474+ void *ret = NULL;
99475
99476 gfp &= gfp_allowed_mask;
99477
99478@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99479
99480 if (!m)
99481 return NULL;
99482- *m = size;
99483+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
99484+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
99485+ m[0].units = size;
99486+ m[1].units = align;
99487 ret = (void *)m + align;
99488
99489 trace_kmalloc_node(caller, ret,
99490 size, size + align, gfp, node);
99491 } else {
99492 unsigned int order = get_order(size);
99493+ struct page *page;
99494
99495 if (likely(order))
99496 gfp |= __GFP_COMP;
99497- ret = slob_new_pages(gfp, order, node);
99498+ page = slob_new_pages(gfp, order, node);
99499+ if (page) {
99500+ ret = page_address(page);
99501+ page->private = size;
99502+ }
99503
99504 trace_kmalloc_node(caller, ret,
99505 size, PAGE_SIZE << order, gfp, node);
99506 }
99507
99508- kmemleak_alloc(ret, size, 1, gfp);
99509 return ret;
99510 }
99511
99512-void *__kmalloc(size_t size, gfp_t gfp)
99513+static __always_inline void *
99514+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99515+{
99516+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99517+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
99518+
99519+ if (!ZERO_OR_NULL_PTR(ret))
99520+ kmemleak_alloc(ret, size, 1, gfp);
99521+ return ret;
99522+}
99523+
99524+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
99525 {
99526 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
99527 }
99528@@ -491,34 +515,112 @@ void kfree(const void *block)
99529 return;
99530 kmemleak_free(block);
99531
99532+ VM_BUG_ON(!virt_addr_valid(block));
99533 sp = virt_to_page(block);
99534- if (PageSlab(sp)) {
99535+ VM_BUG_ON(!PageSlab(sp));
99536+ if (!sp->private) {
99537 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99538- unsigned int *m = (unsigned int *)(block - align);
99539- slob_free(m, *m + align);
99540- } else
99541+ slob_t *m = (slob_t *)(block - align);
99542+ slob_free(NULL, m, m[0].units + align);
99543+ } else {
99544+ __ClearPageSlab(sp);
99545+ page_mapcount_reset(sp);
99546+ sp->private = 0;
99547 __free_pages(sp, compound_order(sp));
99548+ }
99549 }
99550 EXPORT_SYMBOL(kfree);
99551
99552+bool is_usercopy_object(const void *ptr)
99553+{
99554+ if (!slab_is_available())
99555+ return false;
99556+
99557+ // PAX: TODO
99558+
99559+ return false;
99560+}
99561+
99562+#ifdef CONFIG_PAX_USERCOPY
99563+const char *check_heap_object(const void *ptr, unsigned long n)
99564+{
99565+ struct page *page;
99566+ const slob_t *free;
99567+ const void *base;
99568+ unsigned long flags;
99569+
99570+ if (ZERO_OR_NULL_PTR(ptr))
99571+ return "<null>";
99572+
99573+ if (!virt_addr_valid(ptr))
99574+ return NULL;
99575+
99576+ page = virt_to_head_page(ptr);
99577+ if (!PageSlab(page))
99578+ return NULL;
99579+
99580+ if (page->private) {
99581+ base = page;
99582+ if (base <= ptr && n <= page->private - (ptr - base))
99583+ return NULL;
99584+ return "<slob>";
99585+ }
99586+
99587+ /* some tricky double walking to find the chunk */
99588+ spin_lock_irqsave(&slob_lock, flags);
99589+ base = (void *)((unsigned long)ptr & PAGE_MASK);
99590+ free = page->freelist;
99591+
99592+ while (!slob_last(free) && (void *)free <= ptr) {
99593+ base = free + slob_units(free);
99594+ free = slob_next(free);
99595+ }
99596+
99597+ while (base < (void *)free) {
99598+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
99599+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
99600+ int offset;
99601+
99602+ if (ptr < base + align)
99603+ break;
99604+
99605+ offset = ptr - base - align;
99606+ if (offset >= m) {
99607+ base += size;
99608+ continue;
99609+ }
99610+
99611+ if (n > m - offset)
99612+ break;
99613+
99614+ spin_unlock_irqrestore(&slob_lock, flags);
99615+ return NULL;
99616+ }
99617+
99618+ spin_unlock_irqrestore(&slob_lock, flags);
99619+ return "<slob>";
99620+}
99621+#endif
99622+
99623 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
99624 size_t ksize(const void *block)
99625 {
99626 struct page *sp;
99627 int align;
99628- unsigned int *m;
99629+ slob_t *m;
99630
99631 BUG_ON(!block);
99632 if (unlikely(block == ZERO_SIZE_PTR))
99633 return 0;
99634
99635 sp = virt_to_page(block);
99636- if (unlikely(!PageSlab(sp)))
99637- return PAGE_SIZE << compound_order(sp);
99638+ VM_BUG_ON(!PageSlab(sp));
99639+ if (sp->private)
99640+ return sp->private;
99641
99642 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99643- m = (unsigned int *)(block - align);
99644- return SLOB_UNITS(*m) * SLOB_UNIT;
99645+ m = (slob_t *)(block - align);
99646+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
99647 }
99648 EXPORT_SYMBOL(ksize);
99649
99650@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
99651
99652 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
99653 {
99654- void *b;
99655+ void *b = NULL;
99656
99657 flags &= gfp_allowed_mask;
99658
99659 lockdep_trace_alloc(flags);
99660
99661+#ifdef CONFIG_PAX_USERCOPY_SLABS
99662+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
99663+#else
99664 if (c->size < PAGE_SIZE) {
99665 b = slob_alloc(c->size, flags, c->align, node);
99666 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99667 SLOB_UNITS(c->size) * SLOB_UNIT,
99668 flags, node);
99669 } else {
99670- b = slob_new_pages(flags, get_order(c->size), node);
99671+ struct page *sp;
99672+
99673+ sp = slob_new_pages(flags, get_order(c->size), node);
99674+ if (sp) {
99675+ b = page_address(sp);
99676+ sp->private = c->size;
99677+ }
99678 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99679 PAGE_SIZE << get_order(c->size),
99680 flags, node);
99681 }
99682+#endif
99683
99684 if (b && c->ctor)
99685 c->ctor(b);
99686@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
99687 EXPORT_SYMBOL(kmem_cache_alloc);
99688
99689 #ifdef CONFIG_NUMA
99690-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
99691+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
99692 {
99693 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
99694 }
99695@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
99696 EXPORT_SYMBOL(kmem_cache_alloc_node);
99697 #endif
99698
99699-static void __kmem_cache_free(void *b, int size)
99700+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
99701 {
99702- if (size < PAGE_SIZE)
99703- slob_free(b, size);
99704+ struct page *sp;
99705+
99706+ sp = virt_to_page(b);
99707+ BUG_ON(!PageSlab(sp));
99708+ if (!sp->private)
99709+ slob_free(c, b, size);
99710 else
99711- slob_free_pages(b, get_order(size));
99712+ slob_free_pages(sp, get_order(size));
99713 }
99714
99715 static void kmem_rcu_free(struct rcu_head *head)
99716@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
99717 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
99718 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
99719
99720- __kmem_cache_free(b, slob_rcu->size);
99721+ __kmem_cache_free(NULL, b, slob_rcu->size);
99722 }
99723
99724 void kmem_cache_free(struct kmem_cache *c, void *b)
99725 {
99726+ int size = c->size;
99727+
99728+#ifdef CONFIG_PAX_USERCOPY_SLABS
99729+ if (size + c->align < PAGE_SIZE) {
99730+ size += c->align;
99731+ b -= c->align;
99732+ }
99733+#endif
99734+
99735 kmemleak_free_recursive(b, c->flags);
99736 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
99737 struct slob_rcu *slob_rcu;
99738- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
99739- slob_rcu->size = c->size;
99740+ slob_rcu = b + (size - sizeof(struct slob_rcu));
99741+ slob_rcu->size = size;
99742 call_rcu(&slob_rcu->head, kmem_rcu_free);
99743 } else {
99744- __kmem_cache_free(b, c->size);
99745+ __kmem_cache_free(c, b, size);
99746 }
99747
99748+#ifdef CONFIG_PAX_USERCOPY_SLABS
99749+ trace_kfree(_RET_IP_, b);
99750+#else
99751 trace_kmem_cache_free(_RET_IP_, b);
99752+#endif
99753+
99754 }
99755 EXPORT_SYMBOL(kmem_cache_free);
99756
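
The SLOB rework replaces the single unsigned int size header in front of each kmalloc allocation with two slob_t slots recording size and alignment; the added BUILD_BUG_ONs guarantee the minimum alignment leaves room for both. That header is what lets kfree(), ksize() and check_heap_object() recover exact object bounds. A userspace model of the layout, with illustrative types and a simplified ksize (the real one rounds to whole SLOB units):

#include <stdio.h>
#include <stdlib.h>

typedef struct { long units; } slob_t;

static void *model_kmalloc(size_t size, size_t align)
{
	slob_t *m = malloc(align + size);   /* header lives in the first 'align' bytes */
	if (!m)
		return NULL;
	m[0].units = (long)size;            /* mirrors m[0].units = size  */
	m[1].units = (long)align;           /* mirrors m[1].units = align */
	return (char *)m + align;
}

static size_t model_ksize(const void *block, size_t align)
{
	const slob_t *m = (const slob_t *)((const char *)block - align);
	return (size_t)m[0].units;
}

int main(void)
{
	size_t align = 2 * sizeof(slob_t);  /* the BUILD_BUG_ONs enforce this minimum */
	void *p = model_kmalloc(100, align);

	printf("ksize=%zu\n", model_ksize(p, align));
	free((char *)p - align);
	return 0;
}
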
99757diff --git a/mm/slub.c b/mm/slub.c
99758index fe376fe..2f5757c 100644
99759--- a/mm/slub.c
99760+++ b/mm/slub.c
99761@@ -197,7 +197,7 @@ struct track {
99762
99763 enum track_item { TRACK_ALLOC, TRACK_FREE };
99764
99765-#ifdef CONFIG_SYSFS
99766+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99767 static int sysfs_slab_add(struct kmem_cache *);
99768 static int sysfs_slab_alias(struct kmem_cache *, const char *);
99769 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
99770@@ -535,7 +535,7 @@ static void print_track(const char *s, struct track *t)
99771 if (!t->addr)
99772 return;
99773
99774- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
99775+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
99776 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
99777 #ifdef CONFIG_STACKTRACE
99778 {
99779@@ -2652,6 +2652,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
99780
99781 slab_free_hook(s, x);
99782
99783+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99784+ if (!(s->flags & SLAB_NO_SANITIZE)) {
99785+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
99786+ if (s->ctor)
99787+ s->ctor(x);
99788+ }
99789+#endif
99790+
99791 redo:
99792 /*
99793 * Determine the currently cpus per cpu slab.
99794@@ -2989,6 +2997,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
99795 s->inuse = size;
99796
99797 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
99798+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99799+ (!(flags & SLAB_NO_SANITIZE)) ||
99800+#endif
99801 s->ctor)) {
99802 /*
99803 * Relocate free pointer after the object if it is not
99804@@ -3243,7 +3254,7 @@ static int __init setup_slub_min_objects(char *str)
99805
99806 __setup("slub_min_objects=", setup_slub_min_objects);
99807
99808-void *__kmalloc(size_t size, gfp_t flags)
99809+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
99810 {
99811 struct kmem_cache *s;
99812 void *ret;
99813@@ -3279,7 +3290,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
99814 return ptr;
99815 }
99816
99817-void *__kmalloc_node(size_t size, gfp_t flags, int node)
99818+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
99819 {
99820 struct kmem_cache *s;
99821 void *ret;
99822@@ -3308,6 +3319,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
99823 EXPORT_SYMBOL(__kmalloc_node);
99824 #endif
99825
99826+bool is_usercopy_object(const void *ptr)
99827+{
99828+ struct page *page;
99829+ struct kmem_cache *s;
99830+
99831+ if (ZERO_OR_NULL_PTR(ptr))
99832+ return false;
99833+
99834+ if (!slab_is_available())
99835+ return false;
99836+
99837+ if (!virt_addr_valid(ptr))
99838+ return false;
99839+
99840+ page = virt_to_head_page(ptr);
99841+
99842+ if (!PageSlab(page))
99843+ return false;
99844+
99845+ s = page->slab_cache;
99846+ return s->flags & SLAB_USERCOPY;
99847+}
99848+
99849+#ifdef CONFIG_PAX_USERCOPY
99850+const char *check_heap_object(const void *ptr, unsigned long n)
99851+{
99852+ struct page *page;
99853+ struct kmem_cache *s;
99854+ unsigned long offset;
99855+
99856+ if (ZERO_OR_NULL_PTR(ptr))
99857+ return "<null>";
99858+
99859+ if (!virt_addr_valid(ptr))
99860+ return NULL;
99861+
99862+ page = virt_to_head_page(ptr);
99863+
99864+ if (!PageSlab(page))
99865+ return NULL;
99866+
99867+ s = page->slab_cache;
99868+ if (!(s->flags & SLAB_USERCOPY))
99869+ return s->name;
99870+
99871+ offset = (ptr - page_address(page)) % s->size;
99872+ if (offset <= s->object_size && n <= s->object_size - offset)
99873+ return NULL;
99874+
99875+ return s->name;
99876+}
99877+#endif
99878+
99879 size_t ksize(const void *object)
99880 {
99881 struct page *page;
99882@@ -3336,6 +3400,7 @@ void kfree(const void *x)
99883 if (unlikely(ZERO_OR_NULL_PTR(x)))
99884 return;
99885
99886+ VM_BUG_ON(!virt_addr_valid(x));
99887 page = virt_to_head_page(x);
99888 if (unlikely(!PageSlab(page))) {
99889 BUG_ON(!PageCompound(page));
99890@@ -3631,7 +3696,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99891 int i;
99892 struct kmem_cache *c;
99893
99894- s->refcount++;
99895+ atomic_inc(&s->refcount);
99896
99897 /*
99898 * Adjust the object sizes so that we clear
99899@@ -3650,7 +3715,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99900 }
99901
99902 if (sysfs_slab_alias(s, name)) {
99903- s->refcount--;
99904+ atomic_dec(&s->refcount);
99905 s = NULL;
99906 }
99907 }
99908@@ -3767,7 +3832,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
99909 }
99910 #endif
99911
99912-#ifdef CONFIG_SYSFS
99913+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99914 static int count_inuse(struct page *page)
99915 {
99916 return page->inuse;
99917@@ -4048,7 +4113,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
99918 len += sprintf(buf + len, "%7ld ", l->count);
99919
99920 if (l->addr)
99921+#ifdef CONFIG_GRKERNSEC_HIDESYM
99922+ len += sprintf(buf + len, "%pS", NULL);
99923+#else
99924 len += sprintf(buf + len, "%pS", (void *)l->addr);
99925+#endif
99926 else
99927 len += sprintf(buf + len, "<not-available>");
99928
99929@@ -4150,12 +4219,12 @@ static void __init resiliency_test(void)
99930 validate_slab_cache(kmalloc_caches[9]);
99931 }
99932 #else
99933-#ifdef CONFIG_SYSFS
99934+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99935 static void resiliency_test(void) {};
99936 #endif
99937 #endif
99938
99939-#ifdef CONFIG_SYSFS
99940+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99941 enum slab_stat_type {
99942 SL_ALL, /* All slabs */
99943 SL_PARTIAL, /* Only partially allocated slabs */
99944@@ -4392,13 +4461,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
99945 {
99946 if (!s->ctor)
99947 return 0;
99948+#ifdef CONFIG_GRKERNSEC_HIDESYM
99949+ return sprintf(buf, "%pS\n", NULL);
99950+#else
99951 return sprintf(buf, "%pS\n", s->ctor);
99952+#endif
99953 }
99954 SLAB_ATTR_RO(ctor);
99955
99956 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
99957 {
99958- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
99959+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
99960 }
99961 SLAB_ATTR_RO(aliases);
99962
99963@@ -4486,6 +4559,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
99964 SLAB_ATTR_RO(cache_dma);
99965 #endif
99966
99967+#ifdef CONFIG_PAX_USERCOPY_SLABS
99968+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
99969+{
99970+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
99971+}
99972+SLAB_ATTR_RO(usercopy);
99973+#endif
99974+
99975+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99976+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
99977+{
99978+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
99979+}
99980+SLAB_ATTR_RO(sanitize);
99981+#endif
99982+
99983 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
99984 {
99985 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
99986@@ -4541,7 +4630,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
99987 * as well as cause other issues like converting a mergeable
99988 * cache into an umergeable one.
99989 */
99990- if (s->refcount > 1)
99991+ if (atomic_read(&s->refcount) > 1)
99992 return -EINVAL;
99993
99994 s->flags &= ~SLAB_TRACE;
99995@@ -4661,7 +4750,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
99996 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
99997 size_t length)
99998 {
99999- if (s->refcount > 1)
100000+ if (atomic_read(&s->refcount) > 1)
100001 return -EINVAL;
100002
100003 s->flags &= ~SLAB_FAILSLAB;
100004@@ -4831,6 +4920,12 @@ static struct attribute *slab_attrs[] = {
100005 #ifdef CONFIG_ZONE_DMA
100006 &cache_dma_attr.attr,
100007 #endif
100008+#ifdef CONFIG_PAX_USERCOPY_SLABS
100009+ &usercopy_attr.attr,
100010+#endif
100011+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100012+ &sanitize_attr.attr,
100013+#endif
100014 #ifdef CONFIG_NUMA
100015 &remote_node_defrag_ratio_attr.attr,
100016 #endif
100017@@ -5075,6 +5170,7 @@ static char *create_unique_id(struct kmem_cache *s)
100018 return name;
100019 }
100020
100021+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100022 static int sysfs_slab_add(struct kmem_cache *s)
100023 {
100024 int err;
100025@@ -5148,6 +5244,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
100026 kobject_del(&s->kobj);
100027 kobject_put(&s->kobj);
100028 }
100029+#endif
100030
100031 /*
100032 * Need to buffer aliases during bootup until sysfs becomes
100033@@ -5161,6 +5258,7 @@ struct saved_alias {
100034
100035 static struct saved_alias *alias_list;
100036
100037+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100038 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100039 {
100040 struct saved_alias *al;
100041@@ -5183,6 +5281,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100042 alias_list = al;
100043 return 0;
100044 }
100045+#endif
100046
100047 static int __init slab_sysfs_init(void)
100048 {
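
On the SLUB side, check_heap_object() reduces the usercopy decision to modular arithmetic: the pointer's offset within its object slot is (ptr - page_address(page)) % s->size, and a copy of n bytes is allowed only when it fits inside the remaining object_size. A stand-alone model of that check; the field names mirror struct kmem_cache, the values are illustrative:

#include <stddef.h>
#include <stdio.h>

struct cache { size_t size; size_t object_size; };

/* Returns NULL when the span is acceptable, else a reason string,
 * matching the convention of the patched check_heap_object(). */
static const char *check_span(const struct cache *s, size_t ptr_off, size_t n)
{
	size_t offset = ptr_off % s->size;  /* offset into the containing slot */

	if (offset <= s->object_size && n <= s->object_size - offset)
		return NULL;
	return "out of object bounds";
}

int main(void)
{
	struct cache c = { .size = 256, .object_size = 192 };
	const char *r1 = check_span(&c, 300, 100); /* offset 44: fits    */
	const char *r2 = check_span(&c, 300, 200); /* overruns the object */

	printf("%s / %s\n", r1 ? r1 : "ok", r2 ? r2 : "ok");
	return 0;
}
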
100049diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
100050index 4cba9c2..b4f9fcc 100644
100051--- a/mm/sparse-vmemmap.c
100052+++ b/mm/sparse-vmemmap.c
100053@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
100054 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100055 if (!p)
100056 return NULL;
100057- pud_populate(&init_mm, pud, p);
100058+ pud_populate_kernel(&init_mm, pud, p);
100059 }
100060 return pud;
100061 }
100062@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
100063 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100064 if (!p)
100065 return NULL;
100066- pgd_populate(&init_mm, pgd, p);
100067+ pgd_populate_kernel(&init_mm, pgd, p);
100068 }
100069 return pgd;
100070 }
100071diff --git a/mm/sparse.c b/mm/sparse.c
100072index d1b48b6..6e8590e 100644
100073--- a/mm/sparse.c
100074+++ b/mm/sparse.c
100075@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
100076
100077 for (i = 0; i < PAGES_PER_SECTION; i++) {
100078 if (PageHWPoison(&memmap[i])) {
100079- atomic_long_sub(1, &num_poisoned_pages);
100080+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
100081 ClearPageHWPoison(&memmap[i]);
100082 }
100083 }
100084diff --git a/mm/swap.c b/mm/swap.c
100085index 8a12b33..7068e78 100644
100086--- a/mm/swap.c
100087+++ b/mm/swap.c
100088@@ -31,6 +31,7 @@
100089 #include <linux/memcontrol.h>
100090 #include <linux/gfp.h>
100091 #include <linux/uio.h>
100092+#include <linux/hugetlb.h>
100093
100094 #include "internal.h"
100095
100096@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
100097
100098 __page_cache_release(page);
100099 dtor = get_compound_page_dtor(page);
100100+ if (!PageHuge(page))
100101+ BUG_ON(dtor != free_compound_page);
100102 (*dtor)(page);
100103 }
100104
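
The swap.c hunk pins the indirect call in __put_compound_page(): for non-huge compound pages the destructor fetched from the page must be free_compound_page, so a corrupted function pointer traps instead of redirecting control flow. The pattern, modeled in plain C with a simplified destructor signature:

#include <assert.h>
#include <stdio.h>

static void free_compound_page_model(void) { puts("freed"); }

static void put_compound_page_model(void (*dtor)(void), int is_huge)
{
	if (!is_huge)
		assert(dtor == free_compound_page_model); /* mirrors BUG_ON(dtor != ...) */
	dtor();
}

int main(void)
{
	put_compound_page_model(free_compound_page_model, 0);
	return 0;
}
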
100105diff --git a/mm/swapfile.c b/mm/swapfile.c
100106index 63f55cc..31874e6 100644
100107--- a/mm/swapfile.c
100108+++ b/mm/swapfile.c
100109@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
100110
100111 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
100112 /* Activity counter to indicate that a swapon or swapoff has occurred */
100113-static atomic_t proc_poll_event = ATOMIC_INIT(0);
100114+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
100115
100116 static inline unsigned char swap_count(unsigned char ent)
100117 {
100118@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
100119 spin_unlock(&swap_lock);
100120
100121 err = 0;
100122- atomic_inc(&proc_poll_event);
100123+ atomic_inc_unchecked(&proc_poll_event);
100124 wake_up_interruptible(&proc_poll_wait);
100125
100126 out_dput:
100127@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
100128
100129 poll_wait(file, &proc_poll_wait, wait);
100130
100131- if (seq->poll_event != atomic_read(&proc_poll_event)) {
100132- seq->poll_event = atomic_read(&proc_poll_event);
100133+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
100134+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100135 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
100136 }
100137
100138@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
100139 return ret;
100140
100141 seq = file->private_data;
100142- seq->poll_event = atomic_read(&proc_poll_event);
100143+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100144 return 0;
100145 }
100146
100147@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
100148 (frontswap_map) ? "FS" : "");
100149
100150 mutex_unlock(&swapon_mutex);
100151- atomic_inc(&proc_poll_event);
100152+ atomic_inc_unchecked(&proc_poll_event);
100153 wake_up_interruptible(&proc_poll_wait);
100154
100155 if (S_ISREG(inode->i_mode))
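
The proc_poll_event conversions to atomic_unchecked_t follow what appears to be the usual PaX REFCOUNT convention: ordinary atomic_t operations are instrumented to trap on overflow, so free-running event counters whose wraparound is harmless are moved to the unchecked variants to opt out. Wraparound is harmless here because pollers only compare the counter against their last snapshot, as this small sketch shows:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int event = UINT_MAX;  /* counter about to wrap */
	unsigned int seen = event;      /* poller's last snapshot */

	event++;                        /* wraps to 0; still reads as "changed" */
	if (seen != event)
		puts("poller wakes up");
	return 0;
}
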
100156diff --git a/mm/util.c b/mm/util.c
100157index fec39d4..3e60325 100644
100158--- a/mm/util.c
100159+++ b/mm/util.c
100160@@ -195,6 +195,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
100161 void arch_pick_mmap_layout(struct mm_struct *mm)
100162 {
100163 mm->mmap_base = TASK_UNMAPPED_BASE;
100164+
100165+#ifdef CONFIG_PAX_RANDMMAP
100166+ if (mm->pax_flags & MF_PAX_RANDMMAP)
100167+ mm->mmap_base += mm->delta_mmap;
100168+#endif
100169+
100170 mm->get_unmapped_area = arch_get_unmapped_area;
100171 }
100172 #endif
100173@@ -371,6 +377,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
100174 if (!mm->arg_end)
100175 goto out_mm; /* Shh! No looking before we're done */
100176
100177+ if (gr_acl_handle_procpidmem(task))
100178+ goto out_mm;
100179+
100180 len = mm->arg_end - mm->arg_start;
100181
100182 if (len > buflen)
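
In arch_pick_mmap_layout(), PAX_RANDMMAP shifts the legacy mmap base by a per-process delta_mmap chosen at exec time, so consecutive runs place their mappings at different addresses. A sketch of the effect only; the base constant, window size and delta source below are illustrative, not the kernel's actual values:

#include <stdio.h>
#include <stdlib.h>

#define TASK_UNMAPPED_BASE 0x40000000UL
#define PAGE_SHIFT 12

int main(void)
{
	/* delta_mmap is picked once per process in the real patch; model it
	 * as a random page-aligned offset inside an illustrative 256MB window. */
	unsigned long delta_mmap = ((unsigned long)rand() & 0xffffUL) << PAGE_SHIFT;
	unsigned long mmap_base = TASK_UNMAPPED_BASE + delta_mmap;

	printf("mmap_base = %#lx\n", mmap_base);
	return 0;
}
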
100183diff --git a/mm/vmalloc.c b/mm/vmalloc.c
100184index 39c3388..7d976d4 100644
100185--- a/mm/vmalloc.c
100186+++ b/mm/vmalloc.c
100187@@ -39,20 +39,65 @@ struct vfree_deferred {
100188 struct work_struct wq;
100189 };
100190 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
100191+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
100192+
100193+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100194+struct stack_deferred_llist {
100195+ struct llist_head list;
100196+ void *stack;
100197+ void *lowmem_stack;
100198+};
100199+
100200+struct stack_deferred {
100201+ struct stack_deferred_llist list;
100202+ struct work_struct wq;
100203+};
100204+
100205+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
100206+#endif
100207
100208 static void __vunmap(const void *, int);
100209
100210-static void free_work(struct work_struct *w)
100211+static void vfree_work(struct work_struct *w)
100212+{
100213+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100214+ struct llist_node *llnode = llist_del_all(&p->list);
100215+ while (llnode) {
100216+ void *x = llnode;
100217+ llnode = llist_next(llnode);
100218+ __vunmap(x, 1);
100219+ }
100220+}
100221+
100222+static void vunmap_work(struct work_struct *w)
100223 {
100224 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100225 struct llist_node *llnode = llist_del_all(&p->list);
100226 while (llnode) {
100227 void *p = llnode;
100228 llnode = llist_next(llnode);
100229- __vunmap(p, 1);
100230+ __vunmap(p, 0);
100231 }
100232 }
100233
100234+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100235+static void unmap_work(struct work_struct *w)
100236+{
100237+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
100238+ struct llist_node *llnode = llist_del_all(&p->list.list);
100239+ while (llnode) {
100240+ struct stack_deferred_llist *x =
100241+ llist_entry((struct llist_head *)llnode,
100242+ struct stack_deferred_llist, list);
100243+ void *stack = ACCESS_ONCE(x->stack);
100244+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
100245+ llnode = llist_next(llnode);
100246+ __vunmap(stack, 0);
100247+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
100248+ }
100249+}
100250+#endif
100251+
100252 /*** Page table manipulation functions ***/
100253
100254 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100255@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100256
100257 pte = pte_offset_kernel(pmd, addr);
100258 do {
100259- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100260- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100261+
100262+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100263+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
100264+ BUG_ON(!pte_exec(*pte));
100265+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
100266+ continue;
100267+ }
100268+#endif
100269+
100270+ {
100271+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100272+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100273+ }
100274 } while (pte++, addr += PAGE_SIZE, addr != end);
100275 }
100276
100277@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
100278 pte = pte_alloc_kernel(pmd, addr);
100279 if (!pte)
100280 return -ENOMEM;
100281+
100282+ pax_open_kernel();
100283 do {
100284 struct page *page = pages[*nr];
100285
100286- if (WARN_ON(!pte_none(*pte)))
100287+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100288+ if (pgprot_val(prot) & _PAGE_NX)
100289+#endif
100290+
100291+ if (!pte_none(*pte)) {
100292+ pax_close_kernel();
100293+ WARN_ON(1);
100294 return -EBUSY;
100295- if (WARN_ON(!page))
100296+ }
100297+ if (!page) {
100298+ pax_close_kernel();
100299+ WARN_ON(1);
100300 return -ENOMEM;
100301+ }
100302 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
100303 (*nr)++;
100304 } while (pte++, addr += PAGE_SIZE, addr != end);
100305+ pax_close_kernel();
100306 return 0;
100307 }
100308
100309@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
100310 pmd_t *pmd;
100311 unsigned long next;
100312
100313- pmd = pmd_alloc(&init_mm, pud, addr);
100314+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
100315 if (!pmd)
100316 return -ENOMEM;
100317 do {
100318@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
100319 pud_t *pud;
100320 unsigned long next;
100321
100322- pud = pud_alloc(&init_mm, pgd, addr);
100323+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
100324 if (!pud)
100325 return -ENOMEM;
100326 do {
100327@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
100328 if (addr >= MODULES_VADDR && addr < MODULES_END)
100329 return 1;
100330 #endif
100331+
100332+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100333+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
100334+ return 1;
100335+#endif
100336+
100337 return is_vmalloc_addr(x);
100338 }
100339
100340@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
100341
100342 if (!pgd_none(*pgd)) {
100343 pud_t *pud = pud_offset(pgd, addr);
100344+#ifdef CONFIG_X86
100345+ if (!pud_large(*pud))
100346+#endif
100347 if (!pud_none(*pud)) {
100348 pmd_t *pmd = pmd_offset(pud, addr);
100349+#ifdef CONFIG_X86
100350+ if (!pmd_large(*pmd))
100351+#endif
100352 if (!pmd_none(*pmd)) {
100353 pte_t *ptep, pte;
100354
100355@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
100356 * Allocate a region of KVA of the specified size and alignment, within the
100357 * vstart and vend.
100358 */
100359-static struct vmap_area *alloc_vmap_area(unsigned long size,
100360+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
100361 unsigned long align,
100362 unsigned long vstart, unsigned long vend,
100363 int node, gfp_t gfp_mask)
100364@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
100365 for_each_possible_cpu(i) {
100366 struct vmap_block_queue *vbq;
100367 struct vfree_deferred *p;
100368+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100369+ struct stack_deferred *p2;
100370+#endif
100371
100372 vbq = &per_cpu(vmap_block_queue, i);
100373 spin_lock_init(&vbq->lock);
100374 INIT_LIST_HEAD(&vbq->free);
100375+
100376 p = &per_cpu(vfree_deferred, i);
100377 init_llist_head(&p->list);
100378- INIT_WORK(&p->wq, free_work);
100379+ INIT_WORK(&p->wq, vfree_work);
100380+
100381+ p = &per_cpu(vunmap_deferred, i);
100382+ init_llist_head(&p->list);
100383+ INIT_WORK(&p->wq, vunmap_work);
100384+
100385+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100386+ p2 = &per_cpu(stack_deferred, i);
100387+ init_llist_head(&p2->list.list);
100388+ INIT_WORK(&p2->wq, unmap_work);
100389+#endif
100390 }
100391
100392 /* Import existing vmlist entries. */
100393@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
100394 struct vm_struct *area;
100395
100396 BUG_ON(in_interrupt());
100397+
100398+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100399+ if (flags & VM_KERNEXEC) {
100400+ if (start != VMALLOC_START || end != VMALLOC_END)
100401+ return NULL;
100402+ start = (unsigned long)MODULES_EXEC_VADDR;
100403+ end = (unsigned long)MODULES_EXEC_END;
100404+ }
100405+#endif
100406+
100407 if (flags & VM_IOREMAP)
100408 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
100409
100410@@ -1511,13 +1616,37 @@ EXPORT_SYMBOL(vfree);
100411 */
100412 void vunmap(const void *addr)
100413 {
100414- BUG_ON(in_interrupt());
100415- might_sleep();
100416- if (addr)
100417+ if (!addr)
100418+ return;
100419+
100420+ if (unlikely(in_interrupt())) {
100421+ struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
100422+ if (llist_add((struct llist_node *)addr, &p->list))
100423+ schedule_work(&p->wq);
100424+ } else {
100425+ might_sleep();
100426 __vunmap(addr, 0);
100427+ }
100428 }
100429 EXPORT_SYMBOL(vunmap);
100430
100431+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100432+void unmap_process_stacks(struct task_struct *task)
100433+{
100434+ if (unlikely(in_interrupt())) {
100435+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
100436+ struct stack_deferred_llist *list = task->stack;
100437+ list->stack = task->stack;
100438+ list->lowmem_stack = task->lowmem_stack;
100439+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
100440+ schedule_work(&p->wq);
100441+ } else {
100442+ __vunmap(task->stack, 0);
100443+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
100444+ }
100445+}
100446+#endif
100447+
100448 /**
100449 * vmap - map an array of pages into virtually contiguous space
100450 * @pages: array of page pointers
100451@@ -1538,6 +1667,11 @@ void *vmap(struct page **pages, unsigned int count,
100452 if (count > totalram_pages)
100453 return NULL;
100454
100455+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100456+ if (!(pgprot_val(prot) & _PAGE_NX))
100457+ flags |= VM_KERNEXEC;
100458+#endif
100459+
100460 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
100461 __builtin_return_address(0));
100462 if (!area)
100463@@ -1640,6 +1774,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
100464 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
100465 goto fail;
100466
100467+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100468+ if (!(pgprot_val(prot) & _PAGE_NX))
100469+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
100470+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
100471+ else
100472+#endif
100473+
100474 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
100475 start, end, node, gfp_mask, caller);
100476 if (!area)
100477@@ -1816,10 +1957,9 @@ EXPORT_SYMBOL(vzalloc_node);
100478 * For tight control over page level allocator and protection flags
100479 * use __vmalloc() instead.
100480 */
100481-
100482 void *vmalloc_exec(unsigned long size)
100483 {
100484- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
100485+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
100486 NUMA_NO_NODE, __builtin_return_address(0));
100487 }
100488
100489@@ -2126,6 +2266,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
100490 {
100491 struct vm_struct *area;
100492
100493+ BUG_ON(vma->vm_mirror);
100494+
100495 size = PAGE_ALIGN(size);
100496
100497 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
100498@@ -2608,7 +2750,11 @@ static int s_show(struct seq_file *m, void *p)
100499 v->addr, v->addr + v->size, v->size);
100500
100501 if (v->caller)
100502+#ifdef CONFIG_GRKERNSEC_HIDESYM
100503+ seq_printf(m, " %pK", v->caller);
100504+#else
100505 seq_printf(m, " %pS", v->caller);
100506+#endif
100507
100508 if (v->nr_pages)
100509 seq_printf(m, " pages=%d", v->nr_pages);
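
The vmalloc.c changes split the deferred-free machinery into separate vfree and vunmap queues, plus a stack-teardown queue under GRKERNSEC_KSTACKOVERFLOW, all built on the same pattern: a caller in atomic context pushes the address onto a per-CPU llist, storing the link node inside the freed region itself, and a workqueue drains the list later in sleepable context. A single-threaded model of that pattern, with malloc/free standing in for the vmalloc calls:

#include <stdio.h>
#include <stdlib.h>

struct llnode { struct llnode *next; };

static struct llnode *deferred;       /* a per-CPU llist in the kernel */

static void vunmap_deferred_push(void *addr)
{
	struct llnode *n = addr;      /* the freed block itself stores the link */
	n->next = deferred;
	deferred = n;                 /* kernel: llist_add() + schedule_work() */
}

static void vunmap_work(void)         /* runs later, in process context */
{
	while (deferred) {
		struct llnode *n = deferred;
		deferred = n->next;
		free(n);              /* stands in for __vunmap(x, 0) */
	}
}

int main(void)
{
	vunmap_deferred_push(malloc(64));
	vunmap_deferred_push(malloc(64));
	vunmap_work();
	puts("drained");
	return 0;
}
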
100510diff --git a/mm/vmstat.c b/mm/vmstat.c
100511index 1284f89..2e895e31 100644
100512--- a/mm/vmstat.c
100513+++ b/mm/vmstat.c
100514@@ -24,6 +24,7 @@
100515 #include <linux/mm_inline.h>
100516 #include <linux/page_ext.h>
100517 #include <linux/page_owner.h>
100518+#include <linux/grsecurity.h>
100519
100520 #include "internal.h"
100521
100522@@ -83,7 +84,7 @@ void vm_events_fold_cpu(int cpu)
100523 *
100524 * vm_stat contains the global counters
100525 */
100526-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100527+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100528 EXPORT_SYMBOL(vm_stat);
100529
100530 #ifdef CONFIG_SMP
100531@@ -435,7 +436,7 @@ static int fold_diff(int *diff)
100532
100533 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100534 if (diff[i]) {
100535- atomic_long_add(diff[i], &vm_stat[i]);
100536+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
100537 changes++;
100538 }
100539 return changes;
100540@@ -473,7 +474,7 @@ static int refresh_cpu_vm_stats(void)
100541 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
100542 if (v) {
100543
100544- atomic_long_add(v, &zone->vm_stat[i]);
100545+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100546 global_diff[i] += v;
100547 #ifdef CONFIG_NUMA
100548 /* 3 seconds idle till flush */
100549@@ -537,7 +538,7 @@ void cpu_vm_stats_fold(int cpu)
100550
100551 v = p->vm_stat_diff[i];
100552 p->vm_stat_diff[i] = 0;
100553- atomic_long_add(v, &zone->vm_stat[i]);
100554+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100555 global_diff[i] += v;
100556 }
100557 }
100558@@ -557,8 +558,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
100559 if (pset->vm_stat_diff[i]) {
100560 int v = pset->vm_stat_diff[i];
100561 pset->vm_stat_diff[i] = 0;
100562- atomic_long_add(v, &zone->vm_stat[i]);
100563- atomic_long_add(v, &vm_stat[i]);
100564+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100565+ atomic_long_add_unchecked(v, &vm_stat[i]);
100566 }
100567 }
100568 #endif
100569@@ -1291,10 +1292,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
100570 stat_items_size += sizeof(struct vm_event_state);
100571 #endif
100572
100573- v = kmalloc(stat_items_size, GFP_KERNEL);
100574+ v = kzalloc(stat_items_size, GFP_KERNEL);
100575 m->private = v;
100576 if (!v)
100577 return ERR_PTR(-ENOMEM);
100578+
100579+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100580+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
100581+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
100582+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
100583+ && !in_group_p(grsec_proc_gid)
100584+#endif
100585+ )
100586+ return (unsigned long *)m->private + *pos;
100587+#endif
100588+#endif
100589+
100590 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100591 v[i] = global_page_state(i);
100592 v += NR_VM_ZONE_STAT_ITEMS;
100593@@ -1526,10 +1539,16 @@ static int __init setup_vmstat(void)
100594 cpu_notifier_register_done();
100595 #endif
100596 #ifdef CONFIG_PROC_FS
100597- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
100598- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
100599- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100600- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
100601+ {
100602+ mode_t gr_mode = S_IRUGO;
100603+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100604+ gr_mode = S_IRUSR;
100605+#endif
100606+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
100607+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
100608+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100609+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
100610+ }
100611 #endif
100612 return 0;
100613 }
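
The vmstat changes gate the buddyinfo, pagetypeinfo and zoneinfo proc entries down to root-only under GRKERNSEC_PROC_ADD while leaving vmstat itself world-readable, and switch the seq buffer to kzalloc so restricted readers see zeroes rather than stale heap contents. The mode selection in miniature, using the kernel's octal permission values:

#include <stdio.h>

#define S_IRUSR 0400  /* owner read only  */
#define S_IRUGO 0444  /* world readable   */

int main(void)
{
	int grkernsec_proc_add = 1;   /* illustrative stand-in for the config knob */
	int gr_mode = grkernsec_proc_add ? S_IRUSR : S_IRUGO;

	printf("buddyinfo: %o, vmstat: %o\n", gr_mode, S_IRUGO);
	return 0;
}
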
100614diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
100615index 64c6bed..b79a5de 100644
100616--- a/net/8021q/vlan.c
100617+++ b/net/8021q/vlan.c
100618@@ -481,7 +481,7 @@ out:
100619 return NOTIFY_DONE;
100620 }
100621
100622-static struct notifier_block vlan_notifier_block __read_mostly = {
100623+static struct notifier_block vlan_notifier_block = {
100624 .notifier_call = vlan_device_event,
100625 };
100626
100627@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
100628 err = -EPERM;
100629 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
100630 break;
100631- if ((args.u.name_type >= 0) &&
100632- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
100633+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
100634 struct vlan_net *vn;
100635
100636 vn = net_generic(net, vlan_net_id);
100637diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
100638index 8ac8a5c..991defc 100644
100639--- a/net/8021q/vlan_netlink.c
100640+++ b/net/8021q/vlan_netlink.c
100641@@ -238,7 +238,7 @@ nla_put_failure:
100642 return -EMSGSIZE;
100643 }
100644
100645-struct rtnl_link_ops vlan_link_ops __read_mostly = {
100646+struct rtnl_link_ops vlan_link_ops = {
100647 .kind = "vlan",
100648 .maxtype = IFLA_VLAN_MAX,
100649 .policy = vlan_policy,
100650diff --git a/net/9p/client.c b/net/9p/client.c
100651index e86a9bea..e91f70e 100644
100652--- a/net/9p/client.c
100653+++ b/net/9p/client.c
100654@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
100655 len - inline_len);
100656 } else {
100657 err = copy_from_user(ename + inline_len,
100658- uidata, len - inline_len);
100659+ (char __force_user *)uidata, len - inline_len);
100660 if (err) {
100661 err = -EFAULT;
100662 goto out_err;
100663@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
100664 kernel_buf = 1;
100665 indata = data;
100666 } else
100667- indata = (__force char *)udata;
100668+ indata = (__force_kernel char *)udata;
100669 /*
100670 * response header len is 11
100671 * PDU Header(7) + IO Size (4)
100672@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
100673 kernel_buf = 1;
100674 odata = data;
100675 } else
100676- odata = (char *)udata;
100677+ odata = (char __force_kernel *)udata;
100678 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
100679 P9_ZC_HDR_SZ, kernel_buf, "dqd",
100680 fid->fid, offset, rsize);
100681diff --git a/net/9p/mod.c b/net/9p/mod.c
100682index 6ab36ae..6f1841b 100644
100683--- a/net/9p/mod.c
100684+++ b/net/9p/mod.c
100685@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
100686 void v9fs_register_trans(struct p9_trans_module *m)
100687 {
100688 spin_lock(&v9fs_trans_lock);
100689- list_add_tail(&m->list, &v9fs_trans_list);
100690+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
100691 spin_unlock(&v9fs_trans_lock);
100692 }
100693 EXPORT_SYMBOL(v9fs_register_trans);
100694@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
100695 void v9fs_unregister_trans(struct p9_trans_module *m)
100696 {
100697 spin_lock(&v9fs_trans_lock);
100698- list_del_init(&m->list);
100699+ pax_list_del_init((struct list_head *)&m->list);
100700 spin_unlock(&v9fs_trans_lock);
100701 }
100702 EXPORT_SYMBOL(v9fs_unregister_trans);
100703diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
100704index 80d08f6..de63fd1 100644
100705--- a/net/9p/trans_fd.c
100706+++ b/net/9p/trans_fd.c
100707@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
100708 oldfs = get_fs();
100709 set_fs(get_ds());
100710 /* The cast to a user pointer is valid due to the set_fs() */
100711- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
100712+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
100713 set_fs(oldfs);
100714
100715 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
100716diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
100717index af46bc4..f9adfcd 100644
100718--- a/net/appletalk/atalk_proc.c
100719+++ b/net/appletalk/atalk_proc.c
100720@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
100721 struct proc_dir_entry *p;
100722 int rc = -ENOMEM;
100723
100724- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
100725+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
100726 if (!atalk_proc_dir)
100727 goto out;
100728
100729diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
100730index 876fbe8..8bbea9f 100644
100731--- a/net/atm/atm_misc.c
100732+++ b/net/atm/atm_misc.c
100733@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
100734 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
100735 return 1;
100736 atm_return(vcc, truesize);
100737- atomic_inc(&vcc->stats->rx_drop);
100738+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100739 return 0;
100740 }
100741 EXPORT_SYMBOL(atm_charge);
100742@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
100743 }
100744 }
100745 atm_return(vcc, guess);
100746- atomic_inc(&vcc->stats->rx_drop);
100747+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100748 return NULL;
100749 }
100750 EXPORT_SYMBOL(atm_alloc_charge);
100751@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
100752
100753 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100754 {
100755-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100756+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100757 __SONET_ITEMS
100758 #undef __HANDLE_ITEM
100759 }
100760@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
100761
100762 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100763 {
100764-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100765+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
100766 __SONET_ITEMS
100767 #undef __HANDLE_ITEM
100768 }
100769diff --git a/net/atm/lec.c b/net/atm/lec.c
100770index 4b98f89..5a2f6cb 100644
100771--- a/net/atm/lec.c
100772+++ b/net/atm/lec.c
100773@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
100774 }
100775
100776 static struct lane2_ops lane2_ops = {
100777- lane2_resolve, /* resolve, spec 3.1.3 */
100778- lane2_associate_req, /* associate_req, spec 3.1.4 */
100779- NULL /* associate indicator, spec 3.1.5 */
100780+ .resolve = lane2_resolve,
100781+ .associate_req = lane2_associate_req,
100782+ .associate_indicator = NULL
100783 };
100784
100785 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
100786diff --git a/net/atm/lec.h b/net/atm/lec.h
100787index 4149db1..f2ab682 100644
100788--- a/net/atm/lec.h
100789+++ b/net/atm/lec.h
100790@@ -48,7 +48,7 @@ struct lane2_ops {
100791 const u8 *tlvs, u32 sizeoftlvs);
100792 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
100793 const u8 *tlvs, u32 sizeoftlvs);
100794-};
100795+} __no_const;
100796
100797 /*
100798 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
100799diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
100800index d1b2d9a..d549f7f 100644
100801--- a/net/atm/mpoa_caches.c
100802+++ b/net/atm/mpoa_caches.c
100803@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
100804
100805
100806 static struct in_cache_ops ingress_ops = {
100807- in_cache_add_entry, /* add_entry */
100808- in_cache_get, /* get */
100809- in_cache_get_with_mask, /* get_with_mask */
100810- in_cache_get_by_vcc, /* get_by_vcc */
100811- in_cache_put, /* put */
100812- in_cache_remove_entry, /* remove_entry */
100813- cache_hit, /* cache_hit */
100814- clear_count_and_expired, /* clear_count */
100815- check_resolving_entries, /* check_resolving */
100816- refresh_entries, /* refresh */
100817- in_destroy_cache /* destroy_cache */
100818+ .add_entry = in_cache_add_entry,
100819+ .get = in_cache_get,
100820+ .get_with_mask = in_cache_get_with_mask,
100821+ .get_by_vcc = in_cache_get_by_vcc,
100822+ .put = in_cache_put,
100823+ .remove_entry = in_cache_remove_entry,
100824+ .cache_hit = cache_hit,
100825+ .clear_count = clear_count_and_expired,
100826+ .check_resolving = check_resolving_entries,
100827+ .refresh = refresh_entries,
100828+ .destroy_cache = in_destroy_cache
100829 };
100830
100831 static struct eg_cache_ops egress_ops = {
100832- eg_cache_add_entry, /* add_entry */
100833- eg_cache_get_by_cache_id, /* get_by_cache_id */
100834- eg_cache_get_by_tag, /* get_by_tag */
100835- eg_cache_get_by_vcc, /* get_by_vcc */
100836- eg_cache_get_by_src_ip, /* get_by_src_ip */
100837- eg_cache_put, /* put */
100838- eg_cache_remove_entry, /* remove_entry */
100839- update_eg_cache_entry, /* update */
100840- clear_expired, /* clear_expired */
100841- eg_destroy_cache /* destroy_cache */
100842+ .add_entry = eg_cache_add_entry,
100843+ .get_by_cache_id = eg_cache_get_by_cache_id,
100844+ .get_by_tag = eg_cache_get_by_tag,
100845+ .get_by_vcc = eg_cache_get_by_vcc,
100846+ .get_by_src_ip = eg_cache_get_by_src_ip,
100847+ .put = eg_cache_put,
100848+ .remove_entry = eg_cache_remove_entry,
100849+ .update = update_eg_cache_entry,
100850+ .clear_expired = clear_expired,
100851+ .destroy_cache = eg_destroy_cache
100852 };
100853
100854
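
The lane2_ops, in_cache_ops and eg_cache_ops conversions above replace positional initializers with C99 designated initializers, presumably in support of PaX's structure constification (note the neighbouring lec.h hunk marking struct lane2_ops __no_const where its members must remain writable). The two forms below are equivalent for an illustrative ops struct; the designated one survives field reordering and documents itself:

#include <stdio.h>

struct demo_ops {
	int  (*resolve)(int);
	void (*associate_req)(int);
	void (*associate_indicator)(int);
};

static int demo_resolve(int x) { return x; }

/* positional: order-dependent, breaks silently if fields are reordered */
static struct demo_ops positional = { demo_resolve, NULL, NULL };

/* designated: robust to reordering and self-documenting */
static struct demo_ops designated = {
	.resolve = demo_resolve,
	.associate_req = NULL,
	.associate_indicator = NULL,
};

int main(void)
{
	printf("%d %d\n", positional.resolve(1), designated.resolve(2));
	return 0;
}
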
100855diff --git a/net/atm/proc.c b/net/atm/proc.c
100856index bbb6461..cf04016 100644
100857--- a/net/atm/proc.c
100858+++ b/net/atm/proc.c
100859@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
100860 const struct k_atm_aal_stats *stats)
100861 {
100862 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
100863- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
100864- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
100865- atomic_read(&stats->rx_drop));
100866+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
100867+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
100868+ atomic_read_unchecked(&stats->rx_drop));
100869 }
100870
100871 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
100872diff --git a/net/atm/resources.c b/net/atm/resources.c
100873index 0447d5d..3cf4728 100644
100874--- a/net/atm/resources.c
100875+++ b/net/atm/resources.c
100876@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
100877 static void copy_aal_stats(struct k_atm_aal_stats *from,
100878 struct atm_aal_stats *to)
100879 {
100880-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100881+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100882 __AAL_STAT_ITEMS
100883 #undef __HANDLE_ITEM
100884 }
100885@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
100886 static void subtract_aal_stats(struct k_atm_aal_stats *from,
100887 struct atm_aal_stats *to)
100888 {
100889-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100890+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
100891 __AAL_STAT_ITEMS
100892 #undef __HANDLE_ITEM
100893 }
100894diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
100895index 919a5ce..cc6b444 100644
100896--- a/net/ax25/sysctl_net_ax25.c
100897+++ b/net/ax25/sysctl_net_ax25.c
100898@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
100899 {
100900 char path[sizeof("net/ax25/") + IFNAMSIZ];
100901 int k;
100902- struct ctl_table *table;
100903+ ctl_table_no_const *table;
100904
100905 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
100906 if (!table)
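ctl_table_no_const, used here and in many later hunks, is the grsecurity-side writable twin of the constified struct ctl_table: the global template tables become read-only, and only duplicates that genuinely need runtime edits (like this kmemdup'd per-device copy) use the non-const type. A hypothetical two-type miniature of the pattern:

#include <stdlib.h>
#include <string.h>

/* Hypothetical miniature: the template stays const; only deliberately
 * writable duplicates use the no_const alias. */
struct ctl_entry { const char *procname; int mode; };
typedef struct ctl_entry ctl_entry_no_const;

static const struct ctl_entry param_template[] = {
	{ "timeout", 0644 },
	{ },				/* sentinel */
};

static ctl_entry_no_const *dup_table(void)
{
	ctl_entry_no_const *t = malloc(sizeof(param_template));
	if (t)
		memcpy(t, param_template, sizeof(param_template));
	return t;	/* caller may patch fields, then frees */
}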
100907diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
100908index 1e80539..676c37a 100644
100909--- a/net/batman-adv/bat_iv_ogm.c
100910+++ b/net/batman-adv/bat_iv_ogm.c
100911@@ -313,7 +313,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
100912
100913 /* randomize initial seqno to avoid collision */
100914 get_random_bytes(&random_seqno, sizeof(random_seqno));
100915- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100916+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100917
100918 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
100919 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
100920@@ -918,9 +918,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
100921 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
100922
100923 /* change sequence number to network order */
100924- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
100925+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
100926 batadv_ogm_packet->seqno = htonl(seqno);
100927- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
100928+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
100929
100930 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
100931
100932@@ -1597,7 +1597,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
100933 return;
100934
100935 /* could be changed by schedule_own_packet() */
100936- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
100937+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
100938
100939 if (ogm_packet->flags & BATADV_DIRECTLINK)
100940 has_directlink_flag = true;
100941diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
100942index 00f9e14..e1c7203 100644
100943--- a/net/batman-adv/fragmentation.c
100944+++ b/net/batman-adv/fragmentation.c
100945@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
100946 frag_header.packet_type = BATADV_UNICAST_FRAG;
100947 frag_header.version = BATADV_COMPAT_VERSION;
100948 frag_header.ttl = BATADV_TTL;
100949- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
100950+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
100951 frag_header.reserved = 0;
100952 frag_header.no = 0;
100953 frag_header.total_size = htons(skb->len);
100954diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
100955index 5467955..75ad4e3 100644
100956--- a/net/batman-adv/soft-interface.c
100957+++ b/net/batman-adv/soft-interface.c
100958@@ -296,7 +296,7 @@ send:
100959 primary_if->net_dev->dev_addr);
100960
100961 /* set broadcast sequence number */
100962- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
100963+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
100964 bcast_packet->seqno = htonl(seqno);
100965
100966 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
100967@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100968 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
100969
100970 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
100971- atomic_set(&bat_priv->bcast_seqno, 1);
100972+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
100973 atomic_set(&bat_priv->tt.vn, 0);
100974 atomic_set(&bat_priv->tt.local_changes, 0);
100975 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
100976@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100977
100978 /* randomize initial seqno to avoid collision */
100979 get_random_bytes(&random_seqno, sizeof(random_seqno));
100980- atomic_set(&bat_priv->frag_seqno, random_seqno);
100981+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
100982
100983 bat_priv->primary_if = NULL;
100984 bat_priv->num_ifaces = 0;
100985@@ -983,7 +983,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
100986 return 0;
100987 }
100988
100989-struct rtnl_link_ops batadv_link_ops __read_mostly = {
100990+struct rtnl_link_ops batadv_link_ops = {
100991 .kind = "batadv",
100992 .priv_size = sizeof(struct batadv_priv),
100993 .setup = batadv_softif_init_early,
100994diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
100995index 8854c05..ee5d5497 100644
100996--- a/net/batman-adv/types.h
100997+++ b/net/batman-adv/types.h
100998@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
100999 struct batadv_hard_iface_bat_iv {
101000 unsigned char *ogm_buff;
101001 int ogm_buff_len;
101002- atomic_t ogm_seqno;
101003+ atomic_unchecked_t ogm_seqno;
101004 };
101005
101006 /**
101007@@ -768,7 +768,7 @@ struct batadv_priv {
101008 atomic_t bonding;
101009 atomic_t fragmentation;
101010 atomic_t packet_size_max;
101011- atomic_t frag_seqno;
101012+ atomic_unchecked_t frag_seqno;
101013 #ifdef CONFIG_BATMAN_ADV_BLA
101014 atomic_t bridge_loop_avoidance;
101015 #endif
101016@@ -787,7 +787,7 @@ struct batadv_priv {
101017 #endif
101018 uint32_t isolation_mark;
101019 uint32_t isolation_mark_mask;
101020- atomic_t bcast_seqno;
101021+ atomic_unchecked_t bcast_seqno;
101022 atomic_t bcast_queue_left;
101023 atomic_t batman_queue_left;
101024 char num_ifaces;
101025diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
101026index 2c245fd..dccf543 100644
101027--- a/net/bluetooth/hci_sock.c
101028+++ b/net/bluetooth/hci_sock.c
101029@@ -1067,7 +1067,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
101030 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
101031 }
101032
101033- len = min_t(unsigned int, len, sizeof(uf));
101034+ len = min((size_t)len, sizeof(uf));
101035 if (copy_from_user(&uf, optval, len)) {
101036 err = -EFAULT;
101037 break;
101038diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
101039index d04dc00..d25d576 100644
101040--- a/net/bluetooth/l2cap_core.c
101041+++ b/net/bluetooth/l2cap_core.c
101042@@ -3524,8 +3524,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
101043 break;
101044
101045 case L2CAP_CONF_RFC:
101046- if (olen == sizeof(rfc))
101047- memcpy(&rfc, (void *)val, olen);
101048+ if (olen != sizeof(rfc))
101049+ break;
101050+
101051+ memcpy(&rfc, (void *)val, olen);
101052
101053 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
101054 rfc.mode != chan->mode)
101055diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
101056index f65caf4..c07110c 100644
101057--- a/net/bluetooth/l2cap_sock.c
101058+++ b/net/bluetooth/l2cap_sock.c
101059@@ -634,7 +634,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101060 struct sock *sk = sock->sk;
101061 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
101062 struct l2cap_options opts;
101063- int len, err = 0;
101064+ int err = 0;
101065+ size_t len = optlen;
101066 u32 opt;
101067
101068 BT_DBG("sk %p", sk);
101069@@ -661,7 +662,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101070 opts.max_tx = chan->max_tx;
101071 opts.txwin_size = chan->tx_win;
101072
101073- len = min_t(unsigned int, sizeof(opts), optlen);
101074+ len = min(sizeof(opts), len);
101075 if (copy_from_user((char *) &opts, optval, len)) {
101076 err = -EFAULT;
101077 break;
101078@@ -748,7 +749,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101079 struct bt_security sec;
101080 struct bt_power pwr;
101081 struct l2cap_conn *conn;
101082- int len, err = 0;
101083+ int err = 0;
101084+ size_t len = optlen;
101085 u32 opt;
101086
101087 BT_DBG("sk %p", sk);
101088@@ -772,7 +774,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101089
101090 sec.level = BT_SECURITY_LOW;
101091
101092- len = min_t(unsigned int, sizeof(sec), optlen);
101093+ len = min(sizeof(sec), len);
101094 if (copy_from_user((char *) &sec, optval, len)) {
101095 err = -EFAULT;
101096 break;
101097@@ -868,7 +870,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101098
101099 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
101100
101101- len = min_t(unsigned int, sizeof(pwr), optlen);
101102+ len = min(sizeof(pwr), len);
101103 if (copy_from_user((char *) &pwr, optval, len)) {
101104 err = -EFAULT;
101105 break;
101106diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
101107index 2348176..b9b6cf2 100644
101108--- a/net/bluetooth/rfcomm/sock.c
101109+++ b/net/bluetooth/rfcomm/sock.c
101110@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101111 struct sock *sk = sock->sk;
101112 struct bt_security sec;
101113 int err = 0;
101114- size_t len;
101115+ size_t len = optlen;
101116 u32 opt;
101117
101118 BT_DBG("sk %p", sk);
101119@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101120
101121 sec.level = BT_SECURITY_LOW;
101122
101123- len = min_t(unsigned int, sizeof(sec), optlen);
101124+ len = min(sizeof(sec), len);
101125 if (copy_from_user((char *) &sec, optval, len)) {
101126 err = -EFAULT;
101127 break;
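The three Bluetooth setsockopt paths above (hci_sock, l2cap_sock, rfcomm) all make the same change: the length variable becomes a size_t initialized from optlen, and the min_t(unsigned int, ...) casts become plain min() over matching size_t operands, so a negative or oversized optlen can no longer be laundered through a truncating cast before copy_from_user(). The shape of the fix in a standalone sketch with hypothetical names:

#include <stddef.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct opts { int mtu; int mode; };

/* Before: a signed int length took a detour through min_t(unsigned int, ...)
 * casts. After (sketched): everything stays size_t from the start. */
static size_t clamp_optlen(size_t optlen)
{
	size_t len = optlen;			/* no signed intermediate */
	len = MIN(sizeof(struct opts), len);	/* both operands size_t */
	return len;
}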
101128diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
101129index 8e385a0..a5bdd8e 100644
101130--- a/net/bluetooth/rfcomm/tty.c
101131+++ b/net/bluetooth/rfcomm/tty.c
101132@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
101133 BT_DBG("tty %p id %d", tty, tty->index);
101134
101135 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
101136- dev->channel, dev->port.count);
101137+ dev->channel, atomic_read(&dev->port.count));
101138
101139 err = tty_port_open(&dev->port, tty, filp);
101140 if (err)
101141@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
101142 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
101143
101144 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
101145- dev->port.count);
101146+ atomic_read(&dev->port.count));
101147
101148 tty_port_close(&dev->port, tty, filp);
101149 }
101150diff --git a/net/bridge/br.c b/net/bridge/br.c
101151index 44425af..4ee730e 100644
101152--- a/net/bridge/br.c
101153+++ b/net/bridge/br.c
101154@@ -147,6 +147,8 @@ static int __init br_init(void)
101155 {
101156 int err;
101157
101158+ BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
101159+
101160 err = stp_proto_register(&br_stp_proto);
101161 if (err < 0) {
101162 pr_err("bridge: can't register sap for STP\n");
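br_init() gains a compile-time guard that struct br_input_skb_cb still fits inside the skb control buffer it is overlaid on. Roughly the same check can be written in standalone C11 with static_assert; the struct definitions below are stand-ins with illustrative sizes:

#include <assert.h>

/* Stand-ins for the kernel structs (sizes illustrative). */
struct sk_buff_cb { char cb[48]; };
struct br_input_skb_cb { void *brdev; unsigned int frag_max_size; };

/* Fail the build, not the boot, if the private per-skb bridge state
 * ever outgrows the space reserved in skb->cb. */
static_assert(sizeof(struct br_input_skb_cb) <=
	      sizeof(((struct sk_buff_cb *)0)->cb),
	      "br_input_skb_cb no longer fits in skb->cb");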
101163diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
101164index 9f5eb55..45ab9c5 100644
101165--- a/net/bridge/br_netlink.c
101166+++ b/net/bridge/br_netlink.c
101167@@ -566,7 +566,7 @@ static struct rtnl_af_ops br_af_ops = {
101168 .get_link_af_size = br_get_link_af_size,
101169 };
101170
101171-struct rtnl_link_ops br_link_ops __read_mostly = {
101172+struct rtnl_link_ops br_link_ops = {
101173 .kind = "bridge",
101174 .priv_size = sizeof(struct net_bridge),
101175 .setup = br_dev_setup,
101176diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
101177index d9a8c05..8dadc6c6 100644
101178--- a/net/bridge/netfilter/ebtables.c
101179+++ b/net/bridge/netfilter/ebtables.c
101180@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
101181 tmp.valid_hooks = t->table->valid_hooks;
101182 }
101183 mutex_unlock(&ebt_mutex);
101184- if (copy_to_user(user, &tmp, *len) != 0) {
101185+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101186 BUGPRINT("c2u Didn't work\n");
101187 ret = -EFAULT;
101188 break;
101189@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101190 goto out;
101191 tmp.valid_hooks = t->valid_hooks;
101192
101193- if (copy_to_user(user, &tmp, *len) != 0) {
101194+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101195 ret = -EFAULT;
101196 break;
101197 }
101198@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101199 tmp.entries_size = t->table->entries_size;
101200 tmp.valid_hooks = t->table->valid_hooks;
101201
101202- if (copy_to_user(user, &tmp, *len) != 0) {
101203+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101204 ret = -EFAULT;
101205 break;
101206 }
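Each of the three get-ctl replies above gains a *len > sizeof(tmp) check ahead of copy_to_user(), so a caller-supplied length can no longer read kernel stack beyond the tmp structure. The same reject-oversized-reads idiom in isolation, with hypothetical names:

#include <string.h>
#include <errno.h>

struct info_tmp { char name[32]; unsigned int entries; };

/* Hypothetical analogue of the hardened path: refuse any request
 * larger than the object being copied out. */
static int copy_info(void *user_buf, size_t user_len,
		     const struct info_tmp *tmp)
{
	if (user_len > sizeof(*tmp))
		return -EFAULT;			/* was: silent stack over-read */
	memcpy(user_buf, tmp, user_len);	/* kernel: copy_to_user() */
	return 0;
}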
101207diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
101208index f5afda1..dcf770a 100644
101209--- a/net/caif/cfctrl.c
101210+++ b/net/caif/cfctrl.c
101211@@ -10,6 +10,7 @@
101212 #include <linux/spinlock.h>
101213 #include <linux/slab.h>
101214 #include <linux/pkt_sched.h>
101215+#include <linux/sched.h>
101216 #include <net/caif/caif_layer.h>
101217 #include <net/caif/cfpkt.h>
101218 #include <net/caif/cfctrl.h>
101219@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
101220 memset(&dev_info, 0, sizeof(dev_info));
101221 dev_info.id = 0xff;
101222 cfsrvl_init(&this->serv, 0, &dev_info, false);
101223- atomic_set(&this->req_seq_no, 1);
101224- atomic_set(&this->rsp_seq_no, 1);
101225+ atomic_set_unchecked(&this->req_seq_no, 1);
101226+ atomic_set_unchecked(&this->rsp_seq_no, 1);
101227 this->serv.layer.receive = cfctrl_recv;
101228 sprintf(this->serv.layer.name, "ctrl");
101229 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
101230@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
101231 struct cfctrl_request_info *req)
101232 {
101233 spin_lock_bh(&ctrl->info_list_lock);
101234- atomic_inc(&ctrl->req_seq_no);
101235- req->sequence_no = atomic_read(&ctrl->req_seq_no);
101236+ atomic_inc_unchecked(&ctrl->req_seq_no);
101237+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
101238 list_add_tail(&req->list, &ctrl->list);
101239 spin_unlock_bh(&ctrl->info_list_lock);
101240 }
101241@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
101242 if (p != first)
101243 pr_warn("Requests are not received in order\n");
101244
101245- atomic_set(&ctrl->rsp_seq_no,
101246+ atomic_set_unchecked(&ctrl->rsp_seq_no,
101247 p->sequence_no);
101248 list_del(&p->list);
101249 goto out;
101250diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
101251index 67a4a36..8d28068 100644
101252--- a/net/caif/chnl_net.c
101253+++ b/net/caif/chnl_net.c
101254@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
101255 };
101256
101257
101258-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
101259+static struct rtnl_link_ops ipcaif_link_ops = {
101260 .kind = "caif",
101261 .priv_size = sizeof(struct chnl_net),
101262 .setup = ipcaif_net_setup,
101263diff --git a/net/can/af_can.c b/net/can/af_can.c
101264index 66e0804..da61b8f 100644
101265--- a/net/can/af_can.c
101266+++ b/net/can/af_can.c
101267@@ -881,7 +881,7 @@ static const struct net_proto_family can_family_ops = {
101268 };
101269
101270 /* notifier block for netdevice event */
101271-static struct notifier_block can_netdev_notifier __read_mostly = {
101272+static struct notifier_block can_netdev_notifier = {
101273 .notifier_call = can_notifier,
101274 };
101275
101276diff --git a/net/can/bcm.c b/net/can/bcm.c
101277index ee9ffd9..dfdf3d4 100644
101278--- a/net/can/bcm.c
101279+++ b/net/can/bcm.c
101280@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
101281 }
101282
101283 /* create /proc/net/can-bcm directory */
101284- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
101285+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
101286 return 0;
101287 }
101288
101289diff --git a/net/can/gw.c b/net/can/gw.c
101290index 295f62e..0c3b09e 100644
101291--- a/net/can/gw.c
101292+++ b/net/can/gw.c
101293@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
101294 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
101295
101296 static HLIST_HEAD(cgw_list);
101297-static struct notifier_block notifier;
101298
101299 static struct kmem_cache *cgw_cache __read_mostly;
101300
101301@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
101302 return err;
101303 }
101304
101305+static struct notifier_block notifier = {
101306+ .notifier_call = cgw_notifier
101307+};
101308+
101309 static __init int cgw_module_init(void)
101310 {
101311 /* sanitize given module parameter */
101312@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
101313 return -ENOMEM;
101314
101315 /* set notifier */
101316- notifier.notifier_call = cgw_notifier;
101317 register_netdevice_notifier(&notifier);
101318
101319 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
101320diff --git a/net/can/proc.c b/net/can/proc.c
101321index 1a19b98..df2b4ec 100644
101322--- a/net/can/proc.c
101323+++ b/net/can/proc.c
101324@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
101325 void can_init_proc(void)
101326 {
101327 /* create /proc/net/can directory */
101328- can_dir = proc_mkdir("can", init_net.proc_net);
101329+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
101330
101331 if (!can_dir) {
101332 printk(KERN_INFO "can: failed to create /proc/net/can . "
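Both CAN proc directories are now created through proc_mkdir_restrict(), a grsecurity helper which, by this reading of the patch, applies the GRKERNSEC_PROC visibility policy instead of the default world-readable proc mode. Assuming that behaviour, a hypothetical userspace illustration of the idea:

#include <sys/stat.h>
#include <sys/types.h>

/* Hypothetical illustration only: create a directory readable by root
 * alone when a restriction policy is active, world-readable otherwise. */
static int mkdir_restrict(const char *path, int restricted)
{
	mode_t mode = restricted ? S_IRWXU			/* 0700 */
				 : (S_IRWXU | S_IRGRP | S_IXGRP |
				    S_IROTH | S_IXOTH);		/* 0755 */
	return mkdir(path, mode);
}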
101333diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
101334index 33a2f20..371bd09 100644
101335--- a/net/ceph/messenger.c
101336+++ b/net/ceph/messenger.c
101337@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
101338 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
101339
101340 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
101341-static atomic_t addr_str_seq = ATOMIC_INIT(0);
101342+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
101343
101344 static struct page *zero_page; /* used in certain error cases */
101345
101346@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
101347 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
101348 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
101349
101350- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101351+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101352 s = addr_str[i];
101353
101354 switch (ss->ss_family) {
101355diff --git a/net/compat.c b/net/compat.c
101356index 3236b41..7d8687f 100644
101357--- a/net/compat.c
101358+++ b/net/compat.c
101359@@ -93,20 +93,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
101360
101361 #define CMSG_COMPAT_FIRSTHDR(msg) \
101362 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
101363- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
101364+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
101365 (struct compat_cmsghdr __user *)NULL)
101366
101367 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
101368 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
101369 (ucmlen) <= (unsigned long) \
101370 ((mhdr)->msg_controllen - \
101371- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
101372+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
101373
101374 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
101375 struct compat_cmsghdr __user *cmsg, int cmsg_len)
101376 {
101377 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
101378- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
101379+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
101380 msg->msg_controllen)
101381 return NULL;
101382 return (struct compat_cmsghdr __user *)ptr;
101383@@ -196,7 +196,7 @@ Efault:
101384
101385 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
101386 {
101387- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101388+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101389 struct compat_cmsghdr cmhdr;
101390 struct compat_timeval ctv;
101391 struct compat_timespec cts[3];
101392@@ -252,7 +252,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
101393
101394 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
101395 {
101396- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101397+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101398 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
101399 int fdnum = scm->fp->count;
101400 struct file **fp = scm->fp->fp;
101401@@ -340,7 +340,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
101402 return -EFAULT;
101403 old_fs = get_fs();
101404 set_fs(KERNEL_DS);
101405- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
101406+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
101407 set_fs(old_fs);
101408
101409 return err;
101410@@ -401,7 +401,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
101411 len = sizeof(ktime);
101412 old_fs = get_fs();
101413 set_fs(KERNEL_DS);
101414- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
101415+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
101416 set_fs(old_fs);
101417
101418 if (!err) {
101419@@ -544,7 +544,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101420 case MCAST_JOIN_GROUP:
101421 case MCAST_LEAVE_GROUP:
101422 {
101423- struct compat_group_req __user *gr32 = (void *)optval;
101424+ struct compat_group_req __user *gr32 = (void __user *)optval;
101425 struct group_req __user *kgr =
101426 compat_alloc_user_space(sizeof(struct group_req));
101427 u32 interface;
101428@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101429 case MCAST_BLOCK_SOURCE:
101430 case MCAST_UNBLOCK_SOURCE:
101431 {
101432- struct compat_group_source_req __user *gsr32 = (void *)optval;
101433+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
101434 struct group_source_req __user *kgsr = compat_alloc_user_space(
101435 sizeof(struct group_source_req));
101436 u32 interface;
101437@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101438 }
101439 case MCAST_MSFILTER:
101440 {
101441- struct compat_group_filter __user *gf32 = (void *)optval;
101442+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101443 struct group_filter __user *kgf;
101444 u32 interface, fmode, numsrc;
101445
101446@@ -624,7 +624,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
101447 char __user *optval, int __user *optlen,
101448 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
101449 {
101450- struct compat_group_filter __user *gf32 = (void *)optval;
101451+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101452 struct group_filter __user *kgf;
101453 int __user *koptlen;
101454 u32 interface, fmode, numsrc;
101455@@ -777,7 +777,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
101456
101457 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
101458 return -EINVAL;
101459- if (copy_from_user(a, args, nas[call]))
101460+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
101461 return -EFAULT;
101462 a0 = a[0];
101463 a1 = a[1];
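compat_sys_socketcall() now refuses a nas[call] byte count larger than the on-stack argument array before copy_from_user() runs; together with the call-range check two lines earlier, the stack copy is provably bounded. A sketch of the shape, with a stand-in argument-size table:

#include <string.h>
#include <errno.h>

/* Stand-in per-call argument byte counts (values illustrative). */
static const size_t nas[] = { 24, 24, 24, 16, 24, 24, 24, 32, 32, 32 };

/* Caller has already validated that call indexes nas[]. */
static int load_args(unsigned long *a, size_t a_size,
		     unsigned int call, const unsigned long *uargs)
{
	if (nas[call] > a_size)		/* the added belt-and-braces check */
		return -EFAULT;
	memcpy(a, uargs, nas[call]);	/* kernel: copy_from_user() */
	return 0;
}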
101464diff --git a/net/core/datagram.c b/net/core/datagram.c
101465index df493d6..1145766 100644
101466--- a/net/core/datagram.c
101467+++ b/net/core/datagram.c
101468@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
101469 }
101470
101471 kfree_skb(skb);
101472- atomic_inc(&sk->sk_drops);
101473+ atomic_inc_unchecked(&sk->sk_drops);
101474 sk_mem_reclaim_partial(sk);
101475
101476 return err;
101477diff --git a/net/core/dev.c b/net/core/dev.c
101478index 7fe8292..133045e 100644
101479--- a/net/core/dev.c
101480+++ b/net/core/dev.c
101481@@ -1680,14 +1680,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
101482 {
101483 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
101484 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
101485- atomic_long_inc(&dev->rx_dropped);
101486+ atomic_long_inc_unchecked(&dev->rx_dropped);
101487 kfree_skb(skb);
101488 return NET_RX_DROP;
101489 }
101490 }
101491
101492 if (unlikely(!is_skb_forwardable(dev, skb))) {
101493- atomic_long_inc(&dev->rx_dropped);
101494+ atomic_long_inc_unchecked(&dev->rx_dropped);
101495 kfree_skb(skb);
101496 return NET_RX_DROP;
101497 }
101498@@ -2958,7 +2958,7 @@ recursion_alert:
101499 drop:
101500 rcu_read_unlock_bh();
101501
101502- atomic_long_inc(&dev->tx_dropped);
101503+ atomic_long_inc_unchecked(&dev->tx_dropped);
101504 kfree_skb_list(skb);
101505 return rc;
101506 out:
101507@@ -3301,7 +3301,7 @@ enqueue:
101508
101509 local_irq_restore(flags);
101510
101511- atomic_long_inc(&skb->dev->rx_dropped);
101512+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101513 kfree_skb(skb);
101514 return NET_RX_DROP;
101515 }
101516@@ -3378,7 +3378,7 @@ int netif_rx_ni(struct sk_buff *skb)
101517 }
101518 EXPORT_SYMBOL(netif_rx_ni);
101519
101520-static void net_tx_action(struct softirq_action *h)
101521+static __latent_entropy void net_tx_action(void)
101522 {
101523 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101524
101525@@ -3711,7 +3711,7 @@ ncls:
101526 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
101527 } else {
101528 drop:
101529- atomic_long_inc(&skb->dev->rx_dropped);
101530+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101531 kfree_skb(skb);
101532 /* Jamal, now you will not be able to escape explaining
101533 * to me how you were going to use this. :-)
101534@@ -4599,7 +4599,7 @@ out_unlock:
101535 return work;
101536 }
101537
101538-static void net_rx_action(struct softirq_action *h)
101539+static __latent_entropy void net_rx_action(void)
101540 {
101541 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101542 unsigned long time_limit = jiffies + 2;
101543@@ -6610,8 +6610,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
101544 } else {
101545 netdev_stats_to_stats64(storage, &dev->stats);
101546 }
101547- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
101548- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
101549+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
101550+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
101551 return storage;
101552 }
101553 EXPORT_SYMBOL(dev_get_stats);
101554diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
101555index b94b1d2..da3ed7c 100644
101556--- a/net/core/dev_ioctl.c
101557+++ b/net/core/dev_ioctl.c
101558@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
101559 no_module = !dev;
101560 if (no_module && capable(CAP_NET_ADMIN))
101561 no_module = request_module("netdev-%s", name);
101562- if (no_module && capable(CAP_SYS_MODULE))
101563+ if (no_module && capable(CAP_SYS_MODULE)) {
101564+#ifdef CONFIG_GRKERNSEC_MODHARDEN
101565+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
101566+#else
101567 request_module("%s", name);
101568+#endif
101569+ }
101570 }
101571 EXPORT_SYMBOL(dev_load);
101572
101573diff --git a/net/core/filter.c b/net/core/filter.c
101574index ec9baea..dd6195d 100644
101575--- a/net/core/filter.c
101576+++ b/net/core/filter.c
101577@@ -533,7 +533,11 @@ do_pass:
101578
101579 /* Unknown instruction. */
101580 default:
101581- goto err;
101582+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u jf:%u k:%u\n",
101583+ fp->code, fp->jt, fp->jf, fp->k);
101584+ kfree(addrs);
101585+ BUG();
101586+ return -EINVAL;
101587 }
101588
101589 insn++;
101590@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
101591 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
101592 int pc, ret = 0;
101593
101594- BUILD_BUG_ON(BPF_MEMWORDS > 16);
101595+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
101596
101597 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
101598 if (!masks)
101599@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
101600 if (!fp)
101601 return -ENOMEM;
101602
101603- memcpy(fp->insns, fprog->filter, fsize);
101604+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
101605
101606 fp->len = fprog->len;
101607 /* Since unattached filters are not copied back to user
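Two separate tightenings in the classic-BPF converter above: an unknown opcode now fails loudly (WARN plus BUG) after freeing the temporary address table instead of falling through to a generic error, and BUILD_BUG_ON(BPF_MEMWORDS > 16) becomes != 16 because check_load_and_stores() tracks exactly one u16 bit per scratch word. A hedged sketch of the loud-failure shape:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative decoder: on an opcode it cannot translate, release what
 * was allocated and abort noisily rather than return a quiet error. */
static int translate(const unsigned char *prog, size_t n)
{
	int *addrs = calloc(n, sizeof(*addrs));
	if (!addrs)
		return -1;

	for (size_t i = 0; i < n; i++) {
		switch (prog[i]) {
		case 0x00:		/* known opcodes elided */
			break;
		default:
			fprintf(stderr, "unknown opcode %#x at %zu\n",
				(unsigned)prog[i], i);
			free(addrs);
			abort();	/* kernel: WARN(...), then BUG() */
		}
	}
	free(addrs);
	return 0;
}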
101608diff --git a/net/core/flow.c b/net/core/flow.c
101609index a0348fd..340f65d 100644
101610--- a/net/core/flow.c
101611+++ b/net/core/flow.c
101612@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
101613 static int flow_entry_valid(struct flow_cache_entry *fle,
101614 struct netns_xfrm *xfrm)
101615 {
101616- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
101617+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
101618 return 0;
101619 if (fle->object && !fle->object->ops->check(fle->object))
101620 return 0;
101621@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
101622 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
101623 fcp->hash_count++;
101624 }
101625- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
101626+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
101627 flo = fle->object;
101628 if (!flo)
101629 goto ret_object;
101630@@ -263,7 +263,7 @@ nocache:
101631 }
101632 flo = resolver(net, key, family, dir, flo, ctx);
101633 if (fle) {
101634- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
101635+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
101636 if (!IS_ERR(flo))
101637 fle->object = flo;
101638 else
101639@@ -379,7 +379,7 @@ done:
101640 static void flow_cache_flush_task(struct work_struct *work)
101641 {
101642 struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
101643- flow_cache_gc_work);
101644+ flow_cache_flush_work);
101645 struct net *net = container_of(xfrm, struct net, xfrm);
101646
101647 flow_cache_flush(net);
101648diff --git a/net/core/neighbour.c b/net/core/neighbour.c
101649index 8d614c9..55752ea 100644
101650--- a/net/core/neighbour.c
101651+++ b/net/core/neighbour.c
101652@@ -2802,7 +2802,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
101653 void __user *buffer, size_t *lenp, loff_t *ppos)
101654 {
101655 int size, ret;
101656- struct ctl_table tmp = *ctl;
101657+ ctl_table_no_const tmp = *ctl;
101658
101659 tmp.extra1 = &zero;
101660 tmp.extra2 = &unres_qlen_max;
101661@@ -2864,7 +2864,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
101662 void __user *buffer,
101663 size_t *lenp, loff_t *ppos)
101664 {
101665- struct ctl_table tmp = *ctl;
101666+ ctl_table_no_const tmp = *ctl;
101667 int ret;
101668
101669 tmp.extra1 = &zero;
101670diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
101671index 2bf8329..2eb1423 100644
101672--- a/net/core/net-procfs.c
101673+++ b/net/core/net-procfs.c
101674@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
101675 struct rtnl_link_stats64 temp;
101676 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
101677
101678- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101679+ if (gr_proc_is_restricted())
101680+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101681+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101682+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
101683+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
101684+ else
101685+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101686 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101687 dev->name, stats->rx_bytes, stats->rx_packets,
101688 stats->rx_errors,
101689@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
101690 return 0;
101691 }
101692
101693-static const struct seq_operations dev_seq_ops = {
101694+const struct seq_operations dev_seq_ops = {
101695 .start = dev_seq_start,
101696 .next = dev_seq_next,
101697 .stop = dev_seq_stop,
101698@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
101699
101700 static int softnet_seq_open(struct inode *inode, struct file *file)
101701 {
101702- return seq_open(file, &softnet_seq_ops);
101703+ return seq_open_restrict(file, &softnet_seq_ops);
101704 }
101705
101706 static const struct file_operations softnet_seq_fops = {
101707@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
101708 else
101709 seq_printf(seq, "%04x", ntohs(pt->type));
101710
101711+#ifdef CONFIG_GRKERNSEC_HIDESYM
101712+ seq_printf(seq, " %-8s %pf\n",
101713+ pt->dev ? pt->dev->name : "", NULL);
101714+#else
101715 seq_printf(seq, " %-8s %pf\n",
101716 pt->dev ? pt->dev->name : "", pt->func);
101717+#endif
101718 }
101719
101720 return 0;
101721diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
101722index 9993412..2a4672b 100644
101723--- a/net/core/net-sysfs.c
101724+++ b/net/core/net-sysfs.c
101725@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
101726 {
101727 struct net_device *netdev = to_net_dev(dev);
101728 return sprintf(buf, fmt_dec,
101729- atomic_read(&netdev->carrier_changes));
101730+ atomic_read_unchecked(&netdev->carrier_changes));
101731 }
101732 static DEVICE_ATTR_RO(carrier_changes);
101733
101734diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
101735index ce780c7..6d296b3 100644
101736--- a/net/core/net_namespace.c
101737+++ b/net/core/net_namespace.c
101738@@ -448,7 +448,7 @@ static int __register_pernet_operations(struct list_head *list,
101739 int error;
101740 LIST_HEAD(net_exit_list);
101741
101742- list_add_tail(&ops->list, list);
101743+ pax_list_add_tail((struct list_head *)&ops->list, list);
101744 if (ops->init || (ops->id && ops->size)) {
101745 for_each_net(net) {
101746 error = ops_init(ops, net);
101747@@ -461,7 +461,7 @@ static int __register_pernet_operations(struct list_head *list,
101748
101749 out_undo:
101750 /* If I have an error cleanup all namespaces I initialized */
101751- list_del(&ops->list);
101752+ pax_list_del((struct list_head *)&ops->list);
101753 ops_exit_list(ops, &net_exit_list);
101754 ops_free_list(ops, &net_exit_list);
101755 return error;
101756@@ -472,7 +472,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
101757 struct net *net;
101758 LIST_HEAD(net_exit_list);
101759
101760- list_del(&ops->list);
101761+ pax_list_del((struct list_head *)&ops->list);
101762 for_each_net(net)
101763 list_add_tail(&net->exit_list, &net_exit_list);
101764 ops_exit_list(ops, &net_exit_list);
101765@@ -606,7 +606,7 @@ int register_pernet_device(struct pernet_operations *ops)
101766 mutex_lock(&net_mutex);
101767 error = register_pernet_operations(&pernet_list, ops);
101768 if (!error && (first_device == &pernet_list))
101769- first_device = &ops->list;
101770+ first_device = (struct list_head *)&ops->list;
101771 mutex_unlock(&net_mutex);
101772 return error;
101773 }
101774diff --git a/net/core/netpoll.c b/net/core/netpoll.c
101775index e0ad5d1..04fa7f7 100644
101776--- a/net/core/netpoll.c
101777+++ b/net/core/netpoll.c
101778@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101779 struct udphdr *udph;
101780 struct iphdr *iph;
101781 struct ethhdr *eth;
101782- static atomic_t ip_ident;
101783+ static atomic_unchecked_t ip_ident;
101784 struct ipv6hdr *ip6h;
101785
101786 udp_len = len + sizeof(*udph);
101787@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101788 put_unaligned(0x45, (unsigned char *)iph);
101789 iph->tos = 0;
101790 put_unaligned(htons(ip_len), &(iph->tot_len));
101791- iph->id = htons(atomic_inc_return(&ip_ident));
101792+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
101793 iph->frag_off = 0;
101794 iph->ttl = 64;
101795 iph->protocol = IPPROTO_UDP;
101796diff --git a/net/core/pktgen.c b/net/core/pktgen.c
101797index da934fc..d82fded 100644
101798--- a/net/core/pktgen.c
101799+++ b/net/core/pktgen.c
101800@@ -3752,7 +3752,7 @@ static int __net_init pg_net_init(struct net *net)
101801 pn->net = net;
101802 INIT_LIST_HEAD(&pn->pktgen_threads);
101803 pn->pktgen_exiting = false;
101804- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
101805+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
101806 if (!pn->proc_dir) {
101807 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
101808 return -ENODEV;
101809diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
101810index 446cbaf..255153c 100644
101811--- a/net/core/rtnetlink.c
101812+++ b/net/core/rtnetlink.c
101813@@ -60,7 +60,7 @@ struct rtnl_link {
101814 rtnl_doit_func doit;
101815 rtnl_dumpit_func dumpit;
101816 rtnl_calcit_func calcit;
101817-};
101818+} __no_const;
101819
101820 static DEFINE_MUTEX(rtnl_mutex);
101821
101822@@ -306,10 +306,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
101823 * to use the ops for creating device. So do not
101824 * fill up dellink as well. That disables rtnl_dellink.
101825 */
101826- if (ops->setup && !ops->dellink)
101827- ops->dellink = unregister_netdevice_queue;
101828+ if (ops->setup && !ops->dellink) {
101829+ pax_open_kernel();
101830+ *(void **)&ops->dellink = unregister_netdevice_queue;
101831+ pax_close_kernel();
101832+ }
101833
101834- list_add_tail(&ops->list, &link_ops);
101835+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
101836 return 0;
101837 }
101838 EXPORT_SYMBOL_GPL(__rtnl_link_register);
101839@@ -356,7 +359,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
101840 for_each_net(net) {
101841 __rtnl_kill_links(net, ops);
101842 }
101843- list_del(&ops->list);
101844+ pax_list_del((struct list_head *)&ops->list);
101845 }
101846 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
101847
101848@@ -1035,7 +1038,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
101849 (dev->ifalias &&
101850 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
101851 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
101852- atomic_read(&dev->carrier_changes)))
101853+ atomic_read_unchecked(&dev->carrier_changes)))
101854 goto nla_put_failure;
101855
101856 if (1) {
101857@@ -2102,6 +2105,10 @@ replay:
101858 if (IS_ERR(dest_net))
101859 return PTR_ERR(dest_net);
101860
101861+ err = -EPERM;
101862+ if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
101863+ goto out;
101864+
101865 dev = rtnl_create_link(dest_net, ifname, name_assign_type, ops, tb);
101866 if (IS_ERR(dev)) {
101867 err = PTR_ERR(dev);
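Because rtnl_link_ops objects are constified under this patch set, __rtnl_link_register() must bracket its one legitimate write to ops->dellink with pax_open_kernel()/pax_close_kernel(). A loose userspace analogue using mprotect -- the kernel mechanism differs (typically toggling the CR0.WP bit) -- to show the open/patch/close discipline:

#include <sys/mman.h>

/* Userspace analogue only: briefly make a read-only page writable,
 * patch one pointer-sized slot, then seal the page again. */
static int patch_ro_slot(void *page, size_t pagesz,
			 void **slot, void *newval)
{
	if (mprotect(page, pagesz, PROT_READ | PROT_WRITE))
		return -1;			/* ~ pax_open_kernel() */
	*slot = newval;
	return mprotect(page, pagesz, PROT_READ); /* ~ pax_close_kernel() */
}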
101868diff --git a/net/core/scm.c b/net/core/scm.c
101869index 3b6899b..cf36238 100644
101870--- a/net/core/scm.c
101871+++ b/net/core/scm.c
101872@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
101873 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101874 {
101875 struct cmsghdr __user *cm
101876- = (__force struct cmsghdr __user *)msg->msg_control;
101877+ = (struct cmsghdr __force_user *)msg->msg_control;
101878 struct cmsghdr cmhdr;
101879 int cmlen = CMSG_LEN(len);
101880 int err;
101881@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101882 err = -EFAULT;
101883 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
101884 goto out;
101885- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
101886+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
101887 goto out;
101888 cmlen = CMSG_SPACE(len);
101889 if (msg->msg_controllen < cmlen)
101890@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
101891 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101892 {
101893 struct cmsghdr __user *cm
101894- = (__force struct cmsghdr __user*)msg->msg_control;
101895+ = (struct cmsghdr __force_user *)msg->msg_control;
101896
101897 int fdmax = 0;
101898 int fdnum = scm->fp->count;
101899@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101900 if (fdnum < fdmax)
101901 fdmax = fdnum;
101902
101903- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
101904+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
101905 i++, cmfptr++)
101906 {
101907 struct socket *sock;
101908diff --git a/net/core/skbuff.c b/net/core/skbuff.c
101909index 395c15b..7f39726 100644
101910--- a/net/core/skbuff.c
101911+++ b/net/core/skbuff.c
101912@@ -2123,7 +2123,7 @@ EXPORT_SYMBOL(__skb_checksum);
101913 __wsum skb_checksum(const struct sk_buff *skb, int offset,
101914 int len, __wsum csum)
101915 {
101916- const struct skb_checksum_ops ops = {
101917+ static const struct skb_checksum_ops ops = {
101918 .update = csum_partial_ext,
101919 .combine = csum_block_add_ext,
101920 };
101921@@ -3363,12 +3363,14 @@ void __init skb_init(void)
101922 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
101923 sizeof(struct sk_buff),
101924 0,
101925- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101926+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101927+ SLAB_NO_SANITIZE,
101928 NULL);
101929 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
101930 sizeof(struct sk_buff_fclones),
101931 0,
101932- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101933+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101934+ SLAB_NO_SANITIZE,
101935 NULL);
101936 }
101937
101938diff --git a/net/core/sock.c b/net/core/sock.c
101939index 1c7a33d..a3817e2 100644
101940--- a/net/core/sock.c
101941+++ b/net/core/sock.c
101942@@ -441,7 +441,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101943 struct sk_buff_head *list = &sk->sk_receive_queue;
101944
101945 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
101946- atomic_inc(&sk->sk_drops);
101947+ atomic_inc_unchecked(&sk->sk_drops);
101948 trace_sock_rcvqueue_full(sk, skb);
101949 return -ENOMEM;
101950 }
101951@@ -451,7 +451,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101952 return err;
101953
101954 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
101955- atomic_inc(&sk->sk_drops);
101956+ atomic_inc_unchecked(&sk->sk_drops);
101957 return -ENOBUFS;
101958 }
101959
101960@@ -464,7 +464,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101961 skb_dst_force(skb);
101962
101963 spin_lock_irqsave(&list->lock, flags);
101964- skb->dropcount = atomic_read(&sk->sk_drops);
101965+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
101966 __skb_queue_tail(list, skb);
101967 spin_unlock_irqrestore(&list->lock, flags);
101968
101969@@ -484,7 +484,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101970 skb->dev = NULL;
101971
101972 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
101973- atomic_inc(&sk->sk_drops);
101974+ atomic_inc_unchecked(&sk->sk_drops);
101975 goto discard_and_relse;
101976 }
101977 if (nested)
101978@@ -502,7 +502,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101979 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
101980 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
101981 bh_unlock_sock(sk);
101982- atomic_inc(&sk->sk_drops);
101983+ atomic_inc_unchecked(&sk->sk_drops);
101984 goto discard_and_relse;
101985 }
101986
101987@@ -888,6 +888,7 @@ set_rcvbuf:
101988 }
101989 break;
101990
101991+#ifndef CONFIG_GRKERNSEC_BPF_HARDEN
101992 case SO_ATTACH_BPF:
101993 ret = -EINVAL;
101994 if (optlen == sizeof(u32)) {
101995@@ -900,7 +901,7 @@ set_rcvbuf:
101996 ret = sk_attach_bpf(ufd, sk);
101997 }
101998 break;
101999-
102000+#endif
102001 case SO_DETACH_FILTER:
102002 ret = sk_detach_filter(sk);
102003 break;
102004@@ -1004,12 +1005,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102005 struct timeval tm;
102006 } v;
102007
102008- int lv = sizeof(int);
102009- int len;
102010+ unsigned int lv = sizeof(int);
102011+ unsigned int len;
102012
102013 if (get_user(len, optlen))
102014 return -EFAULT;
102015- if (len < 0)
102016+ if (len > INT_MAX)
102017 return -EINVAL;
102018
102019 memset(&v, 0, sizeof(v));
102020@@ -1147,11 +1148,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102021
102022 case SO_PEERNAME:
102023 {
102024- char address[128];
102025+ char address[_K_SS_MAXSIZE];
102026
102027 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
102028 return -ENOTCONN;
102029- if (lv < len)
102030+ if (lv < len || sizeof address < len)
102031 return -EINVAL;
102032 if (copy_to_user(optval, address, len))
102033 return -EFAULT;
102034@@ -1236,7 +1237,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102035
102036 if (len > lv)
102037 len = lv;
102038- if (copy_to_user(optval, &v, len))
102039+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
102040 return -EFAULT;
102041 lenout:
102042 if (put_user(len, optlen))
102043@@ -2349,7 +2350,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
102044 */
102045 smp_wmb();
102046 atomic_set(&sk->sk_refcnt, 1);
102047- atomic_set(&sk->sk_drops, 0);
102048+ atomic_set_unchecked(&sk->sk_drops, 0);
102049 }
102050 EXPORT_SYMBOL(sock_init_data);
102051
102052@@ -2477,6 +2478,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
102053 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102054 int level, int type)
102055 {
102056+ struct sock_extended_err ee;
102057 struct sock_exterr_skb *serr;
102058 struct sk_buff *skb;
102059 int copied, err;
102060@@ -2498,7 +2500,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102061 sock_recv_timestamp(msg, sk, skb);
102062
102063 serr = SKB_EXT_ERR(skb);
102064- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
102065+ ee = serr->ee;
102066+ put_cmsg(msg, level, type, sizeof ee, &ee);
102067
102068 msg->msg_flags |= MSG_ERRQUEUE;
102069 err = copied;
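sock_recv_errqueue() now copies serr->ee into a stack-local struct before put_cmsg() runs, so the control-message copy can expose only that one member and never the fields that follow it inside sock_exterr_skb. The pattern in isolation; the struct layout below is a stand-in:

#include <string.h>

struct extended_err { int err; int origin; };
struct exterr_skb {
	struct extended_err ee;
	int private_state;	/* must never reach userspace */
};

/* Copy out the member by value, never via a pointer into the parent. */
static void export_err(void *dst, const struct exterr_skb *serr)
{
	struct extended_err ee = serr->ee;	/* isolate the member */
	memcpy(dst, &ee, sizeof(ee));		/* kernel: put_cmsg(...) */
}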
102070diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
102071index ad704c7..ca48aff 100644
102072--- a/net/core/sock_diag.c
102073+++ b/net/core/sock_diag.c
102074@@ -9,26 +9,33 @@
102075 #include <linux/inet_diag.h>
102076 #include <linux/sock_diag.h>
102077
102078-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
102079+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
102080 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
102081 static DEFINE_MUTEX(sock_diag_table_mutex);
102082
102083 int sock_diag_check_cookie(void *sk, __u32 *cookie)
102084 {
102085+#ifndef CONFIG_GRKERNSEC_HIDESYM
102086 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
102087 cookie[1] != INET_DIAG_NOCOOKIE) &&
102088 ((u32)(unsigned long)sk != cookie[0] ||
102089 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
102090 return -ESTALE;
102091 else
102092+#endif
102093 return 0;
102094 }
102095 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
102096
102097 void sock_diag_save_cookie(void *sk, __u32 *cookie)
102098 {
102099+#ifdef CONFIG_GRKERNSEC_HIDESYM
102100+ cookie[0] = 0;
102101+ cookie[1] = 0;
102102+#else
102103 cookie[0] = (u32)(unsigned long)sk;
102104 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
102105+#endif
102106 }
102107 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
102108
102109@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
102110 mutex_lock(&sock_diag_table_mutex);
102111 if (sock_diag_handlers[hndl->family])
102112 err = -EBUSY;
102113- else
102114+ else {
102115+ pax_open_kernel();
102116 sock_diag_handlers[hndl->family] = hndl;
102117+ pax_close_kernel();
102118+ }
102119 mutex_unlock(&sock_diag_table_mutex);
102120
102121 return err;
102122@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
102123
102124 mutex_lock(&sock_diag_table_mutex);
102125 BUG_ON(sock_diag_handlers[family] != hnld);
102126+ pax_open_kernel();
102127 sock_diag_handlers[family] = NULL;
102128+ pax_close_kernel();
102129 mutex_unlock(&sock_diag_table_mutex);
102130 }
102131 EXPORT_SYMBOL_GPL(sock_diag_unregister);
102132diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
102133index 31baba2..c71485b 100644
102134--- a/net/core/sysctl_net_core.c
102135+++ b/net/core/sysctl_net_core.c
102136@@ -34,7 +34,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
102137 {
102138 unsigned int orig_size, size;
102139 int ret, i;
102140- struct ctl_table tmp = {
102141+ ctl_table_no_const tmp = {
102142 .data = &size,
102143 .maxlen = sizeof(size),
102144 .mode = table->mode
102145@@ -202,7 +202,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102146 void __user *buffer, size_t *lenp, loff_t *ppos)
102147 {
102148 char id[IFNAMSIZ];
102149- struct ctl_table tbl = {
102150+ ctl_table_no_const tbl = {
102151 .data = id,
102152 .maxlen = IFNAMSIZ,
102153 };
102154@@ -220,7 +220,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102155 static int proc_do_rss_key(struct ctl_table *table, int write,
102156 void __user *buffer, size_t *lenp, loff_t *ppos)
102157 {
102158- struct ctl_table fake_table;
102159+ ctl_table_no_const fake_table;
102160 char buf[NETDEV_RSS_KEY_LEN * 3];
102161
102162 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
102163@@ -284,7 +284,7 @@ static struct ctl_table net_core_table[] = {
102164 .mode = 0444,
102165 .proc_handler = proc_do_rss_key,
102166 },
102167-#ifdef CONFIG_BPF_JIT
102168+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
102169 {
102170 .procname = "bpf_jit_enable",
102171 .data = &bpf_jit_enable,
102172@@ -400,13 +400,12 @@ static struct ctl_table netns_core_table[] = {
102173
102174 static __net_init int sysctl_core_net_init(struct net *net)
102175 {
102176- struct ctl_table *tbl;
102177+ ctl_table_no_const *tbl = NULL;
102178
102179 net->core.sysctl_somaxconn = SOMAXCONN;
102180
102181- tbl = netns_core_table;
102182 if (!net_eq(net, &init_net)) {
102183- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
102184+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
102185 if (tbl == NULL)
102186 goto err_dup;
102187
102188@@ -416,17 +415,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
102189 if (net->user_ns != &init_user_ns) {
102190 tbl[0].procname = NULL;
102191 }
102192- }
102193-
102194- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102195+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102196+ } else
102197+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
102198 if (net->core.sysctl_hdr == NULL)
102199 goto err_reg;
102200
102201 return 0;
102202
102203 err_reg:
102204- if (tbl != netns_core_table)
102205- kfree(tbl);
102206+ kfree(tbl);
102207 err_dup:
102208 return -ENOMEM;
102209 }
102210@@ -441,7 +439,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
102211 kfree(tbl);
102212 }
102213
102214-static __net_initdata struct pernet_operations sysctl_core_ops = {
102215+static __net_initconst struct pernet_operations sysctl_core_ops = {
102216 .init = sysctl_core_net_init,
102217 .exit = sysctl_core_net_exit,
102218 };
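sysctl_core_net_init() is restructured so init_net registers the static netns_core_table directly while other namespaces register a kmemdup'd writable copy; because tbl now starts out NULL, the error path can kfree(tbl) unconditionally. The same shape as a standalone sketch:

#include <stdlib.h>
#include <string.h>

struct entry { const char *name; int mode; };
static const struct entry template[] = { { "somaxconn", 0644 }, { } };

static int net_init(int is_init_net, const struct entry **registered,
		    struct entry **owned)
{
	struct entry *tbl = NULL;	/* NULL => free(tbl) is always safe */

	if (!is_init_net) {
		tbl = malloc(sizeof(template));
		if (!tbl)
			return -1;
		memcpy(tbl, template, sizeof(template));
		*registered = tbl;	/* writable per-namespace copy */
	} else {
		*registered = template;	/* shared read-only table */
	}
	*owned = tbl;
	return 0;
}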
102219diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
102220index 8102286..a0c2755 100644
102221--- a/net/decnet/af_decnet.c
102222+++ b/net/decnet/af_decnet.c
102223@@ -466,6 +466,7 @@ static struct proto dn_proto = {
102224 .sysctl_rmem = sysctl_decnet_rmem,
102225 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
102226 .obj_size = sizeof(struct dn_sock),
102227+ .slab_flags = SLAB_USERCOPY,
102228 };
102229
102230 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
102231diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
102232index 4400da7..3429972 100644
102233--- a/net/decnet/dn_dev.c
102234+++ b/net/decnet/dn_dev.c
102235@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
102236 .extra1 = &min_t3,
102237 .extra2 = &max_t3
102238 },
102239- {0}
102240+ { }
102241 },
102242 };
102243
102244diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
102245index 5325b54..a0d4d69 100644
102246--- a/net/decnet/sysctl_net_decnet.c
102247+++ b/net/decnet/sysctl_net_decnet.c
102248@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
102249
102250 if (len > *lenp) len = *lenp;
102251
102252- if (copy_to_user(buffer, addr, len))
102253+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
102254 return -EFAULT;
102255
102256 *lenp = len;
102257@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
102258
102259 if (len > *lenp) len = *lenp;
102260
102261- if (copy_to_user(buffer, devname, len))
102262+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
102263 return -EFAULT;
102264
102265 *lenp = len;
102266diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
102267index a2c7e4c..3dc9f67 100644
102268--- a/net/hsr/hsr_netlink.c
102269+++ b/net/hsr/hsr_netlink.c
102270@@ -102,7 +102,7 @@ nla_put_failure:
102271 return -EMSGSIZE;
102272 }
102273
102274-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
102275+static struct rtnl_link_ops hsr_link_ops = {
102276 .kind = "hsr",
102277 .maxtype = IFLA_HSR_MAX,
102278 .policy = hsr_policy,
102279diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
102280index 27eaa65..7083217 100644
102281--- a/net/ieee802154/6lowpan_rtnl.c
102282+++ b/net/ieee802154/6lowpan_rtnl.c
102283@@ -642,7 +642,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
102284 dev_put(real_dev);
102285 }
102286
102287-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
102288+static struct rtnl_link_ops lowpan_link_ops = {
102289 .kind = "lowpan",
102290 .priv_size = sizeof(struct lowpan_dev_info),
102291 .setup = lowpan_setup,
102292diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
102293index 9d980ed..7d01e12 100644
102294--- a/net/ieee802154/reassembly.c
102295+++ b/net/ieee802154/reassembly.c
102296@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
102297
102298 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102299 {
102300- struct ctl_table *table;
102301+ ctl_table_no_const *table = NULL;
102302 struct ctl_table_header *hdr;
102303 struct netns_ieee802154_lowpan *ieee802154_lowpan =
102304 net_ieee802154_lowpan(net);
102305
102306- table = lowpan_frags_ns_ctl_table;
102307 if (!net_eq(net, &init_net)) {
102308- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
102309+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
102310 GFP_KERNEL);
102311 if (table == NULL)
102312 goto err_alloc;
102313@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102314 /* Don't export sysctls to unprivileged users */
102315 if (net->user_ns != &init_user_ns)
102316 table[0].procname = NULL;
102317- }
102318-
102319- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102320+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102321+ } else
102322+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
102323 if (hdr == NULL)
102324 goto err_reg;
102325
102326@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102327 return 0;
102328
102329 err_reg:
102330- if (!net_eq(net, &init_net))
102331- kfree(table);
102332+ kfree(table);
102333 err_alloc:
102334 return -ENOMEM;
102335 }
102336diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
102337index a44773c..a6ae415 100644
102338--- a/net/ipv4/af_inet.c
102339+++ b/net/ipv4/af_inet.c
102340@@ -1392,7 +1392,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
102341 return ip_recv_error(sk, msg, len, addr_len);
102342 #if IS_ENABLED(CONFIG_IPV6)
102343 if (sk->sk_family == AF_INET6)
102344- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
102345+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
102346 #endif
102347 return -EINVAL;
102348 }
102349diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
102350index 214882e..ec032f6 100644
102351--- a/net/ipv4/devinet.c
102352+++ b/net/ipv4/devinet.c
102353@@ -69,7 +69,8 @@
102354
102355 static struct ipv4_devconf ipv4_devconf = {
102356 .data = {
102357- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102358+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102359+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102360 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102361 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102362 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102363@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
102364
102365 static struct ipv4_devconf ipv4_devconf_dflt = {
102366 .data = {
102367- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102368+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102369+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102370 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102371 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102372 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102373@@ -1548,7 +1550,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
102374 idx = 0;
102375 head = &net->dev_index_head[h];
102376 rcu_read_lock();
102377- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102378+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102379 net->dev_base_seq;
102380 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102381 if (idx < s_idx)
102382@@ -1866,7 +1868,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
102383 idx = 0;
102384 head = &net->dev_index_head[h];
102385 rcu_read_lock();
102386- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102387+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102388 net->dev_base_seq;
102389 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102390 if (idx < s_idx)
102391@@ -2101,7 +2103,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
102392 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
102393 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
102394
102395-static struct devinet_sysctl_table {
102396+static const struct devinet_sysctl_table {
102397 struct ctl_table_header *sysctl_header;
102398 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
102399 } devinet_sysctl = {
102400@@ -2233,7 +2235,7 @@ static __net_init int devinet_init_net(struct net *net)
102401 int err;
102402 struct ipv4_devconf *all, *dflt;
102403 #ifdef CONFIG_SYSCTL
102404- struct ctl_table *tbl = ctl_forward_entry;
102405+ ctl_table_no_const *tbl = NULL;
102406 struct ctl_table_header *forw_hdr;
102407 #endif
102408
102409@@ -2251,7 +2253,7 @@ static __net_init int devinet_init_net(struct net *net)
102410 goto err_alloc_dflt;
102411
102412 #ifdef CONFIG_SYSCTL
102413- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
102414+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
102415 if (tbl == NULL)
102416 goto err_alloc_ctl;
102417
102418@@ -2271,7 +2273,10 @@ static __net_init int devinet_init_net(struct net *net)
102419 goto err_reg_dflt;
102420
102421 err = -ENOMEM;
102422- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102423+ if (!net_eq(net, &init_net))
102424+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102425+ else
102426+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
102427 if (forw_hdr == NULL)
102428 goto err_reg_ctl;
102429 net->ipv4.forw_hdr = forw_hdr;
102430@@ -2287,8 +2292,7 @@ err_reg_ctl:
102431 err_reg_dflt:
102432 __devinet_sysctl_unregister(all);
102433 err_reg_all:
102434- if (tbl != ctl_forward_entry)
102435- kfree(tbl);
102436+ kfree(tbl);
102437 err_alloc_ctl:
102438 #endif
102439 if (dflt != &ipv4_devconf_dflt)
102440diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
102441index 23104a3..9f5570b 100644
102442--- a/net/ipv4/fib_frontend.c
102443+++ b/net/ipv4/fib_frontend.c
102444@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
102445 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102446 fib_sync_up(dev);
102447 #endif
102448- atomic_inc(&net->ipv4.dev_addr_genid);
102449+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102450 rt_cache_flush(dev_net(dev));
102451 break;
102452 case NETDEV_DOWN:
102453 fib_del_ifaddr(ifa, NULL);
102454- atomic_inc(&net->ipv4.dev_addr_genid);
102455+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102456 if (ifa->ifa_dev->ifa_list == NULL) {
102457 /* Last address was deleted from this interface.
102458 * Disable IP.
102459@@ -1060,7 +1060,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
102460 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102461 fib_sync_up(dev);
102462 #endif
102463- atomic_inc(&net->ipv4.dev_addr_genid);
102464+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102465 rt_cache_flush(net);
102466 break;
102467 case NETDEV_DOWN:
102468diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
102469index f99f41b..1879da9 100644
102470--- a/net/ipv4/fib_semantics.c
102471+++ b/net/ipv4/fib_semantics.c
102472@@ -770,7 +770,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
102473 nh->nh_saddr = inet_select_addr(nh->nh_dev,
102474 nh->nh_gw,
102475 nh->nh_parent->fib_scope);
102476- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
102477+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
102478
102479 return nh->nh_saddr;
102480 }
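
The devinet.c, fib_frontend.c and fib_semantics.c hunks above all convert dev_addr_genid from atomic_t to the patch's atomic_unchecked_t. The rationale, as far as these hunks show it, is that PAX_REFCOUNT instruments ordinary atomic_t operations to trap on overflow so refcount bugs can't be exploited; a generation counter is not a refcount, it only has to change on every address event so cached state can be invalidated, so wraparound is harmless and the *_unchecked variants opt it out of the trap. A small sketch of the "equality only" usage pattern, with C11 atomics standing in for the kernel's atomic_*_unchecked() helpers:

/* Generation counter: consumers never compare magnitudes, only "did it move?" */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint genid;	/* wraparound is benign by construction */

int main(void)
{
	unsigned int seen = atomic_load(&genid);

	atomic_fetch_add(&genid, 1);	/* some address changed */

	if (atomic_load(&genid) != seen)
		puts("cache stale, rebuild");
	return 0;
}
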
102481diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
102482index 9111a4e..3576905 100644
102483--- a/net/ipv4/inet_hashtables.c
102484+++ b/net/ipv4/inet_hashtables.c
102485@@ -18,6 +18,7 @@
102486 #include <linux/sched.h>
102487 #include <linux/slab.h>
102488 #include <linux/wait.h>
102489+#include <linux/security.h>
102490
102491 #include <net/inet_connection_sock.h>
102492 #include <net/inet_hashtables.h>
102493@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
102494 return inet_ehashfn(net, laddr, lport, faddr, fport);
102495 }
102496
102497+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
102498+
102499 /*
102500 * Allocate and initialize a new local port bind bucket.
102501 * The bindhash mutex for snum's hash chain must be held here.
102502@@ -554,6 +557,8 @@ ok:
102503 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
102504 spin_unlock(&head->lock);
102505
102506+ gr_update_task_in_ip_table(inet_sk(sk));
102507+
102508 if (tw) {
102509 inet_twsk_deschedule(tw, death_row);
102510 while (twrefcnt) {
102511diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
102512index 241afd7..31b95d5 100644
102513--- a/net/ipv4/inetpeer.c
102514+++ b/net/ipv4/inetpeer.c
102515@@ -461,7 +461,7 @@ relookup:
102516 if (p) {
102517 p->daddr = *daddr;
102518 atomic_set(&p->refcnt, 1);
102519- atomic_set(&p->rid, 0);
102520+ atomic_set_unchecked(&p->rid, 0);
102521 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
102522 p->rate_tokens = 0;
102523 /* 60*HZ is arbitrary, but chosen enough high so that the first
102524diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
102525index e5b6d0d..187c8b0 100644
102526--- a/net/ipv4/ip_fragment.c
102527+++ b/net/ipv4/ip_fragment.c
102528@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
102529 return 0;
102530
102531 start = qp->rid;
102532- end = atomic_inc_return(&peer->rid);
102533+ end = atomic_inc_return_unchecked(&peer->rid);
102534 qp->rid = end;
102535
102536 rc = qp->q.fragments && (end - start) > max;
102537@@ -745,12 +745,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
102538
102539 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102540 {
102541- struct ctl_table *table;
102542+ ctl_table_no_const *table = NULL;
102543 struct ctl_table_header *hdr;
102544
102545- table = ip4_frags_ns_ctl_table;
102546 if (!net_eq(net, &init_net)) {
102547- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102548+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102549 if (table == NULL)
102550 goto err_alloc;
102551
102552@@ -764,9 +763,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102553 /* Don't export sysctls to unprivileged users */
102554 if (net->user_ns != &init_user_ns)
102555 table[0].procname = NULL;
102556- }
102557+ hdr = register_net_sysctl(net, "net/ipv4", table);
102558+ } else
102559+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
102560
102561- hdr = register_net_sysctl(net, "net/ipv4", table);
102562 if (hdr == NULL)
102563 goto err_reg;
102564
102565@@ -774,8 +774,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102566 return 0;
102567
102568 err_reg:
102569- if (!net_eq(net, &init_net))
102570- kfree(table);
102571+ kfree(table);
102572 err_alloc:
102573 return -ENOMEM;
102574 }
102575diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
102576index 4f4bf5b..2c936fe 100644
102577--- a/net/ipv4/ip_gre.c
102578+++ b/net/ipv4/ip_gre.c
102579@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
102580 module_param(log_ecn_error, bool, 0644);
102581 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102582
102583-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
102584+static struct rtnl_link_ops ipgre_link_ops;
102585 static int ipgre_tunnel_init(struct net_device *dev);
102586
102587 static int ipgre_net_id __read_mostly;
102588@@ -816,7 +816,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
102589 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
102590 };
102591
102592-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102593+static struct rtnl_link_ops ipgre_link_ops = {
102594 .kind = "gre",
102595 .maxtype = IFLA_GRE_MAX,
102596 .policy = ipgre_policy,
102597@@ -830,7 +830,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102598 .fill_info = ipgre_fill_info,
102599 };
102600
102601-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
102602+static struct rtnl_link_ops ipgre_tap_ops = {
102603 .kind = "gretap",
102604 .maxtype = IFLA_GRE_MAX,
102605 .policy = ipgre_policy,
102606diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
102607index 3d4da2c..40f9c29 100644
102608--- a/net/ipv4/ip_input.c
102609+++ b/net/ipv4/ip_input.c
102610@@ -147,6 +147,10 @@
102611 #include <linux/mroute.h>
102612 #include <linux/netlink.h>
102613
102614+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102615+extern int grsec_enable_blackhole;
102616+#endif
102617+
102618 /*
102619 * Process Router Attention IP option (RFC 2113)
102620 */
102621@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
102622 if (!raw) {
102623 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
102624 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
102625+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102626+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
102627+#endif
102628 icmp_send(skb, ICMP_DEST_UNREACH,
102629 ICMP_PROT_UNREACH, 0);
102630 }
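
The ip_input.c hunk wraps the ICMP protocol-unreachable reply in the GRKERNSEC_BLACKHOLE guard: when the grsec_enable_blackhole sysctl is on, unsolicited packets are dropped silently unless they arrived on a loopback interface, which starves port and protocol scanners of the usual error signal while keeping local diagnostics intact. A standalone sketch of the guard, with IFF_LOOPBACK and the reply function mocked:

/* Same shape as the guard added to ip_local_deliver_finish(). */
#include <stdio.h>

#define IFF_LOOPBACK 0x8

static int grsec_enable_blackhole = 1;	/* sysctl-backed in the real patch */

static void send_unreachable(void) { puts("ICMP_PROT_UNREACH sent"); }

static void deliver_unknown_proto(unsigned int dev_flags)
{
	if (!grsec_enable_blackhole || (dev_flags & IFF_LOOPBACK))
		send_unreachable();
	else
		puts("blackholed: no reply");
}

int main(void)
{
	deliver_unknown_proto(0);		/* external iface: silent drop */
	deliver_unknown_proto(IFF_LOOPBACK);	/* loopback keeps diagnostics */
	return 0;
}
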
102631diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
102632index 6b85adb..cd7e5d3 100644
102633--- a/net/ipv4/ip_sockglue.c
102634+++ b/net/ipv4/ip_sockglue.c
102635@@ -1193,7 +1193,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102636 len = min_t(unsigned int, len, opt->optlen);
102637 if (put_user(len, optlen))
102638 return -EFAULT;
102639- if (copy_to_user(optval, opt->__data, len))
102640+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
102641+ copy_to_user(optval, opt->__data, len))
102642 return -EFAULT;
102643 return 0;
102644 }
102645@@ -1324,7 +1325,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102646 if (sk->sk_type != SOCK_STREAM)
102647 return -ENOPROTOOPT;
102648
102649- msg.msg_control = (__force void *) optval;
102650+ msg.msg_control = (__force_kernel void *) optval;
102651 msg.msg_controllen = len;
102652 msg.msg_flags = flags;
102653
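
The first ip_sockglue.c change adds a bounds check to the IP_OPTIONS getsockopt path: before opt->__data is copied out, the user-visible length is tested against the real capacity of the stack buffer (sizeof(optbuf) minus the ip_options header), so an oversized or corrupted optlen can never read past the buffer. A minimal sketch under the assumption that memcpy stands in for copy_to_user() and the struct layout is simplified:

/* Guarded copy-out, mirroring the added check in do_ip_getsockopt(). */
#include <stdio.h>
#include <string.h>

struct ip_options_demo { int optlen; unsigned char __data[40]; };

static int get_options(unsigned char *dst, size_t len,
		       const struct ip_options_demo *opt)
{
	if (len > (size_t)opt->optlen)
		len = opt->optlen;
	if (len > sizeof(opt->__data))	/* the patch's extra guard */
		return -1;		/* -EFAULT in the kernel */
	memcpy(dst, opt->__data, len);
	return (int)len;
}

int main(void)
{
	struct ip_options_demo opt = { .optlen = 1000 };	/* bogus length */
	unsigned char out[64];

	printf("%d\n", get_options(out, sizeof(out), &opt));	/* -1, not a leak */
	return 0;
}
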
102654diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
102655index 1a7e979..fd05aa4 100644
102656--- a/net/ipv4/ip_vti.c
102657+++ b/net/ipv4/ip_vti.c
102658@@ -45,7 +45,7 @@
102659 #include <net/net_namespace.h>
102660 #include <net/netns/generic.h>
102661
102662-static struct rtnl_link_ops vti_link_ops __read_mostly;
102663+static struct rtnl_link_ops vti_link_ops;
102664
102665 static int vti_net_id __read_mostly;
102666 static int vti_tunnel_init(struct net_device *dev);
102667@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
102668 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
102669 };
102670
102671-static struct rtnl_link_ops vti_link_ops __read_mostly = {
102672+static struct rtnl_link_ops vti_link_ops = {
102673 .kind = "vti",
102674 .maxtype = IFLA_VTI_MAX,
102675 .policy = vti_policy,
102676diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
102677index 7fa18bc..bea16af 100644
102678--- a/net/ipv4/ipconfig.c
102679+++ b/net/ipv4/ipconfig.c
102680@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
102681
102682 mm_segment_t oldfs = get_fs();
102683 set_fs(get_ds());
102684- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102685+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102686 set_fs(oldfs);
102687 return res;
102688 }
102689@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
102690
102691 mm_segment_t oldfs = get_fs();
102692 set_fs(get_ds());
102693- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102694+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102695 set_fs(oldfs);
102696 return res;
102697 }
102698@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
102699
102700 mm_segment_t oldfs = get_fs();
102701 set_fs(get_ds());
102702- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
102703+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
102704 set_fs(oldfs);
102705 return res;
102706 }
102707diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
102708index 40403114..c35c647 100644
102709--- a/net/ipv4/ipip.c
102710+++ b/net/ipv4/ipip.c
102711@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102712 static int ipip_net_id __read_mostly;
102713
102714 static int ipip_tunnel_init(struct net_device *dev);
102715-static struct rtnl_link_ops ipip_link_ops __read_mostly;
102716+static struct rtnl_link_ops ipip_link_ops;
102717
102718 static int ipip_err(struct sk_buff *skb, u32 info)
102719 {
102720@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
102721 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
102722 };
102723
102724-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
102725+static struct rtnl_link_ops ipip_link_ops = {
102726 .kind = "ipip",
102727 .maxtype = IFLA_IPTUN_MAX,
102728 .policy = ipip_policy,
102729diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
102730index f95b6f9..2ee2097 100644
102731--- a/net/ipv4/netfilter/arp_tables.c
102732+++ b/net/ipv4/netfilter/arp_tables.c
102733@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
102734 #endif
102735
102736 static int get_info(struct net *net, void __user *user,
102737- const int *len, int compat)
102738+ int len, int compat)
102739 {
102740 char name[XT_TABLE_MAXNAMELEN];
102741 struct xt_table *t;
102742 int ret;
102743
102744- if (*len != sizeof(struct arpt_getinfo)) {
102745- duprintf("length %u != %Zu\n", *len,
102746+ if (len != sizeof(struct arpt_getinfo)) {
102747+ duprintf("length %u != %Zu\n", len,
102748 sizeof(struct arpt_getinfo));
102749 return -EINVAL;
102750 }
102751@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
102752 info.size = private->size;
102753 strcpy(info.name, name);
102754
102755- if (copy_to_user(user, &info, *len) != 0)
102756+ if (copy_to_user(user, &info, len) != 0)
102757 ret = -EFAULT;
102758 else
102759 ret = 0;
102760@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
102761
102762 switch (cmd) {
102763 case ARPT_SO_GET_INFO:
102764- ret = get_info(sock_net(sk), user, len, 1);
102765+ ret = get_info(sock_net(sk), user, *len, 1);
102766 break;
102767 case ARPT_SO_GET_ENTRIES:
102768 ret = compat_get_entries(sock_net(sk), user, len);
102769@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
102770
102771 switch (cmd) {
102772 case ARPT_SO_GET_INFO:
102773- ret = get_info(sock_net(sk), user, len, 0);
102774+ ret = get_info(sock_net(sk), user, *len, 0);
102775 break;
102776
102777 case ARPT_SO_GET_ENTRIES:
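
The arp_tables.c change (repeated verbatim for ip_tables.c and ip6_tables.c below) rewrites get_info() to take the length by value: the callers dereference *len exactly once, so the value validated against sizeof(struct arpt_getinfo) is provably the same value later handed to copy_to_user(). One plausible reading is that this forecloses any check-versus-use mismatch on the length and makes the flow easier to audit. A small userspace sketch of the by-value style; struct getinfo and the buffer handling are illustrative only:

/* Length fetched once, then passed by value, as in the reworked get_info(). */
#include <stdio.h>

struct getinfo { char name[32]; unsigned int size; };

static int get_info(char *user, int len)	/* by value, as in the patch */
{
	struct getinfo info = { "filter", 123 };

	if (len != (int)sizeof(info))	/* check ... */
		return -1;
	/* ... and use: guaranteed the same len, no second read of *lenp */
	return snprintf(user, len, "%s/%u", info.name, info.size) < 0 ? -1 : 0;
}

int main(void)
{
	char buf[sizeof(struct getinfo)];
	int lenp = sizeof(buf);		/* the sockopt handler's kernel copy */

	printf("%d\n", get_info(buf, lenp));	/* *lenp dereferenced exactly once */
	return 0;
}
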
102778diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
102779index 99e810f..3711b81 100644
102780--- a/net/ipv4/netfilter/ip_tables.c
102781+++ b/net/ipv4/netfilter/ip_tables.c
102782@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
102783 #endif
102784
102785 static int get_info(struct net *net, void __user *user,
102786- const int *len, int compat)
102787+ int len, int compat)
102788 {
102789 char name[XT_TABLE_MAXNAMELEN];
102790 struct xt_table *t;
102791 int ret;
102792
102793- if (*len != sizeof(struct ipt_getinfo)) {
102794- duprintf("length %u != %zu\n", *len,
102795+ if (len != sizeof(struct ipt_getinfo)) {
102796+ duprintf("length %u != %zu\n", len,
102797 sizeof(struct ipt_getinfo));
102798 return -EINVAL;
102799 }
102800@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
102801 info.size = private->size;
102802 strcpy(info.name, name);
102803
102804- if (copy_to_user(user, &info, *len) != 0)
102805+ if (copy_to_user(user, &info, len) != 0)
102806 ret = -EFAULT;
102807 else
102808 ret = 0;
102809@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102810
102811 switch (cmd) {
102812 case IPT_SO_GET_INFO:
102813- ret = get_info(sock_net(sk), user, len, 1);
102814+ ret = get_info(sock_net(sk), user, *len, 1);
102815 break;
102816 case IPT_SO_GET_ENTRIES:
102817 ret = compat_get_entries(sock_net(sk), user, len);
102818@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102819
102820 switch (cmd) {
102821 case IPT_SO_GET_INFO:
102822- ret = get_info(sock_net(sk), user, len, 0);
102823+ ret = get_info(sock_net(sk), user, *len, 0);
102824 break;
102825
102826 case IPT_SO_GET_ENTRIES:
102827diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102828index e90f83a..3e6acca 100644
102829--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
102830+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102831@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
102832 spin_lock_init(&cn->lock);
102833
102834 #ifdef CONFIG_PROC_FS
102835- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
102836+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
102837 if (!cn->procdir) {
102838 pr_err("Unable to proc dir entry\n");
102839 return -ENOMEM;
102840diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
102841index 2a3720f..d32b565 100644
102842--- a/net/ipv4/ping.c
102843+++ b/net/ipv4/ping.c
102844@@ -59,7 +59,7 @@ struct ping_table {
102845 };
102846
102847 static struct ping_table ping_table;
102848-struct pingv6_ops pingv6_ops;
102849+struct pingv6_ops *pingv6_ops;
102850 EXPORT_SYMBOL_GPL(pingv6_ops);
102851
102852 static u16 ping_port_rover;
102853@@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk)
102854 kgid_t low, high;
102855 int ret = 0;
102856
102857+ if (sk->sk_family == AF_INET6)
102858+ sk->sk_ipv6only = 1;
102859+
102860 inet_get_ping_group_range_net(net, &low, &high);
102861 if (gid_lte(low, group) && gid_lte(group, high))
102862 return 0;
102863@@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
102864 if (addr_len < sizeof(*addr))
102865 return -EINVAL;
102866
102867+ if (addr->sin_family != AF_INET &&
102868+ !(addr->sin_family == AF_UNSPEC &&
102869+ addr->sin_addr.s_addr == htonl(INADDR_ANY)))
102870+ return -EAFNOSUPPORT;
102871+
102872 pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
102873 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
102874
102875@@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
102876 return -EINVAL;
102877
102878 if (addr->sin6_family != AF_INET6)
102879- return -EINVAL;
102880+ return -EAFNOSUPPORT;
102881
102882 pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
102883 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
102884@@ -350,7 +358,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
102885 return -ENODEV;
102886 }
102887 }
102888- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
102889+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
102890 scoped);
102891 rcu_read_unlock();
102892
102893@@ -558,7 +566,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102894 }
102895 #if IS_ENABLED(CONFIG_IPV6)
102896 } else if (skb->protocol == htons(ETH_P_IPV6)) {
102897- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
102898+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
102899 #endif
102900 }
102901
102902@@ -576,7 +584,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102903 info, (u8 *)icmph);
102904 #if IS_ENABLED(CONFIG_IPV6)
102905 } else if (family == AF_INET6) {
102906- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
102907+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
102908 info, (u8 *)icmph);
102909 #endif
102910 }
102911@@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
102912 if (msg->msg_namelen < sizeof(*usin))
102913 return -EINVAL;
102914 if (usin->sin_family != AF_INET)
102915- return -EINVAL;
102916+ return -EAFNOSUPPORT;
102917 daddr = usin->sin_addr.s_addr;
102918 /* no remote port */
102919 } else {
102920@@ -911,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
102921 }
102922
102923 if (inet6_sk(sk)->rxopt.all)
102924- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
102925+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
102926 if (skb->protocol == htons(ETH_P_IPV6) &&
102927 inet6_sk(sk)->rxopt.all)
102928- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
102929+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
102930 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
102931 ip_cmsg_recv(msg, skb);
102932 #endif
102933@@ -1109,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
102934 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
102935 0, sock_i_ino(sp),
102936 atomic_read(&sp->sk_refcnt), sp,
102937- atomic_read(&sp->sk_drops));
102938+ atomic_read_unchecked(&sp->sk_drops));
102939 }
102940
102941 static int ping_v4_seq_show(struct seq_file *seq, void *v)
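
The ping.c hunk turns pingv6_ops from a writable global struct of function pointers into a single pointer; the tail of this section shows the ipv6 side supplying one fully-initialized real_pingv6_ops object instead of poking fields in at load time. The effect is to shrink the kernel's writable function-pointer surface to one word. A userspace sketch of the pattern, with register_ops() and the ops layout as illustrative stand-ins:

/* One const ops object, one writable pointer, instead of a writable struct. */
#include <stdio.h>

struct pingv6_ops_demo {
	int (*recv_error)(int err);
};

static int real_recv_error(int err) { return -err; }

static const struct pingv6_ops_demo real_ops = {
	.recv_error = real_recv_error,	/* all slots fixed at compile time */
};

static const struct pingv6_ops_demo *pingv6_ops;	/* the one writable word */

static void register_ops(const struct pingv6_ops_demo *ops) { pingv6_ops = ops; }

int main(void)
{
	register_ops(&real_ops);			/* "module init" */
	printf("%d\n", pingv6_ops->recv_error(22));	/* callers dereference */
	return 0;
}
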
102942diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
102943index 0bb68df..59405fc 100644
102944--- a/net/ipv4/raw.c
102945+++ b/net/ipv4/raw.c
102946@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
102947 int raw_rcv(struct sock *sk, struct sk_buff *skb)
102948 {
102949 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
102950- atomic_inc(&sk->sk_drops);
102951+ atomic_inc_unchecked(&sk->sk_drops);
102952 kfree_skb(skb);
102953 return NET_RX_DROP;
102954 }
102955@@ -774,16 +774,20 @@ static int raw_init(struct sock *sk)
102956
102957 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
102958 {
102959+ struct icmp_filter filter;
102960+
102961 if (optlen > sizeof(struct icmp_filter))
102962 optlen = sizeof(struct icmp_filter);
102963- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
102964+ if (copy_from_user(&filter, optval, optlen))
102965 return -EFAULT;
102966+ raw_sk(sk)->filter = filter;
102967 return 0;
102968 }
102969
102970 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
102971 {
102972 int len, ret = -EFAULT;
102973+ struct icmp_filter filter;
102974
102975 if (get_user(len, optlen))
102976 goto out;
102977@@ -793,8 +797,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
102978 if (len > sizeof(struct icmp_filter))
102979 len = sizeof(struct icmp_filter);
102980 ret = -EFAULT;
102981- if (put_user(len, optlen) ||
102982- copy_to_user(optval, &raw_sk(sk)->filter, len))
102983+ filter = raw_sk(sk)->filter;
102984+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
102985 goto out;
102986 ret = 0;
102987 out: return ret;
102988@@ -1023,7 +1027,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
102989 0, 0L, 0,
102990 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
102991 0, sock_i_ino(sp),
102992- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
102993+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
102994 }
102995
102996 static int raw_seq_show(struct seq_file *seq, void *v)
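
The raw.c hunk reworks both ICMP-filter sockopts around a stack bounce buffer: the set path stages user data in a local struct icmp_filter and commits to the socket only after copy_from_user() fully succeeds, and the get path snapshots the filter to a local and adds an explicit len bound before copying out. A sketch with memcpy standing in for the user-copy helpers:

/* Bounce-buffer pattern from raw_{set,get}icmpfilter(). */
#include <stdio.h>
#include <string.h>

struct icmp_filter_demo { unsigned int data; };

static struct icmp_filter_demo sock_filter;	/* the socket's live state */

static int seticmpfilter(const void *optval, size_t optlen)
{
	struct icmp_filter_demo filter;

	if (optlen > sizeof(filter))
		optlen = sizeof(filter);
	memcpy(&filter, optval, optlen);	/* stage on the stack first */
	sock_filter = filter;			/* commit only on full success */
	return 0;
}

static int geticmpfilter(void *optval, size_t len)
{
	struct icmp_filter_demo filter = sock_filter;	/* snapshot */

	if (len > sizeof(filter))	/* the patch's added guard */
		return -1;
	memcpy(optval, &filter, len);
	return 0;
}

int main(void)
{
	unsigned int raw = 0xdeadbeef, out = 0;

	seticmpfilter(&raw, sizeof(raw));
	geticmpfilter(&out, sizeof(out));
	printf("%#x\n", out);
	return 0;
}
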
102997diff --git a/net/ipv4/route.c b/net/ipv4/route.c
102998index 52e1f2b..e736cb4 100644
102999--- a/net/ipv4/route.c
103000+++ b/net/ipv4/route.c
103001@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
103002
103003 static int rt_cache_seq_open(struct inode *inode, struct file *file)
103004 {
103005- return seq_open(file, &rt_cache_seq_ops);
103006+ return seq_open_restrict(file, &rt_cache_seq_ops);
103007 }
103008
103009 static const struct file_operations rt_cache_seq_fops = {
103010@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
103011
103012 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
103013 {
103014- return seq_open(file, &rt_cpu_seq_ops);
103015+ return seq_open_restrict(file, &rt_cpu_seq_ops);
103016 }
103017
103018 static const struct file_operations rt_cpu_seq_fops = {
103019@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
103020
103021 static int rt_acct_proc_open(struct inode *inode, struct file *file)
103022 {
103023- return single_open(file, rt_acct_proc_show, NULL);
103024+ return single_open_restrict(file, rt_acct_proc_show, NULL);
103025 }
103026
103027 static const struct file_operations rt_acct_proc_fops = {
103028@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
103029
103030 #define IP_IDENTS_SZ 2048u
103031 struct ip_ident_bucket {
103032- atomic_t id;
103033+ atomic_unchecked_t id;
103034 u32 stamp32;
103035 };
103036
103037-static struct ip_ident_bucket *ip_idents __read_mostly;
103038+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
103039
103040 /* In order to protect privacy, we add a perturbation to identifiers
103041 * if one generator is seldom used. This makes hard for an attacker
103042@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
103043 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
103044 delta = prandom_u32_max(now - old);
103045
103046- return atomic_add_return(segs + delta, &bucket->id) - segs;
103047+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
103048 }
103049 EXPORT_SYMBOL(ip_idents_reserve);
103050
103051@@ -2628,34 +2628,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
103052 .maxlen = sizeof(int),
103053 .mode = 0200,
103054 .proc_handler = ipv4_sysctl_rtcache_flush,
103055+ .extra1 = &init_net,
103056 },
103057 { },
103058 };
103059
103060 static __net_init int sysctl_route_net_init(struct net *net)
103061 {
103062- struct ctl_table *tbl;
103063+ ctl_table_no_const *tbl = NULL;
103064
103065- tbl = ipv4_route_flush_table;
103066 if (!net_eq(net, &init_net)) {
103067- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103068+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103069 if (tbl == NULL)
103070 goto err_dup;
103071
103072 /* Don't export sysctls to unprivileged users */
103073 if (net->user_ns != &init_user_ns)
103074 tbl[0].procname = NULL;
103075- }
103076- tbl[0].extra1 = net;
103077+ tbl[0].extra1 = net;
103078+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103079+ } else
103080+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
103081
103082- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103083 if (net->ipv4.route_hdr == NULL)
103084 goto err_reg;
103085 return 0;
103086
103087 err_reg:
103088- if (tbl != ipv4_route_flush_table)
103089- kfree(tbl);
103090+ kfree(tbl);
103091 err_dup:
103092 return -ENOMEM;
103093 }
103094@@ -2678,8 +2678,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
103095
103096 static __net_init int rt_genid_init(struct net *net)
103097 {
103098- atomic_set(&net->ipv4.rt_genid, 0);
103099- atomic_set(&net->fnhe_genid, 0);
103100+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
103101+ atomic_set_unchecked(&net->fnhe_genid, 0);
103102 get_random_bytes(&net->ipv4.dev_addr_genid,
103103 sizeof(net->ipv4.dev_addr_genid));
103104 return 0;
103105@@ -2722,11 +2722,7 @@ int __init ip_rt_init(void)
103106 {
103107 int rc = 0;
103108
103109- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
103110- if (!ip_idents)
103111- panic("IP: failed to allocate ip_idents\n");
103112-
103113- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
103114+ prandom_bytes(ip_idents, sizeof(ip_idents));
103115
103116 #ifdef CONFIG_IP_ROUTE_CLASSID
103117 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
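
Besides the sysctl and genid changes, the route.c hunk replaces the boot-time kmalloc() of the 2048-bucket IP ID table with a fixed static array: the "failed to allocate ip_idents" panic path disappears and sizeof(ip_idents) now describes the whole table directly in the prandom_bytes() seeding call. A sketch of the before/after shape, with rand() standing in for prandom_bytes():

/* Static table instead of boot-time allocation, as in the ip_idents change. */
#include <stdio.h>
#include <stdlib.h>

#define IP_IDENTS_SZ 2048u

struct ip_ident_bucket { unsigned int id; unsigned int stamp32; };

static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ];	/* no kmalloc, no panic */

int main(void)
{
	unsigned char *p = (unsigned char *)ip_idents;
	size_t i;

	for (i = 0; i < sizeof(ip_idents); i++)	/* prandom_bytes(ip_idents, sizeof(ip_idents)) */
		p[i] = (unsigned char)rand();
	printf("seeded %zu bytes\n", sizeof(ip_idents));
	return 0;
}
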
103118diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
103119index e0ee384..e2688d9 100644
103120--- a/net/ipv4/sysctl_net_ipv4.c
103121+++ b/net/ipv4/sysctl_net_ipv4.c
103122@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
103123 container_of(table->data, struct net, ipv4.ip_local_ports.range);
103124 int ret;
103125 int range[2];
103126- struct ctl_table tmp = {
103127+ ctl_table_no_const tmp = {
103128 .data = &range,
103129 .maxlen = sizeof(range),
103130 .mode = table->mode,
103131@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
103132 int ret;
103133 gid_t urange[2];
103134 kgid_t low, high;
103135- struct ctl_table tmp = {
103136+ ctl_table_no_const tmp = {
103137 .data = &urange,
103138 .maxlen = sizeof(urange),
103139 .mode = table->mode,
103140@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
103141 void __user *buffer, size_t *lenp, loff_t *ppos)
103142 {
103143 char val[TCP_CA_NAME_MAX];
103144- struct ctl_table tbl = {
103145+ ctl_table_no_const tbl = {
103146 .data = val,
103147 .maxlen = TCP_CA_NAME_MAX,
103148 };
103149@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
103150 void __user *buffer, size_t *lenp,
103151 loff_t *ppos)
103152 {
103153- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
103154+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
103155 int ret;
103156
103157 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103158@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
103159 void __user *buffer, size_t *lenp,
103160 loff_t *ppos)
103161 {
103162- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
103163+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
103164 int ret;
103165
103166 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103167@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
103168 void __user *buffer, size_t *lenp,
103169 loff_t *ppos)
103170 {
103171- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103172+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103173 struct tcp_fastopen_context *ctxt;
103174 int ret;
103175 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
103176@@ -881,13 +881,12 @@ static struct ctl_table ipv4_net_table[] = {
103177
103178 static __net_init int ipv4_sysctl_init_net(struct net *net)
103179 {
103180- struct ctl_table *table;
103181+ ctl_table_no_const *table = NULL;
103182
103183- table = ipv4_net_table;
103184 if (!net_eq(net, &init_net)) {
103185 int i;
103186
103187- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
103188+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
103189 if (table == NULL)
103190 goto err_alloc;
103191
103192@@ -896,7 +895,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
103193 table[i].data += (void *)net - (void *)&init_net;
103194 }
103195
103196- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103197+ if (!net_eq(net, &init_net))
103198+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103199+ else
103200+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
103201 if (net->ipv4.ipv4_hdr == NULL)
103202 goto err_reg;
103203
103204diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
103205index 075ab4d..623bb9d 100644
103206--- a/net/ipv4/tcp_input.c
103207+++ b/net/ipv4/tcp_input.c
103208@@ -766,7 +766,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
103209 * without any lock. We want to make sure compiler wont store
103210 * intermediate values in this location.
103211 */
103212- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
103213+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
103214 sk->sk_max_pacing_rate);
103215 }
103216
103217@@ -4528,7 +4528,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
103218 * simplifies code)
103219 */
103220 static void
103221-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103222+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103223 struct sk_buff *head, struct sk_buff *tail,
103224 u32 start, u32 end)
103225 {
103226@@ -5506,6 +5506,7 @@ discard:
103227 tcp_paws_reject(&tp->rx_opt, 0))
103228 goto discard_and_undo;
103229
103230+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
103231 if (th->syn) {
103232 /* We see SYN without ACK. It is attempt of
103233 * simultaneous connect with crossed SYNs.
103234@@ -5556,6 +5557,7 @@ discard:
103235 goto discard;
103236 #endif
103237 }
103238+#endif
103239 /* "fifth, if neither of the SYN or RST bits is set then
103240 * drop the segment and return."
103241 */
103242@@ -5602,7 +5604,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
103243 goto discard;
103244
103245 if (th->syn) {
103246- if (th->fin)
103247+ if (th->fin || th->urg || th->psh)
103248 goto discard;
103249 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
103250 return 1;
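
Alongside compiling out simultaneous-open handling under GRKERNSEC_NO_SIMULT_CONNECT, the tcp_input.c hunk widens the SYN sanity check in tcp_rcv_state_process(): a listener now drops a SYN that also carries FIN, URG or PSH instead of passing it to conn_request(), on the grounds that no legitimate open sets those bits together with SYN. A standalone sketch with the tcphdr bitfields mocked:

/* The widened SYN test from tcp_rcv_state_process(). */
#include <stdbool.h>
#include <stdio.h>

struct tcphdr_demo { unsigned syn:1, fin:1, urg:1, psh:1; };

static bool syn_acceptable(const struct tcphdr_demo *th)
{
	if (!th->syn)
		return false;
	return !(th->fin || th->urg || th->psh);	/* was: !th->fin only */
}

int main(void)
{
	struct tcphdr_demo clean = { .syn = 1 };
	struct tcphdr_demo odd   = { .syn = 1, .psh = 1 };

	printf("%d %d\n", syn_acceptable(&clean), syn_acceptable(&odd));
	return 0;
}
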
103251diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
103252index d22f544..62f6787 100644
103253--- a/net/ipv4/tcp_ipv4.c
103254+++ b/net/ipv4/tcp_ipv4.c
103255@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
103256 int sysctl_tcp_low_latency __read_mostly;
103257 EXPORT_SYMBOL(sysctl_tcp_low_latency);
103258
103259+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103260+extern int grsec_enable_blackhole;
103261+#endif
103262+
103263 #ifdef CONFIG_TCP_MD5SIG
103264 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
103265 __be32 daddr, __be32 saddr, const struct tcphdr *th);
103266@@ -1473,6 +1477,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
103267 return 0;
103268
103269 reset:
103270+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103271+ if (!grsec_enable_blackhole)
103272+#endif
103273 tcp_v4_send_reset(rsk, skb);
103274 discard:
103275 kfree_skb(skb);
103276@@ -1637,12 +1644,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
103277 TCP_SKB_CB(skb)->sacked = 0;
103278
103279 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
103280- if (!sk)
103281+ if (!sk) {
103282+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103283+ ret = 1;
103284+#endif
103285 goto no_tcp_socket;
103286-
103287+ }
103288 process:
103289- if (sk->sk_state == TCP_TIME_WAIT)
103290+ if (sk->sk_state == TCP_TIME_WAIT) {
103291+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103292+ ret = 2;
103293+#endif
103294 goto do_time_wait;
103295+ }
103296
103297 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
103298 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
103299@@ -1698,6 +1712,10 @@ csum_error:
103300 bad_packet:
103301 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
103302 } else {
103303+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103304+ if (!grsec_enable_blackhole || (ret == 1 &&
103305+ (skb->dev->flags & IFF_LOOPBACK)))
103306+#endif
103307 tcp_v4_send_reset(NULL, skb);
103308 }
103309
103310diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
103311index 63d2680..2db9d6b 100644
103312--- a/net/ipv4/tcp_minisocks.c
103313+++ b/net/ipv4/tcp_minisocks.c
103314@@ -27,6 +27,10 @@
103315 #include <net/inet_common.h>
103316 #include <net/xfrm.h>
103317
103318+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103319+extern int grsec_enable_blackhole;
103320+#endif
103321+
103322 int sysctl_tcp_syncookies __read_mostly = 1;
103323 EXPORT_SYMBOL(sysctl_tcp_syncookies);
103324
103325@@ -739,7 +743,10 @@ embryonic_reset:
103326 * avoid becoming vulnerable to outside attack aiming at
103327 * resetting legit local connections.
103328 */
103329- req->rsk_ops->send_reset(sk, skb);
103330+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103331+ if (!grsec_enable_blackhole)
103332+#endif
103333+ req->rsk_ops->send_reset(sk, skb);
103334 } else if (fastopen) { /* received a valid RST pkt */
103335 reqsk_fastopen_remove(sk, req, true);
103336 tcp_reset(sk);
103337diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
103338index ebf5ff5..4d1ff32 100644
103339--- a/net/ipv4/tcp_probe.c
103340+++ b/net/ipv4/tcp_probe.c
103341@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
103342 if (cnt + width >= len)
103343 break;
103344
103345- if (copy_to_user(buf + cnt, tbuf, width))
103346+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
103347 return -EFAULT;
103348 cnt += width;
103349 }
103350diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
103351index 1829c7f..c0b3d52 100644
103352--- a/net/ipv4/tcp_timer.c
103353+++ b/net/ipv4/tcp_timer.c
103354@@ -22,6 +22,10 @@
103355 #include <linux/gfp.h>
103356 #include <net/tcp.h>
103357
103358+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103359+extern int grsec_lastack_retries;
103360+#endif
103361+
103362 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
103363 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
103364 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
103365@@ -191,6 +195,13 @@ static int tcp_write_timeout(struct sock *sk)
103366 }
103367 }
103368
103369+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103370+ if ((sk->sk_state == TCP_LAST_ACK) &&
103371+ (grsec_lastack_retries > 0) &&
103372+ (grsec_lastack_retries < retry_until))
103373+ retry_until = grsec_lastack_retries;
103374+#endif
103375+
103376 if (retransmits_timed_out(sk, retry_until,
103377 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
103378 /* Has it gone just too far? */
103379diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
103380index 13b4dcf..b866a2a 100644
103381--- a/net/ipv4/udp.c
103382+++ b/net/ipv4/udp.c
103383@@ -87,6 +87,7 @@
103384 #include <linux/types.h>
103385 #include <linux/fcntl.h>
103386 #include <linux/module.h>
103387+#include <linux/security.h>
103388 #include <linux/socket.h>
103389 #include <linux/sockios.h>
103390 #include <linux/igmp.h>
103391@@ -114,6 +115,10 @@
103392 #include <net/busy_poll.h>
103393 #include "udp_impl.h"
103394
103395+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103396+extern int grsec_enable_blackhole;
103397+#endif
103398+
103399 struct udp_table udp_table __read_mostly;
103400 EXPORT_SYMBOL(udp_table);
103401
103402@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
103403 return true;
103404 }
103405
103406+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
103407+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
103408+
103409 /*
103410 * This routine is called by the ICMP module when it gets some
103411 * sort of error condition. If err < 0 then the socket should
103412@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103413 dport = usin->sin_port;
103414 if (dport == 0)
103415 return -EINVAL;
103416+
103417+ err = gr_search_udp_sendmsg(sk, usin);
103418+ if (err)
103419+ return err;
103420 } else {
103421 if (sk->sk_state != TCP_ESTABLISHED)
103422 return -EDESTADDRREQ;
103423+
103424+ err = gr_search_udp_sendmsg(sk, NULL);
103425+ if (err)
103426+ return err;
103427+
103428 daddr = inet->inet_daddr;
103429 dport = inet->inet_dport;
103430 /* Open fast path for connected socket.
103431@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
103432 IS_UDPLITE(sk));
103433 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103434 IS_UDPLITE(sk));
103435- atomic_inc(&sk->sk_drops);
103436+ atomic_inc_unchecked(&sk->sk_drops);
103437 __skb_unlink(skb, rcvq);
103438 __skb_queue_tail(&list_kill, skb);
103439 }
103440@@ -1275,6 +1292,10 @@ try_again:
103441 if (!skb)
103442 goto out;
103443
103444+ err = gr_search_udp_recvmsg(sk, skb);
103445+ if (err)
103446+ goto out_free;
103447+
103448 ulen = skb->len - sizeof(struct udphdr);
103449 copied = len;
103450 if (copied > ulen)
103451@@ -1307,7 +1328,7 @@ try_again:
103452 if (unlikely(err)) {
103453 trace_kfree_skb(skb, udp_recvmsg);
103454 if (!peeked) {
103455- atomic_inc(&sk->sk_drops);
103456+ atomic_inc_unchecked(&sk->sk_drops);
103457 UDP_INC_STATS_USER(sock_net(sk),
103458 UDP_MIB_INERRORS, is_udplite);
103459 }
103460@@ -1605,7 +1626,7 @@ csum_error:
103461 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
103462 drop:
103463 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
103464- atomic_inc(&sk->sk_drops);
103465+ atomic_inc_unchecked(&sk->sk_drops);
103466 kfree_skb(skb);
103467 return -1;
103468 }
103469@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
103470 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
103471
103472 if (!skb1) {
103473- atomic_inc(&sk->sk_drops);
103474+ atomic_inc_unchecked(&sk->sk_drops);
103475 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
103476 IS_UDPLITE(sk));
103477 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103478@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
103479 goto csum_error;
103480
103481 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
103482+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103483+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
103484+#endif
103485 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
103486
103487 /*
103488@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
103489 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103490 0, sock_i_ino(sp),
103491 atomic_read(&sp->sk_refcnt), sp,
103492- atomic_read(&sp->sk_drops));
103493+ atomic_read_unchecked(&sp->sk_drops));
103494 }
103495
103496 int udp4_seq_show(struct seq_file *seq, void *v)
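
The udp.c hunk declares gr_search_udp_sendmsg()/gr_search_udp_recvmsg() extern and calls them before the normal send/receive paths run; the hooks themselves are implemented elsewhere in the grsecurity patch (its RBAC code). The placement means a policy check sees the socket and destination (or received skb) first and can veto the operation with an errno, covering connected UDP as well as the sendto() case. A sketch of the gating pattern only; the policy body below is invented for illustration:

/* Hook-first gating, as in the reworked udp_sendmsg(). */
#include <errno.h>
#include <stdio.h>

struct sockaddr_in_demo { unsigned short port; };

static int gr_search_udp_sendmsg(const struct sockaddr_in_demo *usin)
{
	/* illustrative policy: block privileged destination ports */
	if (usin && usin->port < 1024)
		return -EPERM;
	return 0;
}

static int udp_sendmsg(const struct sockaddr_in_demo *usin)
{
	int err = gr_search_udp_sendmsg(usin);	/* hook runs before the fast path */

	if (err)
		return err;
	return 0;				/* ... normal send path ... */
}

int main(void)
{
	struct sockaddr_in_demo ntp = { 123 }, high = { 40000 };

	printf("%d %d\n", udp_sendmsg(&ntp), udp_sendmsg(&high));
	return 0;
}
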
103497diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
103498index 6156f68..d6ab46d 100644
103499--- a/net/ipv4/xfrm4_policy.c
103500+++ b/net/ipv4/xfrm4_policy.c
103501@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
103502 fl4->flowi4_tos = iph->tos;
103503 }
103504
103505-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
103506+static int xfrm4_garbage_collect(struct dst_ops *ops)
103507 {
103508 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
103509
103510- xfrm4_policy_afinfo.garbage_collect(net);
103511+ xfrm_garbage_collect_deferred(net);
103512 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
103513 }
103514
103515@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
103516
103517 static int __net_init xfrm4_net_init(struct net *net)
103518 {
103519- struct ctl_table *table;
103520+ ctl_table_no_const *table = NULL;
103521 struct ctl_table_header *hdr;
103522
103523- table = xfrm4_policy_table;
103524 if (!net_eq(net, &init_net)) {
103525- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103526+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103527 if (!table)
103528 goto err_alloc;
103529
103530 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
103531- }
103532-
103533- hdr = register_net_sysctl(net, "net/ipv4", table);
103534+ hdr = register_net_sysctl(net, "net/ipv4", table);
103535+ } else
103536+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
103537 if (!hdr)
103538 goto err_reg;
103539
103540@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
103541 return 0;
103542
103543 err_reg:
103544- if (!net_eq(net, &init_net))
103545- kfree(table);
103546+ kfree(table);
103547 err_alloc:
103548 return -ENOMEM;
103549 }
103550diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
103551index f7c8bbe..534fa31 100644
103552--- a/net/ipv6/addrconf.c
103553+++ b/net/ipv6/addrconf.c
103554@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
103555 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103556 .mtu6 = IPV6_MIN_MTU,
103557 .accept_ra = 1,
103558- .accept_redirects = 1,
103559+ .accept_redirects = 0,
103560 .autoconf = 1,
103561 .force_mld_version = 0,
103562 .mldv1_unsolicited_report_interval = 10 * HZ,
103563@@ -208,7 +208,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
103564 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103565 .mtu6 = IPV6_MIN_MTU,
103566 .accept_ra = 1,
103567- .accept_redirects = 1,
103568+ .accept_redirects = 0,
103569 .autoconf = 1,
103570 .force_mld_version = 0,
103571 .mldv1_unsolicited_report_interval = 10 * HZ,
103572@@ -604,7 +604,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
103573 idx = 0;
103574 head = &net->dev_index_head[h];
103575 rcu_read_lock();
103576- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
103577+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
103578 net->dev_base_seq;
103579 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103580 if (idx < s_idx)
103581@@ -2420,7 +2420,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
103582 p.iph.ihl = 5;
103583 p.iph.protocol = IPPROTO_IPV6;
103584 p.iph.ttl = 64;
103585- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
103586+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
103587
103588 if (ops->ndo_do_ioctl) {
103589 mm_segment_t oldfs = get_fs();
103590@@ -3569,16 +3569,23 @@ static const struct file_operations if6_fops = {
103591 .release = seq_release_net,
103592 };
103593
103594+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
103595+extern void unregister_ipv6_seq_ops_addr(void);
103596+
103597 static int __net_init if6_proc_net_init(struct net *net)
103598 {
103599- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
103600+ register_ipv6_seq_ops_addr(&if6_seq_ops);
103601+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
103602+ unregister_ipv6_seq_ops_addr();
103603 return -ENOMEM;
103604+ }
103605 return 0;
103606 }
103607
103608 static void __net_exit if6_proc_net_exit(struct net *net)
103609 {
103610 remove_proc_entry("if_inet6", net->proc_net);
103611+ unregister_ipv6_seq_ops_addr();
103612 }
103613
103614 static struct pernet_operations if6_proc_net_ops = {
103615@@ -4194,7 +4201,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
103616 s_ip_idx = ip_idx = cb->args[2];
103617
103618 rcu_read_lock();
103619- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103620+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103621 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
103622 idx = 0;
103623 head = &net->dev_index_head[h];
103624@@ -4572,6 +4579,22 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
103625 return 0;
103626 }
103627
103628+static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
103629+ [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
103630+ [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
103631+};
103632+
103633+static int inet6_validate_link_af(const struct net_device *dev,
103634+ const struct nlattr *nla)
103635+{
103636+ struct nlattr *tb[IFLA_INET6_MAX + 1];
103637+
103638+ if (dev && !__in6_dev_get(dev))
103639+ return -EAFNOSUPPORT;
103640+
103641+ return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy);
103642+}
103643+
103644 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
103645 {
103646 int err = -EINVAL;
103647@@ -4824,7 +4847,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103648 rt_genid_bump_ipv6(net);
103649 break;
103650 }
103651- atomic_inc(&net->ipv6.dev_addr_genid);
103652+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
103653 }
103654
103655 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103656@@ -4844,7 +4867,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
103657 int *valp = ctl->data;
103658 int val = *valp;
103659 loff_t pos = *ppos;
103660- struct ctl_table lctl;
103661+ ctl_table_no_const lctl;
103662 int ret;
103663
103664 /*
103665@@ -4929,7 +4952,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
103666 int *valp = ctl->data;
103667 int val = *valp;
103668 loff_t pos = *ppos;
103669- struct ctl_table lctl;
103670+ ctl_table_no_const lctl;
103671 int ret;
103672
103673 /*
103674@@ -5393,6 +5416,7 @@ static struct rtnl_af_ops inet6_ops = {
103675 .family = AF_INET6,
103676 .fill_link_af = inet6_fill_link_af,
103677 .get_link_af_size = inet6_get_link_af_size,
103678+ .validate_link_af = inet6_validate_link_af,
103679 .set_link_af = inet6_set_link_af,
103680 };
103681
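
Among the addrconf.c changes is a new validate_link_af hook for AF_INET6: IFLA_INET6 attributes are parsed against a declared nla_policy (type and length per attribute) in a side-effect-free pass, and set_link_af only runs on input that already validated and on a device that actually has an inet6 dev. A miniature sketch of policy-driven validation; the attr/policy structs below are simplified stand-ins for nlattr and nla_policy:

/* Validate-before-mutate, mirroring inet6_validate_link_af(). */
#include <stdio.h>

enum { IFLA_INET6_ADDR_GEN_MODE, IFLA_INET6_TOKEN, IFLA_INET6_MAX };

struct attr   { int type; size_t len; };
struct policy { size_t exact_len; };	/* 0 means "any length" */

static const struct policy inet6_af_policy[IFLA_INET6_MAX] = {
	[IFLA_INET6_ADDR_GEN_MODE] = { .exact_len = 1 },	/* NLA_U8 */
	[IFLA_INET6_TOKEN]         = { .exact_len = 16 },	/* sizeof(struct in6_addr) */
};

static int validate_link_af(const struct attr *attrs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (attrs[i].type >= IFLA_INET6_MAX)
			return -1;
		if (inet6_af_policy[attrs[i].type].exact_len &&
		    attrs[i].len != inet6_af_policy[attrs[i].type].exact_len)
			return -1;	/* reject before any state changes */
	}
	return 0;
}

int main(void)
{
	struct attr ok  = { IFLA_INET6_TOKEN, 16 };
	struct attr bad = { IFLA_INET6_TOKEN, 4 };

	printf("%d %d\n", validate_link_af(&ok, 1), validate_link_af(&bad, 1));
	return 0;
}
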
103682diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
103683index e8c4400..a4cd5da 100644
103684--- a/net/ipv6/af_inet6.c
103685+++ b/net/ipv6/af_inet6.c
103686@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
103687 net->ipv6.sysctl.icmpv6_time = 1*HZ;
103688 net->ipv6.sysctl.flowlabel_consistency = 1;
103689 net->ipv6.sysctl.auto_flowlabels = 0;
103690- atomic_set(&net->ipv6.fib6_sernum, 1);
103691+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
103692
103693 err = ipv6_init_mibs(net);
103694 if (err)
103695diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
103696index 49f5e73..ae02d54 100644
103697--- a/net/ipv6/datagram.c
103698+++ b/net/ipv6/datagram.c
103699@@ -941,5 +941,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
103700 0,
103701 sock_i_ino(sp),
103702 atomic_read(&sp->sk_refcnt), sp,
103703- atomic_read(&sp->sk_drops));
103704+ atomic_read_unchecked(&sp->sk_drops));
103705 }
103706diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
103707index d674152..fb5a01d 100644
103708--- a/net/ipv6/icmp.c
103709+++ b/net/ipv6/icmp.c
103710@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
103711
103712 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
103713 {
103714- struct ctl_table *table;
103715+ ctl_table_no_const *table;
103716
103717 table = kmemdup(ipv6_icmp_table_template,
103718 sizeof(ipv6_icmp_table_template),
103719diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
103720index f1c6d5e..faabef6 100644
103721--- a/net/ipv6/ip6_fib.c
103722+++ b/net/ipv6/ip6_fib.c
103723@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
103724 int new, old;
103725
103726 do {
103727- old = atomic_read(&net->ipv6.fib6_sernum);
103728+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
103729 new = old < INT_MAX ? old + 1 : 1;
103730- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
103731+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
103732 old, new) != old);
103733 return new;
103734 }
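
The ip6_fib.c hunk is a good illustration of where the unchecked atomics are genuinely required: fib6_new_sernum() is a lock-free compare-and-swap increment whose wraparound is deliberate (INT_MAX rolls over to 1, never to 0 or a negative value), so the PAX_REFCOUNT overflow trap must not fire on it. A sketch of the loop with C11 atomics modeling atomic_read_unchecked()/atomic_cmpxchg_unchecked():

/* Lock-free sequence number with intentional wrap-to-1, as in fib6_new_sernum(). */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int fib6_sernum = 1;

static int fib6_new_sernum(void)
{
	int old, new;

	do {
		old = atomic_load(&fib6_sernum);
		new = old < INT_MAX ? old + 1 : 1;	/* wrap to 1 by design */
	} while (!atomic_compare_exchange_weak(&fib6_sernum, &old, new));
	return new;
}

int main(void)
{
	printf("%d %d\n", fib6_new_sernum(), fib6_new_sernum());
	return 0;
}
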
103735diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
103736index 01ccc28..66861c7 100644
103737--- a/net/ipv6/ip6_gre.c
103738+++ b/net/ipv6/ip6_gre.c
103739@@ -71,8 +71,8 @@ struct ip6gre_net {
103740 struct net_device *fb_tunnel_dev;
103741 };
103742
103743-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
103744-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
103745+static struct rtnl_link_ops ip6gre_link_ops;
103746+static struct rtnl_link_ops ip6gre_tap_ops;
103747 static int ip6gre_tunnel_init(struct net_device *dev);
103748 static void ip6gre_tunnel_setup(struct net_device *dev);
103749 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
103750@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
103751 }
103752
103753
103754-static struct inet6_protocol ip6gre_protocol __read_mostly = {
103755+static struct inet6_protocol ip6gre_protocol = {
103756 .handler = ip6gre_rcv,
103757 .err_handler = ip6gre_err,
103758 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
103759@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
103760 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
103761 };
103762
103763-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103764+static struct rtnl_link_ops ip6gre_link_ops = {
103765 .kind = "ip6gre",
103766 .maxtype = IFLA_GRE_MAX,
103767 .policy = ip6gre_policy,
103768@@ -1664,7 +1664,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103769 .fill_info = ip6gre_fill_info,
103770 };
103771
103772-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
103773+static struct rtnl_link_ops ip6gre_tap_ops = {
103774 .kind = "ip6gretap",
103775 .maxtype = IFLA_GRE_MAX,
103776 .policy = ip6gre_policy,
103777diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
103778index 92b3da5..77837b8 100644
103779--- a/net/ipv6/ip6_tunnel.c
103780+++ b/net/ipv6/ip6_tunnel.c
103781@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103782
103783 static int ip6_tnl_dev_init(struct net_device *dev);
103784 static void ip6_tnl_dev_setup(struct net_device *dev);
103785-static struct rtnl_link_ops ip6_link_ops __read_mostly;
103786+static struct rtnl_link_ops ip6_link_ops;
103787
103788 static int ip6_tnl_net_id __read_mostly;
103789 struct ip6_tnl_net {
103790@@ -1771,7 +1771,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
103791 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
103792 };
103793
103794-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
103795+static struct rtnl_link_ops ip6_link_ops = {
103796 .kind = "ip6tnl",
103797 .maxtype = IFLA_IPTUN_MAX,
103798 .policy = ip6_tnl_policy,
103799diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
103800index ace10d0..97a8b49 100644
103801--- a/net/ipv6/ip6_vti.c
103802+++ b/net/ipv6/ip6_vti.c
103803@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103804
103805 static int vti6_dev_init(struct net_device *dev);
103806 static void vti6_dev_setup(struct net_device *dev);
103807-static struct rtnl_link_ops vti6_link_ops __read_mostly;
103808+static struct rtnl_link_ops vti6_link_ops;
103809
103810 static int vti6_net_id __read_mostly;
103811 struct vti6_net {
103812@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
103813 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
103814 };
103815
103816-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
103817+static struct rtnl_link_ops vti6_link_ops = {
103818 .kind = "vti6",
103819 .maxtype = IFLA_VTI_MAX,
103820 .policy = vti6_policy,
103821diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
103822index 66980d8d..8aef0d1 100644
103823--- a/net/ipv6/ipv6_sockglue.c
103824+++ b/net/ipv6/ipv6_sockglue.c
103825@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
103826 if (sk->sk_type != SOCK_STREAM)
103827 return -ENOPROTOOPT;
103828
103829- msg.msg_control = optval;
103830+ msg.msg_control = (void __force_kernel *)optval;
103831 msg.msg_controllen = len;
103832 msg.msg_flags = flags;
103833
103834diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
103835index e080fbb..412b3cf 100644
103836--- a/net/ipv6/netfilter/ip6_tables.c
103837+++ b/net/ipv6/netfilter/ip6_tables.c
103838@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
103839 #endif
103840
103841 static int get_info(struct net *net, void __user *user,
103842- const int *len, int compat)
103843+ int len, int compat)
103844 {
103845 char name[XT_TABLE_MAXNAMELEN];
103846 struct xt_table *t;
103847 int ret;
103848
103849- if (*len != sizeof(struct ip6t_getinfo)) {
103850- duprintf("length %u != %zu\n", *len,
103851+ if (len != sizeof(struct ip6t_getinfo)) {
103852+ duprintf("length %u != %zu\n", len,
103853 sizeof(struct ip6t_getinfo));
103854 return -EINVAL;
103855 }
103856@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
103857 info.size = private->size;
103858 strcpy(info.name, name);
103859
103860- if (copy_to_user(user, &info, *len) != 0)
103861+ if (copy_to_user(user, &info, len) != 0)
103862 ret = -EFAULT;
103863 else
103864 ret = 0;
103865@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103866
103867 switch (cmd) {
103868 case IP6T_SO_GET_INFO:
103869- ret = get_info(sock_net(sk), user, len, 1);
103870+ ret = get_info(sock_net(sk), user, *len, 1);
103871 break;
103872 case IP6T_SO_GET_ENTRIES:
103873 ret = compat_get_entries(sock_net(sk), user, len);
103874@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103875
103876 switch (cmd) {
103877 case IP6T_SO_GET_INFO:
103878- ret = get_info(sock_net(sk), user, len, 0);
103879+ ret = get_info(sock_net(sk), user, *len, 0);
103880 break;
103881
103882 case IP6T_SO_GET_ENTRIES:
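
The ip6_tables hunks above change get_info() to take the length by value: both callers have already fetched *len, so the helper now operates on a copy that cannot change after it has been validated. A minimal editorial sketch of that idiom, with invented names (fill_info, user_len_ptr), outside the kernel:

/* Editorial sketch, not from the patch: fetch a caller-supplied length
 * once, then pass it by value so every later use sees the checked copy. */
#include <errno.h>
#include <string.h>

struct info { char name[32]; unsigned int size; };

static int fill_info(struct info *dst, size_t len) /* len is immutable here */
{
	if (len != sizeof(*dst))
		return -EINVAL;
	memset(dst, 0, len);	/* uses exactly the length that was checked */
	return 0;
}

static int handle_get_info(struct info *out, const size_t *user_len_ptr)
{
	size_t len = *user_len_ptr;	/* single fetch */
	return fill_info(out, len);	/* by value: no double fetch, no aliasing */
}
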
103883diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
103884index 6f187c8..34b367f 100644
103885--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
103886+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
103887@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
103888
103889 static int nf_ct_frag6_sysctl_register(struct net *net)
103890 {
103891- struct ctl_table *table;
103892+ ctl_table_no_const *table = NULL;
103893 struct ctl_table_header *hdr;
103894
103895- table = nf_ct_frag6_sysctl_table;
103896 if (!net_eq(net, &init_net)) {
103897- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
103898+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
103899 GFP_KERNEL);
103900 if (table == NULL)
103901 goto err_alloc;
103902@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103903 table[2].data = &net->nf_frag.frags.high_thresh;
103904 table[2].extra1 = &net->nf_frag.frags.low_thresh;
103905 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
103906- }
103907-
103908- hdr = register_net_sysctl(net, "net/netfilter", table);
103909+ hdr = register_net_sysctl(net, "net/netfilter", table);
103910+ } else
103911+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
103912 if (hdr == NULL)
103913 goto err_reg;
103914
103915@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103916 return 0;
103917
103918 err_reg:
103919- if (!net_eq(net, &init_net))
103920- kfree(table);
103921+ kfree(table);
103922 err_alloc:
103923 return -ENOMEM;
103924 }
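
This nf_conntrack_reasm hunk is one instance of a pattern repeated throughout the patch (see also reassembly.c and xfrm6_policy.c below): the const sysctl template is registered directly for init_net, and only other namespaces get a writable kmemdup() copy typed as grsecurity's ctl_table_no_const; since kfree(NULL) is a no-op, the error path can free unconditionally. A distilled sketch with placeholder names (example_*, template_table), not the patched function itself:

/* Editorial sketch of the pattern above. */
static int example_sysctl_register(struct net *net)
{
	ctl_table_no_const *table = NULL;	/* writable copy, if any */
	struct ctl_table_header *hdr;

	if (!net_eq(net, &init_net)) {
		table = kmemdup(template_table, sizeof(template_table),
				GFP_KERNEL);
		if (table == NULL)
			return -ENOMEM;
		table[0].data = &net->example_value;	/* per-netns target */
		hdr = register_net_sysctl(net, "net/example", table);
	} else
		hdr = register_net_sysctl(net, "net/example", template_table);
	if (hdr == NULL) {
		kfree(table);	/* NULL in the init_net branch: harmless */
		return -ENOMEM;
	}
	net->example_hdr = hdr;
	return 0;
}
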
103925diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
103926index 2d31483..47aba96 100644
103927--- a/net/ipv6/ping.c
103928+++ b/net/ipv6/ping.c
103929@@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103930
103931 if (msg->msg_name) {
103932 DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name);
103933- if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
103934- u->sin6_family != AF_INET6) {
103935+ if (msg->msg_namelen < sizeof(*u))
103936 return -EINVAL;
103937+ if (u->sin6_family != AF_INET6) {
103938+ return -EAFNOSUPPORT;
103939 }
103940 if (sk->sk_bound_dev_if &&
103941 sk->sk_bound_dev_if != u->sin6_scope_id) {
103942@@ -241,6 +242,24 @@ static struct pernet_operations ping_v6_net_ops = {
103943 };
103944 #endif
103945
103946+static struct pingv6_ops real_pingv6_ops = {
103947+ .ipv6_recv_error = ipv6_recv_error,
103948+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
103949+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
103950+ .icmpv6_err_convert = icmpv6_err_convert,
103951+ .ipv6_icmp_error = ipv6_icmp_error,
103952+ .ipv6_chk_addr = ipv6_chk_addr,
103953+};
103954+
103955+static struct pingv6_ops dummy_pingv6_ops = {
103956+ .ipv6_recv_error = dummy_ipv6_recv_error,
103957+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
103958+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
103959+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
103960+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
103961+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
103962+};
103963+
103964 int __init pingv6_init(void)
103965 {
103966 #ifdef CONFIG_PROC_FS
103967@@ -248,13 +267,7 @@ int __init pingv6_init(void)
103968 if (ret)
103969 return ret;
103970 #endif
103971- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
103972- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
103973- pingv6_ops.ip6_datagram_recv_specific_ctl =
103974- ip6_datagram_recv_specific_ctl;
103975- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
103976- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
103977- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
103978+ pingv6_ops = &real_pingv6_ops;
103979 return inet6_register_protosw(&pingv6_protosw);
103980 }
103981
103982@@ -263,14 +276,9 @@ int __init pingv6_init(void)
103983 */
103984 void pingv6_exit(void)
103985 {
103986- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
103987- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
103988- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
103989- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
103990- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
103991- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
103992 #ifdef CONFIG_PROC_FS
103993 unregister_pernet_subsys(&ping_v6_net_ops);
103994 #endif
103995+ pingv6_ops = &dummy_pingv6_ops;
103996 inet6_unregister_protosw(&pingv6_protosw);
103997 }
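
The ping.c hunk replaces field-by-field rewiring of pingv6_ops at init/exit with two fully initialized ops tables and a single pointer swap; under grsecurity's constification the tables themselves can then live in read-only memory. A self-contained sketch of the same idea (all names invented):

/* Editorial sketch: swap one pointer between complete, const ops tables
 * instead of mutating individual function pointers in a writable one. */
struct example_ops {
	int (*recv_error)(void);
	int (*chk_addr)(void);
};

static int real_recv_error(void)  { return 0; }
static int real_chk_addr(void)    { return 1; }
static int dummy_recv_error(void) { return -1; }	/* proto not loaded */
static int dummy_chk_addr(void)   { return 0; }

static const struct example_ops real_ops = {
	.recv_error = real_recv_error,
	.chk_addr   = real_chk_addr,
};
static const struct example_ops dummy_ops = {
	.recv_error = dummy_recv_error,
	.chk_addr   = dummy_chk_addr,
};

static const struct example_ops *cur_ops = &dummy_ops;

void example_init(void) { cur_ops = &real_ops; }	/* module load */
void example_exit(void) { cur_ops = &dummy_ops; }	/* module unload */
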
103998diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
103999index 679253d0..70b653c 100644
104000--- a/net/ipv6/proc.c
104001+++ b/net/ipv6/proc.c
104002@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
104003 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
104004 goto proc_snmp6_fail;
104005
104006- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
104007+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
104008 if (!net->mib.proc_net_devsnmp6)
104009 goto proc_dev_snmp6_fail;
104010 return 0;
104011diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
104012index ee25631..3c3ac5d 100644
104013--- a/net/ipv6/raw.c
104014+++ b/net/ipv6/raw.c
104015@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
104016 {
104017 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
104018 skb_checksum_complete(skb)) {
104019- atomic_inc(&sk->sk_drops);
104020+ atomic_inc_unchecked(&sk->sk_drops);
104021 kfree_skb(skb);
104022 return NET_RX_DROP;
104023 }
104024@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
104025 struct raw6_sock *rp = raw6_sk(sk);
104026
104027 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
104028- atomic_inc(&sk->sk_drops);
104029+ atomic_inc_unchecked(&sk->sk_drops);
104030 kfree_skb(skb);
104031 return NET_RX_DROP;
104032 }
104033@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
104034
104035 if (inet->hdrincl) {
104036 if (skb_checksum_complete(skb)) {
104037- atomic_inc(&sk->sk_drops);
104038+ atomic_inc_unchecked(&sk->sk_drops);
104039 kfree_skb(skb);
104040 return NET_RX_DROP;
104041 }
104042@@ -609,7 +609,7 @@ out:
104043 return err;
104044 }
104045
104046-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
104047+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
104048 struct flowi6 *fl6, struct dst_entry **dstp,
104049 unsigned int flags)
104050 {
104051@@ -916,12 +916,15 @@ do_confirm:
104052 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
104053 char __user *optval, int optlen)
104054 {
104055+ struct icmp6_filter filter;
104056+
104057 switch (optname) {
104058 case ICMPV6_FILTER:
104059 if (optlen > sizeof(struct icmp6_filter))
104060 optlen = sizeof(struct icmp6_filter);
104061- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
104062+ if (copy_from_user(&filter, optval, optlen))
104063 return -EFAULT;
104064+ raw6_sk(sk)->filter = filter;
104065 return 0;
104066 default:
104067 return -ENOPROTOOPT;
104068@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
104069 char __user *optval, int __user *optlen)
104070 {
104071 int len;
104072+ struct icmp6_filter filter;
104073
104074 switch (optname) {
104075 case ICMPV6_FILTER:
104076@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
104077 len = sizeof(struct icmp6_filter);
104078 if (put_user(len, optlen))
104079 return -EFAULT;
104080- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
104081+ filter = raw6_sk(sk)->filter;
104082+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
104083 return -EFAULT;
104084 return 0;
104085 default:
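
In the raw.c hunks above, both the set and get paths for ICMPV6_FILTER now stage the filter in a stack-local struct: a faulting copy_from_user() can no longer leave raw6_sk(sk)->filter half-written, and the get path bounds len against the local copy before copy_to_user(). The idiom, sketched with a userspace stand-in for copy_from_user():

/* Editorial sketch: stage user data locally, commit only on full success. */
#include <errno.h>
#include <stddef.h>

struct filter { unsigned int data[8]; };
struct sock_state { struct filter filter; };

/* stand-in for copy_from_user(): nonzero on fault, may copy a prefix */
int copy_from_user_stub(void *dst, const void *src, size_t n);

int set_filter(struct sock_state *sk, const void *optval, size_t optlen)
{
	struct filter tmp;

	if (optlen > sizeof(tmp))
		optlen = sizeof(tmp);
	if (copy_from_user_stub(&tmp, optval, optlen))
		return -EFAULT;		/* sk->filter left untouched */
	sk->filter = tmp;		/* single commit after the full copy */
	return 0;
}
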
104086diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
104087index d7d70e6..bd5e9fc 100644
104088--- a/net/ipv6/reassembly.c
104089+++ b/net/ipv6/reassembly.c
104090@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
104091
104092 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104093 {
104094- struct ctl_table *table;
104095+ ctl_table_no_const *table = NULL;
104096 struct ctl_table_header *hdr;
104097
104098- table = ip6_frags_ns_ctl_table;
104099 if (!net_eq(net, &init_net)) {
104100- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104101+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104102 if (table == NULL)
104103 goto err_alloc;
104104
104105@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104106 /* Don't export sysctls to unprivileged users */
104107 if (net->user_ns != &init_user_ns)
104108 table[0].procname = NULL;
104109- }
104110+ hdr = register_net_sysctl(net, "net/ipv6", table);
104111+ } else
104112+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
104113
104114- hdr = register_net_sysctl(net, "net/ipv6", table);
104115 if (hdr == NULL)
104116 goto err_reg;
104117
104118@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104119 return 0;
104120
104121 err_reg:
104122- if (!net_eq(net, &init_net))
104123- kfree(table);
104124+ kfree(table);
104125 err_alloc:
104126 return -ENOMEM;
104127 }
104128diff --git a/net/ipv6/route.c b/net/ipv6/route.c
104129index 49596535..663a24a 100644
104130--- a/net/ipv6/route.c
104131+++ b/net/ipv6/route.c
104132@@ -2978,7 +2978,7 @@ struct ctl_table ipv6_route_table_template[] = {
104133
104134 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
104135 {
104136- struct ctl_table *table;
104137+ ctl_table_no_const *table;
104138
104139 table = kmemdup(ipv6_route_table_template,
104140 sizeof(ipv6_route_table_template),
104141diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
104142index cdbfe5a..e13eb31 100644
104143--- a/net/ipv6/sit.c
104144+++ b/net/ipv6/sit.c
104145@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
104146 static void ipip6_dev_free(struct net_device *dev);
104147 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
104148 __be32 *v4dst);
104149-static struct rtnl_link_ops sit_link_ops __read_mostly;
104150+static struct rtnl_link_ops sit_link_ops;
104151
104152 static int sit_net_id __read_mostly;
104153 struct sit_net {
104154@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
104155 unregister_netdevice_queue(dev, head);
104156 }
104157
104158-static struct rtnl_link_ops sit_link_ops __read_mostly = {
104159+static struct rtnl_link_ops sit_link_ops = {
104160 .kind = "sit",
104161 .maxtype = IFLA_IPTUN_MAX,
104162 .policy = ipip6_policy,
104163diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
104164index c5c10fa..2577d51 100644
104165--- a/net/ipv6/sysctl_net_ipv6.c
104166+++ b/net/ipv6/sysctl_net_ipv6.c
104167@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
104168
104169 static int __net_init ipv6_sysctl_net_init(struct net *net)
104170 {
104171- struct ctl_table *ipv6_table;
104172+ ctl_table_no_const *ipv6_table;
104173 struct ctl_table *ipv6_route_table;
104174 struct ctl_table *ipv6_icmp_table;
104175 int err;
104176diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
104177index 9c0b54e..5e7bd8f 100644
104178--- a/net/ipv6/tcp_ipv6.c
104179+++ b/net/ipv6/tcp_ipv6.c
104180@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104181 }
104182 }
104183
104184+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104185+extern int grsec_enable_blackhole;
104186+#endif
104187+
104188 static void tcp_v6_hash(struct sock *sk)
104189 {
104190 if (sk->sk_state != TCP_CLOSE) {
104191@@ -1343,6 +1347,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
104192 return 0;
104193
104194 reset:
104195+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104196+ if (!grsec_enable_blackhole)
104197+#endif
104198 tcp_v6_send_reset(sk, skb);
104199 discard:
104200 if (opt_skb)
104201@@ -1443,12 +1450,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
104202
104203 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
104204 inet6_iif(skb));
104205- if (!sk)
104206+ if (!sk) {
104207+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104208+ ret = 1;
104209+#endif
104210 goto no_tcp_socket;
104211+ }
104212
104213 process:
104214- if (sk->sk_state == TCP_TIME_WAIT)
104215+ if (sk->sk_state == TCP_TIME_WAIT) {
104216+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104217+ ret = 2;
104218+#endif
104219 goto do_time_wait;
104220+ }
104221
104222 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
104223 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
104224@@ -1499,6 +1514,10 @@ csum_error:
104225 bad_packet:
104226 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
104227 } else {
104228+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104229+ if (!grsec_enable_blackhole || (ret == 1 &&
104230+ (skb->dev->flags & IFF_LOOPBACK)))
104231+#endif
104232 tcp_v6_send_reset(NULL, skb);
104233 }
104234
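
The tcp_ipv6.c hunks wire the grsecurity TCP blackhole into the IPv6 receive path: when grsec_enable_blackhole is set, segments for nonexistent (ret == 1) or time-wait (ret == 2) sockets are dropped without a RST, except that the no-socket case still answers on loopback so local diagnostics keep working; the udp.c hunk that follows applies the same gate to ICMPv6 port-unreachable. The decision, restated as an editorial sketch rather than the kernel code:

/* Editorial sketch of the gating logic. */
static int should_send_reset(int blackhole_on, int no_socket, int on_loopback)
{
	if (!blackhole_on)
		return 1;		/* standard RFC 793 behaviour */
	if (no_socket && on_loopback)
		return 1;		/* keep localhost responsive */
	return 0;			/* scanners get silence */
}
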
104235diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
104236index 189dc4a..458bec0 100644
104237--- a/net/ipv6/udp.c
104238+++ b/net/ipv6/udp.c
104239@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
104240 udp_ipv6_hash_secret + net_hash_mix(net));
104241 }
104242
104243+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104244+extern int grsec_enable_blackhole;
104245+#endif
104246+
104247 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
104248 {
104249 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
104250@@ -448,7 +452,7 @@ try_again:
104251 if (unlikely(err)) {
104252 trace_kfree_skb(skb, udpv6_recvmsg);
104253 if (!peeked) {
104254- atomic_inc(&sk->sk_drops);
104255+ atomic_inc_unchecked(&sk->sk_drops);
104256 if (is_udp4)
104257 UDP_INC_STATS_USER(sock_net(sk),
104258 UDP_MIB_INERRORS,
104259@@ -714,7 +718,7 @@ csum_error:
104260 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104261 drop:
104262 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104263- atomic_inc(&sk->sk_drops);
104264+ atomic_inc_unchecked(&sk->sk_drops);
104265 kfree_skb(skb);
104266 return -1;
104267 }
104268@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104269 if (likely(skb1 == NULL))
104270 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104271 if (!skb1) {
104272- atomic_inc(&sk->sk_drops);
104273+ atomic_inc_unchecked(&sk->sk_drops);
104274 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104275 IS_UDPLITE(sk));
104276 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104277@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104278 goto csum_error;
104279
104280 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104281+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104282+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104283+#endif
104284 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
104285
104286 kfree_skb(skb);
104287diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
104288index 48bf5a0..691985a 100644
104289--- a/net/ipv6/xfrm6_policy.c
104290+++ b/net/ipv6/xfrm6_policy.c
104291@@ -223,11 +223,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104292 }
104293 }
104294
104295-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
104296+static int xfrm6_garbage_collect(struct dst_ops *ops)
104297 {
104298 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
104299
104300- xfrm6_policy_afinfo.garbage_collect(net);
104301+ xfrm_garbage_collect_deferred(net);
104302 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
104303 }
104304
104305@@ -340,19 +340,19 @@ static struct ctl_table xfrm6_policy_table[] = {
104306
104307 static int __net_init xfrm6_net_init(struct net *net)
104308 {
104309- struct ctl_table *table;
104310+ ctl_table_no_const *table = NULL;
104311 struct ctl_table_header *hdr;
104312
104313- table = xfrm6_policy_table;
104314 if (!net_eq(net, &init_net)) {
104315- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104316+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104317 if (!table)
104318 goto err_alloc;
104319
104320 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
104321- }
104322+ hdr = register_net_sysctl(net, "net/ipv6", table);
104323+ } else
104324+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
104325
104326- hdr = register_net_sysctl(net, "net/ipv6", table);
104327 if (!hdr)
104328 goto err_reg;
104329
104330@@ -360,8 +360,7 @@ static int __net_init xfrm6_net_init(struct net *net)
104331 return 0;
104332
104333 err_reg:
104334- if (!net_eq(net, &init_net))
104335- kfree(table);
104336+ kfree(table);
104337 err_alloc:
104338 return -ENOMEM;
104339 }
104340diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
104341index c1d247e..9e5949d 100644
104342--- a/net/ipx/ipx_proc.c
104343+++ b/net/ipx/ipx_proc.c
104344@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
104345 struct proc_dir_entry *p;
104346 int rc = -ENOMEM;
104347
104348- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
104349+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
104350
104351 if (!ipx_proc_dir)
104352 goto out;
104353diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
104354index 40695b9..c1f2cef 100644
104355--- a/net/irda/ircomm/ircomm_tty.c
104356+++ b/net/irda/ircomm/ircomm_tty.c
104357@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104358 add_wait_queue(&port->open_wait, &wait);
104359
104360 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
104361- __FILE__, __LINE__, tty->driver->name, port->count);
104362+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104363
104364 spin_lock_irqsave(&port->lock, flags);
104365- port->count--;
104366+ atomic_dec(&port->count);
104367 port->blocked_open++;
104368 spin_unlock_irqrestore(&port->lock, flags);
104369
104370@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104371 }
104372
104373 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
104374- __FILE__, __LINE__, tty->driver->name, port->count);
104375+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104376
104377 schedule();
104378 }
104379@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104380
104381 spin_lock_irqsave(&port->lock, flags);
104382 if (!tty_hung_up_p(filp))
104383- port->count++;
104384+ atomic_inc(&port->count);
104385 port->blocked_open--;
104386 spin_unlock_irqrestore(&port->lock, flags);
104387
104388 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
104389- __FILE__, __LINE__, tty->driver->name, port->count);
104390+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104391
104392 if (!retval)
104393 port->flags |= ASYNC_NORMAL_ACTIVE;
104394@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
104395
104396 /* ++ is not atomic, so this should be protected - Jean II */
104397 spin_lock_irqsave(&self->port.lock, flags);
104398- self->port.count++;
104399+ atomic_inc(&self->port.count);
104400 spin_unlock_irqrestore(&self->port.lock, flags);
104401 tty_port_tty_set(&self->port, tty);
104402
104403 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
104404- self->line, self->port.count);
104405+ self->line, atomic_read(&self->port.count));
104406
104407 /* Not really used by us, but lets do it anyway */
104408 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
104409@@ -959,7 +959,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
104410 tty_kref_put(port->tty);
104411 }
104412 port->tty = NULL;
104413- port->count = 0;
104414+ atomic_set(&port->count, 0);
104415 spin_unlock_irqrestore(&port->lock, flags);
104416
104417 wake_up_interruptible(&port->open_wait);
104418@@ -1306,7 +1306,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
104419 seq_putc(m, '\n');
104420
104421 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
104422- seq_printf(m, "Open count: %d\n", self->port.count);
104423+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
104424 seq_printf(m, "Max data size: %d\n", self->max_data_size);
104425 seq_printf(m, "Max header size: %d\n", self->max_header_size);
104426
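
The ircomm_tty hunks convert port->count from a plain int (incremented under the port lock but also read with no lock held) to an atomic_t, so every reader sees a coherent value regardless of lock coverage. A freestanding C11 rendering of the conversion, with an invented struct:

/* Editorial sketch: lockless-safe open counting with C11 atomics. */
#include <stdatomic.h>

struct port { atomic_int count; };

void port_opened(struct port *p)     { atomic_fetch_add(&p->count, 1); }
void port_closed(struct port *p)     { atomic_fetch_sub(&p->count, 1); }
int  port_open_count(struct port *p) { return atomic_load(&p->count); }
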
104427diff --git a/net/irda/irproc.c b/net/irda/irproc.c
104428index b9ac598..f88cc56 100644
104429--- a/net/irda/irproc.c
104430+++ b/net/irda/irproc.c
104431@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
104432 {
104433 int i;
104434
104435- proc_irda = proc_mkdir("irda", init_net.proc_net);
104436+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
104437 if (proc_irda == NULL)
104438 return;
104439
104440diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
104441index 2e9953b..ed06350 100644
104442--- a/net/iucv/af_iucv.c
104443+++ b/net/iucv/af_iucv.c
104444@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
104445 {
104446 char name[12];
104447
104448- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
104449+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104450 while (__iucv_get_sock_by_name(name)) {
104451 sprintf(name, "%08x",
104452- atomic_inc_return(&iucv_sk_list.autobind_name));
104453+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104454 }
104455 memcpy(iucv->src_name, name, 8);
104456 }
104457diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
104458index 2a6a1fd..6c112b0 100644
104459--- a/net/iucv/iucv.c
104460+++ b/net/iucv/iucv.c
104461@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
104462 return NOTIFY_OK;
104463 }
104464
104465-static struct notifier_block __refdata iucv_cpu_notifier = {
104466+static struct notifier_block iucv_cpu_notifier = {
104467 .notifier_call = iucv_cpu_notify,
104468 };
104469
104470diff --git a/net/key/af_key.c b/net/key/af_key.c
104471index f8ac939..1e189bf 100644
104472--- a/net/key/af_key.c
104473+++ b/net/key/af_key.c
104474@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
104475 static u32 get_acqseq(void)
104476 {
104477 u32 res;
104478- static atomic_t acqseq;
104479+ static atomic_unchecked_t acqseq;
104480
104481 do {
104482- res = atomic_inc_return(&acqseq);
104483+ res = atomic_inc_return_unchecked(&acqseq);
104484 } while (!res);
104485 return res;
104486 }
104487diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
104488index 781b3a2..73a7434 100644
104489--- a/net/l2tp/l2tp_eth.c
104490+++ b/net/l2tp/l2tp_eth.c
104491@@ -42,12 +42,12 @@ struct l2tp_eth {
104492 struct sock *tunnel_sock;
104493 struct l2tp_session *session;
104494 struct list_head list;
104495- atomic_long_t tx_bytes;
104496- atomic_long_t tx_packets;
104497- atomic_long_t tx_dropped;
104498- atomic_long_t rx_bytes;
104499- atomic_long_t rx_packets;
104500- atomic_long_t rx_errors;
104501+ atomic_long_unchecked_t tx_bytes;
104502+ atomic_long_unchecked_t tx_packets;
104503+ atomic_long_unchecked_t tx_dropped;
104504+ atomic_long_unchecked_t rx_bytes;
104505+ atomic_long_unchecked_t rx_packets;
104506+ atomic_long_unchecked_t rx_errors;
104507 };
104508
104509 /* via l2tp_session_priv() */
104510@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
104511 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
104512
104513 if (likely(ret == NET_XMIT_SUCCESS)) {
104514- atomic_long_add(len, &priv->tx_bytes);
104515- atomic_long_inc(&priv->tx_packets);
104516+ atomic_long_add_unchecked(len, &priv->tx_bytes);
104517+ atomic_long_inc_unchecked(&priv->tx_packets);
104518 } else {
104519- atomic_long_inc(&priv->tx_dropped);
104520+ atomic_long_inc_unchecked(&priv->tx_dropped);
104521 }
104522 return NETDEV_TX_OK;
104523 }
104524@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
104525 {
104526 struct l2tp_eth *priv = netdev_priv(dev);
104527
104528- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
104529- stats->tx_packets = atomic_long_read(&priv->tx_packets);
104530- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
104531- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
104532- stats->rx_packets = atomic_long_read(&priv->rx_packets);
104533- stats->rx_errors = atomic_long_read(&priv->rx_errors);
104534+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
104535+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
104536+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
104537+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
104538+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
104539+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
104540 return stats;
104541 }
104542
104543@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
104544 nf_reset(skb);
104545
104546 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
104547- atomic_long_inc(&priv->rx_packets);
104548- atomic_long_add(data_len, &priv->rx_bytes);
104549+ atomic_long_inc_unchecked(&priv->rx_packets);
104550+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
104551 } else {
104552- atomic_long_inc(&priv->rx_errors);
104553+ atomic_long_inc_unchecked(&priv->rx_errors);
104554 }
104555 return;
104556
104557 error:
104558- atomic_long_inc(&priv->rx_errors);
104559+ atomic_long_inc_unchecked(&priv->rx_errors);
104560 kfree_skb(skb);
104561 }
104562
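
These l2tp counters show the other side of PaX REFCOUNT: checked atomics trap on overflow to catch reference-count bugs, so statistics that may legitimately wrap (the byte and packet totals here, the sequence numbers in af_key.c and af_iucv.c above) move to the parallel *_unchecked types, which behave like stock kernel atomics. Conceptually, as an illustration rather than the kernel's implementation:

/* Editorial sketch: the _unchecked ops are plain wrapping arithmetic;
 * their checked counterparts would trap on overflow under REFCOUNT. */
typedef struct { long counter; } atomic_long_unchecked_t;

static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);	/* may wrap */
}

static inline long atomic_long_read_unchecked(const atomic_long_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}
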
104563diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
104564index 1a3c7e0..80f8b0c 100644
104565--- a/net/llc/llc_proc.c
104566+++ b/net/llc/llc_proc.c
104567@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
104568 int rc = -ENOMEM;
104569 struct proc_dir_entry *p;
104570
104571- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
104572+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
104573 if (!llc_proc_dir)
104574 goto out;
104575
104576diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
104577index e75d5c5..429fc95 100644
104578--- a/net/mac80211/cfg.c
104579+++ b/net/mac80211/cfg.c
104580@@ -543,7 +543,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
104581 ret = ieee80211_vif_use_channel(sdata, chandef,
104582 IEEE80211_CHANCTX_EXCLUSIVE);
104583 }
104584- } else if (local->open_count == local->monitors) {
104585+ } else if (local_read(&local->open_count) == local->monitors) {
104586 local->_oper_chandef = *chandef;
104587 ieee80211_hw_config(local, 0);
104588 }
104589@@ -3416,7 +3416,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
104590 else
104591 local->probe_req_reg--;
104592
104593- if (!local->open_count)
104594+ if (!local_read(&local->open_count))
104595 break;
104596
104597 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
104598@@ -3551,8 +3551,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
104599 if (chanctx_conf) {
104600 *chandef = sdata->vif.bss_conf.chandef;
104601 ret = 0;
104602- } else if (local->open_count > 0 &&
104603- local->open_count == local->monitors &&
104604+ } else if (local_read(&local->open_count) > 0 &&
104605+ local_read(&local->open_count) == local->monitors &&
104606 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
104607 if (local->use_chanctx)
104608 *chandef = local->monitor_chandef;
104609diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
104610index cc6e964..029a3a3 100644
104611--- a/net/mac80211/ieee80211_i.h
104612+++ b/net/mac80211/ieee80211_i.h
104613@@ -29,6 +29,7 @@
104614 #include <net/ieee80211_radiotap.h>
104615 #include <net/cfg80211.h>
104616 #include <net/mac80211.h>
104617+#include <asm/local.h>
104618 #include "key.h"
104619 #include "sta_info.h"
104620 #include "debug.h"
104621@@ -1114,7 +1115,7 @@ struct ieee80211_local {
104622 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
104623 spinlock_t queue_stop_reason_lock;
104624
104625- int open_count;
104626+ local_t open_count;
104627 int monitors, cooked_mntrs;
104628 /* number of interfaces with corresponding FIF_ flags */
104629 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
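
The header change above anchors all the mac80211 hunks that follow: open_count becomes a local_t (hence the new asm/local.h include) and every read or update goes through local_read()/local_inc()/local_dec(), closing torn-access races on the bare int. The shape of the conversion, sketched with an invented struct:

/* Editorial sketch using the kernel's local_t accessors. */
#include <asm/local.h>

struct dev_state {
	local_t open_count;	/* was: int open_count */
};

static void dev_opened(struct dev_state *s) { local_inc(&s->open_count); }
static void dev_closed(struct dev_state *s) { local_dec(&s->open_count); }
static long dev_opens(struct dev_state *s)  { return local_read(&s->open_count); }
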
104630diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
104631index 4173553..e3b5a3f 100644
104632--- a/net/mac80211/iface.c
104633+++ b/net/mac80211/iface.c
104634@@ -543,7 +543,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104635 break;
104636 }
104637
104638- if (local->open_count == 0) {
104639+ if (local_read(&local->open_count) == 0) {
104640 res = drv_start(local);
104641 if (res)
104642 goto err_del_bss;
104643@@ -590,7 +590,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104644 res = drv_add_interface(local, sdata);
104645 if (res)
104646 goto err_stop;
104647- } else if (local->monitors == 0 && local->open_count == 0) {
104648+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
104649 res = ieee80211_add_virtual_monitor(local);
104650 if (res)
104651 goto err_stop;
104652@@ -700,7 +700,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104653 atomic_inc(&local->iff_promiscs);
104654
104655 if (coming_up)
104656- local->open_count++;
104657+ local_inc(&local->open_count);
104658
104659 if (hw_reconf_flags)
104660 ieee80211_hw_config(local, hw_reconf_flags);
104661@@ -738,7 +738,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104662 err_del_interface:
104663 drv_remove_interface(local, sdata);
104664 err_stop:
104665- if (!local->open_count)
104666+ if (!local_read(&local->open_count))
104667 drv_stop(local);
104668 err_del_bss:
104669 sdata->bss = NULL;
104670@@ -906,7 +906,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104671 }
104672
104673 if (going_down)
104674- local->open_count--;
104675+ local_dec(&local->open_count);
104676
104677 switch (sdata->vif.type) {
104678 case NL80211_IFTYPE_AP_VLAN:
104679@@ -968,7 +968,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104680 }
104681 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
104682
104683- if (local->open_count == 0)
104684+ if (local_read(&local->open_count) == 0)
104685 ieee80211_clear_tx_pending(local);
104686
104687 /*
104688@@ -1011,7 +1011,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104689 if (cancel_scan)
104690 flush_delayed_work(&local->scan_work);
104691
104692- if (local->open_count == 0) {
104693+ if (local_read(&local->open_count) == 0) {
104694 ieee80211_stop_device(local);
104695
104696 /* no reconfiguring after stop! */
104697@@ -1022,7 +1022,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104698 ieee80211_configure_filter(local);
104699 ieee80211_hw_config(local, hw_reconf_flags);
104700
104701- if (local->monitors == local->open_count)
104702+ if (local->monitors == local_read(&local->open_count))
104703 ieee80211_add_virtual_monitor(local);
104704 }
104705
104706diff --git a/net/mac80211/main.c b/net/mac80211/main.c
104707index 6ab99da..f9502d4 100644
104708--- a/net/mac80211/main.c
104709+++ b/net/mac80211/main.c
104710@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
104711 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
104712 IEEE80211_CONF_CHANGE_POWER);
104713
104714- if (changed && local->open_count) {
104715+ if (changed && local_read(&local->open_count)) {
104716 ret = drv_config(local, changed);
104717 /*
104718 * Goal:
104719diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
104720index 4a95fe3..0bfd713 100644
104721--- a/net/mac80211/pm.c
104722+++ b/net/mac80211/pm.c
104723@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104724 struct ieee80211_sub_if_data *sdata;
104725 struct sta_info *sta;
104726
104727- if (!local->open_count)
104728+ if (!local_read(&local->open_count))
104729 goto suspend;
104730
104731 ieee80211_scan_cancel(local);
104732@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104733 cancel_work_sync(&local->dynamic_ps_enable_work);
104734 del_timer_sync(&local->dynamic_ps_timer);
104735
104736- local->wowlan = wowlan && local->open_count;
104737+ local->wowlan = wowlan && local_read(&local->open_count);
104738 if (local->wowlan) {
104739 int err = drv_suspend(local, wowlan);
104740 if (err < 0) {
104741@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104742 WARN_ON(!list_empty(&local->chanctx_list));
104743
104744 /* stop hardware - this must stop RX */
104745- if (local->open_count)
104746+ if (local_read(&local->open_count))
104747 ieee80211_stop_device(local);
104748
104749 suspend:
104750diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
104751index d53355b..21f583a 100644
104752--- a/net/mac80211/rate.c
104753+++ b/net/mac80211/rate.c
104754@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
104755
104756 ASSERT_RTNL();
104757
104758- if (local->open_count)
104759+ if (local_read(&local->open_count))
104760 return -EBUSY;
104761
104762 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
104763diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
104764index 058686a..097821b 100644
104765--- a/net/mac80211/tx.c
104766+++ b/net/mac80211/tx.c
104767@@ -566,6 +566,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
104768 if (tx->sdata->control_port_no_encrypt)
104769 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
104770 info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
104771+ info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
104772 }
104773
104774 return TX_CONTINUE;
104775diff --git a/net/mac80211/util.c b/net/mac80211/util.c
104776index 974ebe7..57bcd3c 100644
104777--- a/net/mac80211/util.c
104778+++ b/net/mac80211/util.c
104779@@ -1757,7 +1757,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104780 }
104781 #endif
104782 /* everything else happens only if HW was up & running */
104783- if (!local->open_count)
104784+ if (!local_read(&local->open_count))
104785 goto wake_up;
104786
104787 /*
104788@@ -1987,7 +1987,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104789 local->in_reconfig = false;
104790 barrier();
104791
104792- if (local->monitors == local->open_count && local->monitors > 0)
104793+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
104794 ieee80211_add_virtual_monitor(local);
104795
104796 /*
104797diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
104798index b02660f..c0f791c 100644
104799--- a/net/netfilter/Kconfig
104800+++ b/net/netfilter/Kconfig
104801@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
104802
104803 To compile it as a module, choose M here. If unsure, say N.
104804
104805+config NETFILTER_XT_MATCH_GRADM
104806+ tristate '"gradm" match support'
104807+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
104808+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
104809+ ---help---
104810+ The gradm match allows matching on whether grsecurity RBAC is
104811+ enabled. It is useful when iptables rules are applied early during
104812+ boot to block connections to the machine (except from a trusted
104813+ host) while the RBAC system is still disabled.
104814+
104815 config NETFILTER_XT_MATCH_HASHLIMIT
104816 tristate '"hashlimit" match support'
104817 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
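
As the help text says, the intended deployment is an early-boot ruleset that whitelists one administrative host until RBAC comes up. With the companion userspace extension that would be a rule along the lines of iptables -A INPUT ! -s <admin-host> -m gradm ! --enabled -j DROP; note the --enabled flag name is an assumption of this note, not something defined in this patch (only the kernel side, xt_gradm.c, is added at the end of this section).
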
104818diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
104819index 89f73a9..e4e5bd9 100644
104820--- a/net/netfilter/Makefile
104821+++ b/net/netfilter/Makefile
104822@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
104823 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
104824 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
104825 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
104826+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
104827 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
104828 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
104829 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
104830diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
104831index d259da3..6a32b2c 100644
104832--- a/net/netfilter/ipset/ip_set_core.c
104833+++ b/net/netfilter/ipset/ip_set_core.c
104834@@ -1952,7 +1952,7 @@ done:
104835 return ret;
104836 }
104837
104838-static struct nf_sockopt_ops so_set __read_mostly = {
104839+static struct nf_sockopt_ops so_set = {
104840 .pf = PF_INET,
104841 .get_optmin = SO_IP_SET,
104842 .get_optmax = SO_IP_SET + 1,
104843diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
104844index b0f7b62..0541842 100644
104845--- a/net/netfilter/ipvs/ip_vs_conn.c
104846+++ b/net/netfilter/ipvs/ip_vs_conn.c
104847@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
104848 /* Increase the refcnt counter of the dest */
104849 ip_vs_dest_hold(dest);
104850
104851- conn_flags = atomic_read(&dest->conn_flags);
104852+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
104853 if (cp->protocol != IPPROTO_UDP)
104854 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
104855 flags = cp->flags;
104856@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
104857
104858 cp->control = NULL;
104859 atomic_set(&cp->n_control, 0);
104860- atomic_set(&cp->in_pkts, 0);
104861+ atomic_set_unchecked(&cp->in_pkts, 0);
104862
104863 cp->packet_xmit = NULL;
104864 cp->app = NULL;
104865@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
104866
104867 /* Don't drop the entry if its number of incoming packets is not
104868 located in [0, 8] */
104869- i = atomic_read(&cp->in_pkts);
104870+ i = atomic_read_unchecked(&cp->in_pkts);
104871 if (i > 8 || i < 0) return 0;
104872
104873 if (!todrop_rate[i]) return 0;
104874diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
104875index b87ca32..76c7799 100644
104876--- a/net/netfilter/ipvs/ip_vs_core.c
104877+++ b/net/netfilter/ipvs/ip_vs_core.c
104878@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
104879 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
104880 /* do not touch skb anymore */
104881
104882- atomic_inc(&cp->in_pkts);
104883+ atomic_inc_unchecked(&cp->in_pkts);
104884 ip_vs_conn_put(cp);
104885 return ret;
104886 }
104887@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
104888 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
104889 pkts = sysctl_sync_threshold(ipvs);
104890 else
104891- pkts = atomic_add_return(1, &cp->in_pkts);
104892+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104893
104894 if (ipvs->sync_state & IP_VS_STATE_MASTER)
104895 ip_vs_sync_conn(net, cp, pkts);
104896diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
104897index b8295a4..17ff579 100644
104898--- a/net/netfilter/ipvs/ip_vs_ctl.c
104899+++ b/net/netfilter/ipvs/ip_vs_ctl.c
104900@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
104901 */
104902 ip_vs_rs_hash(ipvs, dest);
104903 }
104904- atomic_set(&dest->conn_flags, conn_flags);
104905+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
104906
104907 /* bind the service */
104908 old_svc = rcu_dereference_protected(dest->svc, 1);
104909@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
104910 * align with netns init in ip_vs_control_net_init()
104911 */
104912
104913-static struct ctl_table vs_vars[] = {
104914+static ctl_table_no_const vs_vars[] __read_only = {
104915 {
104916 .procname = "amemthresh",
104917 .maxlen = sizeof(int),
104918@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104919 " %-7s %-6d %-10d %-10d\n",
104920 &dest->addr.in6,
104921 ntohs(dest->port),
104922- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104923+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104924 atomic_read(&dest->weight),
104925 atomic_read(&dest->activeconns),
104926 atomic_read(&dest->inactconns));
104927@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104928 "%-7s %-6d %-10d %-10d\n",
104929 ntohl(dest->addr.ip),
104930 ntohs(dest->port),
104931- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104932+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104933 atomic_read(&dest->weight),
104934 atomic_read(&dest->activeconns),
104935 atomic_read(&dest->inactconns));
104936@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
104937
104938 entry.addr = dest->addr.ip;
104939 entry.port = dest->port;
104940- entry.conn_flags = atomic_read(&dest->conn_flags);
104941+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
104942 entry.weight = atomic_read(&dest->weight);
104943 entry.u_threshold = dest->u_threshold;
104944 entry.l_threshold = dest->l_threshold;
104945@@ -3039,7 +3039,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
104946 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
104947 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
104948 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
104949- (atomic_read(&dest->conn_flags) &
104950+ (atomic_read_unchecked(&dest->conn_flags) &
104951 IP_VS_CONN_F_FWD_MASK)) ||
104952 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
104953 atomic_read(&dest->weight)) ||
104954@@ -3672,7 +3672,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
104955 {
104956 int idx;
104957 struct netns_ipvs *ipvs = net_ipvs(net);
104958- struct ctl_table *tbl;
104959+ ctl_table_no_const *tbl;
104960
104961 atomic_set(&ipvs->dropentry, 0);
104962 spin_lock_init(&ipvs->dropentry_lock);
104963diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
104964index 127f140..553d652 100644
104965--- a/net/netfilter/ipvs/ip_vs_lblc.c
104966+++ b/net/netfilter/ipvs/ip_vs_lblc.c
104967@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
104968 * IPVS LBLC sysctl table
104969 */
104970 #ifdef CONFIG_SYSCTL
104971-static struct ctl_table vs_vars_table[] = {
104972+static ctl_table_no_const vs_vars_table[] __read_only = {
104973 {
104974 .procname = "lblc_expiration",
104975 .data = NULL,
104976diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
104977index 2229d2d..b32b785 100644
104978--- a/net/netfilter/ipvs/ip_vs_lblcr.c
104979+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
104980@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
104981 * IPVS LBLCR sysctl table
104982 */
104983
104984-static struct ctl_table vs_vars_table[] = {
104985+static ctl_table_no_const vs_vars_table[] __read_only = {
104986 {
104987 .procname = "lblcr_expiration",
104988 .data = NULL,
104989diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
104990index c47ffd7..d233a81 100644
104991--- a/net/netfilter/ipvs/ip_vs_sync.c
104992+++ b/net/netfilter/ipvs/ip_vs_sync.c
104993@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
104994 cp = cp->control;
104995 if (cp) {
104996 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104997- pkts = atomic_add_return(1, &cp->in_pkts);
104998+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104999 else
105000 pkts = sysctl_sync_threshold(ipvs);
105001 ip_vs_sync_conn(net, cp->control, pkts);
105002@@ -771,7 +771,7 @@ control:
105003 if (!cp)
105004 return;
105005 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
105006- pkts = atomic_add_return(1, &cp->in_pkts);
105007+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105008 else
105009 pkts = sysctl_sync_threshold(ipvs);
105010 goto sloop;
105011@@ -900,7 +900,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
105012
105013 if (opt)
105014 memcpy(&cp->in_seq, opt, sizeof(*opt));
105015- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
105016+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
105017 cp->state = state;
105018 cp->old_state = cp->state;
105019 /*
105020diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
105021index 3aedbda..6a63567 100644
105022--- a/net/netfilter/ipvs/ip_vs_xmit.c
105023+++ b/net/netfilter/ipvs/ip_vs_xmit.c
105024@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
105025 else
105026 rc = NF_ACCEPT;
105027 /* do not touch skb anymore */
105028- atomic_inc(&cp->in_pkts);
105029+ atomic_inc_unchecked(&cp->in_pkts);
105030 goto out;
105031 }
105032
105033@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
105034 else
105035 rc = NF_ACCEPT;
105036 /* do not touch skb anymore */
105037- atomic_inc(&cp->in_pkts);
105038+ atomic_inc_unchecked(&cp->in_pkts);
105039 goto out;
105040 }
105041
105042diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
105043index a4b5e2a..13b1de3 100644
105044--- a/net/netfilter/nf_conntrack_acct.c
105045+++ b/net/netfilter/nf_conntrack_acct.c
105046@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
105047 #ifdef CONFIG_SYSCTL
105048 static int nf_conntrack_acct_init_sysctl(struct net *net)
105049 {
105050- struct ctl_table *table;
105051+ ctl_table_no_const *table;
105052
105053 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
105054 GFP_KERNEL);
105055diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
105056index 46d1b26..b7f3b76 100644
105057--- a/net/netfilter/nf_conntrack_core.c
105058+++ b/net/netfilter/nf_conntrack_core.c
105059@@ -1734,6 +1734,10 @@ void nf_conntrack_init_end(void)
105060 #define DYING_NULLS_VAL ((1<<30)+1)
105061 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
105062
105063+#ifdef CONFIG_GRKERNSEC_HIDESYM
105064+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
105065+#endif
105066+
105067 int nf_conntrack_init_net(struct net *net)
105068 {
105069 int ret = -ENOMEM;
105070@@ -1759,7 +1763,11 @@ int nf_conntrack_init_net(struct net *net)
105071 if (!net->ct.stat)
105072 goto err_pcpu_lists;
105073
105074+#ifdef CONFIG_GRKERNSEC_HIDESYM
105075+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
105076+#else
105077 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
105078+#endif
105079 if (!net->ct.slabname)
105080 goto err_slabname;
105081
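
With GRKERNSEC_HIDESYM, the hunk above names each namespace's conntrack slab cache from a global counter instead of the net pointer, because a "%p"-formatted name would leak a kernel address through /proc/slabinfo. The same trick in isolation, as an editorial sketch reusing only the calls visible in the hunk:

/* Editorial sketch: derive object names from a sequence number, never
 * from a kernel pointer, so names cannot leak addresses. */
static atomic_unchecked_t cache_id = ATOMIC_INIT(0);

static char *make_cache_name(void)
{
	return kasprintf(GFP_KERNEL, "nf_conntrack_%08x",
			 atomic_inc_return_unchecked(&cache_id));
}
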
105082diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
105083index 4e78c57..ec8fb74 100644
105084--- a/net/netfilter/nf_conntrack_ecache.c
105085+++ b/net/netfilter/nf_conntrack_ecache.c
105086@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
105087 #ifdef CONFIG_SYSCTL
105088 static int nf_conntrack_event_init_sysctl(struct net *net)
105089 {
105090- struct ctl_table *table;
105091+ ctl_table_no_const *table;
105092
105093 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
105094 GFP_KERNEL);
105095diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
105096index bd9d315..989947e 100644
105097--- a/net/netfilter/nf_conntrack_helper.c
105098+++ b/net/netfilter/nf_conntrack_helper.c
105099@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
105100
105101 static int nf_conntrack_helper_init_sysctl(struct net *net)
105102 {
105103- struct ctl_table *table;
105104+ ctl_table_no_const *table;
105105
105106 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
105107 GFP_KERNEL);
105108diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
105109index b65d586..beec902 100644
105110--- a/net/netfilter/nf_conntrack_proto.c
105111+++ b/net/netfilter/nf_conntrack_proto.c
105112@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
105113
105114 static void
105115 nf_ct_unregister_sysctl(struct ctl_table_header **header,
105116- struct ctl_table **table,
105117+ ctl_table_no_const **table,
105118 unsigned int users)
105119 {
105120 if (users > 0)
105121diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
105122index fc823fa..8311af3 100644
105123--- a/net/netfilter/nf_conntrack_standalone.c
105124+++ b/net/netfilter/nf_conntrack_standalone.c
105125@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
105126
105127 static int nf_conntrack_standalone_init_sysctl(struct net *net)
105128 {
105129- struct ctl_table *table;
105130+ ctl_table_no_const *table;
105131
105132 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
105133 GFP_KERNEL);
105134diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
105135index 7a394df..bd91a8a 100644
105136--- a/net/netfilter/nf_conntrack_timestamp.c
105137+++ b/net/netfilter/nf_conntrack_timestamp.c
105138@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
105139 #ifdef CONFIG_SYSCTL
105140 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
105141 {
105142- struct ctl_table *table;
105143+ ctl_table_no_const *table;
105144
105145 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
105146 GFP_KERNEL);
105147diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
105148index 43c926c..a5731d8 100644
105149--- a/net/netfilter/nf_log.c
105150+++ b/net/netfilter/nf_log.c
105151@@ -362,7 +362,7 @@ static const struct file_operations nflog_file_ops = {
105152
105153 #ifdef CONFIG_SYSCTL
105154 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
105155-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
105156+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
105157
105158 static int nf_log_proc_dostring(struct ctl_table *table, int write,
105159 void __user *buffer, size_t *lenp, loff_t *ppos)
105160@@ -393,13 +393,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
105161 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
105162 mutex_unlock(&nf_log_mutex);
105163 } else {
105164+ ctl_table_no_const nf_log_table = *table;
105165+
105166 mutex_lock(&nf_log_mutex);
105167 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
105168 if (!logger)
105169- table->data = "NONE";
105170+ nf_log_table.data = "NONE";
105171 else
105172- table->data = logger->name;
105173- r = proc_dostring(table, write, buffer, lenp, ppos);
105174+ nf_log_table.data = logger->name;
105175+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
105176 mutex_unlock(&nf_log_mutex);
105177 }
105178
105179diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
105180index c68c1e5..8b5d670 100644
105181--- a/net/netfilter/nf_sockopt.c
105182+++ b/net/netfilter/nf_sockopt.c
105183@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
105184 }
105185 }
105186
105187- list_add(&reg->list, &nf_sockopts);
105188+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
105189 out:
105190 mutex_unlock(&nf_sockopt_mutex);
105191 return ret;
105192@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
105193 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
105194 {
105195 mutex_lock(&nf_sockopt_mutex);
105196- list_del(&reg->list);
105197+ pax_list_del((struct list_head *)&reg->list);
105198 mutex_unlock(&nf_sockopt_mutex);
105199 }
105200 EXPORT_SYMBOL(nf_unregister_sockopt);
105201diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
105202index 11d85b3..7fcc420 100644
105203--- a/net/netfilter/nfnetlink_log.c
105204+++ b/net/netfilter/nfnetlink_log.c
105205@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
105206 struct nfnl_log_net {
105207 spinlock_t instances_lock;
105208 struct hlist_head instance_table[INSTANCE_BUCKETS];
105209- atomic_t global_seq;
105210+ atomic_unchecked_t global_seq;
105211 };
105212
105213 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
105214@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
105215 /* global sequence number */
105216 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
105217 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
105218- htonl(atomic_inc_return(&log->global_seq))))
105219+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
105220 goto nla_put_failure;
105221
105222 if (data_len) {
105223diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
105224new file mode 100644
105225index 0000000..c566332
105226--- /dev/null
105227+++ b/net/netfilter/xt_gradm.c
105228@@ -0,0 +1,51 @@
105229+/*
105230+ * gradm match for netfilter
105231